How do I process the numpy array (img1) with cv2?

How do I process img1 (a numpy array) with cv2? I've tried converting it with CreateMat, but CreateMat is not supported by cv2.
import numpy as np
import dicom  # legacy pydicom import; RefDs and lstFilesDCM are assumed to be defined earlier in the script

# Load dimensions based on the number of rows, columns, and slices (along the Z axis)
ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), len(lstFilesDCM))

# Load spacing values (in mm)
ConstPixelSpacing = (float(RefDs.PixelSpacing[0]), float(RefDs.PixelSpacing[1]), float(RefDs.SliceThickness))

x = np.arange(0.0, (ConstPixelDims[0]+1)*ConstPixelSpacing[0], ConstPixelSpacing[0])
y = np.arange(0.0, (ConstPixelDims[1]+1)*ConstPixelSpacing[1], ConstPixelSpacing[1])
z = np.arange(0.0, (ConstPixelDims[2]+1)*ConstPixelSpacing[2], ConstPixelSpacing[2])

# The array is sized based on 'ConstPixelDims'
ArrayDicom = np.zeros(ConstPixelDims, dtype=RefDs.pixel_array.dtype)

# loop through all the DICOM files
for filenameDCM in lstFilesDCM:
    # read the file
    ds = dicom.read_file(filenameDCM)
    # store the raw image data
    ArrayDicom[:, :, lstFilesDCM.index(filenameDCM)] = ds.pixel_array

# numpy array for the 11th image in the directory
img1 = ArrayDicom[:, :, 10]
Is there any way I can convert img1?
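For what it's worth, cv2's Python API works on numpy arrays directly, so img1 usually needs no conversion at all, only a dtype that the particular cv2 function accepts. DICOM slices are often uint16 or float, so here is a minimal sketch (the normalisation and the chosen operations are illustrative, not from the question):
import cv2
import numpy as np

# scale the slice to 0..255 and convert to uint8 so that most cv2 routines accept it
img8 = cv2.normalize(img1, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
blurred = cv2.GaussianBlur(img8, (5, 5), 0)   # example cv2 operation
cv2.imwrite('slice_10.png', img8)             # example output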

Related

networkx position (biggest) nodes in the middle of a graph

I've been creating graphs with the networkx package and everything works fine. I would like to make the graphs even better by placing the bigger nodes in the middle of the graph, but the layout functions from networkx do not seem to do the job. The node size represents the degree (the more connected the node, the bigger it is).
Is there any way to program these graphs so that the bigger nodes are positioned in the middle? It does not have to be automated; I could also manually choose the nodes and give them the middle position, but I cannot find how to do that either.
If this is not possible with networkx or something else, is there any way to do it with Gephi or Cytoscape? I had trouble with Gephi in that it does not import the graph the same way I see it in my Jupyter notebook (the colors and the node and edge sizes do not import).
To summarize: I want to put bigger nodes in the middle of my graph, but I don't mind how I get it done (with networkx, matplotlib or whatever).
Unfortunately I cannot provide my actual graphs, but here is an example which could look like one of my graphs; it is a directed weighted graph.
import networkx as nx

G = nx.gnp_random_graph(15, 0.2, directed=True)
d = dict(G.degree(weight='weight'))
d = {k: v/10 for k, v in d.items()}
# edge weights (assumed here: the 'weight' edge attribute, defaulting to 1)
weights = [G[u][v].get('weight', 1) for u, v in G.edges()]
edge_size = [(float(i)/sum(weights))*100 for i in weights]
node_size = [(v*1000) for v in d.values()]
nx.draw(G, width=edge_size, node_size=node_size)
There are several options:
import networkx as nx
import matplotlib.pyplot as plt

G = nx.gnp_random_graph(15, 0.2, directed=True)
node_degree = dict(G.degree(weight='weight'))

# A) Precompute node positions, and then manually over-ride some node positions.
node_positions = nx.spring_layout(G)
node_positions[0] = (0.5, 0.5)  # by default, networkx plots on a canvas with the origin at (0, 0) and a width and height of 1; (0.5, 0.5) is hence the center
nx.draw(G, pos=node_positions, node_size=[100 * node_degree[node] for node in G])
plt.show()

# B) Use netgraph to draw the graph and then drag the nodes around with the mouse.
from netgraph import InteractiveGraph  # pip install netgraph
plot_instance = InteractiveGraph(G, node_size=node_degree)
plt.show()
# C) Modify the Fruchterman-Reingold algorithm to include a gravitational force that pulls nodes with a large "mass" towards the center.
# This is left as an exercise to the interested reader (i.e. very non-trivial).
Edit: option C is non-trivial but also very do-able.
Here is my stab at it.
#!/usr/bin/env python
# coding: utf-8
"""
FR layout but with an additional gravitational pull towards a gravitational center.
The pull is proportional to the mass of the node.
"""
import warnings

import numpy as np
import matplotlib.pyplot as plt
# pip install netgraph
from netgraph._main import BASE_SCALE
from netgraph._utils import (
    _get_unique_nodes,
    _edge_list_to_adjacency_matrix,
)
from netgraph._node_layout import (
    _is_within_bbox,
    _get_temperature_decay,
    _get_fr_repulsion,
    _get_fr_attraction,
    _rescale_to_frame,
    _handle_multiple_components,
    _reduce_node_overlap,
)
DEBUG = False
#_handle_multiple_components
def get_fruchterman_reingold_newton_layout(edges,
                                           edge_weights = None,
                                           k = None,
                                           g = 1.,
                                           scale = None,
                                           origin = None,
                                           gravitational_center = None,
                                           initial_temperature = 1.,
                                           total_iterations = 50,
                                           node_size = 0,
                                           node_mass = 1,
                                           node_positions = None,
                                           fixed_nodes = None,
                                           *args, **kwargs):
    """Modified Fruchterman-Reingold node layout.
Uses a modified Fruchterman-Reingold algorithm [Fruchterman1991]_ to compute node positions.
This algorithm simulates the graph as a physical system, in which nodes repel each other.
For connected nodes, this repulsion is counteracted by an attractive force exerted by the edges, which are simulated as springs.
Unlike the original algorithm, there is an additional attractive force pulling nodes towards a gravitational center, in proportion to their masses.
Parameters
----------
edges : list
The edges of the graph, with each edge being represented by a (source node ID, target node ID) tuple.
edge_weights : dict
Mapping of edges to edge weights.
k : float or None, default None
Expected mean edge length. If None, initialized to the sqrt(area / total nodes).
g : float or None, default 1.
Gravitational constant that sets the magnitude of the gravitational pull towards the center.
origin : tuple or None, default None
The (float x, float y) coordinates corresponding to the lower left hand corner of the bounding box specifying the extent of the canvas.
If None is given, the origin is placed at (0, 0).
scale : tuple or None, default None
The (float x, float y) dimensions representing the width and height of the bounding box specifying the extent of the canvas.
If None is given, the scale is set to (1, 1).
gravitational_center : tuple or None, default None
The (float x, float y) coordinates towards which nodes experience a gravitational pull.
If None, the gravitational center is placed at the center of the canvas defined by origin and scale.
total_iterations : int, default 50
Number of iterations.
initial_temperature: float, default 1.
Temperature controls the maximum node displacement on each iteration.
Temperature is decreased on each iteration to eventually force the algorithm into a particular solution.
The size of the initial temperature determines how quickly that happens.
Values should be much smaller than the values of `scale`.
node_size : scalar or dict, default 0.
Size (radius) of nodes.
Providing the correct node size minimises the overlap of nodes in the graph,
which can otherwise occur if there are many nodes, or if the nodes differ considerably in size.
node_mass : scalar or dict, default 1.
Mass of nodes.
Nodes with higher mass experience a larger gravitational pull towards the center.
node_positions : dict or None, default None
Mapping of nodes to their (initial) x,y positions. If None are given,
nodes are initially placed randomly within the bounding box defined by `origin` and `scale`.
If the graph has multiple components, explicit initial positions may result in a ValueError,
if the initial positions fall outside of the area allocated to that specific component.
fixed_nodes : list or None, default None
Nodes to keep fixed at their initial positions.
Returns
-------
node_positions : dict
Dictionary mapping each node ID to (float x, float y) tuple, the node position.
References
----------
.. [Fruchterman1991] Fruchterman, TMJ and Reingold, EM (1991) ‘Graph drawing by force‐directed placement’,
Software: Practice and Experience
"""
    # This is just a wrapper around `_fruchterman_reingold`, which implements (the loop body of) the algorithm proper.
    # This wrapper handles the initialization of variables to their defaults (if not explicitly provided),
    # and checks inputs for self-consistency.
    assert len(edges) > 0, "The list of edges has to be non-empty."

    if origin is None:
        if node_positions:
            minima = np.min(list(node_positions.values()), axis=0)
            origin = np.min(np.stack([minima, np.zeros_like(minima)], axis=0), axis=0)
        else:
            origin = np.zeros((2))
    else:
        # ensure that it is an array
        origin = np.array(origin)

    if scale is None:
        if node_positions:
            delta = np.array(list(node_positions.values())) - origin[np.newaxis, :]
            maxima = np.max(delta, axis=0)
            scale = np.max(np.stack([maxima, np.ones_like(maxima)], axis=0), axis=0)
        else:
            scale = np.ones((2))
    else:
        # ensure that it is an array
        scale = np.array(scale)

    assert len(origin) == len(scale), \
        "Arguments `origin` (d={}) and `scale` (d={}) need to have the same number of dimensions!".format(len(origin), len(scale))
    dimensionality = len(origin)

    if gravitational_center is None:
        gravitational_center = origin + 0.5 * scale
    else:
        # ensure that it is an array
        gravitational_center = np.array(gravitational_center)

    if fixed_nodes is None:
        fixed_nodes = []

    connected_nodes = _get_unique_nodes(edges)

    if node_positions is None:  # assign random starting positions to all nodes
        node_positions_as_array = np.random.rand(len(connected_nodes), dimensionality) * scale + origin
        unique_nodes = connected_nodes
    else:
        # 1) check input dimensionality
        dimensionality_node_positions = np.array(list(node_positions.values())).shape[1]
        assert dimensionality_node_positions == dimensionality, \
            "The dimensionality of values of `node_positions` (d={}) must match the dimensionality of `origin`/`scale` (d={})!".format(dimensionality_node_positions, dimensionality)

        is_valid = _is_within_bbox(list(node_positions.values()), origin=origin, scale=scale)
        if not np.all(is_valid):
            error_message = "Some given node positions are not within the data range specified by `origin` and `scale`!"
            error_message += "\n\tOrigin : {}, {}".format(*origin)
            error_message += "\n\tScale : {}, {}".format(*scale)
            error_message += "\nThe following nodes do not fall within this range:"
            for ii, (node, position) in enumerate(node_positions.items()):
                if not is_valid[ii]:
                    error_message += "\n\t{} : {}".format(node, position)
            error_message += "\nThis error can occur if the graph contains multiple components but some or all node positions are initialised explicitly (i.e. node_positions != None)."
            raise ValueError(error_message)

        # 2) handle discrepancies in nodes listed in node_positions and nodes extracted from edges
        if set(node_positions.keys()) == set(connected_nodes):
            # all starting positions are given;
            # no superfluous nodes in node_positions;
            # nothing left to do
            unique_nodes = connected_nodes
        else:
            # some node positions are provided, but not all
            for node in connected_nodes:
                if not (node in node_positions):
                    warnings.warn("Position of node {} not provided. Initializing to random position within frame.".format(node))
                    node_positions[node] = np.random.rand(2) * scale + origin

            unconnected_nodes = []
            for node in node_positions:
                if not (node in connected_nodes):
                    unconnected_nodes.append(node)
                    fixed_nodes.append(node)
                    # warnings.warn("Node {} appears to be unconnected. The current node position will be kept.".format(node))

            unique_nodes = connected_nodes + unconnected_nodes

        node_positions_as_array = np.array([node_positions[node] for node in unique_nodes])

    total_nodes = len(unique_nodes)

    if isinstance(node_size, (int, float)):
        node_size = node_size * np.ones((total_nodes))
    elif isinstance(node_size, dict):
        node_size = np.array([node_size[node] if node in node_size else 0. for node in unique_nodes])

    if isinstance(node_mass, (int, float)):
        node_mass = node_mass * np.ones((total_nodes))
    elif isinstance(node_mass, dict):
        node_mass = np.array([node_mass[node] if node in node_mass else 0. for node in unique_nodes])

    adjacency = _edge_list_to_adjacency_matrix(
        edges, edge_weights=edge_weights, unique_nodes=unique_nodes)

    # Forces in FR are symmetric.
    # Hence we need to ensure that the adjacency matrix is also symmetric.
    adjacency = adjacency + adjacency.transpose()

    if fixed_nodes:
        is_mobile = np.array([False if node in fixed_nodes else True for node in unique_nodes], dtype=bool)

        mobile_positions = node_positions_as_array[is_mobile]
        fixed_positions = node_positions_as_array[~is_mobile]

        mobile_node_sizes = node_size[is_mobile]
        fixed_node_sizes = node_size[~is_mobile]

        mobile_node_masses = node_mass[is_mobile]
        fixed_node_masses = node_mass[~is_mobile]

        # reorder adjacency
        total_mobile = np.sum(is_mobile)
        reordered = np.zeros((adjacency.shape[0], total_mobile))
        reordered[:total_mobile, :total_mobile] = adjacency[is_mobile][:, is_mobile]
        reordered[total_mobile:, :total_mobile] = adjacency[~is_mobile][:, is_mobile]
        adjacency = reordered
    else:
        is_mobile = np.ones((total_nodes), dtype=bool)

        mobile_positions = node_positions_as_array
        fixed_positions = np.zeros((0, 2))

        mobile_node_sizes = node_size
        fixed_node_sizes = np.array([])

        mobile_node_masses = node_mass
        fixed_node_masses = np.array([])

    if k is None:
        area = np.product(scale)
        k = np.sqrt(area / float(total_nodes))

    temperatures = _get_temperature_decay(initial_temperature, total_iterations)

    # --------------------------------------------------------------------------------
    # main loop

    for ii, temperature in enumerate(temperatures):
        candidate_positions = _fruchterman_reingold_newton(mobile_positions, fixed_positions,
                                                           mobile_node_sizes, fixed_node_sizes,
                                                           adjacency, temperature, k,
                                                           mobile_node_masses, fixed_node_masses,
                                                           gravitational_center, g)
        is_valid = _is_within_bbox(candidate_positions, origin=origin, scale=scale)
        mobile_positions[is_valid] = candidate_positions[is_valid]

    # --------------------------------------------------------------------------------
    # format output

    node_positions_as_array[is_mobile] = mobile_positions

    if np.all(is_mobile):
        node_positions_as_array = _rescale_to_frame(node_positions_as_array, origin, scale)

    node_positions = dict(zip(unique_nodes, node_positions_as_array))

    return node_positions
def _fruchterman_reingold_newton(mobile_positions, fixed_positions,
                                 mobile_node_radii, fixed_node_radii,
                                 adjacency, temperature, k,
                                 mobile_node_masses, fixed_node_masses,
                                 gravitational_center, g):
    """Inner loop of modified Fruchterman-Reingold layout algorithm."""

    combined_positions = np.concatenate([mobile_positions, fixed_positions], axis=0)
    combined_node_radii = np.concatenate([mobile_node_radii, fixed_node_radii])

    delta = mobile_positions[np.newaxis, :, :] - combined_positions[:, np.newaxis, :]
    distance = np.linalg.norm(delta, axis=-1)

    # alternatively: (hack adapted from igraph)
    if np.sum(distance==0) - np.trace(distance==0) > 0:  # i.e. if off-diagonal entries in distance are zero
        warnings.warn("Some nodes have the same position; repulsion between the nodes is undefined.")
        rand_delta = np.random.rand(*delta.shape) * 1e-9
        is_zero = distance <= 0
        delta[is_zero] = rand_delta[is_zero]
        distance = np.linalg.norm(delta, axis=-1)

    # subtract node radii from distances to prevent nodes from overlapping
    distance -= mobile_node_radii[np.newaxis, :] + combined_node_radii[:, np.newaxis]

    # prevent distances from becoming less than zero due to overlap of nodes
    distance[distance <= 0.] = 1e-6  # 1e-13 is numerical accuracy, and we will be taking the square shortly

    with np.errstate(divide='ignore', invalid='ignore'):
        direction = delta / distance[..., None]  # i.e. the unit vector

    # calculate forces
    repulsion = _get_fr_repulsion(distance, direction, k)
    attraction = _get_fr_attraction(distance, direction, adjacency, k)
    gravity = _get_gravitational_pull(mobile_positions, mobile_node_masses, gravitational_center, g)

    if DEBUG:
        r = np.median(np.linalg.norm(repulsion, axis=-1))
        a = np.median(np.linalg.norm(attraction, axis=-1))
        g = np.median(np.linalg.norm(gravity, axis=-1))
        print(r, a, g)

    displacement = attraction + repulsion + gravity

    # limit maximum displacement using temperature
    displacement_length = np.linalg.norm(displacement, axis=-1)
    displacement = displacement / displacement_length[:, None] * np.clip(displacement_length, None, temperature)[:, None]

    mobile_positions = mobile_positions + displacement

    return mobile_positions


def _get_gravitational_pull(mobile_positions, mobile_node_masses, gravitational_center, g):
    delta = gravitational_center[np.newaxis, :] - mobile_positions
    direction = delta / np.linalg.norm(delta, axis=-1)[:, np.newaxis]
    magnitude = mobile_node_masses - np.mean(mobile_node_masses)
    return g * magnitude[:, np.newaxis] * direction
if __name__ == '__main__':

    import networkx as nx
    from netgraph import Graph

    G = nx.gnp_random_graph(15, 0.2, directed=True)
    node_degree = dict(G.degree(weight='weight'))

    node_positions = get_fruchterman_reingold_newton_layout(
        list(G.edges()),
        node_size={node: BASE_SCALE * degree for node, degree in node_degree.items()},
        node_mass=node_degree, g=2
    )

    Graph(G, node_layout=node_positions, node_size=node_degree)
    plt.show()

Combining multiple values from database into one image

I'm trying to take 5 consecutive pixels from each image of a database, and position them consecutively to create a new image of 250x250 px. All images in the database are 250x250 px.
The Numpy array I'm getting has only 250 items in it, although the database has about 13,000 photos in it. Can someone help me spot the problem?
Current output for 'len(new_img_pxl)' = 250
Illustration
#edit:
from imutils import paths
import cv2
import numpy as np

# access database
database_path = list(paths.list_images('database'))

# grey scale database
img_gray = []

x = -5
y = 0
r = 0
new_img_pxl = []

# open as grayscale, resize
for img_path in database_path:
    img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    img_resize = cv2.resize(img, (250, 250))
    img_gray.append(img_resize)

# take five consecutive pixel from each image
for item in img_gray:
    x += 5
    y += 5
    five_pix = item[[r][x:y]]
    for pix in five_pix:
        new_img_pxl.append(pix)
    if y == 250:
        r += 1
        x = -5
        y = 0

# convert to array
new_img_pxl_array = np.array(new_img_pxl)
reshape_new_img = new_img_pxl_array.reshape(25, 10)

# Convert the pixels into an array using numpy
array = np.array(reshape_new_img, dtype=np.uint8)
new_img_output = cv2.imwrite('new_output_save/001.png', reshape_new_img)
Your bug is in the second loop:
for item in img_gray:
For every image (i) in the list img_gray you do:
for a in item:
For each row (j) in the image (i), you extract 5 pixels and append them to new_img_pxl.
The first bug is that you don't take just 5 pixels from each image; you take 5 pixels from each row of each image.
Your second bug is that after extracting 250 pixels, the values of the variables x and y are higher than 250 (the length of a row). As a result, when you try to access the pixels [250:255] and so on, you get 'None'.
If I understand your intentions, then the way you should have implemented this is as follows:
r = 0
x = -5   # keep the same x/y initialisation as in the question
y = 0

# As Mark Setchell suggested, you might want to change iterating
# over a list of images to iterating over the list of paths
# for img_path in database_path:
for item in img_gray:
    # As Mark Setchell suggested, you might want to load and
    # process your image here, overwriting the past image and
    # having the memory released
    x += 5
    y += 5
    # when you finish a row, jump to the next
    if x == 250:
        x = 0
        y = 5
        r += 1
    # not sure what you want to do when you get to the end of the image.
    # roll back to the start?
    if r == 249 and x == 250:
        r = 0
        x = 0
        y = 5
    five_pix = item[r, x:y]
    for pix in five_pix:
        new_img_pxl.append(pix)
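If the intention is that image k fills the 5-pixel slot k of the output, with the slots laid out row by row across the 250x250 result, here is an alternative sketch that avoids the manual x/y/r bookkeeping. It reuses database_path, cv2 and np from the question; the other names are illustrative:
new_img = np.zeros((250, 250), dtype=np.uint8)
n_slots = (250 * 250) // 5               # 12,500 slots, one per image
for k, img_path in enumerate(database_path[:n_slots]):
    img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img, (250, 250))
    r, c = divmod(k * 5, 250)            # row and starting column of slot k
    new_img[r, c:c + 5] = img[r, c:c + 5]
cv2.imwrite('new_output_save/001.png', new_img)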

How to add a colorbar to a thematic map plot in Julia?

I would like to add a (vertical) gradient colorbar to my thematic map plot in Julia, with values taken from the variable values. The map below is a population density choropleth.
Any suggestions on how to add a gradient legend bar?
My code (including downloading and unzipping the open data shapefiles):
using DataFrames
using Plots
using Shapefile
using ZipFile

# make directory downloads
downloads = joinpath(pwd(), "downloads")
if ~ispath(downloads)
    mkpath(downloads)
end

# make directory shapefiles
shapefiles = joinpath(pwd(), "shapefiles")
if ~ispath(shapefiles)
    mkpath(shapefiles)
end

# download shapefiles
url = "https://www.cbs.nl/-/media/cbs/dossiers/nederland-regionaal/wijk-en-buurtstatistieken/wijkbuurtkaart_2020_v1.zip"
name_zipfile = split(url, "/")[end]
path_zipfile = joinpath(pwd(), "downloads", name_zipfile)
if ~isfile(path_zipfile)
    download(url, path_zipfile)
end

# extract shapefiles
path_shapefile = joinpath(pwd(), "shapefiles", "gemeente_2020_v1.shp")
if ~isfile(path_shapefile)
    r = ZipFile.Reader(path_zipfile)
    for file in r.files
        open(joinpath(pwd(), "shapefiles", file.name), "w") do io
            write(io, read(file))
        end
    end
end

# read shapefile
table = Shapefile.Table(path_shapefile)
df = table |> DataFrame

# filter for land (i.e. not water)
row_filter = df.H2O .== "NEE"

# filter data and shapes
municipality_data = df[row_filter, :]
municipality_shape = Shapefile.shapes(table)[row_filter]

function normalize(array)
    """
    Normalize array to values between 0 and 1
    """
    return [(x - minimum(array))/(maximum(array) - minimum(array)) for x in array]
end

# select variable to plot
var = "BEV_DICHTH" # population density

# values to plot
values = municipality_data[:, var]
normalized_values = normalize(values)

# colors
colormap = :heat
colors = Array([cgrad(colormap)[value] for value in normalized_values])

# plot thematic map
p = plot(size=(500, 600), axis=false, ticks=false)
for i = 1:nrow(municipality_data)
    plot!(municipality_shape[i], color=colors[i])
end
p
Colorbar example:
x = y = 0:10
plot(x, y, (x,y)->x*y, st=:contourf, fill=(true, cgrad(:heat)))

how to make a memory efficient multiple dimension groupby/stack using xarray?

I have a large time series of np.float64 with a 5-min frequency (size is ~2,500,000, i.e. ~24 years).
I'm using Xarray to represent it in memory, and the time dimension is named 'time'.
I want to group by 'time.hour' and then 'time.dayofyear' (or vice versa) and remove both their means from the time series.
In order to do that efficiently, I need to reorder the time series into a new xr.DataArray with the dimensions ['hour', 'dayofyear', 'rest'].
I wrote a function that plays with the GroupBy objects of Xarray and manages to do just that, although it takes a lot of memory to do it...
I have a machine with 32GB RAM and I still get the MemoryError from numpy.
I know the code works because I used it on an hourly re-sampled version of my original time series. So here's the code:
def time_series_stack(time_da, time_dim='time', grp1='hour', grp2='dayofyear'):
    """Takes a time-series xr.DataArray object and reshapes it using
    grp1 and grp2. Output is an xr.Dataset that includes the reshaped DataArray,
    its datetime series and the grps."""
    import xarray as xr
    import numpy as np
    import pandas as pd

    # try to infer the freq and put it into attrs for later reconstruction:
    freq = pd.infer_freq(time_da[time_dim].values)
    name = time_da.name
    time_da.attrs['freq'] = freq
    attrs = time_da.attrs

    # drop all NaNs:
    time_da = time_da.dropna(time_dim)

    # group grp1 and concat:
    grp_obj1 = time_da.groupby(time_dim + '.' + grp1)
    s_list = []
    for grp_name, grp_inds in grp_obj1.groups.items():
        da = time_da.isel({time_dim: grp_inds})
        s_list.append(da)
    grps1 = [x for x in grp_obj1.groups.keys()]
    stacked_da = xr.concat(s_list, dim=grp1)
    stacked_da[grp1] = grps1

    # group over the concatenated da and concat again:
    grp_obj2 = stacked_da.groupby(time_dim + '.' + grp2)
    s_list = []
    for grp_name, grp_inds in grp_obj2.groups.items():
        da = stacked_da.isel({time_dim: grp_inds})
        s_list.append(da)
    grps2 = [x for x in grp_obj2.groups.keys()]
    stacked_da = xr.concat(s_list, dim=grp2)
    stacked_da[grp2] = grps2

    # numpy part:
    # first, loop over both dims and drop NaNs, append values and datetimes:
    vals = []
    dts = []
    for i, grp1_val in enumerate(stacked_da[grp1]):
        da = stacked_da.sel({grp1: grp1_val})
        for j, grp2_val in enumerate(da[grp2]):
            val = da.sel({grp2: grp2_val}).dropna(time_dim)
            vals.append(val.values)
            dts.append(val[time_dim].values)

    # second, we get the max of the vals after the second groupby:
    max_size = max([len(x) for x in vals])

    # we fill NaNs and NaT for the remainder of them:
    concat_sizes = [max_size - len(x) for x in vals]
    concat_arrys = [np.empty((x)) * np.nan for x in concat_sizes]
    concat_vals = [np.concatenate(x) for x in list(zip(vals, concat_arrys))]

    # 1970-01-01 is the NaT for this time-series:
    concat_arrys = [np.zeros((x), dtype='datetime64[ns]') for x in concat_sizes]
    concat_dts = [np.concatenate(x) for x in list(zip(dts, concat_arrys))]

    concat_vals = np.array(concat_vals)
    concat_dts = np.array(concat_dts)

    # finally, we reshape them:
    concat_vals = concat_vals.reshape((stacked_da[grp1].shape[0],
                                       stacked_da[grp2].shape[0],
                                       max_size))
    concat_dts = concat_dts.reshape((stacked_da[grp1].shape[0],
                                     stacked_da[grp2].shape[0],
                                     max_size))

    # create a Dataset and DataArrays for them:
    sda = xr.Dataset()
    sda.attrs = attrs
    sda[name] = xr.DataArray(concat_vals, dims=[grp1, grp2, 'rest'])
    sda[time_dim] = xr.DataArray(concat_dts, dims=[grp1, grp2, 'rest'])
    sda[grp1] = grps1
    sda[grp2] = grps2
    sda['rest'] = range(max_size)
    return sda
So for the 2,500,000-item time series, numpy throws the MemoryError, so I'm guessing this is my memory bottleneck. What can I do to solve this?
Would Dask help me? And if so, how can I implement it?
Like you, I ran it without issue when inputting a small time series (10,000 long). However, when inputting a 100,000-long time series xr.DataArray, the grp_obj2 for loop ran away and used all the memory of the system.
This is what I used to generate the time series xr.DataArray:
n = 10**5
times = np.datetime64('2000-01-01') + np.arange(n) * np.timedelta64(5,'m')
data = np.random.randn(n)
time_da = xr.DataArray(data, name='rand_data', dims=('time'), coords={'time': times})
# time_da.to_netcdf('rand_time_series.nc')
As you point out, Dask would be a way to solve it but I can't see a clear path at the moment...
Typically, the way to approach this kind of problem with Dask would be to:
Make the input a dataset from a file (like NetCDF). This will not load the file in memory but allow Dask to pull data from disk one chunk at a time.
Define all calculations with dask.delayed or dask.futures methods for the entire body of code, up until writing the output. This is what allows Dask to read and then write one small chunk of data at a time.
Calculate one chunk of work and immediately write the output to a new dataset file. Effectively you end up streaming one chunk of input to one chunk of output at a time (but also threaded/parallelized). A generic sketch of this workflow follows.
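A minimal sketch of that chunked, file-to-file workflow (the file name matches the commented-out to_netcdf call in the snippet above; the chunk size and the example computation are illustrative, not from the original code):
import xarray as xr

# lazy, dask-backed dataset; nothing is loaded into memory yet
ds = xr.open_dataset('rand_time_series.nc', chunks={'time': 100_000})
da = ds['rand_data']
# define the computation lazily, e.g. removing the hour-of-day mean
anomaly = da.groupby('time.hour') - da.groupby('time.hour').mean('time')
# writing triggers the computation and streams chunk by chunk to disk
anomaly.to_dataset(name='rand_data_anomaly').to_netcdf('rand_anomaly.nc')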
I tried importing Dask and breaking the input time_da xr.DataArray into chunks for Dask to work on but it didn't help. From what I can tell, the line stacked_da = xr.concat(s_list, dim=grp1) forces Dask to make a full copy of stacked_da in memory and much more...
One workaround to this is to write stacked_da to disk then immediately read it again:
##For group1
xr.concat(s_list, dim=grp1).to_netcdf('stacked_da1.nc')
stacked_da = xr.load_dataset('stacked_da1.nc')
stacked_da[grp1] = grps1
##For group2
xr.concat(s_list, dim=grp2).to_netcdf('stacked_da2.nc')
stacked_da = xr.load_dataset('stacked_da2.nc')
stacked_da[grp2] = grps2
However, the file size for stacked_da1.nc is 19MB and stacked_da2.nc gets huge at 6.5GB. This is for time_da with 100,000 elements... so there's clearly something amiss...
Originally, it sounded like you wanted to subtract the mean of the groups from the time-series data. It looks like the Xarray docs have an example of grouped arithmetic for exactly that: http://xarray.pydata.org/en/stable/groupby.html#grouped-arithmetic
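A minimal sketch of that grouped arithmetic (removing the hourly mean, then the day-of-year mean of the residual, with time_da as defined above):
anomaly = time_da.groupby('time.hour') - time_da.groupby('time.hour').mean('time')
anomaly = anomaly.groupby('time.dayofyear') - anomaly.groupby('time.dayofyear').mean('time')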
The key for the reshaping is to group once, loop over the groups, then group again within each of the groups and append the result to a list.
Next I concat and use pd.MultiIndex.from_product for the groups.
No memory problems and no Dask needed, and it only takes a few seconds to run.
Here's the code, enjoy:
def time_series_stack(time_da, time_dim='time', grp1='hour', grp2='month',
                      plot=True):
    """Takes a time-series xr.DataArray object and reshapes it using
    grp1 and grp2. Output is an xr.Dataset that includes the reshaped DataArray,
    its datetime series and the grps. Plots the mean also."""
    import xarray as xr
    import pandas as pd

    # try to infer the freq and put it into attrs for later reconstruction:
    freq = pd.infer_freq(time_da[time_dim].values)
    name = time_da.name
    time_da.attrs['freq'] = freq
    attrs = time_da.attrs

    # drop all NaNs:
    time_da = time_da.dropna(time_dim)

    # first grouping:
    grp_obj1 = time_da.groupby(time_dim + '.' + grp1)
    da_list = []
    t_list = []
    for grp1_name, grp1_inds in grp_obj1.groups.items():
        da = time_da.isel({time_dim: grp1_inds})
        # second grouping:
        grp_obj2 = da.groupby(time_dim + '.' + grp2)
        for grp2_name, grp2_inds in grp_obj2.groups.items():
            da2 = da.isel({time_dim: grp2_inds})
            # extract datetimes and rewrite time coord to 'rest':
            times = da2[time_dim]
            times = times.rename({time_dim: 'rest'})
            times.coords['rest'] = range(len(times))
            t_list.append(times)
            da2 = da2.rename({time_dim: 'rest'})
            da2.coords['rest'] = range(len(da2))
            da_list.append(da2)

    # get group keys:
    grps1 = [x for x in grp_obj1.groups.keys()]
    grps2 = [x for x in grp_obj2.groups.keys()]

    # concat and convert to dataset:
    stacked_ds = xr.concat(da_list, dim='all').to_dataset(name=name)
    stacked_ds[time_dim] = xr.concat(t_list, 'all')

    # create a multiindex for the groups:
    mindex = pd.MultiIndex.from_product([grps1, grps2], names=[grp1, grp2])
    stacked_ds.coords['all'] = mindex

    # unstack:
    ds = stacked_ds.unstack('all')
    ds.attrs = attrs
    return ds
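For reference, a quick way to exercise this on the synthetic series from the earlier answer (assuming the function above is in scope; names mirror that snippet):
import numpy as np
import xarray as xr

n = 10**5
times = np.datetime64('2000-01-01') + np.arange(n) * np.timedelta64(5, 'm')
time_da = xr.DataArray(np.random.randn(n), name='rand_data',
                       dims=('time',), coords={'time': times})
ds = time_series_stack(time_da, grp1='hour', grp2='month')
print(ds)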

Batching with TensorFlow contrib not working as expected

I am using the following code for batching using the Tensorflow contrib library.
import tensorflow as tf
# COLUMNS, CATEGORICAL_INT_COLUMNS, CONTINUOUS_COLUMNS, CATEGORICAL_COLUMNS
# and LABEL_COLUMN are assumed to be defined elsewhere.


def input_fn_batch(batch_size, train_data):
    """Input builder function."""
    default = [tf.constant([''], dtype=tf.string)] * len(COLUMNS)
    base_data_values = tf.contrib.learn.read_batch_examples([train_data],
                                                            batch_size=batch_size,
                                                            reader=tf.TextLineReader,
                                                            num_epochs=1,
                                                            parse_fn=lambda x: tf.decode_csv(x, record_defaults=default))
    df_train = {}
    for i, column in enumerate(COLUMNS):
        df_train[column] = base_data_values[:, i]
    for column in CATEGORICAL_INT_COLUMNS:
        df_train[column] = tf.string_to_number(df_train[column], out_type=tf.int32)
    # Creates a dictionary mapping from each continuous feature column name (k) to
    # the values of that column stored in a constant Tensor.
    continuous_cols = {k: tf.string_to_number(df_train[k])
                       for k in CONTINUOUS_COLUMNS}
    # Creates a dictionary mapping from each categorical feature column name (k)
    # to the values of that column stored in a tf.SparseTensor.
    categorical_cols = {k: dense_to_sparse(df_train[k])
                        for k in CATEGORICAL_COLUMNS}
    # Merges the two dictionaries into one.
    feature_cols = dict(continuous_cols)
    feature_cols.update(categorical_cols)
    # Converts the label column into a constant Tensor.
    label = tf.string_to_number(df_train[LABEL_COLUMN], out_type=tf.int32)
    # Returns the feature columns and the label.
    return feature_cols, label


def dense_to_sparse(dense_tensor):
    indices = tf.to_int64(tf.transpose([tf.range(tf.shape(dense_tensor)[0]),
                                        tf.zeros_like(dense_tensor, dtype=tf.int32)]))
    values = dense_tensor
    shape = tf.to_int64([tf.shape(dense_tensor)[0], tf.constant(1)])
    return tf.SparseTensor(
        indices=indices,
        values=values,
        dense_shape=shape
    )
I call the fit function as follows:
estimator.fit(input_fn=lambda: input_fn_batch(1000, train_data), steps=200)
For some reason, it executes only for one step. It's as if it is ignoring the steps parameter.