Convert a multiway pandas.crosstab to an xarray - pandas

I want to create a multiway contingency table from my pandas dataframe and store it in an xarray. It seems to me it ought to be straightforward enough using pandas.crosstab followed by DataFrame.to_xarray(), but I'm getting "TypeError: Cannot interpret 'interval[int64]' as a data type" in pandas v1.1.5. (v1.0.1 gives "ValueError: all arrays must be same length").
In [1]: import numpy as np
...: import pandas as pd
...: pd.__version__
Out[1]: '1.1.5'
In [2]: import xarray as xr
...: xr.__version__
Out[2]: '0.17.0'
In [3]: n = 100
...: np.random.seed(42)
...: x = pd.cut(np.random.uniform(low=0, high=3, size=n), range(5))
...: x
Out[3]:
[(1, 2], (2, 3], (2, 3], (1, 2], (0, 1], ..., (1, 2], (1, 2], (1, 2], (0, 1], (0, 1]]
Length: 100
Categories (4, interval[int64]): [(0, 1] < (1, 2] < (2, 3] < (3, 4]]
In [4]: x.value_counts().sort_index()
Out[4]:
(0, 1] 41
(1, 2] 28
(2, 3] 31
(3, 4] 0
dtype: int64
Note I need my table to include empty categories such as (3, 4].
In [6]: idx=pd.date_range('2001-01-01', periods=n, freq='8H')
...: df = pd.DataFrame({'x': x}, index=idx)
...: df['xlag'] = df.x.shift(1, 'D')
...: df['h'] = df.index.hour
...: xtab = pd.crosstab([df.h, df.xlag], df.x, dropna=False, normalize='index')
...: xtab
Out[6]:
x            (0, 1]    (1, 2]    (2, 3]  (3, 4]
h  xlag
0  (0, 1]  0.000000  0.700000  0.300000     0.0
   (1, 2]  0.470588  0.411765  0.117647     0.0
   (2, 3]  0.500000  0.333333  0.166667     0.0
   (3, 4]  0.000000  0.000000  0.000000     0.0
8  (0, 1]  0.588235  0.000000  0.411765     0.0
   (1, 2]  1.000000  0.000000  0.000000     0.0
   (2, 3]  0.428571  0.142857  0.428571     0.0
   (3, 4]  0.000000  0.000000  0.000000     0.0
16 (0, 1]  0.333333  0.250000  0.416667     0.0
   (1, 2]  0.444444  0.222222  0.333333     0.0
   (2, 3]  0.454545  0.363636  0.181818     0.0
   (3, 4]  0.000000  0.000000  0.000000     0.0
That's fine, but my actual application has more categories and more dimensions, so this seems a clear use-case for xarray, but I get an error:
In [8]: xtab.to_xarray()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-8-aaedf730bb97> in <module>
----> 1 xtab.to_xarray()
/opt/scitools/environments/default/2021_03_18-1/lib/python3.6/site-packages/pandas/core/generic.py in to_xarray(self)
2818 return xarray.DataArray.from_series(self)
2819 else:
-> 2820 return xarray.Dataset.from_dataframe(self)
2821
2822 @Substitution(returns=fmt.return_docstring)
/opt/scitools/environments/default/2021_03_18-1/lib/python3.6/site-packages/xarray/core/dataset.py in from_dataframe(cls, dataframe, sparse)
5131 obj._set_sparse_data_from_dataframe(idx, arrays, dims)
5132 else:
-> 5133 obj._set_numpy_data_from_dataframe(idx, arrays, dims)
5134 return obj
5135
/opt/scitools/environments/default/2021_03_18-1/lib/python3.6/site-packages/xarray/core/dataset.py in _set_numpy_data_from_dataframe(self, idx, arrays, dims)
5062 data = np.zeros(shape, values.dtype)
5063 data[indexer] = values
-> 5064 self[name] = (dims, data)
5065
5066 @classmethod
/opt/scitools/environments/default/2021_03_18-1/lib/python3.6/site-packages/xarray/core/dataset.py in __setitem__(self, key, value)
1427 )
1428
-> 1429 self.update({key: value})
1430
1431 def __delitem__(self, key: Hashable) -> None:
/opt/scitools/environments/default/2021_03_18-1/lib/python3.6/site-packages/xarray/core/dataset.py in update(self, other)
3897 Dataset.assign
3898 """
-> 3899 merge_result = dataset_update_method(self, other)
3900 return self._replace(inplace=True, **merge_result._asdict())
3901
/opt/scitools/environments/default/2021_03_18-1/lib/python3.6/site-packages/xarray/core/merge.py in dataset_update_method(dataset, other)
958 priority_arg=1,
959 indexes=indexes,
--> 960 combine_attrs="override",
961 )
/opt/scitools/environments/default/2021_03_18-1/lib/python3.6/site-packages/xarray/core/merge.py in merge_core(objects, compat, join, combine_attrs, priority_arg, explicit_coords, indexes, fill_value)
609 coerced = coerce_pandas_values(objects)
610 aligned = deep_align(
--> 611 coerced, join=join, copy=False, indexes=indexes, fill_value=fill_value
612 )
613 collected = collect_variables_and_indexes(aligned)
/opt/scitools/environments/default/2021_03_18-1/lib/python3.6/site-packages/xarray/core/alignment.py in deep_align(objects, join, copy, indexes, exclude, raise_on_invalid, fill_value)
428 indexes=indexes,
429 exclude=exclude,
--> 430 fill_value=fill_value,
431 )
432
/opt/scitools/environments/default/2021_03_18-1/lib/python3.6/site-packages/xarray/core/alignment.py in align(join, copy, indexes, exclude, fill_value, *objects)
352 if not valid_indexers:
353 # fast path for no reindexing necessary
--> 354 new_obj = obj.copy(deep=copy)
355 else:
356 new_obj = obj.reindex(
/opt/scitools/environments/default/2021_03_18-1/lib/python3.6/site-packages/xarray/core/dataset.py in copy(self, deep, data)
1218 """
1219 if data is None:
-> 1220 variables = {k: v.copy(deep=deep) for k, v in self._variables.items()}
1221 elif not utils.is_dict_like(data):
1222 raise ValueError("Data must be dict-like")
/opt/scitools/environments/default/2021_03_18-1/lib/python3.6/site-packages/xarray/core/dataset.py in <dictcomp>(.0)
1218 """
1219 if data is None:
-> 1220 variables = {k: v.copy(deep=deep) for k, v in self._variables.items()}
1221 elif not utils.is_dict_like(data):
1222 raise ValueError("Data must be dict-like")
/opt/scitools/environments/default/2021_03_18-1/lib/python3.6/site-packages/xarray/core/variable.py in copy(self, deep, data)
2632 """
2633 if data is None:
-> 2634 data = self._data.copy(deep=deep)
2635 else:
2636 data = as_compatible_data(data)
/opt/scitools/environments/default/2021_03_18-1/lib/python3.6/site-packages/xarray/core/indexing.py in copy(self, deep)
1484 # 8000341
1485 array = self.array.copy(deep=True) if deep else self.array
-> 1486 return PandasIndexAdapter(array, self._dtype)
/opt/scitools/environments/default/2021_03_18-1/lib/python3.6/site-packages/xarray/core/indexing.py in __init__(self, array, dtype)
1407 dtype_ = array.dtype
1408 else:
-> 1409 dtype_ = np.dtype(dtype)
1410 self._dtype = dtype_
1411
TypeError: Cannot interpret 'interval[int64]' as a data type
I can avoid the error by converting x (and xlag) to a different dtype instead of pandas.Categorical before using pandas.crosstab, but then I lose any empty categories, which I need to keep in my real application.

The issue here is not the use of a CategoricalIndex, but that the category labels (x.categories) are an IntervalIndex, which xarray doesn't like.
To remedy this, you can simply replace the categories within your x variable with their string representation, which coerces x.categories to be an "object" dtype instead of an "interval[int64]" dtype:
x = (
    pd.cut(np.random.uniform(low=0, high=3, size=n), range(5))
    .rename_categories(str)
)
Then calculate your crosstab as you have already done and it should work!
To get your dataset in the coordinates you want (I think), all you need to do is stack everything into a single MultiIndex row shape (instead of the crosstab's MultiIndex-row/Index-column shape).
xtab = (
    pd.crosstab([df.h, df.xlag], df.x, dropna=False, normalize="index")
    .stack()
    .reorder_levels(["x", "h", "xlag"])
    .sort_index()
)
xtab.to_xarray()
If you want to shorten your code and lose some of the explicit ordering of index levels, you can also use unstack instead of stack which gives you the correct ordering right away:
xtab = (
    pd.crosstab([df.h, df.xlag], df.x, dropna=False, normalize="index")
    .unstack([0, 1])
)
xtab.to_xarray()
Regardless of the stack() vs unstack([0, 1]) approach you use, you get this output:
<xarray.DataArray (x: 4, h: 3, xlag: 4)>
array([[[0. , 0.47058824, 0.5 , 0. ],
[0.58823529, 1. , 0.42857143, 0. ],
[0.33333333, 0.44444444, 0.45454545, 0. ]],
[[0.7 , 0.41176471, 0.33333333, 0. ],
[0. , 0. , 0.14285714, 0. ],
[0.25 , 0.22222222, 0.36363636, 0. ]],
[[0.3 , 0.11764706, 0.16666667, 0. ],
[0.41176471, 0. , 0.42857143, 0. ],
[0.41666667, 0.33333333, 0.18181818, 0. ]],
[[0. , 0. , 0. , 0. ],
[0. , 0. , 0. , 0. ],
[0. , 0. , 0. , 0. ]]])
Coordinates:
* x (x) object '(0, 1]' '(1, 2]' '(2, 3]' '(3, 4]'
* h (h) int64 0 8 16
* xlag (xlag) object '(0, 1]' '(1, 2]' '(2, 3]' '(3, 4]'

@Cameron-Riddell's answer is the key to my problem, but there are a couple of additional reshaping wriggles to smooth out. Applying rename_categories(str) to my x variable as he suggests, then proceeding as in my question, allows the final line to work:
In [8]: xtab = pd.crosstab([df.h, df.xlag], df.x, dropna=False, normalize='index')
...: xtab.to_xarray()
Out[8]:
<xarray.Dataset>
Dimensions: (h: 3, xlag: 4)
Coordinates:
* h (h) int64 0 8 16
* xlag (xlag) object '(0, 1]' '(1, 2]' '(2, 3]' '(3, 4]'
Data variables:
(0, 1] (h, xlag) float64 0.0 0.4706 0.5 0.0 ... 0.3333 0.4444 0.4545 0.0
(1, 2] (h, xlag) float64 0.7 0.4118 0.3333 0.0 ... 0.25 0.2222 0.3636 0.0
(2, 3] (h, xlag) float64 0.3 0.1176 0.1667 0.0 ... 0.3333 0.1818 0.0
(3, 4] (h, xlag) float64 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
But I wanted a 3-d array with one variable, not a 2-d Dataset with one data variable per category. To convert it I need to apply .to_array(dim='x'). But then my dimensions are in the order x, h, xlag, and I clearly don't want h in the middle, so I also need to transpose them:
In [9]: xtab.to_xarray().to_array(dim='x').transpose('h', 'xlag', 'x')
Out[9]:
<xarray.DataArray (h: 3, xlag: 4, x: 4)>
array([[[0. , 0.7 , 0.3 , 0. ],
[0.47058824, 0.41176471, 0.11764706, 0. ],
[0.5 , 0.33333333, 0.16666667, 0. ],
[0. , 0. , 0. , 0. ]],
[[0.58823529, 0. , 0.41176471, 0. ],
[1. , 0. , 0. , 0. ],
[0.42857143, 0.14285714, 0.42857143, 0. ],
[0. , 0. , 0. , 0. ]],
[[0.33333333, 0.25 , 0.41666667, 0. ],
[0.44444444, 0.22222222, 0.33333333, 0. ],
[0.45454545, 0.36363636, 0.18181818, 0. ],
[0. , 0. , 0. , 0. ]]])
Coordinates:
* h (h) int64 0 8 16
* xlag (xlag) object '(0, 1]' '(1, 2]' '(2, 3]' '(3, 4]'
* x (x) <U6 '(0, 1]' '(1, 2]' '(2, 3]' '(3, 4]'
That's what I'd envisaged! It displays similarly to pd.crosstab, but it's a 3-d xarray instead of a pandas dataframe with a multiindex. That'll be much easier to handle in the subsequent stages of my program (the crosstab is just an intermediate step, not a result in itself).
I must say that ended up more complicated than I'd anticipated... I found a question from @kilojoules back in 2017, "When to use multiindexing vs. xarray in pandas", to which @Tkanno wrote an answer beginning "There does seem to be a transition to xarray for doing work on multi-dimensional arrays." It seems a shame to me that there isn't a version of pd.crosstab that returns an xarray - or am I asking for more pandas-xarray integration than is possible?
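In the meantime, the sketch below is roughly the helper I was wishing for. To be clear, this is not part of pandas or xarray: the name xr_crosstab and its signature are my own invention, and it assumes the interval categories have already been stringified with rename_categories(str) as above.

import pandas as pd

def xr_crosstab(rows, col, **kwargs):
    # pd.crosstab, then reshape the result into an n-d xarray.DataArray.
    # Assumes interval categories were already converted to strings.
    xtab = pd.crosstab(rows, col, dropna=False, **kwargs)
    return xtab.stack().to_xarray()

Usage with the frame built above would look like da = xr_crosstab([df.h, df.xlag], df.x, normalize='index'), giving a DataArray with dims ('h', 'xlag', 'x').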

Related

Pandas - Unexpected results when indexing a DataFrame containing missing entries

First, we create a large dataset with a MultiIndex whose first record contains missing values (np.NaN)
In [200]: data = []
...: val = 0
...: for ind_1 in range(3000):
...:     if ind_1 == 0:
...:         data.append({'ind_1': 0, 'ind_2': np.NaN, 'val': np.NaN})
...:     else:
...:         for ind_2 in range(3000):
...:             data.append({'ind_1': ind_1, 'ind_2': ind_2, 'val': val})
...:             val += 1
...: df = pd.DataFrame(data).set_index(['ind_1', 'ind_2'])
In [201]: df
Out[201]:
val
ind_1 ind_2
0 NaN NaN
1 0.0 0.0
1.0 1.0
2.0 2.0
3.0 3.0
... ...
2999 2995.0 8996995.0
2996.0 8996996.0
2997.0 8996997.0
2998.0 8996998.0
2999.0 8996999.0
[8997001 rows x 1 columns]
I want to select all rows where ind_1 < 3 and ind_2 < 3
First I create a MultiIndex i1 where ind_1 < 3
In [202]: i1 = df.loc[df.index.get_level_values('ind_1') < 3].index
In [203]: i1
Out[203]:
MultiIndex([(0, nan),
(1, 0.0),
(1, 1.0),
(1, 2.0),
(1, 3.0),
(1, 4.0),
(1, 5.0),
(1, 6.0),
(1, 7.0),
(1, 8.0),
...
(2, 2990.0),
(2, 2991.0),
(2, 2992.0),
(2, 2993.0),
(2, 2994.0),
(2, 2995.0),
(2, 2996.0),
(2, 2997.0),
(2, 2998.0),
(2, 2999.0)],
names=['ind_1', 'ind_2'], length=6001)
Then I create a MultiIndex i2 where ind_2 < 3
In [204]: i2 = df.loc[~(df.index.get_level_values('ind_2') > 2)].index
In [205]: i2
Out[205]:
MultiIndex([( 0, nan),
( 1, 0.0),
( 1, 1.0),
( 1, 2.0),
( 2, 0.0),
( 2, 1.0),
( 2, 2.0),
( 3, 0.0),
( 3, 1.0),
( 3, 2.0),
...
(2996, 2.0),
(2997, 0.0),
(2997, 1.0),
(2997, 2.0),
(2998, 0.0),
(2998, 1.0),
(2998, 2.0),
(2999, 0.0),
(2999, 1.0),
(2999, 2.0)],
names=['ind_1', 'ind_2'], length=8998)
Logically, the solution should be the intersection of these two sets
In [206]: df.loc[i1 & i2]
Out[206]:
val
ind_1 ind_2
1 0.0 0.0
1.0 1.0
2.0 2.0
2 0.0 3000.0
1.0 3001.0
2.0 3002.0
Why is the first record (0, nan) filtered out?
Use boolean arrays i1, i2 instead of indexes
In [27]: i1 = df.index.get_level_values('ind_1') < 3
In [28]: i2 = ~(df.index.get_level_values('ind_2') > 2)
In [29]: i1
Out[29]: array([ True, True, True, ..., False, False, False])
In [30]: i2
Out[30]: array([ True, True, True, ..., False, False, False])
In [31]: df.loc[i1 & i2]
Out[31]:
val
ind_1 ind_2
0 NaN NaN
1 0.0 0.0
1.0 1.0
2.0 2.0
2 0.0 3000.0
1.0 3001.0
2.0 3002.0
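For completeness, the same selection can be written in one step by combining the boolean masks directly (a sketch against the df built above):

mask = (df.index.get_level_values('ind_1') < 3) & ~(df.index.get_level_values('ind_2') > 2)
df.loc[mask]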

Partitioned matrix multiplication in tensorflow or pytorch

Assume I have a matrix P of size [4, 4] which is partitioned (blocked) into 4 smaller [2, 2] matrices. How can I efficiently multiply this block matrix by another matrix (not a partitioned matrix, but a smaller one)?
Let's assume our original matrix is:
P = [ 1 1 2 2
      1 1 2 2
      3 3 4 4
      3 3 4 4 ]
This is split into submatrices:
P_1 = [ 1 1    P_2 = [ 2 2    P_3 = [ 3 3    P_4 = [ 4 4
        1 1 ]          2 2 ]          3 3 ]          4 4 ]
Now our P is:
P = [ P_1 P_2
      P_3 P_4 ]
In the next step, I want to do element-wise multiplication between P and a smaller matrix whose size is equal to the number of sub-matrices:
P * [ 1 0   =  [ P_1 0   =  [ 1 1 0 0
      0 0 ]      0   0 ]      1 1 0 0
                              0 0 0 0
                              0 0 0 0 ]
You can think of representing your large block matrix in a more efficient way.
For instance, a block matrix
P = [ 1 1 2 2
      1 1 2 2
      3 3 4 4
      3 3 4 4 ]
Can be represented using
a = [ 1 0      b = [ 1 1 0 0      p = [ 1 2
      1 0            0 0 1 1 ]          3 4 ]
      0 1
      0 1 ]
As
P = a @ p @ b
with @ representing matrix multiplication. The matrices a and b represent/encode the block structure of P, and the small p represents the values of each block.
Now, if you want to multiply (element-wise) p with a small (2x2) matrix q, you simply compute
a @ (p * q) @ b
A simple pytorch example
In [1]: a = torch.tensor([[1., 0], [1., 0], [0., 1], [0, 1]])
In [2]: b = torch.tensor([[1., 1., 0, 0], [0, 0, 1., 1]])
In [3]: p=torch.tensor([[1., 2.], [3., 4.]])
In [4]: q = torch.tensor([[1., 0], [0., 0]])
In [5]: a @ p @ b
Out[5]:
tensor([[1., 1., 2., 2.],
[1., 1., 2., 2.],
[3., 3., 4., 4.],
[3., 3., 4., 4.]])
In [6]: a @ (p*q) @ b
Out[6]:
tensor([[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]])
I leave it to you as an exercise how to efficiently produce the "structure" matrices a and b given the sizes of the blocks.
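One possible way to do that exercise, offered as a sketch of my own rather than part of the answer above (it relies on torch.kron, available in recent PyTorch versions):

import torch

def structure_matrices(block_rows, block_cols, n_row_blocks, n_col_blocks):
    # a expands block rows, b expands block columns
    a = torch.kron(torch.eye(n_row_blocks), torch.ones(block_rows, 1))
    b = torch.kron(torch.eye(n_col_blocks), torch.ones(1, block_cols))
    return a, b

a, b = structure_matrices(2, 2, 2, 2)  # reproduces the 4x4 example above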
Following is a general Tensorflow-based solution that works for input matrices p (large) and m (small) of arbitrary shapes as long as the sizes of p are divisible by the sizes of m on both axes.
def block_mul(p, m):
    p_x, p_y = p.shape
    m_x, m_y = m.shape
    m_4d = tf.reshape(m, (m_x, 1, m_y, 1))
    m_broadcasted = tf.broadcast_to(m_4d, (m_x, p_x // m_x, m_y, p_y // m_y))
    mp = tf.reshape(m_broadcasted, (p_x, p_y))
    return p * mp
Test:
import tensorflow as tf
tf.enable_eager_execution()
p = tf.reshape(tf.constant(range(36)), (6, 6))
m = tf.reshape(tf.constant(range(9)), (3, 3))
print(f"p:\n{p}\n")
print(f"m:\n{m}\n")
print(f"block_mul(p, m):\n{block_mul(p, m)}")
Output (Python 3.7.3, Tensorflow 1.13.1):
p:
[[ 0 1 2 3 4 5]
[ 6 7 8 9 10 11]
[12 13 14 15 16 17]
[18 19 20 21 22 23]
[24 25 26 27 28 29]
[30 31 32 33 34 35]]
m:
[[0 1 2]
[3 4 5]
[6 7 8]]
block_mul(p, m):
[[ 0 0 2 3 8 10]
[ 0 0 8 9 20 22]
[ 36 39 56 60 80 85]
[ 54 57 80 84 110 115]
[144 150 182 189 224 232]
[180 186 224 231 272 280]]
Another solution that uses implicit broadcasting is the following:
def block_mul2(p, m):
    p_x, p_y = p.shape
    m_x, m_y = m.shape
    p_4d = tf.reshape(p, (m_x, p_x // m_x, m_y, p_y // m_y))
    m_4d = tf.reshape(m, (m_x, 1, m_y, 1))
    return tf.reshape(p_4d * m_4d, (p_x, p_y))
I don't know about the most efficient method, but you can try these:
Method 1:
Using torch.cat()
import torch
def multiply(a, b):
    x1 = a[0:2, 0:2]*b[0,0]
    x2 = a[0:2, 2:]*b[0,1]
    x3 = a[2:, 0:2]*b[1,0]
    x4 = a[2:, 2:]*b[1,1]
    return torch.cat((torch.cat((x1, x2), 1), torch.cat((x3, x4), 1)), 0)
a = torch.tensor([[1, 1, 2, 2],[1, 1, 2, 2],[3, 3, 4, 4,],[3, 3, 4, 4]])
b = torch.tensor([[1, 0],[0, 0]])
print(multiply(a, b))
output:
tensor([[1, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
Method 2:
Using torch.nn.functional.pad()
import torch.nn.functional as F
import torch
def multiply(a, b):
    b = F.pad(input=b, pad=(1, 1, 1, 1), mode='constant', value=0)
    b[0,0] = 1
    b[0,1] = 1
    b[1,0] = 1
    return a*b
a = torch.tensor([[1, 1, 2, 2],[1, 1, 2, 2],[3, 3, 4, 4,],[3, 3, 4, 4]])
b = torch.tensor([[1, 0],[0, 0]])
print(multiply(a, b))
output:
tensor([[1, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
If the matrices are small, you are probably fine with cat or pad. The solution with factorization is very elegant, as is the one with a block_mul implementation.
Another solution is turning the 2D block matrix into a 3D volume where each 2D slice is a block (P_1, P_2, P_3, P_4), then using the power of broadcasting to multiply each 2D slice by a scalar, and finally reshaping the output. The reshaping is not immediate but it's doable; the code below is a port from numpy to pytorch of https://stackoverflow.com/a/16873755/4892874
In Pytorch:
import torch
h = w = 4
x = torch.ones(h, w)
x[:2, 2:] = 2
x[2:, :2] = 3
x[2:, 2:] = 4
# number of blocks along x and y
nrows=2
ncols=2
vol3d = x.reshape(h//nrows, nrows, -1, ncols)
vol3d = vol3d.permute(0, 2, 1, 3).reshape(-1, nrows, ncols)
out = vol3d * torch.Tensor([1, 0, 0, 0])[:, None, None].float()
# reshape to original
n, nrows, ncols = out.shape
out = out.reshape(h//nrows, -1, nrows, ncols)
out = out.permute(0, 2, 1, 3)
out = out.reshape(h, w)
print(out)
tensor([[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]])
I haven't benchmarked this against the others, but this doesn't consume additional memory like padding would, and it doesn't do slow operations like concatenation. It also has the advantage of being easy to understand and visualize.
You can generalize it to any situation by playing with h, w, nrows, ncols.
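As a concrete illustration of that generalization, here is a small sketch of my own (the name block_scale is made up) that wraps the reshape/permute steps above into one function:

import torch

def block_scale(x, nrows, ncols, scalars):
    # Scale each (nrows x ncols) block of the 2-D tensor x by the matching
    # entry of scalars, which has shape (h // nrows, w // ncols).
    h, w = x.shape
    vol = x.reshape(h // nrows, nrows, -1, ncols).permute(0, 2, 1, 3)
    out = vol * scalars.reshape(h // nrows, w // ncols, 1, 1).to(x.dtype)
    return out.permute(0, 2, 1, 3).reshape(h, w)

x = torch.ones(4, 4)
x[:2, 2:] = 2
x[2:, :2] = 3
x[2:, 2:] = 4
print(block_scale(x, 2, 2, torch.tensor([[1, 0], [0, 0]])))  # same output as above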
Although the other answer may be a solution, it is not an efficient way. I came up with another approach to tackle the problem (though it is still not perfect): the following implementation needs too much memory when the inputs have 3 or 4 dimensions. For example, for an input size of 20*75*1024*1024, the calculation needs around 12 GB of RAM.
Here is my implementation:
import tensorflow as tf
tf.enable_eager_execution()
inps = tf.constant([
[1, 1, 1, 1, 2, 2, 2, 2],
[1, 1, 1, 1, 2, 2, 2, 2],
[1, 1, 1, 1, 2, 2, 2, 2],
[1, 1, 1, 1, 2, 2, 2, 2],
[3, 3, 3, 3, 4, 4, 4, 4],
[3, 3, 3, 3, 4, 4, 4, 4],
[3, 3, 3, 3, 4, 4, 4, 4],
[3, 3, 3, 3, 4, 4, 4, 4]])
on_cells = tf.constant([[1, 0, 0, 1]])
on_cells = tf.expand_dims(on_cells, axis=-1)
# replicate the value to block-size (4*4)
on_cells = tf.tile(on_cells, [1, 1, 4 * 4])
# reshape to a format for permutation
on_cells = tf.reshape(on_cells, (1, 2, 2, 4, 4))
# permutation
on_cells = tf.transpose(on_cells, [0, 1, 3, 2, 4])
# reshape
on_cells = tf.reshape(on_cells, [1, 8, 8])
# element-wise operation
print(inps * on_cells)
Output:
tf.Tensor(
[[[1 1 1 1 0 0 0 0]
[1 1 1 1 0 0 0 0]
[1 1 1 1 0 0 0 0]
[1 1 1 1 0 0 0 0]
[0 0 0 0 4 4 4 4]
[0 0 0 0 4 4 4 4]
[0 0 0 0 4 4 4 4]
[0 0 0 0 4 4 4 4]]], shape=(1, 8, 8), dtype=int32)

xarray: simple weighted rolling mean example using .construct()

Xarray can do a weighted rolling mean via the .construct() method, as stated in an answer on SO here and also in the docs.
The weighted rolling mean example in the docs doesn't quite look right, as it seems to give the same answer as the ordinary rolling mean.
import xarray as xr
import numpy as np
arr = xr.DataArray(np.arange(0, 7.5, 0.5).reshape(3, 5),
... dims=('x', 'y'))
arr.rolling(y=3, center=True).mean()
#<xarray.DataArray (x: 3, y: 5)>
#array([[nan, 0.5, 1. , 1.5, nan],
# [nan, 3. , 3.5, 4. , nan],
# [nan, 5.5, 6. , 6.5, nan]])
#Dimensions without coordinates: x, y
weight = xr.DataArray([0.25, 0.5, 0.25], dims=['window'])
arr.rolling(y=3, center=True).construct('window').dot(weight)
#<xarray.DataArray (x: 3, y: 5)>
#array([[nan, 0.5, 1. , 1.5, nan],
# [nan, 3. , 3.5, 4. , nan],
# [nan, 5.5, 6. , 6.5, nan]])
#Dimensions without coordinates: x, y
Here is a simpler example which I would like to get the syntax right on:
da = xr.DataArray(np.arange(1,6), dims='x')
da.rolling(x=3, center=True).mean()
#<xarray.DataArray (x: 5)>
#array([nan, 2., 3., 4., nan])
#Dimensions without coordinates: x
weight = xr.DataArray([0.5, 1, 0.5], dims=['window'])
da.rolling(x=3, center=True).construct('window').dot(weight)
#<xarray.DataArray (x: 5)>
#array([nan, 4., 6., 8., nan])
#Dimensions without coordinates: x
It returns 4, 6, 8. I thought it would do:
((1 x 0.5) + (2 x 1) + (3 x 0.5)) / 3 = 4/3
((2 x 0.5) + (3 x 1) + (4 x 0.5)) / 3 = 2
((3 x 0.5) + (4 x 1) + (5 x 0.5)) / 3 = 8/3
i.e. 1.33, 2, 2.66
In the first example, you use evenly spaced data for arr.
Therefore, the weighted mean (with [0.25, 0.5, 0.25]) will be the same as the simple mean.
If you consider non-linear data, the result differs
In [50]: arr = xr.DataArray((np.arange(0, 7.5, 0.5)**2).reshape(3, 5),
...: dims=('x', 'y'))
...:
In [51]: arr.rolling(y=3, center=True).mean()
Out[51]:
<xarray.DataArray (x: 3, y: 5)>
array([[ nan, 0.416667, 1.166667, 2.416667, nan],
[ nan, 9.166667, 12.416667, 16.166667, nan],
[ nan, 30.416667, 36.166667, 42.416667, nan]])
Dimensions without coordinates: x, y
In [52]: weight = xr.DataArray([0.25, 0.5, 0.25], dims=['window'])
...: arr.rolling(y=3, center=True).construct('window').dot(weight)
...:
Out[52]:
<xarray.DataArray (x: 3, y: 5)>
array([[ nan, 0.375, 1.125, 2.375, nan],
[ nan, 9.125, 12.375, 16.125, nan],
[ nan, 30.375, 36.125, 42.375, nan]])
Dimensions without coordinates: x, y
For the second example, you use [0.5, 1, 0.5] as weight, the total of which is 2.
Therefore, the first non-nan item will be
(1 x 0.5) + (2 x 1) + (3 x 0.5) = 4
If you want the weighted mean, rather than the weighted sum, use [0.25, 0.5, 0.25] instead.
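In code, that just means normalizing the weights before the dot product (a minimal sketch using the da from above):

weight = xr.DataArray([0.5, 1, 0.5], dims=['window'])
weight = weight / weight.sum()  # -> [0.25, 0.5, 0.25]
da.rolling(x=3, center=True).construct('window').dot(weight)
# array([nan, 2., 3., 4., nan]) -- the weighted mean, which here matches the plain rolling mean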

The shape of the predicted_ids in the outputs of `tf.contrib.seq2seq.BeamSearchDecoder`

What is the shape of the contents in the outputs of tf.contrib.seq2seq.BeamSearchDecoder? I know that it is an instance of class BeamSearchDecoderOutput(scores, predicted_ids, parent_ids), but what is the shape of the scores, predicted_ids and parent_ids?
I wrote the following toy code to explore it a little bit myself.
tgt_vocab_size = 20
embedding_decoder = tf.one_hot(list(range(0, tgt_vocab_size)), tgt_vocab_size)
batch_size = 2
start_tokens = tf.fill([batch_size], 0)
end_token = 1
beam_width = 3
num_units=18
decoder_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units)
encoder_outputs = decoder_cell.zero_state(batch_size, dtype=tf.float32)
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(encoder_outputs, multiplier=beam_width)
my_decoder = tf.contrib.seq2seq.BeamSearchDecoder(cell=decoder_cell,
                                                  embedding=embedding_decoder,
                                                  start_tokens=start_tokens,
                                                  end_token=end_token,
                                                  initial_state=tiled_encoder_outputs,
                                                  beam_width=beam_width)
# dynamic decoding
outputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(my_decoder,
                                                                    maximum_iterations=4,
                                                                    output_time_major=True)
final_predicted_ids = outputs.predicted_ids
scores = outputs.beam_search_decoder_output.scores
predicted_ids = outputs.beam_search_decoder_output.predicted_ids
parent_ids = outputs.beam_search_decoder_output.parent_ids
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    final_predicted_ids_vals = sess.run(final_predicted_ids)
    print("final_predicted_ids shape:")
    print(final_predicted_ids_vals.shape)
    print("final_predicted_ids_vals: \n%s" % final_predicted_ids_vals)
    print("scores shape:")
    print(sess.run(scores).shape)
    print("scores values: \n %s" % sess.run(scores))
    print("predicted_ids shape: ")
    print(sess.run(predicted_ids).shape)
    print("predicted_ids values: \n %s" % sess.run(predicted_ids))
    print("parent_ids shape:")
    print(sess.run(parent_ids).shape)
    print("parent_ids values: \n %s" % sess.run(parent_ids))
The printed output is as follows:
final_predicted_ids shape:
(4, 2, 3)
final_predicted_ids_vals:
[[[ 1 8 8]
[ 1 8 8]]
[[ 1 13 13]
[ 1 13 13]]
[[ 1 13 13]
[ 1 13 13]]
[[ 1 13 2]
[ 1 13 2]]]
scores shape:
(4, 2, 3)
scores values:
[[[ -2.8376358 -2.843168 -2.8478816]
[ -2.8376358 -2.843168 -2.8478816]]
[[ -2.8478816 -5.655898 -5.6810265]
[ -2.8478816 -5.655898 -5.6810265]]
[[ -2.8478816 -8.478384 -8.495466 ]
[ -2.8478816 -8.478384 -8.495466 ]]
[[ -2.8478816 -11.292251 -11.307263 ]
[ -2.8478816 -11.292251 -11.307263 ]]]
predicted_ids shape:
(4, 2, 3)
predicted_ids values:
[[[ 8 13 1]
[ 8 13 1]]
[[ 1 13 13]
[ 1 13 13]]
[[ 1 13 12]
[ 1 13 12]]
[[ 1 13 2]
[ 1 13 2]]]
parent_ids shape:
(4, 2, 3)
parent_ids values:
[[[0 0 0]
[0 0 0]]
[[2 0 1]
[2 0 1]]
[[0 1 1]
[0 1 1]]
[[0 1 1]
[0 1 1]]]
The outputs object returned by tf.contrib.seq2seq.dynamic_decode(BeamSearchDecoder) is actually an instance of the class FinalBeamSearchDecoderOutput, which consists of:
predicted_ids: Final outputs returned by the beam search after all decoding is finished. A tensor of shape [batch_size, num_steps, beam_width] (or [num_steps, batch_size, beam_width] if output_time_major is True). Beams are ordered from best to worst.
beam_search_decoder_output: An instance of BeamSearchDecoderOutput that describes the state of the beam search.
So you need to make sure the final predictions/translations have shape [beam_width, batch_size, num_steps], by applying transpose([2, 0, 1]), or tf.transpose(final_predicted_ids) if output_time_major=True.
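A minimal sketch of that reordering, assuming the time-major setup from the code in the question (so predicted_ids has shape [num_steps, batch_size, beam_width]):

final_predicted_ids = outputs.predicted_ids    # shape (4, 2, 3) in the example above
reordered = tf.transpose(final_predicted_ids)  # reverses the dims -> (3, 2, 4), i.e. [beam_width, batch_size, num_steps]
# reordered[0] is then the best beam for every batch element.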

Get coordinates of non-nan values of xarray Dataset

I have this sample Dataset containing worldwide air temperature and, more importantly, a mask variable land marking land/non-water areas.
<xarray.Dataset>
Dimensions: (lat: 55, lon: 143, time: 5)
Coordinates:
* time (time) datetime64[ns] 2016-01-01 2016-01-02 2016-01-03 ...
* lat (lat) float64 -52.5 -50.0 -47.5 -45.0 -42.5 -40.0 -37.5 -35.0 ...
* lon (lon) float64 -177.5 -175.0 -172.5 -170.0 -167.5 -165.0 -162.5 ...
land (lat, lon) bool False False False False False False False False ...
Data variables:
airt (time, lat, lon) float64 7.952 7.61 7.389 7.267 7.124 6.989 ...
I can now mask the oceans and plot it
dry_areas = ds.where(ds.land)
dry_areas.airt.plot()
dry_areas looks like this
<xarray.Dataset>
Dimensions: (lat: 55, lon: 143)
Coordinates:
* lat (lat) float64 -52.5 -50.0 -47.5 -45.0 -42.5 -40.0 -37.5 -35.0 ...
* lon (lon) float64 -177.5 -175.0 -172.5 -170.0 -167.5 -165.0 -162.5 ...
land (lat, lon) bool False False False False False False False False ...
Data variables:
airt (lat, lon) float64 nan nan nan nan nan nan nan nan nan nan nan ...
How can I now get the coordinates for all non-nan values?
dry_areas.coords gives me the bounding box, and I can't get lat and lon into the (55, 143) shape that I could apply the mask to.
The only working workaround I could find is
dry_areas.to_dataframe().dropna().reset_index()[['lat', 'lon']].values, which does not feel very lean and clean.
I feel this should be quite simple; however, I am clearly not a numpy/matrix ninja.
Best solution so far
This is the shortest I could come up with so far:
import numpy as np
import numpy.ma as ma

lon, lat = np.meshgrid(ds.coords['lon'], ds.coords['lat'])
lat_masked = ma.array(lat, mask=dry_areas.airt.fillna(False))
lon_masked = ma.array(lon, mask=dry_areas.airt.fillna(False))
land_coordinates = zip(lat_masked[lat_masked.mask].data, lon_masked[lon_masked.mask].data)
You can use .stack to get an array of coord pairs of the non-null values:
In [31]: da=xr.DataArray(np.arange(20).reshape(5,4))
In [33]: da_nans = da.where(da % 2 == 1)
In [34]: da_nans
Out[34]:
<xarray.DataArray (dim_0: 5, dim_1: 4)>
array([[ nan, 1., nan, 3.],
[ nan, 5., nan, 7.],
[ nan, 9., nan, 11.],
[ nan, 13., nan, 15.],
[ nan, 17., nan, 19.]])
Coordinates:
* dim_0 (dim_0) int64 0 1 2 3 4
* dim_1 (dim_1) int64 0 1 2 3
In [35]: da_stacked = da_nans.stack(x=['dim_0','dim_1'])
In [36]: da_stacked
Out[36]:
<xarray.DataArray (x: 20)>
array([ nan, 1., nan, 3., nan, 5., nan, 7., nan, 9., nan,
11., nan, 13., nan, 15., nan, 17., nan, 19.])
Coordinates:
* x (x) object (0, 0) (0, 1) (0, 2) (0, 3) (1, 0) (1, 1) (1, 2) ...
In [37]: da_stacked[da_stacked.notnull()]
Out[37]:
<xarray.DataArray (x: 10)>
array([ 1., 3., 5., 7., 9., 11., 13., 15., 17., 19.])
Coordinates:
* x (x) object (0, 1) (0, 3) (1, 1) (1, 3) (2, 1) (2, 3) (3, 1) ...
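Applied to the Dataset in the question, the same idea looks roughly like this (a sketch; 'point' is just an arbitrary name for the stacked dimension, and dry_areas/airt are the names from above):

stacked = dry_areas.airt.stack(point=['lat', 'lon'])
land_points = stacked[stacked.notnull()]
land_coordinates = land_points.coords['point'].values  # array of (lat, lon) tuples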