I need to quickly process a huge two-dimensional array and have already pre-marked the required data.
array([[ 0., 1., 2., 3., 4., 5. , 6. , 7.],
[ 6., 7., 8., 9., 10., 4.2, 4.3, 11.],
[ 12., 13., 14., 15., 16., 4.2, 4.3, 17.],
[ 18., 19., 20., 21., 22., 4.2, 4.3, 23.]])
array([[False, True, True, True, False, True, True , False],
[False, False, False, True, True, True, True , False],
[False, False, True, True, False, False, False, False],
[False, True, True, False, False, False, True , True ]])
I expect to compute the running sum of the marked data in each row of the array. But np.cumsum can't do this on its own — I need solutions or good ideas, thanks.
Expected output:
array([[ 0., 1., 3., 6., 0., 5. , 11. , 0.],
[ 0., 0., 0., 9., 19., 23.2, 27.5, 0.],
[ 0., 0., 14., 29., 0., 0, 0, 0.],
[ 0., 19., 39., 0., 0., 0, 4.3, 27.3]])
The difficulty is that each fragment's cumulative sum must restart from zero — it must not carry over the total of the previous fragment.
def mask_to_size(self, axis=-1):
    # Convert a boolean mask into the lengths of its consecutive-True runs
    # ("islands") along `axis`, in scan order.
    #
    # Strategy: pad one layer of False after the scanned axis so islands never
    # bleed across rows/columns once the mask is flattened, then locate
    # True/False transitions in the flat mask; the gaps between alternating
    # transition indices are the island lengths.
    #
    # NOTE(review): `mask[0] = False` / `mask[:,0] = False` overwrites the
    # first row/column of the ORIGINAL mask, not just the padding, so islands
    # that begin at index 0 along the scan direction are shortened or lost.
    # The sample masks in this question are all-False in column 0, so it
    # happens to work here — verify before using on general masks.
    if self.ndim == 2:
        if axis == 0:
            # column-wise scan: pad an extra row, flatten Fortran-style
            mask = np.zeros((self.shape[0]+1, self.shape[1]), dtype=bool)
            mask[:-1] = self ; mask[0] = False ; mask = mask.ravel('F')
        else:
            # row-wise scan: pad an extra column, flatten C-style
            mask = np.zeros((self.shape[0], self.shape[1]+1), dtype=bool)
            mask[:,0:-1]= self ;mask[:,0]=False; mask = mask.ravel('C')
    else:
        # 1D input: just append one trailing False
        mask = np.zeros((self.shape[0]+1), dtype=bool)
        mask[:-1] = self ; mask[0] = False
    # Transitions occur at each island start and one past each island end;
    # every second gap between transition positions is a run length.
    return np.diff(np.nonzero(mask[1:]!= mask[:-1])[0])[::2].astype(int)
# https://stackoverflow.com/a/49179628/ by @Divakar
def intervaled_cumsum(ar, sizes):
    """Cumulative sum of 1D `ar` that restarts at every interval boundary.

    `sizes` holds the lengths of the consecutive intervals; their sum must
    equal ``ar.size``.  (https://stackoverflow.com/a/49179628/ by @Divakar.)
    """
    # Flat positions where each new interval begins (position 0 excluded).
    starts = sizes.cumsum()
    running = ar.cumsum()
    corrected = ar.copy()
    # At the first restart subtract everything accumulated so far; at later
    # restarts subtract only the sum of the interval that just ended.  A
    # single global cumsum then yields independent per-interval cumsums.
    corrected[starts[0]] = ar[starts[0]] - running[starts[0] - 1]
    corrected[starts[1:-1]] = ar[starts[1:-1]] - np.diff(running[starts[:-1] - 1])
    return corrected.cumsum()
def cumsum_masked(self, mask, axis=-1):
    """Intervaled cumulative sum over the True entries of `mask`.

    Masked-out positions stay 0, and every run of consecutive True values
    restarts its own cumulative sum.  ``axis=0`` scans down columns; any
    other axis value scans along rows.  Relies on the sibling helpers
    `mask_to_size` and `intervaled_cumsum`.
    """
    sizes = mask_to_size(mask, axis)
    shape = self.shape
    out = np.zeros(self.size)
    is_2d = len(shape) == 2
    if is_2d:
        # Flatten so the scanned axis becomes contiguous in memory order.
        order = 'F' if axis == 0 else 'C'
        mask = mask.ravel(order)
        self = self.ravel(order)
    out[mask] = intervaled_cumsum(self[mask], sizes)
    if not is_2d:
        return out
    # Undo the flattening; Fortran order needs a transpose on the way back.
    if axis == 0:
        return out.reshape(shape[1], shape[0]).T
    return out.reshape(shape)
cumsum_masked(a,m,axis=1)
I sorted out the answers and tried to optimize the speed, but it didn't work. I think other people may need it.
There's intervaled_cumsum for 1D arrays. For this case, we simply need to get the masked elements, set up their island lengths and feed them to that function.
Hence, one vectorized approach would be -
# https://stackoverflow.com/a/49179628/ by @Divakar
def intervaled_cumsum(ar, sizes):
    """Per-interval cumulative sum of a 1D array.

    Each interval of length ``sizes[k]`` gets its own running sum, computed
    with a single pass: correct the first element of every interval so that
    one global cumsum effectively restarts there.
    (https://stackoverflow.com/a/49179628/ by @Divakar.)
    """
    # Output buffer starts as a copy of the input.
    result = ar.copy()
    # Global running totals of the input.
    totals = ar.cumsum()
    # Index of the first element of each interval after the first.
    boundaries = sizes.cumsum()
    # First boundary: cancel the entire sum accumulated before it.
    result[boundaries[0]] = ar[boundaries[0]] - totals[boundaries[0] - 1]
    # Later boundaries: cancel just the previous interval's total.
    result[boundaries[1:-1]] = ar[boundaries[1:-1]] - np.diff(totals[boundaries[:-1] - 1])
    return result.cumsum()
def intervaled_cumsum_masked_rowwise(a, mask):
    """Per-row cumulative sums over the masked islands of `a`.

    Entries where `mask` is False stay 0; each contiguous run of True
    values within a row gets an independent cumulative sum.
    """
    # Pad a False column on both sides so every island — including ones
    # touching a row edge — yields a clean start/stop transition pair.
    pad = np.zeros((mask.shape[0], 1), dtype=bool)
    padded = np.hstack((pad, mask, pad))
    # Island lengths are the gaps between alternating transition positions.
    transitions = np.flatnonzero(padded[:, 1:] != padded[:, :-1])
    sizes = np.diff(transitions)[::2]
    result = np.zeros_like(a)
    result[mask] = intervaled_cumsum(a[mask], sizes)
    return result
Sample run -
In [95]: a
Out[95]:
array([[ 0. , 1. , 2. , 3. , 4. , 5. , 6. , 7. ],
[ 6. , 7. , 8. , 9. , 10. , 4.2, 4.3, 11. ],
[12. , 13. , 14. , 15. , 16. , 4.2, 4.3, 17. ],
[18. , 19. , 20. , 21. , 22. , 4.2, 4.3, 23. ]])
In [96]: mask
Out[96]:
array([[False, True, True, True, False, True, True, False],
[False, False, False, True, True, True, True, False],
[False, False, True, True, False, False, False, False],
[False, True, True, False, False, False, True, True]])
In [97]: intervaled_cumsum_masked_rowwise(a, mask)
Out[97]:
array([[ 0. , 1. , 3. , 6. , 0. , 5. , 11. , 0. ],
[ 0. , 0. , 0. , 9. , 19. , 23.2, 27.5, 0. ],
[ 0. , 0. , 14. , 29. , 0. , 0. , 0. , 0. ],
[ 0. , 19. , 39. , 0. , 0. , 0. , 4.3, 27.3]])
Works just as well with negative numbers -
In [109]: a = -a
In [110]: a
Out[110]:
array([[ -0. , -1. , -2. , -3. , -4. , -5. , -6. , -7. ],
[ -6. , -7. , -8. , -9. , -10. , -4.2, -4.3, -11. ],
[-12. , -13. , -14. , -15. , -16. , -4.2, -4.3, -17. ],
[-18. , -19. , -20. , -21. , -22. , -4.2, -4.3, -23. ]])
In [111]: intervaled_cumsum_masked_rowwise(a, mask)
Out[111]:
array([[ 0. , -1. , -3. , -6. , 0. , -5. , -11. , 0. ],
[ 0. , 0. , 0. , -9. , -19. , -23.2, -27.5, 0. ],
[ 0. , 0. , -14. , -29. , 0. , 0. , 0. , 0. ],
[ 0. , -19. , -39. , 0. , 0. , 0. , -4.3, -27.3]])
Here is an approach that is quite a bit slower than @Divakar's and @filippo's but more robust. The problem with the global-cumsum-based approaches is that they can suffer from loss of significance, see below:
import numpy as np
from scipy import linalg
def cumsums(data, mask, break_lines=True):
    """Intervaled cumulative sums computed via a bidiagonal linear solve.

    Slower than the pure-cumsum approaches, but avoids their loss of
    significance: each island is summed locally instead of as a difference
    of two large global cumsums.  With ``break_lines=True`` an island can
    never continue across a row boundary.
    """
    masked_vals = data[mask]
    flat = mask.ravel()
    if break_lines:
        # Force column 0 unmasked in the *chain* computation so the end of
        # one row cannot link to the start of the next.
        msk = mask.copy()
        msk[:, 0] = False
        chain = msk.ravel()[1:][flat[:-1]][:masked_vals.size - 1]
    else:
        chain = flat[1:][flat[:-1]][:masked_vals.size - 1]
    # Lower-bidiagonal system: x[i] = v[i] + chain[i-1] * x[i-1], so each
    # solution entry extends the running sum of its own chain.
    band = np.empty((2, masked_vals.size))
    band.T[...] = 1, 0
    band[1, :-1] -= chain
    out = np.zeros_like(data)
    out[mask] = linalg.solve_banded((1, 0), band, masked_vals)
    return out
def f_staircase(a, m):
    """Masked rowwise intervaled cumsum via a running-maximum baseline.

    For every position, subtract the row cumsum value at the most recent
    unmasked spot.  NOTE: correct only while the row cumsums are
    non-decreasing (i.e. for non-negative data) — see the discussion.
    """
    c = np.cumsum(a, axis=1)
    baseline = np.maximum.accumulate(c * ~m, axis=1)
    return c - baseline
# https://stackoverflow.com/a/49179628/ by @Divakar
def intervaled_cumsum(ar, sizes):
# Make a copy to be used as output array
out = ar.copy()
# Get cumumlative values of array
arc = ar.cumsum()
# Get cumsumed indices to be used to place differentiated values into
# input array's copy
idx = sizes.cumsum()
# Place differentiated values that when cumumlatively summed later on would
# give us the desired intervaled cumsum
out[idx[0]] = ar[idx[0]] - arc[idx[0]-1]
out[idx[1:-1]] = ar[idx[1:-1]] - np.diff(arc[idx[:-1]-1])
return out.cumsum()
def intervaled_cumsum_masked_rowwise(a, mask):
    """Rowwise intervaled cumsum over masked islands; unmasked entries stay 0."""
    # Surround each row of the mask with False so that islands touching a row
    # edge still produce a matching pair of start/stop transitions.
    edge = np.zeros((mask.shape[0], 1), dtype=bool)
    framed = np.hstack((edge, mask, edge))
    # Every second difference of the transition positions is an island length.
    sizes = np.diff(np.flatnonzero(framed[:, 1:] != framed[:, :-1]))[::2]
    out = np.zeros_like(a)
    out[mask] = intervaled_cumsum(a[mask], sizes)
    return out
# --- Benchmark & accuracy comparison of the three approaches --------------
# Small sample from the question.
data = np.array([[ 0., 1., 2., 3., 4., 5. , 6. , 7.],
                 [ 6., 7., 8., 9., 10., 4.2, 4.3, 11.],
                 [ 12., 13., 14., 15., 16., 4.2, 4.3, 17.],
                 [ 18., 19., 20., 21., 22., 4.2, 4.3, 23.]])
mask = np.array([[False, True, True, True, False, True, True , False],
                 [False, False, False, True, True, True, True , False],
                 [False, False, True, True, False, False, False, False],
                 [False, True, True, False, False, False, True , True ]])
from timeit import timeit
print('fast?')
print('filippo', timeit(lambda: f_staircase(data, mask), number=1000))
print('pp ', timeit(lambda: cumsums(data, mask), number=1000))
print('divakar', timeit(lambda: intervaled_cumsum_masked_rowwise(data, mask), number=1000))
# Dense case: ~12.5% random seeds, dilated so every island is >= 3 wide.
data = np.random.uniform(-10, 10, (5000, 5000))
mask = np.random.random((5000, 5000)) < 0.125
mask[:, 1:] |= mask[:, :-1]   # grow each island one step to the right
mask[:, 2:] |= mask[:, :-2]   # and two steps
print()
print('fast on large data?')
print('filippo', timeit(lambda: f_staircase(data, mask), number=3))
print('pp ', timeit(lambda: cumsums(data, mask), number=3))
print('divakar', timeit(lambda: intervaled_cumsum_masked_rowwise(data, mask), number=3))
# Sparse case: bigger array, much lower mask density.
data = np.random.uniform(-10, 10, (10000, 10000))
mask = np.random.random((10000, 10000)) < 0.025
mask[:, 1:] |= mask[:, :-1]
mask[:, 2:] |= mask[:, :-2]
print()
print('fast on large sparse data?')
print('filippo', timeit(lambda: f_staircase(data, mask), number=3))
print('pp ', timeit(lambda: cumsums(data, mask), number=3))
print('divakar', timeit(lambda: intervaled_cumsum_masked_rowwise(data, mask), number=3))
# Numerical soundness: a steeply falling exponential makes the
# cumsum-difference approaches lose all significant digits, because the
# small tail values are recovered as differences of huge partial sums.
data = np.exp(-np.linspace(-24, 24, 100))[None]
mask = (np.arange(100) % 4).astype(bool)[None]
print()
print('numerically sound?')
print('correct', data[0, -3:].sum())
print('filippo', f_staircase(data, mask)[0,-1])
print('pp ', cumsums(data, mask)[0,-1])
print('divakar', intervaled_cumsum_masked_rowwise(data, mask)[0,-1])
Output:
fast?
filippo 0.008435532916337252
pp 0.07329772273078561
divakar 0.0336935929954052
fast on large data?
filippo 1.6037923698313534
pp 3.982803522143513
divakar 1.706403402145952
fast on large sparse data?
filippo 6.11361704999581
pp 4.717669038102031
divakar 2.9474888620898128
numerically sound?
correct 1.9861262739950047e-10
filippo 0.0
pp 1.9861262739950047e-10
divakar 9.737630365237156e-06
We see that with the falling exponential example the cumsum based approaches don't work. Obviously, this is an engineered example, but it showcases a real problem.
Here's an attempt to implement @hpaulj's suggestion
>>> a = np.array([[ 0. , 1. , 2. , 3. , 4. , 5. , 6. , 7. ],
... [ 6. , 7. , 8. , 9. , 10. , 4.2, 4.3, 11. ],
... [12. , 13. , 14. , 15. , 16. , 4.2, 4.3, 17. ],
... [18. , 19. , 20. , 21. , 22. , 4.2, 4.3, 23. ]])
>>> m = np.array([[False, True, True, True, False, True, True, False],
... [False, False, False, True, True, True, True, False],
... [False, False, True, True, False, False, False, False],
... [False, True, True, False, False, False, True, True]])
>>> np.maximum.accumulate(np.cumsum(a, axis=1)*~m, axis=1)
array([[ 0. , 0. , 0. , 0. , 10. , 10. , 10. , 28. ],
[ 6. , 13. , 21. , 21. , 21. , 21. , 21. , 59.5],
[ 12. , 25. , 25. , 25. , 70. , 74.2, 78.5, 95.5],
[ 18. , 18. , 18. , 78. , 100. , 104.2, 104.2, 104.2]])
>>> np.cumsum(a, axis=1) - np.maximum.accumulate(np.cumsum(a, axis=1)*~m, axis=1)
array([[ 0. , 1. , 3. , 6. , 0. , 5. , 11. , 0. ],
[ 0. , 0. , 0. , 9. , 19. , 23.2, 27.5, 0. ],
[ 0. , 0. , 14. , 29. , 0. , 0. , 0. , 0. ],
[ 0. , 19. , 39. , 0. , 0. , 0. , 4.3, 27.3]])
See also Most efficient way to forward-fill NaN values in numpy array which seems somewhat related, especially if your array is not >= 0 like in this toy example, the approved answer there should be helpful.
EDIT
For future reference here's a version that removes the above >= 0 assumption. Should still be pretty fast, didn't benchmark it against the other methods though.
In [38]: def masked_cumsum(a, m):
...: idx = np.maximum.accumulate(np.where(m, 0, np.arange(m.size).reshape(m.shape)), axis=1)
...: c = np.cumsum(a, axis=-1)
...: return c - c[np.unravel_index(idx, m.shape)]
...:
In [43]: masked_cumsum(-a, m)
Out[43]:
array([[ 0. , -1. , -3. , -6. , 0. , -5. , -11. , 0. ],
[ 0. , 0. , 0. , -9. , -19. , -23.2, -27.5, 0. ],
[ 0. , 0. , -14. , -29. , 0. , 0. , 0. , 0. ],
[ 0. , -19. , -39. , 0. , 0. , 0. , -4.3, -27.3]])
Related
This question is related to both Apply FFT to a both channels of a stereo signal separately? and How to represent stereo audio data for FFT, but specifically for numpy's fft package.
How do I take the FFT of a (real-valued) FFT in numpy, and how to I get it back to the time domain?
If your stereo data is in two columns (i.e. left channel in column 0 and right channel in column 1), you can do it in a single operation - you only need to transpose the data first. To demonstrate:
Here are two channels of data, eight samples long. The left is a sine wave at f1 (it completes one cycle in the eight samples), the right is a sine wave at f2 (it completes two cycles):
s = array([[ 0.14285714, 0.14285714],
[ 0.12870984, 0.08906997],
[ 0.08906997, -0.0317887 ],
[ 0.0317887 , -0.12870984],
[-0.0317887 , -0.12870984],
[-0.08906997, -0.0317887 ],
[-0.12870984, 0.08906997],
[-0.14285714, 0.14285714],
[-0.12870984, 0.08906997],
[-0.08906997, -0.0317887 ],
[-0.0317887 , -0.12870984],
[ 0.0317887 , -0.12870984],
[ 0.08906997, -0.0317887 ],
[ 0.12870984, 0.08906997]])
If you transpose it (so left channel is row 0 and right channel is row 1), you can then pass it directly to np.fft.rfft() for conversions:
>>> s_t = s.transpose()
>>> s_t
array([[ 0.14285714, 0.12870984, 0.08906997, 0.0317887 , -0.0317887 ,
-0.08906997, -0.12870984, -0.14285714, -0.12870984, -0.08906997,
-0.0317887 , 0.0317887 , 0.08906997, 0.12870984],
[ 0.14285714, 0.08906997, -0.0317887 , -0.12870984, -0.12870984,
-0.0317887 , 0.08906997, 0.14285714, 0.08906997, -0.0317887 ,
-0.12870984, -0.12870984, -0.0317887 , 0.08906997]])
>>> f = np.fft.rfft(s_t)
>>> np.set_printoptions(suppress=True) # make it easier to read
>>> f
array([[ 0.+0.j, 1.+0.j, 0.+0.j, -0.-0.j, 0.-0.j, -0.+0.j, 0.+0.j, 0.+0.j],
[-0.+0.j, 0.+0.j, 1.+0.j, -0.-0.j, 0.-0.j, 0.+0.j, -0.+0.j, 0.+0.j]])
>>>
You can see from above that the left channel (row 0) has a '1' in bin 1 and the right channel (row 1) has a '1' in bin 2, which is what we'd expect. If you want your frequency data to be in column format, of course you can transpose that. And if you want just the real components, you can do that at the same time:
>>> f.transpose().real
array([[ 0., -0.],
[ 1., 0.],
[ 0., 1.],
[-0., -0.],
[ 0., 0.],
[-0., 0.],
[ 0., -0.],
[ 0., 0.]])
To prove that this is a proper transform of our original stereo data, compare this to s (above):
>>> np.fft.irfft(f).transpose().real
array([[ 0.14285714, 0.14285714],
[ 0.12870984, 0.08906997],
[ 0.08906997, -0.0317887 ],
[ 0.0317887 , -0.12870984],
[-0.0317887 , -0.12870984],
[-0.08906997, -0.0317887 ],
[-0.12870984, 0.08906997],
[-0.14285714, 0.14285714],
[-0.12870984, 0.08906997],
[-0.08906997, -0.0317887 ],
[-0.0317887 , -0.12870984],
[ 0.0317887 , -0.12870984],
[ 0.08906997, -0.0317887 ],
[ 0.12870984, 0.08906997]])
Say I have a np.array, e.g. a = np.array([np.nan, 2., 3., 4., 5., np.nan, np.nan, np.nan, 8., 9., 10., np.nan, 14., np.nan, 16.]). I want to obtain all sub-arrays with no np.nan value, i.e. my desired output is:
sub_arrays_list = [array([2., 3., 4., 5.]), array([8., 9., 10.]), array([14.]), array([16.])]
I kind of managed to solve this with the following but it is quite inefficient:
# OP's original two-pointer scan (kept for reference; indentation was lost
# in the paste and is reconstructed here — verify against the original).
# `start`/`end` delimit the current NaN-free run of `a`.
sub_arrays_list = []
start, end = 0, 0
while end < len(a) - 1:
    if np.isnan(a[end]).any():
        # current element is NaN: skip it and restart the window after it
        end += 1
        start = end
    else:
        # advance `end` to the end of the current NaN-free run
        while not np.isnan(a[end]).any():
            if end < len(a) - 1:
                end += 1
            else:
                # reached the end of the array: emit the final run
                sub_arrays_list.append(a[start:])
                break
        else:
            # inner loop ended on a NaN (no break): emit the run [start, end)
            sub_arrays_list.append(a[start:end])
        start = end
Would anyone please suggest a faster and better alternative to achieve this? Many thanks!
You can use:
# identify NaN values (`a` is the 1D array from the question)
m = np.isnan(a)
# array([ True, False, False, False, False, True, True, True, False,
# False, False, True, False, True, False])
# compute group ids: each NaN bumps the counter, so every NaN-free run
# shares a single group id
idx = np.cumsum(m)
# array([1, 1, 1, 1, 1, 2, 3, 4, 4, 4, 4, 5, 5, 6, 6])
# remove NaNs, get indices of first non-NaN per group and split there
out = np.split(a[~m], np.unique(idx[~m], return_index=True)[1][1:])
output:
[array([2., 3., 4., 5.]), array([ 8., 9., 10.]), array([14.]), array([16.])]
I ran into this problem with the np.max() function; my code is
err_qt = np.asarray(err_qt)
err_qt = np.max(err_qt, axis=1)
err_qt = err_qt * 180.0 / np.pi
The err_qt is a matrix and I print it.
[[{'K': array([[682.10083008, 0. , 239.82280391],
[ 0. , 683.28820801, 427.01684648],
[ 0. , 0. , 1. ]]), 'R': array([[-9.93854702e-01, -1.10692268e-01, -2.28596966e-04],
[ 1.16574434e-03, -8.40159629e-03, -9.99964026e-01],
[ 1.10686366e-01, -9.93819216e-01, 8.47900486e-03]]), 'T': array([-15.13970906, 1.19332025, 16.90097562]), 'q': array([0., 0., 0., 0.])}
{'K': array([[682.10083008, 0. , 239.82280391],
[ 0. , 683.28820801, 427.01684648],
[ 0. , 0. , 1. ]]), 'R': array([[-0.88778025, -0.45392103, -0.0761704 ],
[-0.04630574, 0.2527365 , -0.96642643],
[ 0.45793232, -0.85444716, -0.24539362]]), 'T': array([-5.09243763, -4.6725806 , 24.14722353]), 'q': array([0., 0., 0., 0.])}]
[{'K': array([[682.10083008, 0. , 239.82280391],
[ 0. , 683.28820801, 427.01684648],
[ 0. , 0. , 1. ]]), 'R': array([[-9.93854702e-01, -1.10692268e-01, -2.28596966e-04],
[ 1.16574434e-03, -8.40159629e-03, -9.99964026e-01],
[ 1.10686366e-01, -9.93819216e-01, 8.47900486e-03]]), 'T': array([-15.13970906, 1.19332025, 16.90097562]), 'q': array([0., 0., 0., 0.])}
...
How can I solve it?
I have a tensor like this :
A=array([[[[ 1.4033688 , -0.95642966, 1.0958625 , -0.64104766],
[-1.2625898 , -0.59444463, 0.72382635, -0.5195144 ],
[ 0.15248759, 2.4054656 , 1.0738292 , 1.0531213 ],
[ 1.2878437 , -1.1945801 , -1.0729346 , -1.6739473 ]],
[[ 0. , -0. , 0. , -0. ],
[-0. , -0. , 0. , -0. ],
[ 0. , 0. , 0. , 0. ],
[ 0. , -0. , -0. , -0. ]]],
[[[ 0. , -0. , 0. , -0. ],
[-0. , -0. , 0. , -0. ],
[ 0. , 0. , 0. , 0. ],
[ 0. , -0. , -0. , -0. ]],
[[ 1.4033688 , -0.95642966, 1.0958625 , -0.64104766],
[-1.2625898 , -0.59444463, 0.72382635, -0.5195144 ],
[ 0.15248759, 2.4054656 , 1.0738292 , 1.0531213 ],
[ 1.2878437 , -1.1945801 , -1.0729346 , -1.6739473 ]]]],
dtype=float32)
In particular,
A[0][0]=[[ 1.4033688 , -0.95642966, 1.0958625 , -0.64104766],
[-1.2625898 , -0.59444463, 0.72382635, -0.5195144 ],
[ 0.15248759, 2.4054656 , 1.0738292 , 1.0531213 ],
[ 1.2878437 , -1.1945801 , -1.0729346 , -1.6739473 ]]
A[1][0]=[[[ 0. , -0. , 0. , -0. ],
[-0. , -0. , 0. , -0. ],
[ 0. , 0. , 0. , 0. ],
[ 0. , -0. , -0. , -0. ]]
A[0][1]=[[[ 0. , -0. , 0. , -0. ],
[-0. , -0. , 0. , -0. ],
[ 0. , 0. , 0. , 0. ],
[ 0. , -0. , -0. , -0. ]]
A[1][1]=[[ 1.4033688 , -0.95642966, 1.0958625 , -0.64104766],
[-1.2625898 , -0.59444463, 0.72382635, -0.5195144 ],
[ 0.15248759, 2.4054656 , 1.0738292 , 1.0531213 ],
[ 1.2878437 , -1.1945801 , -1.0729346 , -1.6739473 ]]
I want to reshape A into an (8 , 8) array such that I keep the positions of the elements as if the array just removed the intermediate brackets. In other words, after reshaping A into the new array, let's call this new reshaped array A_reshaped, then I want A_reshaped to be equal to A in the following
A_reshaped[:4,:4]=A[0][0]
A_reshaped[4:8,0:4]=A[1][0]
A_reshaped[:4,4:8]=A[0][1]
A_reshaped[4:8,4:8]=A[1][1]
The straightforward command :
np.reshape(A,(8,8))[:4,:4]
does not work and it yields the following results :
array([[ 1.4033688 , -0.95642966, 1.0958625 , -0.64104766],
[ 0.15248759, 2.4054656 , 1.0738292 , 1.0531213 ],
[ 0. , -0. , 0. , -0. ],
[ 0. , 0. , 0. , 0. ]],
dtype=float32)
Any hint is appreciated.
Using gather_nd():
# Rearrange four stacked (2, 2) blocks into one (4, 4) matrix, preserving
# each block's spatial position, by building an explicit index tensor for
# tf.gather_nd: every output cell stores the (block, row, col) triple of
# the input element it should take.
import tensorflow as tf
input1 = tf.constant([[0, 1], [2, 3]])
input2 = tf.constant([[4, 5], [6, 7]])
input3 = tf.constant([[8, 9], [10, 11]])
input4 = tf.constant([[12, 13], [14, 15]])
# NOTE(review): `input` shadows the Python builtin; harmless in a snippet.
input = tf.stack([input1, input2, input3, input4])
# prepare indices for gather_nd
my_shape = tf.shape(input)[1:]        # (rows, cols) of one block
my_range = my_shape[0]*my_shape[1]    # elements per block
m = tf.range(0, my_range)
def get_inds(t, last_dim):
    # flat index -> (row, col) coordinates within one block
    return tf.convert_to_tensor([t // last_dim, t % last_dim])
inds = tf.map_fn(fn=lambda t: get_inds(t, my_shape[-1]), elems=m)
sh = tf.concat([my_shape, [2]], -1)
inds = tf.reshape(inds, sh)           # per-block (row, col) grid
# mat<k> holds block id k at every cell; prepending it to the (row, col)
# pair turns each entry into a full (block, row, col) gather index.
mat0 = tf.zeros(my_shape, dtype=tf.int32)
mat0 = mat0[..., tf.newaxis]
mat1 = mat0 + 1
mat2 = mat0 + 2
mat3 = mat0 + 3
mat0 = tf.concat([mat0, inds], -1)
mat1 = tf.concat([mat1, inds], -1)
mat2 = tf.concat([mat2, inds], -1)
mat3 = tf.concat([mat3, inds], -1)
# Lay the four index blocks out in their 2x2 spatial arrangement.
mat0 = tf.concat([mat0, mat1], 1)     # top row: blocks 0 | 1
mat1 = tf.concat([mat2, mat3], 1)     # bottom row: blocks 2 | 3
inds = tf.concat([mat0, mat1], 0)
res = tf.gather_nd(input, inds)
Preparing indices matrix is complicated. I didn't manage to simplify it
I have a 3x1 point vector representing the start point of some line, and a 3x1 point vector representing the end of some line. I would like to sample an arbitrary amount of points along the line connected by these two points.
np.linspace does exactly what I need but in 1 dimension. Is there a similar functionality that can be extended to 3 dimensions?
Thanks
My interpolation suggestion:
In [664]: p1=np.array([0,1,2])
In [665]: p2=np.array([10,9,8])
In [666]: l1 = np.linspace(0,1,11)
In [667]: l1
Out[667]: array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ])
In [668]: p1+(p2-p1)*l1[:,None]
Out[668]:
array([[ 0. , 1. , 2. ],
[ 1. , 1.8, 2.6],
[ 2. , 2.6, 3.2],
[ 3. , 3.4, 3.8],
[ 4. , 4.2, 4.4],
[ 5. , 5. , 5. ],
[ 6. , 5.8, 5.6],
[ 7. , 6.6, 6.2],
[ 8. , 7.4, 6.8],
[ 9. , 8.2, 7.4],
[10. , 9. , 8. ]])
Equivalent with 3 linspace calls
In [671]: np.stack([np.linspace(i,j,11) for i,j in zip(p1,p2)],axis=1)
Out[671]:
array([[ 0. , 1. , 2. ],
[ 1. , 1.8, 2.6],
[ 2. , 2.6, 3.2],
[ 3. , 3.4, 3.8],
[ 4. , 4.2, 4.4],
[ 5. , 5. , 5. ],
[ 6. , 5.8, 5.6],
[ 7. , 6.6, 6.2],
[ 8. , 7.4, 6.8],
[ 9. , 8.2, 7.4],
[10. , 9. , 8. ]])
A variation on this is:
np.c_[tuple(slice(i,j,11j) for i,j in zip(p1,p2))]
Really the same calculation, just different syntax.
outer can be used instead:
p1+np.outer(l1,(p2-p1))
But even that uses broadcasting. p1 is (3,) and the outer is (11,3), the result is (11,3).
@Brad's approach handles the end points differently
In [686]: np.append(p1[:, None], np.repeat((p2 - p1) / 10, [10, 10, 10]).reshape
...: (3, -1).cumsum(axis=1), axis=1)
Out[686]:
array([[ 0. , 1. , 2. , 3. , 4. , 5. , 6. , 7. , 8. , 9. , 10. ],
[ 1. , 0.8, 1.6, 2.4, 3.2, 4. , 4.8, 5.6, 6.4, 7.2, 8. ],
[ 2. , 0.6, 1.2, 1.8, 2.4, 3. , 3.6, 4.2, 4.8, 5.4, 6. ]])
In [687]: _.shape
Out[687]: (3, 11)
Not sure if np.linspace has changed in the 4 years since this question was asked, but you can pass array-like values as start and stop, and the results are the same as hpaulj's answer.
Example (using random points):
import numpy as np
# np.linspace accepts array-like start/stop, so a single call interpolates
# all three coordinates at once; the result has shape (10, 3).
startpts = np.array([0, 0, 0])
endpts = np.array([12, 3, 8])
out = np.linspace(start=startpts, stop=endpts, num=10)
returns the same thing as:
out = startpts+(endpts-startpts)*np.linspace(0,1,10)[:,np.newaxis]
And it can also be expanded to take in multiple pairs of points:
startpts = np.array([[0, 0, 0],[1, 2, 0],[2,3,4]])
endpts = np.array([[12,3, 8],[13,5, 8],[14,4,5]])
out = np.linspace(start=startpts, stop=endpts, num=10, axis=1)