Sample array along path - numpy

I have this binary image:
as a numpy array of 0 and 1 values.
I want to sample it at every 10th pixel along the path, like:
I know how to sample orthogonal objects by slicing the array, but I don't know what to do with an irregular shape to get an evenly distributed set of "points".

You can use OpenCV to find the path with cv2.findContours. Here is the demo code; x and y are the coordinates of the pixels on the path.
import numpy as np
import cv2
import pylab as pl
img = cv2.imread("img.png")
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, img = cv2.threshold(img, 127, 255, 0)
# invert so the shape is white, then trace its outer contour
r = cv2.findContours(255 - img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cc = r[0][0]  # first contour, shape (N, 1, 2); r is (contours, hierarchy) in OpenCV 2/4
x, y = cc[::10, 0, 0], cc[::10, 0, 1]
pl.figure(figsize=(10, 10))
pl.imshow(img, cmap="gray", interpolation="nearest")
pl.plot(x, y, "o")
Here is the output:
I just select one point in every 10 along the path, so the distances between neighboring points are not all the same. But you can use an interpolation method to convert the path to a smooth curve, and then find equidistant points.
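For example, here is a minimal sketch of that idea with scipy, assuming cc is the (N, 1, 2) contour found above; note that evenly spaced parameter values are only approximately evenly spaced in arc length:
import numpy as np
from scipy import interpolate
pts = cc[:, 0, :].astype(float)  # contour as (N, 2) points
# fit a periodic spline through the contour; splprep's default
# chord-length parameterization makes u roughly proportional to arc length
tck, u = interpolate.splprep([pts[:, 0], pts[:, 1]], s=0, per=True)
n_samples = len(pts) // 10
u_even = np.linspace(0, 1, n_samples, endpoint=False)
x_even, y_even = interpolate.splev(u_even, tck)  # approximately equidistant points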


Implementing TensorFlow CVAE but couldn't find out the meaning of the resulting image shape [duplicate]

I am using the nibabel library to load data from a .nii file. I read the documentation at http://nipy.org/nibabel/gettingstarted.html and found that:
This information is available without the need to load anything of the main image data into the memory. Of course there is also access to the image data as a NumPy array
This is my code to load the data and print its shape:
import numpy as np
import nibabel as nib
img = nib.load('example.nii')
data = img.get_data()
data = np.squeeze(data)
data = np.copy(data, order="C")
print(data.shape)
I got the result:
(128, 128, 64)
What is the order of the data shape? Is it width x height x depth? My input must be arranged as depth, height, width, so I would use input=data.transpose(2,0,1). Is that right? Thanks all.
Update: I found that NumPy reads the image in Height x Width x Depth order, as in the reference http://www.python-course.eu/images/axis.jpeg
OK, here's my take:
Using scipy.ndimage.imread('img.jpg', mode='RGB'), the resulting array will always have the order (H, W, D), i.e. (height, width, depth), because of the terminology that numpy uses for ndarrays (axis=0, axis=1, axis=2), or analogously (Y, X, Z) if one would like to visualize it in 3 dimensions.
# read image
In [21]: img = scipy.ndimage.imread('suza.jpg', mode='RGB')
# image shape as (H, W, D)
In [22]: img.shape
Out[22]: (634, 1366, 3)
# transpose to shape as (D, H, W)
In [23]: tr_img = img.transpose((-1, 0, 1))
In [24]: tr_img.shape
Out[24]: (3, 634, 1366)
If you consider img_shape as a tuple:
# index   (0, 1, 2)
img_shape = (634, 1366, 3)
# or index (-3, -2, -1)
Choose whichever is more convenient for you to remember.
NOTE: The scipy.ndimage.imread() API was removed in SciPy 1.2.0, so it is now recommended to use imageio.imread(), which reads the image and returns an Array, a subclass of numpy array, following the same conventions discussed above.
# read image
import imageio
img = imageio.imread('suza.jpg', format='jpg')
# convert the image to a numpy array
img_np = np.asarray(img)
PS: It should also be noted that libraries like TensorFlow (almost) follow the same convention as numpy.
tf.image.decode_jpeg() returns:
A Tensor of type uint8. 3-D with shape [height, width, channels]

Orientation of matplotlib 2d patches transformed to 3d with arbitrary normals do not match orientation given by quiver

I have followed
How can matplotlib 2D patches be transformed to 3D with arbitrary normals?
to transform a matplotlib 2d patch (circle) into a 3d patch with an arbitrary normal vector. However, when I plot this normal vector using quiver, it turns out that the patch and the vector are not perpendicular.
Here is my code (where I load the functions given in the second answer in the link above):
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import art3d
from mpl_toolkits.mplot3d import proj3d

def rotation_matrix(v1, v2):
    """
    Calculates the rotation matrix that changes v1 into v2.
    """
    v1 = np.asarray(v1, dtype=np.float64)  # accept tuples as well as arrays
    v2 = np.asarray(v2, dtype=np.float64)
    v1 /= np.linalg.norm(v1)
    v2 /= np.linalg.norm(v2)
    cos_angle = np.dot(v1, v2)
    d = np.cross(v1, v2)
    sin_angle = np.linalg.norm(d)
    if sin_angle == 0:
        M = np.identity(3) if cos_angle > 0. else -np.identity(3)
    else:
        d /= sin_angle
        eye = np.eye(3)
        ddt = np.outer(d, d)
        skew = np.array([[    0,  d[2], -d[1]],
                         [-d[2],     0,  d[0]],
                         [ d[1], -d[0],     0]], dtype=np.float64)
        M = ddt + cos_angle * (eye - ddt) + sin_angle * skew
    return M

def pathpatch_2d_to_3d(pathpatch, z=0, normal='z'):
    """
    Transforms a 2D Patch to a 3D patch using the given normal vector.
    The patch is projected into the XY plane, rotated about the origin
    and finally translated by z.
    """
    if type(normal) is str:  # Translate strings to normal vectors
        index = "xyz".index(normal)
        normal = np.roll((1, 0, 0), index)
    path = pathpatch.get_path()  # Get the path and the associated transform
    trans = pathpatch.get_patch_transform()
    path = trans.transform_path(path)  # Apply the transform
    pathpatch.__class__ = art3d.PathPatch3D  # Change the class
    pathpatch._code3d = path.codes  # Copy the codes
    pathpatch._facecolor3d = pathpatch.get_facecolor()  # Get the face color
    verts = path.vertices  # Get the vertices in 2D
    M = rotation_matrix(normal, (0, 0, 1))  # Get the rotation matrix
    pathpatch._segment3d = np.array([np.dot(M, (x, y, 0)) + (0, 0, z) for x, y in verts])

def pathpatch_translate(pathpatch, delta):
    """
    Translates the 3D pathpatch by the amount delta.
    """
    pathpatch._segment3d += delta

fig = plt.figure()
ax = fig.gca(projection='3d')
from matplotlib.patches import Circle, PathPatch
dirvec = (-0.420, -0.757, -0.500)
normal = dirvec
p = Circle((0, 0), 18., facecolor='g', alpha=.6)
ax.add_patch(p)
pathpatch_2d_to_3d(p, z=0, normal=normal)
pathpatch_translate(p, (0., 0., 0.))
ax.quiver(0., 0., 0., -0.420, -0.757, -0.500, length=50, color='g', lw=2, pivot='tail')
ax.set_xlim(-50., 50.); ax.set_ylim(-50., 50.); ax.set_zlim(-50., 50.)
The result is not far from perpendicular, but it is clearly not exactly 90 degrees as it should be. I would appreciate any help clarifying what is going on. Thank you.

Pandas Colormap - Changing line color [duplicate]

If you have a Colormap cmap, for example:
cmap = matplotlib.cm.get_cmap('Spectral')
How can you get a particular colour out of it between 0 and 1, where 0 is the first colour in the map and 1 is the last colour in the map?
Ideally, I would be able to get the middle colour in the map by doing:
>>> do_some_magic(cmap, 0.5) # Return an RGBA tuple
(0.1, 0.2, 0.3, 1.0)
You can do this with the code below; the code in your question was actually very close to what you needed. All you have to do is call the cmap object you have.
import matplotlib
cmap = matplotlib.cm.get_cmap('Spectral')
rgba = cmap(0.5)
print(rgba) # (0.99807766255210428, 0.99923106502084169, 0.74602077638401709, 1.0)
For values outside of the range [0.0, 1.0] it will return the under or over colour (respectively). By default these are the minimum and maximum colours within the range (so 0.0 and 1.0); this default can be changed with cmap.set_under() and cmap.set_over().
For "special" numbers such as np.nan and np.inf the default is to use the 0.0 value; this can be changed using cmap.set_bad() similarly to under and over as above.
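As a small illustration of those hooks (a sketch; the default colours and the availability of Colormap.copy() vary with your matplotlib version):
import numpy as np
import matplotlib
cmap = matplotlib.cm.get_cmap('Spectral').copy()  # copy so the registered map stays untouched
cmap.set_under('black')  # colour returned for values < 0.0
cmap.set_over('white')   # colour returned for values > 1.0
cmap.set_bad('gray')     # colour returned for np.nan / masked values
print(cmap(-0.1), cmap(1.1), cmap(np.nan))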
Finally, it may be necessary for you to normalize your data so that it conforms to the range [0.0, 1.0]. This can be done using matplotlib.colors.Normalize, as shown in the small example below, where the arguments vmin and vmax describe which numbers should be mapped to 0.0 and 1.0 respectively.
import matplotlib
norm = matplotlib.colors.Normalize(vmin=10.0, vmax=20.0)
print(norm(15.0)) # 0.5
A logarithmic normaliser (matplotlib.colors.LogNorm) is also available for data with a large range of values.
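For example, a minimal sketch of LogNorm (the vmin and vmax here are arbitrary):
import matplotlib
norm = matplotlib.colors.LogNorm(vmin=1.0, vmax=1000.0)
print(norm(10.0))   # ~0.333: one decade out of three
print(norm(100.0))  # ~0.667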
(Thanks to both Joe Kington and tcaswell for suggestions on how to improve the answer.)
To get integer RGBA values instead of floats, we can do:
rgba = cmap(0.5, bytes=True)
So, to simplify the code based on the answer from Ffisegydd, it would look like this:
import matplotlib
# import colormap
from matplotlib import cm
# normalize item number values to colormap
norm = matplotlib.colors.Normalize(vmin=0, vmax=1000)
# possible colormap values: viridis, jet, ...
rgba_color = cm.jet(norm(400), bytes=True)  # 400 is a value between 0 and 1000
I once ran into a similar situation where I needed n colors from a colormap so that I could assign each color to my data.
I have compiled code for this in a package called "mycolorpy".
You can pip install it using:
pip install mycolorpy
You can then do:
from mycolorpy import colorlist as mcp
import numpy as np
Example: to create a list of 5 hex strings from the cmap "winter":
color1=mcp.gen_color(cmap="winter",n=5)
print(color1)
Output:
['#0000ff', '#0040df', '#0080bf', '#00c09f', '#00ff80']
Another example, generating a list of 16 colors from the cmap bwr:
color2=mcp.gen_color(cmap="bwr",n=16)
print(color2)
Output:
['#0000ff', '#2222ff', '#4444ff', '#6666ff', '#8888ff', '#aaaaff', '#ccccff', '#eeeeff', '#ffeeee', '#ffcccc', '#ffaaaa', '#ff8888', '#ff6666', '#ff4444', '#ff2222', '#ff0000']
There is a python notebook with usage examples to better visualize this.
Say you want to generate a list of colors from a cmap that is normalized to given data. You can do that using:
import numpy as np
import matplotlib.pyplot as plt
a = np.random.randint(1000, size=200)
color1 = mcp.gen_color_normalized(cmap="seismic", data_arr=a)
plt.scatter(a, a, c=color1)
Output:
You can also reverse the colors using:
color1 = mcp.gen_color_normalized(cmap="seismic", data_arr=a, reverse=True)
plt.scatter(a, a, c=color1)
Output:
I had precisely this problem, but I needed sequential plots to have highly contrasting color. I was also doing plots with a common sub-plot containing reference data, so I wanted the color sequence to be consistently repeatable.
I initially tried simply generating colors randomly, reseeding the RNG before each plot. This worked OK (commented-out in code below), but could generate nearly indistinguishable colors. I wanted highly contrasting colors, ideally sampled from a colormap containing all colors.
I could have as many as 31 data series in a single plot, so I chopped the colormap into that many steps. Then I walked the steps in an order that ensured I wouldn't return to the neighborhood of a given color very soon.
My data is a highly irregular time series, so I wanted to see both the points and the lines, with the points having the 'opposite' color of the lines.
Given all the above, it was easiest to generate a dictionary with the relevant parameters for plotting the individual series, then expand it as part of the call.
Here's my code. Perhaps not pretty, but functional.
from matplotlib import cm
import numpy as np
cmap = cm.get_cmap('gist_rainbow')  # ('hsv') #('nipy_spectral')
max_colors = 31   # Constant, max number of series in any plot. Ideally prime.
color_number = 0  # Variable, incremented for each series.

def restart_colors():
    global color_number
    color_number = 0
    #np.random.seed(1)

def next_color():
    global color_number
    color_number += 1
    #color = tuple(np.random.uniform(0.0, 0.5, 3))
    color = cmap(((5 * color_number) % max_colors) / max_colors)
    return color

def plot_args():  # Invoked for each plot in a series as: '**(plot_args())'
    mkr = next_color()
    clr = (1 - mkr[0], 1 - mkr[1], 1 - mkr[2], mkr[3])  # Give line inverse of marker color
    return {
        "marker": "o",
        "color": clr,
        "mfc": mkr,
        "mec": mkr,
        "markersize": 0.5,
        "linewidth": 1,
    }
My context is JupyterLab and Pandas, so here's sample plot code:
restart_colors()  # Repeatable color sequence for every plot
fig, axs = plt.subplots(figsize=(15, 8))
plt.title("%s + T-meter" % name)
# Plot reference temperatures:
axs.set_ylabel("°C", rotation=0)
for s in ["T1", "T2", "T3", "T4"]:
    df_tmeter.plot(ax=axs, x="Timestamp", y=s, label="T-meter:%s" % s, **(plot_args()))
# Other series get their own axis labels
ax2 = axs.twinx()
ax2.set_ylabel(units)
for c in df_uptime_sensors:
    df_uptime[df_uptime["UUID"] == c].plot(
        ax=ax2, x="Timestamp", y=units, label="%s - %s" % (units, c), **(plot_args())
    )
fig.tight_layout()
plt.show()
The resulting plot may not be the best example, but it becomes more relevant when interactively zoomed in.
To build on the solutions from Ffisegydd and amaliammr, here's an example where we make a CSV representation of a custom colormap:
#!/usr/bin/env python3
import matplotlib
import numpy as np

vmin = 0.1
vmax = 1000
norm = matplotlib.colors.Normalize(np.log10(vmin), np.log10(vmax))
lognum = norm(np.log10([.5, 2., 10, 40, 150, 1000]))

cdict = {
    'red': (
        (0., 0, 0),
        (lognum[0], 0, 0),
        (lognum[1], 0, 0),
        (lognum[2], 1, 1),
        (lognum[3], 0.8, 0.8),
        (lognum[4], .7, .7),
        (lognum[5], .7, .7)
    ),
    'green': (
        (0., .6, .6),
        (lognum[0], 0.8, 0.8),
        (lognum[1], 1, 1),
        (lognum[2], 1, 1),
        (lognum[3], 0, 0),
        (lognum[4], 0, 0),
        (lognum[5], 0, 0)
    ),
    'blue': (
        (0., 0, 0),
        (lognum[0], 0, 0),
        (lognum[1], 0, 0),
        (lognum[2], 0, 0),
        (lognum[3], 0, 0),
        (lognum[4], 0, 0),
        (lognum[5], 1, 1)
    )
}

mycmap = matplotlib.colors.LinearSegmentedColormap('my_colormap', cdict, 256)
norm = matplotlib.colors.LogNorm(vmin, vmax)

colors = {}
count = 0
step_size = 0.001
for value in np.arange(vmin, vmax + step_size, step_size):
    count += 1
    print("%d/%d %f%%" % (count, vmax * (1. / step_size), 100. * count / (vmax * (1. / step_size))))
    rgba = mycmap(norm(value), bytes=True)
    color = (rgba[0], rgba[1], rgba[2])
    if color not in colors.values():
        colors[value] = color

print("value, red, green, blue")
for value in sorted(colors.keys()):
    rgb = colors[value]
    print("%s, %s, %s, %s" % (value, rgb[0], rgb[1], rgb[2]))
Colormaps come with their own normalize method, so if you have a plot already made you can access the color at a certain value.
import matplotlib.pyplot as plt
import numpy as np
cmap = plt.cm.viridis
cm = plt.pcolormesh(np.random.randn(10, 10), cmap=cmap)
print(cmap(cm.norm(2.2)))
For a quick and dirty solution, you can use the map directly.
Or you can just do what @amaliammr says.
data_size = 23  # range 0..23
colours = plt.cm.turbo
colour_normal = colours.N / data_size
for i in range(data_size):
    col = colours.colors[int(i * colour_normal)]

About using tf.image.crop_and_resize

I'm working on the ROI pooling layer, which is used in Fast R-CNN, and I'm used to using TensorFlow. I found that tf.image.crop_and_resize can act as the ROI pooling layer.
But I have tried many times and cannot get the result I expected. Or is the true result exactly what I got?
Here is my code:
import cv2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
img_path = r'F:\IMG_0016.JPG'
img = cv2.imread(img_path)  # img shape is [580, 580, 3]
img = img.reshape([1, 580, 580, 3])
img = img.astype(np.float32)
#img = np.concatenate([img, img], axis=0)
img_ = tf.Variable(img)
boxes = tf.Variable([[100, 100, 300, 300], [0.5, 0.1, 0.9, 0.5]])
box_ind = tf.Variable([0, 0])
crop_size = tf.Variable([100, 100])
#b = tf.image.crop_and_resize(img, [[0.5, 0.1, 0.9, 0.5]], [0], [50, 50])
c = tf.image.crop_and_resize(img_, boxes, box_ind, crop_size)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
a = c.eval(session=sess)
plt.imshow(a[0])
plt.imshow(a[1])
Here are my original image and the results: a0, a1.
If I was doing it wrong, can anyone teach me how to use this function? Thanks.
Actually, there's no problem with TensorFlow here.
From the documentation of tf.image.crop_and_resize (emphasis is mine):
boxes: A Tensor of type float32. A 2-D tensor of shape [num_boxes, 4].
The i-th row of the tensor specifies the coordinates of a box in the
box_ind[i] image and is specified in normalized coordinates [y1, x1,
y2, x2]. A normalized coordinate value of y is mapped to the image
coordinate at y * (image_height - 1), so as the [0, 1] interval of
normalized image height is mapped to [0, image_height - 1] in image
height coordinates. We do allow y1 > y2, in which case the sampled
crop is an up-down flipped version of the original image. The width
dimension is treated similarly. Normalized coordinates outside the [0,
1] range are allowed, in which case we use extrapolation_value to
extrapolate the input image values.
The boxes argument needs normalized coordinates. That's why you get a black box with your first set of coordinates [100,100,300,300] (not normalized, and no extrapolation value provided), but not with your second set [0.5,0.1,0.9,0.5].
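For illustration, here is a minimal sketch of normalizing the first box, assuming [100, 100, 300, 300] was meant as [y1, x1, y2, x2] in pixels on the 580x580 image (using the y * (image_height - 1) mapping quoted above):
import numpy as np
height, width = 580, 580
y1, x1, y2, x2 = 100, 100, 300, 300
# pixel coordinates -> normalized [y1, x1, y2, x2] as crop_and_resize expects
box = np.array([y1 / (height - 1), x1 / (width - 1),
                y2 / (height - 1), x2 / (width - 1)], dtype=np.float32)
# box is now roughly [0.173, 0.173, 0.518, 0.518]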
As for why matplotlib shows you gibberish on your second attempt: it's just because you're using the wrong datatype.
Quoting the matplotlib documentation of plt.imshow (emphasis is mine):
All values should be in the range [0 .. 1] for floats or [0 .. 255]
for integers. Out-of-range values will be clipped to these bounds.
As you're using floats outside the [0, 1] range, matplotlib is bounding your values to 1. That's why you get those colored pixels (either solid red, solid green or solid blue, or a mixing of these). Cast your array to uint8 to get an image that makes sense.
plt.imshow(a[1].astype(np.uint8))
Edit:
As requested, I will dive a bit more into tf.image.crop_and_resize.
[When providing non-normalized coordinates and no extrapolation value], why do I just get a blank result?
Quoting the doc :
Normalized coordinates outside the [0, 1] range are allowed, in which
case we use extrapolation_value to extrapolate the input image values.
So, normalized coordinates outside [0, 1] are allowed. But they still need to be normalized!
With your example, [100, 100, 300, 300], the coordinates you provide make the red square. Your original image is the little green dot in the upper left corner! The default value of the argument extrapolation_value is 0, so the values outside the frame of the original image are inferred as [0, 0, 0], hence the black.
But if your use case needs another value, you can provide it. The pixels will take an RGB value of extrapolation_value % 256 on each channel. This option is useful if the zone you need to crop is not fully included in your original image. (A possible use case would be sliding windows, for example.)
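For instance (a sketch reusing the variables from the question; the mid-gray fill value is arbitrary):
# fill out-of-frame pixels with mid-gray instead of the default black
c = tf.image.crop_and_resize(img_, boxes, box_ind, crop_size,
                             extrapolation_value=128.0)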
It seems that tf.image.crop_and_resize expects pixel values in the range [0,1].
Changing your code to
test = tf.image.crop_and_resize(image=image_np_expanded/255., ...)
solved the problem for me.
Yet another variant is to use the tf.image.central_crop function.
Below is a concrete implementation of the tf.image.crop_and_resize API (tf version 1.14):
import tensorflow as tf
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np

tf.enable_eager_execution()

def single_data_2(img_path):
    img = tf.read_file(img_path)
    img = tf.image.decode_bmp(img, channels=1)
    img_4d = tf.expand_dims(img, axis=0)
    processed_img = tf.image.crop_and_resize(img_4d,
                                             boxes=[[0.4529, 0.72, 0.4664, 0.7358]],
                                             crop_size=[64, 64], box_ind=[0])
    processed_img_2 = tf.squeeze(processed_img, 0)
    raw_img_3 = tf.squeeze(img_4d, 0)
    return raw_img_3, processed_img_2

def plot_two_image(raw, processed):
    fig = plt.figure(figsize=(35, 35))
    raw_ = fig.add_subplot(1, 2, 1)
    raw_.set_title('Raw Image')
    raw_.imshow(raw, cmap='gray')
    processed_ = fig.add_subplot(1, 2, 2)
    processed_.set_title('Processed Image')
    processed_.imshow(processed, cmap='gray')

img_path = 'D:/samples/your_bmp_image.bmp'
raw_img, process_img = single_data_2(img_path)
print(raw_img.dtype, process_img.dtype)
print(raw_img.shape, process_img.shape)
raw_img = tf.squeeze(raw_img, -1)
process_img = tf.squeeze(process_img, -1)
print(raw_img.dtype, process_img.dtype)
print(raw_img.shape, process_img.shape)
plot_two_image(raw_img, process_img)
Below is my working code; the output image is not black, so this may be of help to someone:
for idx in range(len(bboxes)):
    if bscores[idx] >= Threshold:
        # Region of Interest
        y_min = int(bboxes[idx][0] * im_height)
        x_min = int(bboxes[idx][1] * im_width)
        y_max = int(bboxes[idx][2] * im_height)
        x_max = int(bboxes[idx][3] * im_width)

        class_label = category_index[int(bclasses[idx])]['name']
        class_labels.append(class_label)
        bbox.append([x_min, y_min, x_max, y_max, class_label, float(bscores[idx])])

        # Crop Image - Working Code
        cropped_image = tf.image.crop_to_bounding_box(image, y_min, x_min, y_max - y_min, x_max - x_min).numpy().astype(np.uint8)

        # encode_jpeg encodes a tensor of type uint8 to string
        output_image = tf.image.encode_jpeg(cropped_image)
        # decode_jpeg decodes the string tensor to a tensor of type uint8
        #output_image = tf.image.decode_jpeg(output_image)

        score = bscores[idx] * 100
        file_name = tf.constant(OUTPUT_PATH + image_name[:-4] + '_' + str(idx) + '_' + class_label + '_' + str(round(score)) + '%' + '_' + os.path.splitext(image_name)[1])
        writefile = tf.io.write_file(file_name, output_image)

Removing numpy meshgrid points outside of a Shapely polygon

I have a 10 x 10 grid, and I would like to remove the points that fall outside of a shapely Polygon:
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Polygon, Point
from descartes import PolygonPatch
gridX, gridY = np.mgrid[0.0:10.0, 0.0:10.0]
poly = Polygon([[1, 1], [1, 7], [7, 7], [7, 1]])
# plot original figure
fig = plt.figure()
ax = fig.add_subplot(111)
polyp = PolygonPatch(poly)
ax.add_patch(polyp)
ax.scatter(gridX, gridY)
plt.show()
Here is the resulting figure:
And what I want the end result to look like:
I know that I can reshape the array to a 100 x 2 array of grid points:
stacked = np.dstack([gridX, gridY])
reshaped = stacked.reshape(100, 2)
I can easily check whether a point lies within the polygon:
for i in reshaped:
    if Point(i).within(poly):
        print(True)
But I am having trouble taking this information and using it to modify the original grid.
You're pretty close already; instead of printing True, you could just append the points to a list:
output = []
for i in reshaped:
    if Point(i).within(poly):
        output.append(i)
output = np.array(output)
x, y = output[:, 0], output[:, 1]
It seems that Point.within doesn't consider points that lie on the edge of the polygon to be "within" it though.
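If you also want those edge points, a sketch using intersects(), which unlike within() does include the boundary:
# intersects() counts points on the polygon's boundary as inside
output = np.array([p for p in reshaped if poly.intersects(Point(p))])
x, y = output[:, 0], output[:, 1]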