How can I plot the label on the line of a lineplot? - matplotlib

I would like to plot labels on a line of a lineplot in matplotlib.
Minimal example:
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.ndimage import gaussian_filter1d

sns.set_style("whitegrid")
sns.set_palette(sns.color_palette("Greens", 8))

for i in range(8):
    # Create data
    y = np.roll(np.cumsum(np.random.randn(1000, 1)),
                np.random.randint(0, 1000))
    y = gaussian_filter1d(y, 10)
    plt.plot(y, label=str(i))
plt.legend()
plt.show()
This generates a plot with an ordinary legend box; instead, I would prefer the labels to appear directly on the lines themselves.

Maybe a bit hacky, but does this solve your problem?
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.ndimage import gaussian_filter1d

sns.set_style("whitegrid")
sns.set_palette(sns.color_palette("Greens", 8))

for i in range(8):
    # Create data
    y = np.roll(np.cumsum(np.random.randn(1000, 1)),
                np.random.randint(0, 1000))
    y = gaussian_filter1d(y, 10)
    p = plt.plot(y, label=str(i))
    color = p[0].get_color()
    # Draw the label on the line itself: a white disc as background,
    # then the label text as a marker on top of it
    for x in [250, 500, 750]:
        y2 = y[x]
        plt.plot(x, y2, 'o', color='white', markersize=9)
        plt.plot(x, y2, marker="$%s$" % str(i), color=color,
                 markersize=7)
plt.legend()
plt.show()
Here's the result I get:
Edit: I gave it a little more thought and came up with a solution that automatically tries to find the best position for each label, in order to avoid placing labels at x-values where two lines are very close to each other (which could lead to overlap between the labels):
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.ndimage import gaussian_filter1d

sns.set_style("whitegrid")
sns.set_palette(sns.color_palette("Greens", 8))


# -----------------------------------------------------------------------------

def inline_legend(lines, n_markers=1):
    """
    Take a list containing the lines of a plot (typically the result of
    calling plt.gca().get_lines()), and add the labels for those lines on the
    lines themselves; more precisely, put each label n_markers times on the
    line.
    [Source of problem: https://stackoverflow.com/q/43573623/4100721]
    """
    from scipy.interpolate import interp1d
    from math import fabs

    def chunkify(a, n):
        """
        Split list a into n approximately equally sized chunks and return the
        indices (start/end) of those chunks.
        [Idea: Props to http://stackoverflow.com/a/2135920/4100721 :)]
        """
        k, m = divmod(len(a), n)
        return list([(i * k + min(i, m), (i + 1) * k + min(i + 1, m))
                     for i in range(n)])

    # Calculate linear interpolations of every line. This is necessary to
    # compare the values of the lines if they use different x-values
    interpolations = [interp1d(_.get_xdata(), _.get_ydata())
                      for _ in lines]

    # Loop over all lines
    for idx, line in enumerate(lines):

        # Get basic properties of the current line
        label = line.get_label()
        color = line.get_color()
        x_values = line.get_xdata()
        y_values = line.get_ydata()

        # Get all lines that are not the current line, as well as the
        # functions that are linear interpolations of them
        other_lines = lines[0:idx] + lines[idx+1:]
        other_functions = interpolations[0:idx] + interpolations[idx+1:]

        # Split the x-values into chunks to get regions in which to put
        # labels. Creating 3 times as many chunks as requested and using only
        # every third ensures that no two labels for the same line are too
        # close to each other.
        chunks = list(chunkify(line.get_xdata(), 3*n_markers))[::3]

        # For each chunk, find the optimal position of the label
        for chunk_nr in range(n_markers):

            # Start and end index of the current chunk
            chunk_start = chunks[chunk_nr][0]
            chunk_end = chunks[chunk_nr][1]

            # For the given chunk, loop over all x-values of the current line,
            # evaluate the value of every other line at every such x-value,
            # and store the result.
            other_values = [[fabs(y_values[int(x)] - f(x)) for x in
                             x_values[chunk_start:chunk_end]]
                            for f in other_functions]

            # Now loop over these values and find the minimum, i.e. for every
            # x-value in the current chunk, find the distance to the closest
            # other line ("closest" meaning abs_value(value(current line at x)
            # - value(other lines at x)) being at its minimum)
            distances = [min([_ for _ in [row[i] for row in other_values]])
                         for i in range(len(other_values[0]))]

            # Now find the value of x in the current chunk where the distance
            # is maximal, i.e. the best position for the label, and add the
            # necessary offset to take into account that the index obtained
            # from "distances" is relative to the current chunk
            best_pos = distances.index(max(distances)) + chunks[chunk_nr][0]

            # Short notation for the position of the label
            x = best_pos
            y = y_values[x]

            # Actually plot the label onto the line at the calculated position
            plt.plot(x, y, 'o', color='white', markersize=9)
            plt.plot(x, y, marker="$%s$" % label, color=color,
                     markersize=7)


# -----------------------------------------------------------------------------

for i in range(8):
    # Create data
    y = np.roll(np.cumsum(np.random.randn(1000, 1)),
                np.random.randint(0, 1000))
    y = gaussian_filter1d(y, 10)
    plt.plot(y, label=str(i))
inline_legend(plt.gca().get_lines(), n_markers=3)
plt.show()
Example output of this solution (note how the x-positions of the labels are no longer all the same):
If one wants to avoid the use of scipy.interpolate.interp1d, one might consider a solution where, for a given x-value of line A, one finds the x-value of line B that is closest to it. I think this might be problematic, though, if the lines use very different and/or sparse grids?
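For illustration, here is a minimal sketch of that interpolation-free variant (the helper nearest_distance is hypothetical, not part of the code above): for each x-sample of line A, it uses the y-value of line B at the B-sample whose x is closest, instead of interpolating.
import numpy as np

def nearest_distance(xa, ya, xb, yb):
    """For every point of line A, the y-distance to the nearest-x sample
    of line B (no interpolation)."""
    # index of the B-sample whose x is closest to each x of A
    idx = np.abs(xb[None, :] - xa[:, None]).argmin(axis=1)
    return np.abs(ya - yb[idx])

# usage: place the label where line A is farthest from line B
xa = np.linspace(0, 10, 100); ya = np.sin(xa)
xb = np.linspace(0, 10, 37);  yb = np.cos(xb)  # a sparser grid
best = nearest_distance(xa, ya, xb, yb).argmax()
print(xa[best], ya[best])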

Related

Connecting points without continuous boundaries

I want to plot trajectories without connecting the points across boundaries. Attached is an image of what I mean.
My code:
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# import polygon as poly

x, y = np.loadtxt('c55.txt', delimiter=' ', unpack=True)
plt.plot(x, y, '.', color='k', markersize=0.5)
#for i in range(1, len(x), 1):
#    if abs(x[i]-x[i+1]) > 300:
plt.plot(x, y, '-o', color='red', ms=5, label="Window 1")
plt.show()
Your x-values repeatedly run from low to high, and plt.plot connects all points in the order they appear in the x and y arrays.
The following approach first looks for the indices where the x-values restart (i.e., where the difference between successive x's isn't positive).
These indices are then used to draw the separate curves.
import matplotlib.pyplot as plt
import numpy as np

# first create some test data, a bit similar to the given ones
x = np.tile(np.linspace(-3, 3, 20), 4)
y = np.cos(x) + np.repeat(np.linspace(-3, 3, 4), 20)

fig, axs = plt.subplots(ncols=2, figsize=(15, 4))

# plotting the test data without change
axs[0].plot(x, y, '-o')

bounds = np.argwhere(np.diff(x) < 0).squeeze()  # find the boundaries
bounds = np.concatenate([[0], bounds + 1, [len(x)]])  # additional boundaries for the first and last point
for b0, b1 in zip(bounds[:-1], bounds[1:]):
    axs[1].plot(x[b0:b1], y[b0:b1], '-o')  # use '-ro' for only red curves
plt.show()
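The same idea can also be written with np.split, which cuts both arrays at the restart indices in one go; a minimal sketch using the same test data as above:
import matplotlib.pyplot as plt
import numpy as np

x = np.tile(np.linspace(-3, 3, 20), 4)
y = np.cos(x) + np.repeat(np.linspace(-3, 3, 4), 20)

# cut both arrays wherever the x-values restart
cuts = np.argwhere(np.diff(x) < 0).squeeze() + 1
for xs, ys in zip(np.split(x, cuts), np.split(y, cuts)):
    plt.plot(xs, ys, '-o')
plt.show()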

Separate halves of split violinplot to compare tail data

Is there a way to physically separate the two halves of a "split" seaborn violinplot (or other type of violinplot)? I'm trying to compare two different treatments, but there is a skinny tail, and it's difficult (impossible) to tell whether one or both halves of the split violin go up all the way to the tip of the tail.
One thought I had was that if the two halves were slightly separated instead of right up next to each other, then it would be easy to read the data accurately.
Here is my code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import seaborn as sns

# load data into a dataframe
df1 = pd.read_excel('Modeling analysis charts.xlsx',
                    sheet_name='lmps',
                    usecols=[0, 5],
                    skiprows=0,
                    header=1)
# identify which dispatch run this data is from
df1['Run'] = 'Scheduling'

# load data into a dataframe
df2 = pd.read_excel('Modeling analysis charts.xlsx',
                    sheet_name='lmps',
                    usecols=[7, 12],
                    skiprows=0,
                    header=1)
# identify which dispatch run this data is from
df2['Run'] = 'Pricing'

# drop rows with missing data
df1 = df1.dropna(how='any')
df2 = df2.dropna(how='any')

# merge data from different runs
df = pd.concat([df1, df2])

# LMPs are all opposite of actual values, so correct that
df['LMP'] = -df['LMP']

fontsize = 10
style.use('fivethirtyeight')
fig, axes = plt.subplots()
sns.violinplot(x='Scenario', y='LMP', hue='Run', split=True, data=df,
               inner=None, scale='area', bw=0.2, cut=0, linewidth=0.5,
               ax=axes)
axes.set_title('Day Ahead Market')
#axes.set_ylim([-15, 90])
axes.yaxis.grid(True)
axes.set_xlabel('Scenario')
axes.set_ylabel('LMP ($/MWh)')
#plt.savefig('DAMarket.pdf', bbox_inches='tight')
plt.show()
EDIT: For historical reasons this is the accepted answer, but have a look at @conchoecia's more recent and much cleaner implementation below.
Cool idea. The basic idea of my implementation is to draw the whole thing, grab the patches corresponding to the two half-violins, and then shift the paths of those patches left or right. The code is hopefully self-explanatory; otherwise, let me know in the comments.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.collections
import seaborn as sns
import pandas as pd

# create some data
n = 10000  # number of samples
c = 5      # classes
y = np.random.randn(n)
x = np.random.randint(0, c, size=n)
z = np.random.rand(n) > 0.5  # sub-class
data = pd.DataFrame(dict(x=x, y=y, z=z))

# initialise new axis;
# if there is random other crap on the axis (e.g. a previous plot),
# the hacky code below won't work
fig, ax = plt.subplots(1, 1)

# plot
inner = None  # Note: 'box' is default
ax = sns.violinplot(data=data, x='x', y='y', hue='z', split=True,
                    inner=inner, ax=ax)

# offset stuff
delta = 0.02
for ii, item in enumerate(ax.collections):
    # axis contains PolyCollections and PathCollections
    if isinstance(item, matplotlib.collections.PolyCollection):
        # get path
        path, = item.get_paths()
        vertices = path.vertices

        # shift x-coordinates of path
        if not inner:
            if ii % 2:  # -> to right
                vertices[:, 0] += delta
            else:  # -> to left
                vertices[:, 0] -= delta
        else:  # inner='box' adds another type of PolyCollection
            if ii % 3 == 0:
                vertices[:, 0] -= delta
            elif ii % 3 == 1:
                vertices[:, 0] += delta
            else:  # ii % 3 == 2
                pass

plt.show()
I expanded on @Paul's answer above and made it more robust. It now supports both vertical and horizontal orientation, and I implemented it to work with inner='sticks' since that fits my application.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.collections
import seaborn as sns
import pandas as pd

def offset_violinplot_halves(ax, delta, width, inner, direction):
    """
    This function offsets the halves of a violinplot to compare tails
    or to plot something else in between them. This is specifically designed
    for violinplots by Seaborn that use the option `split=True`.

    For lines, this works on the assumption that Seaborn plots everything with
    integers as the center.

    Args:
     <ax>        The axis that contains the violinplots.
     <delta>     The amount of space to put between the two halves of the
                 violinplot.
     <width>     The total width of the violinplot, as passed to
                 sns.violinplot().
     <inner>     The type of inner in the seaborn violinplot.
     <direction> Orientation of violinplot: 'horizontal' or 'vertical'.

    Returns:
     - NA, modifies the <ax> directly
    """
    # offset stuff
    if inner == 'sticks':
        lines = ax.get_lines()
        for line in lines:
            if direction == 'horizontal':
                data = line.get_ydata()
                if int(data[0] + 1) / int(data[1] + 1) < 1:
                    # type is top, move neg, direction backwards for horizontal
                    data -= delta
                else:
                    # type is bottom, move pos, direction backwards for horizontal
                    data += delta
                line.set_ydata(data)
            elif direction == 'vertical':
                data = line.get_xdata()
                if int(data[0] + 1) / int(data[1] + 1) < 1:
                    # type is left, move neg
                    data -= delta
                else:
                    # type is right, move pos
                    data += delta
                line.set_xdata(data)

    for ii, item in enumerate(ax.collections):
        # axis contains PolyCollections and PathCollections
        if isinstance(item, matplotlib.collections.PolyCollection):
            # get path
            path, = item.get_paths()
            vertices = path.vertices
            half_type = _wedge_dir(vertices, direction)
            # shift coordinates of the path
            if half_type in ['top', 'bottom']:
                if inner in ["sticks", None]:
                    if half_type == 'top':  # -> up
                        vertices[:, 1] -= delta
                    elif half_type == 'bottom':  # -> down
                        vertices[:, 1] += delta
            elif half_type in ['left', 'right']:
                if inner in ["sticks", None]:
                    if half_type == 'left':  # -> left
                        vertices[:, 0] -= delta
                    elif half_type == 'right':  # -> right
                        vertices[:, 0] += delta

def _wedge_dir(vertices, direction):
    """
    Args:
      <vertices>  The vertices from matplotlib.collections.PolyCollection
      <direction> Direction must be 'horizontal' or 'vertical' according to
                  how your plot is laid out.
    Returns:
      - a string in ['top', 'bottom', 'left', 'right'] that determines where
        the half of the violinplot is relative to the center.
    """
    if direction == 'horizontal':
        result = (direction, len(set(vertices[1:5, 1])) == 1)
    elif direction == 'vertical':
        result = (direction, len(set(vertices[-3:-1, 0])) == 1)
    outcome_key = {('horizontal', True): 'bottom',
                   ('horizontal', False): 'top',
                   ('vertical', True): 'left',
                   ('vertical', False): 'right'}
    # if the first couple x/y values after the start are the same, it
    # is the input direction. If not, it is the opposite
    return outcome_key[result]

# create some data
n = 100  # number of samples
c = ['cats', 'rats', 'bears', 'pears', 'snares']  # classes
y = np.random.randn(n)
x = np.random.choice(c, size=n)
z = np.random.rand(n) > 0.5  # sub-class
data = pd.DataFrame(dict(x=x, y=y, z=z))

# initialise new axes
fig, (ax1, ax2) = plt.subplots(2)

inner = "sticks"  # Note: 'box' is default
width = 0.75
delta = 0.05
final_width = width - delta

sns.violinplot(data=data, x='y', y='x',
               split=True, hue='z',
               ax=ax1, inner='sticks',
               bw=0.2)
sns.violinplot(data=data, x='x', y='y',
               split=True, hue='z',
               ax=ax2, inner='sticks',
               bw=0.2)
offset_violinplot_halves(ax1, delta, final_width, inner, 'horizontal')
offset_violinplot_halves(ax2, delta, final_width, inner, 'vertical')
plt.show()

Discrete Color Bar with Tick labels in between colors

I am trying to plot some data with a discrete color bar. I was following this example (https://gist.github.com/jakevdp/91077b0cae40f8f8244a), but it does not map one-to-one onto my case because the spacing differs: the example's values increase by 1, while my data increases by 0.5. You can see the output from my code below. Any help with this would be appreciated; I know I am missing something key here but can't figure it out.
import matplotlib.pylab as plt
import numpy as np

def discrete_cmap(N, base_cmap=None):
    """Create an N-bin discrete colormap from the specified input map"""
    # Note that if base_cmap is a string or None, you can simply do
    #    return plt.cm.get_cmap(base_cmap, N)
    # The following works for string, None, or a colormap instance:
    base = plt.cm.get_cmap(base_cmap)
    color_list = base(np.linspace(0, 1, N))
    cmap_name = base.name + str(N)
    return base.from_list(cmap_name, color_list, N)

num = 11

x = np.random.randn(40)
y = np.random.randn(40)
c = np.random.randint(num, size=40)

plt.figure(figsize=(10, 7.5))
plt.scatter(x, y, c=c, s=50, cmap=discrete_cmap(num, 'jet'))
plt.colorbar(ticks=np.arange(0, 5.5, 0.5))
plt.clim(-0.5, num - 0.5)
plt.show()
Not sure what version of matplotlib/pyplot introduced this, but plt.get_cmap now supports an int argument specifying the number of colors you want, for discrete colormaps.
This automatically results in the colorbar being discrete.
By the way, pandas has an even better handling of the colorbar; a sketch follows the example below.
import numpy as np
from matplotlib import pyplot as plt
plt.style.use('ggplot')
# remove if not using Jupyter/IPython
%matplotlib inline

# choose number of clusters and number of points in each cluster
n_clusters = 5
n_samples = 20

# there are fancier ways to do this
clusters = np.array([k for k in range(n_clusters) for i in range(n_samples)])

# generate the coordinates of the center
# of each cluster by shuffling a range of values
clusters_x = np.arange(n_clusters)
clusters_y = np.arange(n_clusters)
np.random.shuffle(clusters_x)
np.random.shuffle(clusters_y)

# get dicts like cluster -> center coordinate
x_dict = dict(enumerate(clusters_x))
y_dict = dict(enumerate(clusters_y))

# get coordinates of cluster center for each point
x = np.array(list(x_dict[k] for k in clusters)).astype(float)
y = np.array(list(y_dict[k] for k in clusters)).astype(float)

# add noise
x += np.random.normal(scale=0.5, size=n_clusters*n_samples)
y += np.random.normal(scale=0.5, size=n_clusters*n_samples)

### Finally, plot
fig, ax = plt.subplots(figsize=(12, 8))

# get discrete colormap
cmap = plt.get_cmap('viridis', n_clusters)
# scatter points
scatter = ax.scatter(x, y, c=clusters, cmap=cmap)
# scatter cluster centers
ax.scatter(clusters_x, clusters_y, c='red')
# add colorbar
cbar = plt.colorbar(scatter)
# set ticks locations (not very elegant, but it works):
# - shift by 0.5
# - scale so that the last value is at the center of the last color
tick_locs = (np.arange(n_clusters) + 0.5)*(n_clusters-1)/n_clusters
cbar.set_ticks(tick_locs)
# set tick labels (as before)
cbar.set_ticklabels(np.arange(n_clusters))
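As for the pandas remark above, a minimal sketch (the DataFrame and its column names are made up for illustration): DataFrame.plot.scatter draws and labels the colorbar itself when c= names a column.
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt

n_clusters, n_samples = 5, 20
df = pd.DataFrame({
    'x': np.random.randn(n_clusters * n_samples),
    'y': np.random.randn(n_clusters * n_samples),
    'cluster': np.repeat(np.arange(n_clusters), n_samples),
})
# pandas adds the colorbar automatically; a discrete colormap
# makes the colorbar discrete as well
df.plot.scatter(x='x', y='y', c='cluster',
                cmap=plt.get_cmap('viridis', n_clusters))
plt.show()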
OK, so this is the hack I found for my own question. I am sure there is a better way to do this, but this works for what I am doing. Feel free to suggest a better approach.
import numpy as np
import matplotlib.pylab as plt

def discrete_cmap(N, base_cmap=None):
    """Create an N-bin discrete colormap from the specified input map"""
    # Note that if base_cmap is a string or None, you can simply do
    #    return plt.cm.get_cmap(base_cmap, N)
    # The following works for string, None, or a colormap instance:
    base = plt.cm.get_cmap(base_cmap)
    color_list = base(np.linspace(0, 1, N))
    cmap_name = base.name + str(N)
    return base.from_list(cmap_name, color_list, N)

num = 11

plt.figure(figsize=(10, 7.5))

x = np.random.randn(40)
y = np.random.randn(40)
c = np.random.randint(num, size=40)

plt.scatter(x, y, c=c, s=50, cmap=discrete_cmap(num, 'jet'))
cbar = plt.colorbar(ticks=range(num))
plt.clim(-0.5, num - 0.5)
cbar.ax.set_yticklabels(np.arange(0.0, 5.5, 0.5))
plt.show()
For some reason I cannot upload the image associated with the code above (I get an error when uploading), so I cannot show the final result. But simply put: I set the tick labels on the axes of a vertical color bar, passed in the labels I want, and it produced the correct output.

Creating subplots from multiple text files in a loop

So I have been trying to load text files onto multiple subplots, but the plots always seem to come up as one text file. Can anyone point me in the right direction as to how to go about this?
import numpy as np
import matplotlib.pyplot as plt

RiverData1 = np.loadtxt('Gray1961.txt', skiprows=2)
RiverData2 = np.loadtxt('Hack1957.txt', skiprows=2)
RiverData3 = np.loadtxt('Rignon1996.txt', skiprows=2)
RiverData4 = np.loadtxt('Robert1990.txt', skiprows=2)
RiverData5 = np.loadtxt('Langbein1947_p145.txt', skiprows=2)
RiverData6 = np.loadtxt('Langbein1947_p146.txt', skiprows=2)
RiverData7 = np.loadtxt('Langbein1947_p149.txt', skiprows=2)
RiverData8 = np.loadtxt('Langbein1947_p152.txt', skiprows=2)

plotnums = 1
for plotnums in range(1, 9):
    plt.subplot(2, 4, plotnums)
    plt.plot(RiverData1[:, 0], RiverData1[:, 1], 'ko')
    plt.plot(RiverData2[:, 0], RiverData2[:, 1], 'ko')
    plt.plot(RiverData3[:, 0], RiverData3[:, 1], 'ko')
    plt.plot(RiverData4[:, 0], RiverData4[:, 1], 'ko')
    plt.plot(RiverData5[:, 0], RiverData5[:, 1], 'ko')
    plt.plot(RiverData6[:, 0], RiverData6[:, 1], 'ko')
    plt.plot(RiverData7[:, 0], RiverData7[:, 1], 'ko')
    plt.plot(RiverData8[:, 0], RiverData8[:, 1], 'ko')
    plt.xlabel('River Length (km)')
    plt.ylabel('Area (Km$^2$)')
    plt.xscale('log')
    plt.yscale('log')
    plotnums = plotnums + 1
plt.show()
I suggest loading the data inside the loop as well. Additionally, you should capture the axis-handle in a variable to control which axis is used for plotting the data. To avoid any data artifacts, I suggest setting the variables to None at the end of each iteration.
import numpy as np
import matplotlib.pyplot as plt

# store your file names in a list to be able to iterate over them:
FILES = ['Gray1961.txt', 'Hack1957.txt', 'Rignon1996.txt',
         'Robert1990.txt', 'Langbein1947_p145.txt', 'Langbein1947_p146.txt',
         'Langbein1947_p149.txt', 'Langbein1947_p152.txt']

# specify desired conversion factors for each file, separated by x and y
xFactor = [1.00, 1.00, 1.00, 1.00,
           2.59, 2.59, 2.59, 2.59]
yFactor = [1.000, 1.000, 1.000, 1.000,
           1.609, 1.609, 1.609, 1.609]

# loop through all files;
# range(len(FILES)) returns a list of integers from 0 to 7 in this example
for n in range(len(FILES)):
    # load the data from each file:
    RiverData = np.loadtxt(FILES[n], skiprows=2)

    # convert the data per the specified factors:
    X = [xi * xFactor[n] for xi in RiverData[:, 0]]
    Y = [yi * yFactor[n] for yi in RiverData[:, 1]]

    # create sub-plot; here, you need to use n+1,
    # because your loop iterable counts from 0,
    # but your sub-plots count from 1
    ax = plt.subplot(2, 4, n+1)

    # use the created axis object to plot your data;
    # use ax.plot instead of plt.plot
    ax.plot(X, Y, 'ko')

    # specify your axis details, again using the ax object
    ax.set_xlabel('River Length (km)')
    ax.set_ylabel('Area (Km$^2$)')
    # the file name can be used as plot title
    # (if you want to omit the .txt ending, use [:-4]
    # to omit the last four characters in the title string)
    ax.set_title(FILES[n][:-4])
    ax.set_xscale('log')
    ax.set_yscale('log')

    # to avoid surprises going from one loop to the next,
    # clear the data from the variables
    RiverData = None
    ax = None

plt.show()
As Thiru pointed out, you do not need to increment your iterable inside a for-loop.
Please check the matplotlib documentation on subplots:
http://matplotlib.org/examples/animation/subplots.html
You can create a figure and add multiple subplots to it; a complete sketch follows the snippet below.
fig = plt.figure()
for plotnums in range(1, 9):
    plot1 = fig.add_subplot(2, 4, plotnums)  # update the numbers as required
    ...
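A minimal runnable sketch of that pattern, with one file per subplot (file names taken from the question):
import numpy as np
import matplotlib.pyplot as plt

files = ['Gray1961.txt', 'Hack1957.txt', 'Rignon1996.txt',
         'Robert1990.txt', 'Langbein1947_p145.txt', 'Langbein1947_p146.txt',
         'Langbein1947_p149.txt', 'Langbein1947_p152.txt']

fig = plt.figure()
for plotnum, fname in enumerate(files, start=1):
    ax = fig.add_subplot(2, 4, plotnum)  # 2 rows x 4 columns grid
    data = np.loadtxt(fname, skiprows=2)
    ax.loglog(data[:, 0], data[:, 1], 'ko')  # log-log axes, as in the question
    ax.set_title(fname[:-4])
plt.show()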

want to smooth a contour from a masked array

I have a masked array which is used by matplotlib.pyplot.contourf to project a temperature contour on a global map. I was trying to smooth the contour, but unfortunately none of the proposed solutions seems to be able to handle masked arrays. I tested these solutions:
- scipy.ndimage.gaussian_filter
- moving averages
- scipy.ndimage.zoom
None of them works (they also count in the masked values). Is there any way I can smooth my contour on a masked array?
I have added this part after trying the proposed 'inpaint' solution, and the results were unchanged. Here is the code (if it helps):
import Scientific.IO.NetCDF as S
import mpl_toolkits.basemap as bm
import numpy.ma as MA
import numpy as np
import matplotlib.pyplot as plt
import inpaint

def main():
    fileobj = S.NetCDFFile('Bias.ANN.tas_A1_1.nc', mode='r')

    # take the values
    set1 = {'time', 'lat', 'lon'}
    set2 = set(fileobj.variables.keys())
    set3 = set2 - set1
    datadim = set3.pop()
    print("******************datadim: " + datadim)
    data = fileobj.variables[datadim].getValue()[0, :, :]

    lon = fileobj.variables['lon'].getValue()
    lat = fileobj.variables['lat'].getValue()
    fileobj.close()

    data, lon = bm.shiftgrid(180., data, lon, start=False)
    data = MA.masked_equal(data, 1.0e20)
    #data2 = inpaint.replace_nans(data, 10, 0.25, 2, 'idw')

    #- Make 2-D longitude and latitude arrays:
    [lon2d, lat2d] = np.meshgrid(lon, lat)

    #- Set up map:
    mapproj = bm.Basemap(projection='cyl',
                         llcrnrlat=-90.0, llcrnrlon=-180.00,
                         urcrnrlat=90.0, urcrnrlon=180.0)
    mapproj.drawcoastlines(linewidth=.5)
    mapproj.drawmapboundary(fill_color='.8')
    #mapproj.drawparallels(np.array([-90, -45, 0, 45, 90]), labels=[1,0,0,0])
    #mapproj.drawmeridians(np.array([0, 90, 180, 270, 360]), labels=[0,0,0,1])
    lonall, latall = mapproj(lon2d, lat2d)
    cmap = plt.cm.Spectral

    #- Make a contour plot of the temperature:
    mymapf = plt.contourf(lonall, latall, data, 20, cmap=cmap)
    #plt.clabel(mymapf, fontsize=12)
    plt.title(cmap.name)
    plt.colorbar(mymapf, orientation='horizontal')
    plt.savefig('sample2.png', dpi=150, edgecolor='red', format='png',
                bbox_inches='tight', pad_inches=.2)
    plt.close()

if __name__ == "__main__":
    main()
I am comparing the output from this code (the first figure) with the output for the same data file from Panoply. Zooming in and looking more closely, it seems that it is not a smoothness problem: the pyplot version produces one stripe slimmer, or the contours are cut earlier (the outer boundaries show this clearly, and the inner contours differ as a consequence). This makes the pyplot result look less smooth than the Panoply one. How can I get (nearly) the same result? Am I interpreting this correctly?
I had a similar problem and Google pointed me to this blog post. Basically it uses an inpaint algorithm to interpolate missing values and produce a valid array for filtering.
The code is at the end of the post; you can save it to site-packages (or elsewhere) and load it as a module (i.e. inpaint.py):
import inpaint
filled = inpaint.replace_nans(NANMask, 5, 0.5, 2, 'idw')
I'm happy with the result, and I guess it will suit missing temperature values just fine. There is also a newer version here: github, but the code will need some cleaning for general usage as it's part of a project.
For reference, ease of use, and preservation's sake, I'll post the code (of the initial version) here:
# -*- coding: utf-8 -*-
"""A module for various utilities and helper functions"""

import numpy as np
#cimport numpy as np
#cimport cython

DTYPEf = np.float64
#ctypedef np.float64_t DTYPEf_t

DTYPEi = np.int32
#ctypedef np.int32_t DTYPEi_t

#@cython.boundscheck(False) # turn off bounds-checking for entire function
#@cython.wraparound(False)  # turn off wraparound-checking for entire function
def replace_nans(array, max_iter, tol, kernel_size=1, method='localmean'):
    """Replace NaN elements in an array using an iterative image inpainting
    algorithm.

    The algorithm is the following:

    1) For each element in the input array, replace it by a weighted average
       of the neighbouring elements which are not NaN themselves. The weights
       depend on the method type. If ``method=localmean``, the weights are
       equal to 1/((2*kernel_size+1)**2 - 1).

    2) Several iterations are needed if there are adjacent NaN elements.
       If this is the case, information is "spread" from the edges of the
       missing regions iteratively, until the variation is below a certain
       threshold.

    Parameters
    ----------
    array : 2d np.ndarray
        an array containing NaN elements that have to be replaced
    max_iter : int
        the number of iterations
    tol : float
        the mean-square-difference threshold between successive passes
        below which the iteration stops
    kernel_size : int
        the size of the kernel, default is 1
    method : str
        the method used to replace invalid values. Valid options are
        `localmean`, `idw`.

    Returns
    -------
    filled : 2d np.ndarray
        a copy of the input array, where NaN elements have been replaced.
    """
    # cdef int i, j, I, J, it, n, k, l
    # cdef int n_invalids

    filled = np.empty([array.shape[0], array.shape[1]], dtype=DTYPEf)
    kernel = np.empty((2*kernel_size+1, 2*kernel_size+1), dtype=DTYPEf)

    # cdef np.ndarray[np.int_t, ndim=1] inans
    # cdef np.ndarray[np.int_t, ndim=1] jnans

    # indices where array is NaN
    inans, jnans = np.nonzero(np.isnan(array))

    # number of NaN elements
    n_nans = len(inans)

    # arrays which contain replaced values to check for convergence
    replaced_new = np.zeros(n_nans, dtype=DTYPEf)
    replaced_old = np.zeros(n_nans, dtype=DTYPEf)

    # depending on kernel type, fill kernel array
    if method == 'localmean':
        print('kernel_size', kernel_size)
        for i in range(2*kernel_size+1):
            for j in range(2*kernel_size+1):
                kernel[i, j] = 1
        print(kernel, 'kernel')
    elif method == 'idw':
        kernel = np.array([[0,   0.5,  0.5,  0.5,  0],
                           [0.5, 0.75, 0.75, 0.75, 0.5],
                           [0.5, 0.75, 1,    0.75, 0.5],
                           [0.5, 0.75, 0.75, 0.75, 0.5],
                           [0,   0.5,  0.5,  0.5,  0]])
        print(kernel, 'kernel')
    else:
        raise ValueError('method not valid. Should be one of `localmean`, `idw`.')

    # fill new array with input elements
    for i in range(array.shape[0]):
        for j in range(array.shape[1]):
            filled[i, j] = array[i, j]

    # make several passes
    # until we reach convergence
    for it in range(max_iter):
        print('iteration', it)
        # for each NaN element
        for k in range(n_nans):
            i = inans[k]
            j = jnans[k]

            # initialize to zero
            filled[i, j] = 0.0
            n = 0

            # loop over the kernel
            for I in range(2*kernel_size+1):
                for J in range(2*kernel_size+1):

                    # if we are not out of the boundaries
                    if i+I-kernel_size < array.shape[0] and i+I-kernel_size >= 0:
                        if j+J-kernel_size < array.shape[1] and j+J-kernel_size >= 0:

                            # if the neighbour element is not NaN itself
                            # (a NaN never compares equal to itself)
                            if filled[i+I-kernel_size, j+J-kernel_size] == filled[i+I-kernel_size, j+J-kernel_size]:

                                # do not sum the center point itself
                                if I-kernel_size != 0 or J-kernel_size != 0:

                                    # convolve kernel with original array
                                    filled[i, j] = filled[i, j] + filled[i+I-kernel_size, j+J-kernel_size]*kernel[I, J]
                                    n = n + 1*kernel[I, J]

            # divide value by effective number of added elements
            if n != 0:
                filled[i, j] = filled[i, j] / n
                replaced_new[k] = filled[i, j]
            else:
                filled[i, j] = np.nan

        # check if mean square difference between values of replaced
        # elements is below a certain tolerance
        print('tolerance', np.mean((replaced_new - replaced_old)**2))
        if np.mean((replaced_new - replaced_old)**2) < tol:
            break
        else:
            for l in range(n_nans):
                replaced_old[l] = replaced_new[l]

    return filled
def sincinterp(image, x, y, kernel_size=3):
    r"""Re-sample an image at intermediate positions between pixels.

    This function uses a cardinal interpolation formula which limits
    the loss of information in the resampling process. It uses a limited
    number of neighbouring pixels.

    The new image :math:`im^+` at fractional locations :math:`x` and
    :math:`y` is computed as:

    .. math::

        im^+(x,y) = \sum_{i=-\mathtt{kernel\_size}}^{i=\mathtt{kernel\_size}} \sum_{j=-\mathtt{kernel\_size}}^{j=\mathtt{kernel\_size}} \mathtt{image}(i,j) \frac{\sin[\pi(i-\mathtt{x})]}{\pi(i-\mathtt{x})} \frac{\sin[\pi(j-\mathtt{y})]}{\pi(j-\mathtt{y})}

    Parameters
    ----------
    image : np.ndarray, dtype np.int32
        the image array.
    x : two dimensions np.ndarray of floats
        an array containing fractional pixel row
        positions at which to interpolate the image
    y : two dimensions np.ndarray of floats
        an array containing fractional pixel column
        positions at which to interpolate the image
    kernel_size : int
        interpolation is performed over a ``(2*kernel_size+1)*(2*kernel_size+1)``
        submatrix in the neighbourhood of each interpolation point.

    Returns
    -------
    im : np.ndarray, dtype np.float64
        the interpolated value of ``image`` at the points specified
        by ``x`` and ``y``
    """
    # indices
    # cdef int i, j, I, J

    # the output array
    r = np.zeros([x.shape[0], x.shape[1]], dtype=DTYPEf)

    pi = np.pi

    # for each point of the output array
    for I in range(x.shape[0]):
        for J in range(x.shape[1]):

            # loop over all neighbouring grid points
            for i in range(int(x[I, J])-kernel_size, int(x[I, J])+kernel_size+1):
                for j in range(int(y[I, J])-kernel_size, int(y[I, J])+kernel_size+1):
                    # check that we are inside the image boundaries
                    if i >= 0 and i < image.shape[0] and j >= 0 and j < image.shape[1]:
                        if (i-x[I, J]) == 0.0 and (j-y[I, J]) == 0.0:
                            r[I, J] = r[I, J] + image[i, j]
                        elif (i-x[I, J]) == 0.0:
                            r[I, J] = r[I, J] + image[i, j] * np.sin(pi*(j-y[I, J]))/(pi*(j-y[I, J]))
                        elif (j-y[I, J]) == 0.0:
                            r[I, J] = r[I, J] + image[i, j] * np.sin(pi*(i-x[I, J]))/(pi*(i-x[I, J]))
                        else:
                            r[I, J] = r[I, J] + image[i, j] * np.sin(pi*(i-x[I, J]))*np.sin(pi*(j-y[I, J]))/(pi*pi*(i-x[I, J])*(j-y[I, J]))
    return r

#cdef extern from "math.h":
#    double sin(double)
A simple smoothing function that works with masked data will solve this. One can then avoid the approaches that involve making up data (i.e., interpolating, inpainting, etc.); making up data should always be avoided.
The main issue that arises when smoothing masked data is that, for each point, smoothing uses the neighboring values to calculate a new value at the center point, but when those neighbors are masked, the new value for the center point will also become masked due to the rules of masked arrays. Therefore, one needs to do the calculation with unmasked data and explicitly account for the mask. That's easy to do, as is done in the function smooth below.
import numpy as np
import matplotlib.pyplot as plt

# make a grid and a striped mask as test data
N = 100
x = np.linspace(0, 5, N, endpoint=True)
grid = 2. + 1.*(np.sin(2*np.pi*x)[:, np.newaxis]*np.sin(2*np.pi*x) > 0.)
m = np.resize((np.sin(np.pi*x) > 0), (N, N))

plt.imshow(grid.copy(), cmap='jet', interpolation='nearest')
plt.colorbar()
plt.title('original data')

def smooth(u, mask):
    m = ~mask
    r = u*m  # set all 'masked' points to 0. so they aren't used in the smoothing
    a = 4*r[1:-1,1:-1] + r[2:,1:-1] + r[:-2,1:-1] + r[1:-1,2:] + r[1:-1,:-2]
    b = 4*m[1:-1,1:-1] + m[2:,1:-1] + m[:-2,1:-1] + m[1:-1,2:] + m[1:-1,:-2]  # a divisor that accounts for masked points
    b[b==0] = 1.  # avoid divide-by-0 error (region is masked so value doesn't matter)
    u[1:-1,1:-1] = a/b

# run the data through the smoothing filter a few times
for i in range(10):
    smooth(grid, m)

mg = np.ma.array(grid, mask=m)  # put together the mask and the data

plt.figure()
plt.imshow(mg, cmap='jet', interpolation='nearest')
plt.colorbar()
plt.title('smoothed with mask')
plt.show()
The main point is that at the boundary of the mask, the masked values are not used in the smoothing. (This is also where the grid squares switch values, so it would be clear in the figure if the masked neighboring values were being used.)
We also just had this problem and the astropy package has us covered:
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage
from astropy.convolution import Gaussian2DKernel, convolve

# Some Axes
x = np.arange(100)
y = np.arange(100)

# Some Interesting Shape
z = np.array(np.outer(np.sin((x+y)/10), np.sin(y/3)), dtype=float)

# some mask
mask = np.outer(np.sin((x+y)/20), np.sin(y/5))**2 > .9

# masked data represent noise, so let's put some trash into the masked points
z[mask] = (np.random.random(size=(100, 100))*10)[mask]

# masked data
z_masked = np.ma.masked_array(z, mask)

# "Conventional" filter
filter_kernelsize = 2
z_filtered_bad = scipy.ndimage.gaussian_filter(z_masked, filter_kernelsize)

# Let's filter it with astropy instead, which respects the mask
k = Gaussian2DKernel(1.5)
z_filtered = convolve(z_masked, k, boundary='extend')

### Plots:
fig, axes = plt.subplots(2, 2)

plt.sca(axes[0, 0])
plt.title('Raw Data')
plt.imshow(z)
plt.colorbar()

plt.sca(axes[0, 1])
plt.title('Raw Data Masked')
plt.imshow(z_masked)
plt.colorbar()

plt.sca(axes[1, 0])
plt.title('ndimage filter (ignores mask)')
plt.imshow(z_filtered_bad)
plt.colorbar()

plt.sca(axes[1, 1])
plt.title('astropy filter (uses mask)')
plt.imshow(z_filtered)
plt.colorbar()

plt.tight_layout()
plt.show()
Output plot of the code