What is negative_slope argument of tf.keras.layers.ReLU? - tensorflow

tf.keras.layers.ReLU has a negative_slope argument, which is described as "Float >= 0. Negative slope coefficient. Default to 0."
tf.keras.layers.ReLU(
    max_value=None,
    negative_slope=0.0,
    threshold=0.0,
    **kwargs
)
Is this meant to turn it into a Leaky ReLU? If so, is it the same as the alpha argument of tf.keras.layers.LeakyReLU?
tf.keras.layers.LeakyReLU(alpha=0.3, **kwargs)
* alpha: Float >= 0. Negative slope coefficient. Default to 0.3.

Short answer:
Is this meant to turn it into a Leaky ReLU?
Yes. The negative_slope parameter of tf.keras.layers.ReLU plays the same role as alpha does in tf.keras.layers.LeakyReLU. For example, tf.keras.layers.ReLU(negative_slope=0.5) and tf.keras.layers.LeakyReLU(alpha=0.5) behave identically.
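A quick numerical check (assuming TensorFlow 2.x; the test values are arbitrary) confirms they compute the same thing:
import numpy as np
import tensorflow as tf

x = tf.constant([-3.0, -1.0, 0.0, 2.0])
relu = tf.keras.layers.ReLU(negative_slope=0.5)
leaky = tf.keras.layers.LeakyReLU(alpha=0.5)

print(relu(x).numpy())   # [-1.5 -0.5  0.   2. ]
print(leaky(x).numpy())  # [-1.5 -0.5  0.   2. ]
np.testing.assert_allclose(relu(x).numpy(), leaky(x).numpy())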
Here is a visualization of their behavior:
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
relu=tf.keras.layers.ReLU(max_value=None, negative_slope=0.0, threshold=0.0)
relu_neg_half=tf.keras.layers.ReLU(max_value=None, negative_slope=0.5, threshold=0.0)
relu_neg_1tenth=tf.keras.layers.ReLU(max_value=None, negative_slope=0.1, threshold=0.0)
relu_neg_1tenth_thresh_1=tf.keras.layers.ReLU(max_value=None, negative_slope=0.1, threshold=1.)
relu_neg_1tenth_thresh_2=tf.keras.layers.ReLU(max_value=None, negative_slope=0.1, threshold=2.)
lrelu_alph_half=tf.keras.layers.LeakyReLU(alpha=0.5)
lrelu_alph_1tenth=tf.keras.layers.LeakyReLU(alpha=0.1)
x=np.linspace(-5,5,101)
fig = plt.figure(figsize=(6,6), dpi=150)
markevery=0.05
plt.plot(x, relu(x), '-o', markevery=markevery, label="ReLU | negative_slope=0.0")
plt.plot(x, relu_neg_half(x), '--s', markevery=markevery, label="ReLU | negative_slope=0.5")
plt.plot(x, relu_neg_1tenth(x), '--p', markevery=markevery, label="ReLU | negative_slope=0.1")
plt.plot(x, relu_neg_1tenth_thresh_2(x), '--d', markevery=markevery, label="ReLU | negative_slope=0.1 | threshold=2.0")
plt.plot(x, lrelu_alph_half(x), '--v', markevery=markevery*1.2, label="LeakyReLU | alpha=0.5")
plt.plot(x, lrelu_alph_1tenth(x), '--^', markevery=markevery*1.2, label="LeakyReLU | alpha=0.1")
plt.legend(frameon=False)
plt.savefig('relu.png', bbox_inches='tight')
Output: a plot of the curves defined above; the ReLU curve with negative_slope=0.5 coincides with the LeakyReLU curve with alpha=0.5, and likewise for negative_slope=0.1 and alpha=0.1.
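For reference, the three arguments of tf.keras.layers.ReLU combine into a piecewise function. Below is a small NumPy sketch of that documented behaviour (my own re-implementation for illustration, not the actual Keras code):
import numpy as np

def relu_like(x, max_value=None, negative_slope=0.0, threshold=0.0):
    # Piecewise form described in the tf.keras.layers.ReLU docs:
    #   f(x) = max_value                         for x >= max_value
    #   f(x) = x                                 for threshold <= x < max_value
    #   f(x) = negative_slope * (x - threshold)  otherwise
    x = np.asarray(x, dtype=float)
    out = np.where(x >= threshold, x, negative_slope * (x - threshold))
    if max_value is not None:
        out = np.minimum(out, max_value)
    return out

print(relu_like([-2.0, 0.5, 3.0], negative_slope=0.1, threshold=1.0))
# [-0.3  -0.05  3.  ]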

Related

How do I plot the decision boundaries of multiple classifiers in one figure?

I want to plot the decision boundaries of multiple classifiers in the same figure.
The code is as follows:
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis,QuadraticDiscriminantAnalysis
from sklearn.ensemble import AdaBoostClassifier,BaggingClassifier,RandomForestClassifier,HistGradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import VotingClassifier,ExtraTreesClassifier
iris = load_iris()
X = iris.data[:, :2]
classifiers = [LogisticRegression(solver='sag', penalty='l2', multi_class='ovr',
                                  max_iter=25000, random_state=None, fit_intercept=True),
               LinearDiscriminantAnalysis(),
               QuadraticDiscriminantAnalysis(),
               DecisionTreeClassifier(min_samples_leaf=1),
               BaggingClassifier(),
               RandomForestClassifier(),
               AdaBoostClassifier(),
               HistGradientBoostingClassifier(),
               VotingClassifier(estimators=[('rfc', RandomForestClassifier()),
                                            ('dtc', DecisionTreeClassifier())], voting='soft'),
               ExtraTreesClassifier()]
for classifier in classifiers:
    classifier.fit(X, iris.target)
    disp = DecisionBoundaryDisplay.from_estimator(classifier, X, response_method="predict",
                                                  xlabel=iris.feature_names[0], ylabel=iris.feature_names[1],
                                                  alpha=0.5)
    disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor="k")
plt.show()
This did not give the result I want: each classifier ends up in its own figure, but I need all the plots in the same figure.
Can someone help me with this?
To get the decision boundaries of the different classifiers in one figure, pass the ax argument to DecisionBoundaryDisplay.from_estimator:
import numpy as np  # needed for np.unique below

# Assuming there are 10 classifiers
fig, ax = plt.subplots(nrows=5, ncols=2)
ax = ax.T.flatten()
i = 0
for classifier in classifiers:
    classifier.fit(X, iris.target)
    disp = DecisionBoundaryDisplay.from_estimator(classifier,
                                                  X, response_method="predict",
                                                  xlabel=iris.feature_names[0], ylabel=iris.feature_names[1],
                                                  alpha=0.5, ax=ax[i])
    disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor="k")
    # Refer to the next Axes in the 5 * 2 grid created by plt.subplots
    i += 1
    # To get labels for the contour plots
    labels = np.unique(iris.target)
    proxy = [plt.Rectangle((0, 0), 1, 1, fc=pc.get_facecolor()[0]) for pc in disp.surface_.collections]
    disp.ax_.legend(proxy, labels)
plt.show()
This gives a 5 x 2 grid of subplots, one decision-boundary plot per classifier.
This answer was motivated by this answer and this answer.
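As a side note, the manual counter i can be avoided by zipping the classifiers with the flattened Axes array. A minimal sketch of the same idea, reusing X, iris, and classifiers from the question (the figsize is an arbitrary choice):
fig, axes = plt.subplots(nrows=5, ncols=2, figsize=(10, 18))
for classifier, axis in zip(classifiers, axes.T.flatten()):
    classifier.fit(X, iris.target)
    disp = DecisionBoundaryDisplay.from_estimator(
        classifier, X, response_method="predict",
        xlabel=iris.feature_names[0], ylabel=iris.feature_names[1],
        alpha=0.5, ax=axis)
    disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor="k")
plt.show()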

Can matplotlib.pyplot.plot color code a curve pointwise

Here is an example from matplotlib, where pyplot.plot is used and a curve is piecewise color coded.
import numpy as np
import matplotlib.pyplot as plt
t = np.arange(0.0, 2.0, 0.01)
s = np.sin(2 * np.pi * t)
upper = 0.77
lower = -0.77
supper = np.ma.masked_where(s < upper, s)
slower = np.ma.masked_where(s > lower, s)
smiddle = np.ma.masked_where((s < lower) | (s > upper), s)
fig, ax = plt.subplots()
ax.plot(t, smiddle, t, slower, t, supper)
plt.show()
My question is: can matplotlib.pyplot.plot also color-code a curve pointwise (using any colormap)? I know that I could use matplotlib.pyplot.scatter instead to do that.
No, it can't. See the documentation. As you say, use plt.scatter() for this.
You could call plt.plot() separately for every point in your dataset, giving each its own color, but that would be insanity: it issues one .plot() call per point, which is very wasteful when .scatter() exists.
If you insist though:
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
n = 1000
x = np.linspace(0, 2*np.pi, n)
y = np.sin(x)
cmap = plt.get_cmap('hsv')
norm = mpl.colors.Normalize(vmin=y.min(), vmax=y.max())
for i in range(n):
    plt.plot(x[i], y[i], marker='.', markersize=25, c=cmap(norm(y[i])))
plt.show()
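For comparison, here is the scatter-based version that the answer recommends; a minimal sketch using the same data and colormap:
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

n = 1000
x = np.linspace(0, 2 * np.pi, n)
y = np.sin(x)

cmap = plt.get_cmap('hsv')
norm = mpl.colors.Normalize(vmin=y.min(), vmax=y.max())

# A single scatter call colors every point at once via the c array.
plt.scatter(x, y, s=25, c=y, cmap=cmap, norm=norm)
plt.show()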

Matplotlib hist2d() and numpy masked_where()?

I may be misunderstanding how numpy.ma.masked_where() works, but it doesn't seem to work properly with matplotlib hist2d().
In the code below, I create an ndarray, mask it, then plot the original and masked ndarrays with matplotlib.pyplot.plot(). This works.
However, when I try to plot both with matplotlib.pyplot.hist2d(), the mask doesn't seem to be taken into account. I have tested this with matplotlib 1.3.1 and 3.2.1, and with numpy 1.18.5.
Any suggestions?
import math
import numpy as np
import time
import sys
import numpy.ma as ma
import matplotlib
import matplotlib.pyplot as plt
from scipy.stats import expon, poisson, uniform, norm
print(matplotlib.__version__, np.__version__)
nSiz=10000
maxx, maxy = 1.0, 10.0
x, y, z = uniform.rvs(scale=maxx, size=nSiz), uniform.rvs(scale=maxy, size=nSiz), norm.rvs(scale=1.0, size=nSiz)
binx, biny = np.linspace(0, maxx, 20), np.linspace(0, maxy, 20)
d = np.array([(xx, yy, zz) for xx, yy, zz in zip(x, y, z)], dtype=[('X', 'f4'), ('Y', 'f4'), ('Z', 'f4')])
print("Col titles: " + str(d.dtype.names))
dc = ma.masked_where(d['X'] < 0.5, d) # Mask data
fig, axx = plt.subplots(2, 2, figsize=(10, 10), dpi=300)
ax = axx.ravel()
ax[0].plot(d['X'], d['Y'], 'bv', ms=3)
ax[1].plot(dc['X'], dc['Y'], 'ro', ms=6, alpha=0.1) ### Mask seems to work
ax[2].hist2d(d['X'], d['Y'], bins=[binx, biny], cmap='Blues')
ax[3].hist2d(dc['X'], dc['Y'], bins=[binx, biny], cmap='Blues') ### Mask doesn't seem to work
for axx in ax:
    axx.set_xlabel(d.dtype.names[0], fontsize=15)
    axx.set_ylabel(d.dtype.names[1], fontsize=15)
    axx.set_xlim(0.0, maxx)
    axx.set_ylim(0.0, maxy)
ax[0].set_title('No cut')
ax[1].set_title('Cut')
plt.show()
The last plot (ax[3]) is incorrect: the mask appears to be ignored and all the points are binned.
Turns out a simple solution in my case is not to use:
dc = ma.masked_where(d['X'] < 0.5, d)
but instead to select the rows directly with boolean indexing:
dc = d[d['X'] >= 0.5]
Note the flipped comparison: boolean indexing keeps the rows where the condition is True, whereas masked_where hides them. That does the job for me (though I still don't know the purpose of numpy.ma.masked_where()).
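As far as I can tell, the reason hist2d() ignores the mask is that it hands the data to np.histogram2d, which does not honour masked arrays: the masked entries are binned using their underlying values anyway. If you do want to keep the masked_where approach, one workaround (a sketch I have not tested across numpy/matplotlib versions) is to pass only the unmasked values via compressed():
# Sketch: with dc = ma.masked_where(d['X'] < 0.5, d) as in the question,
# extract only the unmasked values before handing them to hist2d.
# dc['X'] and dc['Y'] carry the same record-level mask, so the compressed
# arrays stay aligned.
xs = dc['X'].compressed()
ys = dc['Y'].compressed()
ax[3].hist2d(xs, ys, bins=[binx, biny], cmap='Blues')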

How can Matplotlib axes be scaled hyperbolically?

I have a plot a bit like this:
The differences between the two lines (red and blue) are most important in my actual data (a ROC curve) in roughly the grid cell 0.2<x<0.4, 0.8<y<1. I could simply crop to that grid cell, but let's say I'd rather scale both the x and y axes hyperbolically, with the y-axis scaling curve peaking at about 0.9 and the x-axis scaling curve peaking at about 0.3, so that the 2D space gets stretched out in the grid cell of interest and compacted elsewhere, while preserving the meaning of the axis tick numbers. How would one accomplish this? The beginnings of my attempt are below. How would my code need to be modified to implement the axis scaling I described?
from matplotlib import gridspec
from matplotlib import scale as mscale
from matplotlib import transforms as mtransforms
from matplotlib.ticker import FormatStrFormatter
from matplotlib.ticker import NullFormatter, NullLocator, MultipleLocator
import math
import matplotlib
import matplotlib.patches as mpatches
import matplotlib.pylab as plt
import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np
import seaborn as sns
sns.set_palette('husl')
sns.set()
plt.rcParams["figure.figsize"] = [5, 5]
x = np.arange(0, 1, step=0.01)
y1 = 1-1/np.exp(10*x)
y2 = 1-1.1/np.exp(10*x)
plt.scatter(x, y1, s=1, facecolor='red')
plt.scatter(x, y2, s=1, facecolor='blue')
plt.show();
class CustomScale(mscale.ScaleBase):
    name = 'custom'

    def __init__(self, axis, **kwargs):
        mscale.ScaleBase.__init__(self)
        self.thresh = None  # thresh
        self.name = 'custom'

    def get_transform(self):
        return self.CustomTransform(self.thresh)

    def set_default_locators_and_formatters(self, axis):
        pass

    class CustomTransform(mtransforms.Transform):
        input_dims = 1
        output_dims = 1
        is_separable = True

        def __init__(self, thresh):
            mtransforms.Transform.__init__(self)
            self.thresh = thresh

        def transform_non_affine(self, a):
            # return np.log(1 + a)
            return np.exp(a) - 1
            # return 1 + (1/2)*a
mscale.register_scale(CustomScale)
plt.scatter(x, y1, s=1, facecolor='red')
plt.scatter(x, y2, s=1, facecolor='blue')
plt.xscale('custom')
plt.show();
You may be able to achieve this using FuncScale (registered as 'function').
f = lambda a: np.exp(a) - 1
g = lambda b: np.log(b + 1)
plt.xscale('function', functions=(f, g))
For hyperbolic scaling, you could use lambda x: 1 / x for both functions.
See the example in the scales documentation: https://matplotlib.org/3.3.4/gallery/scales/scales.html
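To make this concrete, here is a minimal runnable sketch that applies a forward/inverse pair to both axes of the example data above. The exp/log pair is only illustrative; substitute whatever pair stretches the region you care about, as long as the two functions are exact inverses of each other:
import numpy as np
import matplotlib.pyplot as plt

x = np.arange(0, 1, step=0.01)
y1 = 1 - 1 / np.exp(10 * x)
y2 = 1 - 1.1 / np.exp(10 * x)

# FuncScale needs the forward transform and its inverse.
forward = lambda a: np.exp(a) - 1
inverse = lambda b: np.log(b + 1)

fig, ax = plt.subplots(figsize=(5, 5))
ax.scatter(x, y1, s=1, facecolor='red')
ax.scatter(x, y2, s=1, facecolor='blue')
ax.set_xscale('function', functions=(forward, inverse))
ax.set_yscale('function', functions=(forward, inverse))
plt.show()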

Making sure 0 gets white in a RdBu colorbar

I create a heatmap with the following snippet:
import numpy as np
import matplotlib.pyplot as plt
d = np.random.normal(.4,2,(10,10))
plt.imshow(d,cmap=plt.cm.RdBu)
plt.colorbar()
plt.show()
The result is the plot below.
Now, since the midpoint of the data is not 0, the cells whose data value is 0 are not mapped to white, but rather to a slightly reddish color.
How do I force the colormap so that max=blue, min=red and 0=white?
Use a TwoSlopeNorm.
Note: this class was called DivergingNorm before matplotlib 3.2; the code below uses the newer name.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
d = np.random.normal(.4,2,(10,10))
norm = mcolors.TwoSlopeNorm(vmin=d.min(), vmax=d.max(), vcenter=0)  # DivergingNorm on matplotlib < 3.2
plt.imshow(d, cmap=plt.cm.RdBu, norm=norm)
plt.colorbar()
plt.show()
A previous SO post (Change colorbar gradient in matplotlib) wanted a solution for a more complicated situation, but one of the answers talked about the MidpointNormalize subclass in the matplotlib documentation. With that, the solution becomes:
import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
class MidpointNormalize(mpl.colors.Normalize):
    ## class from the mpl docs:
    # https://matplotlib.org/users/colormapnorms.html
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        super().__init__(vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # I'm ignoring masked values and all kinds of edge cases to make a
        # simple example...
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))
d = np.random.normal(.4,2,(10,10))
plt.imshow(d,cmap=plt.cm.RdBu,norm=MidpointNormalize(midpoint=0))
plt.colorbar()
plt.show()
Kudos to Joe Kington for writing the subclass, and to Rutger Kassies for pointing out the answer.