Show evaluation metrics in decision boundary plot - matplotlib

I am working on an imbalanced classification problem. I would like to add the g-mean and accuracy to my decision boundary plot, since it would be nice to see how these scoring metrics differ across the two models in the plot. I don't see any option to compute these scores within the decision boundary plot itself. Is there a way I can add this extra information to my decision boundary plot? I appreciate your time. Thanks!
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import itertools
from mlxtend.plotting import plot_decision_regions
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.metrics import make_scorer
from imblearn.metrics import geometric_mean_score

gmean = make_scorer(geometric_mean_score, greater_is_better=True)
scoring = {'G-mean': gmean, 'Accuracy': 'accuracy'}

X, y = make_blobs(n_samples=[1000, 10], centers=[[0.0, 0.0], [2.0, 2.0]],
                  cluster_std=[1.5, 0.5], random_state=0, shuffle=False)

clf1 = LogisticRegression(max_iter=100000)
clf2 = LogisticRegression(class_weight="balanced", max_iter=100000)

gs = gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(10, 8))
labels = ['Logistic Regression', 'Weighted Logistic Regression']
for clf, lab, grd in zip([clf1, clf2],
                         labels,
                         itertools.product([0, 1], repeat=2)):
    clf.fit(X, y)
    ax = plt.subplot(gs[grd[0], grd[1]])
    plot_decision_regions(X=X, y=y, clf=clf, legend=2)
    plt.title(lab)
plt.show()

You can use ax.text() (or plt.text()) to add the g-mean and accuracy to your decision boundary plot.
For example:
gs = gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(15, 8))
labels = ['Logistic Regression', 'Weighted Logistic Regression']
for clf, lab, grd in zip([clf1, clf2],
                         labels,
                         itertools.product([0, 1], repeat=2)):
    clf.fit(X, y)
    ax = plt.subplot(gs[grd[0], grd[1]])
    ax.text(6, 4, "gmean : ", fontsize=10)
    ax.text(6, 2, "accuracy : ", fontsize=10)
    plot_decision_regions(X=X, y=y, clf=clf, legend=2)
    plt.title(lab)
plt.show()
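A sketch that goes one step further and fills in the actual numbers (it scores on the training data purely for illustration; for honest estimates you would score a held-out set, e.g. by passing the scoring dict defined above to sklearn.model_selection.cross_validate):

from sklearn.metrics import accuracy_score
from imblearn.metrics import geometric_mean_score

gs = gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(15, 8))
labels = ['Logistic Regression', 'Weighted Logistic Regression']
for clf, lab, grd in zip([clf1, clf2],
                         labels,
                         itertools.product([0, 1], repeat=2)):
    clf.fit(X, y)
    y_pred = clf.predict(X)  # training-set predictions, for illustration only
    ax = plt.subplot(gs[grd[0], grd[1]])
    plot_decision_regions(X=X, y=y, clf=clf, legend=2)
    # (6, 4) and (6, 2) are data coordinates; move them to empty space in your plot.
    ax.text(6, 4, "gmean : %.3f" % geometric_mean_score(y, y_pred), fontsize=10)
    ax.text(6, 2, "accuracy : %.3f" % accuracy_score(y, y_pred), fontsize=10)
    plt.title(lab)
plt.show()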

Related

matplotlib - mask portion of standalone colorbar

Below is the code to build a standalone continuous colorbar. I would like to mask, with black, all values between -3 and 3.
import matplotlib.pyplot as plt
import matplotlib as mpl
fig, ax = plt.subplots(figsize=(8, .25))
cmap = mpl.cm.twilight
norm = mpl.colors.Normalize(vmin=-9.6, vmax=9.6)
cbar = mpl.colorbar.ColorbarBase(ax, cmap=cmap, norm=norm, orientation='horizontal', ticks=[-3,3])
The function colors.ListedColormap creates a new colormap from a list of colors. The following code retrieves these colors from an existing map and makes the desired modifications:
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
cmap = mpl.cm.get_cmap('twilight', 256)
norm = mpl.colors.Normalize(vmin=-9.6, vmax=9.6)
maskedcolors = cmap(np.linspace(0, 1, 256))
black = np.array([0, 0, 0, 1])
maskedcolors[int(round(norm(-3) * 256)) : int(round(norm(3) * 256)) + 1] = black
maskedcmp = mpl.colors.ListedColormap(maskedcolors)
fig, ax = plt.subplots(figsize=(8, .5))
cbar = mpl.colorbar.ColorbarBase(ax, cmap=maskedcmp, norm=norm, orientation='horizontal', ticks=[-3, 3])
fig.subplots_adjust(bottom=0.5)
plt.show()
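As a side note, newer Matplotlib releases (roughly 3.4+) fold ColorbarBase into the ordinary colorbar machinery; an equivalent standalone bar would look roughly like this, reusing norm and maskedcmp from above:

import matplotlib as mpl
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(8, .5))
fig.subplots_adjust(bottom=0.5)
# A bare ScalarMappable carries just the colormap and normalization for the bar.
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=maskedcmp),
             cax=ax, orientation='horizontal', ticks=[-3, 3])
plt.show()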

Average ROC curve across folds for multi-class classification case in sklearn

I have a multi-class classification problem with 3 classes in total.
I am using LinearDiscriminantAnalysis for the classification and I want to plot the average ROC across KFolds (k = 5).
I am able to do this for a binary classification case but I cannot find a way to make it work for my multi-class case.
Below is my code for the binary case:
import matplotlib.pyplot as plt
import numpy as np
from itertools import cycle
from sklearn.datasets import make_classification
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, label_binarize
plt.style.use('ggplot')
X, y = make_classification(n_samples=500, random_state=100, flip_y=0.3)
kf = KFold(n_splits=5, shuffle=True, random_state=0)
clf = LinearDiscriminantAnalysis()
pipe = Pipeline([('scaler', StandardScaler()), ('clf', clf)])
tprs = []
aucs = []
base_fpr = np.linspace(0, 1, 101)
colors = ['darksalmon', 'gold', 'royalblue', 'mediumseagreen', 'violet']
for i, (train, test) in enumerate(kf.split(X, y)):
    model = pipe.fit(X[train], y[train])
    y_score = model.predict_proba(X[test])
    fpr, tpr, _ = roc_curve(y[test], y_score[:, 1])
    roc_auc = auc(fpr, tpr)
    aucs.append(roc_auc)
    # plt.plot(fpr, tpr, lw=1, alpha=0.6, label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc), c=colors[i])
    tpr = np.interp(base_fpr, fpr, tpr)  # np.interp replaces the removed scipy.interp alias
    tpr[0] = 0.0
    tprs.append(tpr)
tprs = np.array(tprs)
mean_tprs = tprs.mean(axis=0)
std = tprs.std(axis=0)
mean_auc = auc(base_fpr, mean_tprs)
std_auc = np.std(aucs)
tprs_upper = np.minimum(mean_tprs + std, 1)
tprs_lower = mean_tprs - std

plt.figure(figsize=(12, 8))
plt.plot(base_fpr, mean_tprs, 'b', alpha=0.8,
         label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc))
plt.fill_between(base_fpr, tprs_lower, tprs_upper, color='blue', alpha=0.2)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Luck', alpha=0.8)
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.legend(loc="lower right")
plt.title('Receiver operating characteristic (ROC) curve')
# plt.axes().set_aspect('equal', 'datalim')
plt.show()
EDIT 1:
My attempt to make it work for the multiclass case using OneVsRestClassifier:
import matplotlib.pyplot as plt
import numpy as np
from itertools import cycle
from sklearn.datasets import make_classification
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import KFold, StratifiedKFold, cross_val_predict
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, label_binarize
plt.style.use('ggplot')
plt.figure(figsize=(12, 8))
X, y = make_classification(n_samples=500, random_state=100, n_classes=3,
                           n_clusters_per_class=1, flip_y=0.3)
kf = KFold(n_splits=5, shuffle=True, random_state=0)
clf = OneVsRestClassifier(LinearDiscriminantAnalysis())
pipe = Pipeline([('scaler', StandardScaler()), ('clf', clf)])
classes = np.unique(y)
y_true = label_binarize(y, classes=classes)
n_classes = y_true.shape[1]
base_fpr = np.linspace(0, 1, 101)
colors = ['darksalmon', 'gold', 'royalblue', 'mediumseagreen', 'violet']
fpr = dict()
tpr = dict()
roc_auc = dict()
fff = []
ttt = []
aucc = []
# Fit the model for each fold
for i, (train, test) in enumerate(kf.split(X, y)):
    model = pipe.fit(X[train], y[train])
    y_score = model.predict_proba(X[test])
    # Compute ROC curve and ROC area for each class PER FOLD
    for j in range(n_classes):
        fpr[j], tpr[j], _ = roc_curve(y_true[test][:, j], y_score[:, j])
        roc_auc[j] = auc(fpr[j], tpr[j])
    # First aggregate all false positive rates per class for each fold
    all_fpr = np.unique(np.concatenate([fpr[j] for j in range(n_classes)]))
    # Then interpolate all ROC curves at these points
    mean_tpr = np.zeros_like(all_fpr)
    for j in range(n_classes):
        mean_tpr += np.interp(all_fpr, fpr[j], tpr[j])
    # Finally average it and compute AUC for EACH FOLD
    mean_tpr /= n_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
    fff.append(all_fpr)
    ttt.append(mean_tpr)
    aucc.append(roc_auc["macro"])
# Compute the average across folds
fff = np.array(fff, dtype=object)  # the per-fold grids have different lengths
ttt = np.array(ttt, dtype=object)
aucc = np.array(aucc)
all_fpr_folds = np.unique(np.concatenate([fff[j] for j in range(kf.get_n_splits())]))
# Then interpolate all ROC curves at these points
mean_tpr_folds = np.zeros_like(all_fpr_folds)
for j in range(kf.get_n_splits()):
    mean_tpr_folds += np.interp(all_fpr_folds, fff[j], ttt[j])
# Finally average it and compute AUC
mean_tpr_folds /= float(kf.get_n_splits())
mean_mean_tpr_folds = mean_tpr_folds.mean(axis=0)
std = mean_tpr_folds.std(axis=0)
tprs_upper = np.minimum(mean_mean_tpr_folds + std, 1)
tprs_lower = mean_mean_tpr_folds - std
plt.plot(all_fpr_folds, mean_tpr_folds, 'b', alpha=0.8,
         label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (aucc.mean(), aucc.std()))
plt.fill_between(all_fpr_folds, tprs_lower, tprs_upper, color='blue', alpha=0.2)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Luck', alpha=0.8)
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.legend(loc="lower right")
plt.title('Receiver operating characteristic (ROC) curve')
# plt.axes().set_aspect('equal', 'datalim')
plt.show()
I am missing something here...
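One likely culprit: by the time mean_tpr_folds.mean(axis=0) runs, mean_tpr_folds is already a single averaged curve, so the mean and std collapse to scalars and the shaded band degenerates. A minimal sketch of an alternative (untested against the data above, mirroring the fixed-grid trick from the binary case): resample each fold's macro curve onto one common grid first, then average pointwise.

# Resample every fold's macro-average ROC onto the same fixed grid so the
# fold curves stack into a rectangular array that can be averaged pointwise.
base_fpr = np.linspace(0, 1, 101)
macro_tprs = np.array([np.interp(base_fpr, fff[j], ttt[j])
                       for j in range(kf.get_n_splits())])   # shape (n_folds, 101)
mean_macro_tpr = macro_tprs.mean(axis=0)   # mean macro curve across folds
std_macro_tpr = macro_tprs.std(axis=0)     # pointwise spread for the band
plt.plot(base_fpr, mean_macro_tpr, 'b',
         label=r'Mean macro ROC (AUC = %0.2f $\pm$ %0.2f)' % (aucc.mean(), aucc.std()))
plt.fill_between(base_fpr, mean_macro_tpr - std_macro_tpr,
                 np.minimum(mean_macro_tpr + std_macro_tpr, 1),
                 color='blue', alpha=0.2)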

Line of no data runs down map

I am currently plotting precipitation from a netCDF dataset. When I plot my data, a line of no data appears that isn't there in a different plot I made. Here is my code:
import matplotlib.pyplot as plt
from netCDF4 import Dataset as netcdf_dataset
import numpy as np
from cartopy import config
import cartopy.crs as ccrs
fig = plt.figure(figsize=(20,5))
dataset = netcdf_dataset(datapath_1 +
                         '/PREC.E_2000_CAM5_1850aero.cam.mean.40-100.nc')
precip = dataset.variables['PREC'][0, :, :]
lats = dataset.variables['lat'][:]
lons = dataset.variables['lon'][:]
ax = plt.axes(projection=ccrs.Robinson())
ax.contourf(lons, lats, precip, 100, transform=ccrs.PlateCarree())
ax.coastlines()
plt.show()
and here is the output map, showing the vertical line of no data.
You may need to add a cyclic point to your data array and longitude coordinate (https://scitools.org.uk/cartopy/docs/v0.16/cartopy/util/util.html#cartopy.util.add_cyclic_point). Your example modified to do this (but not tested because we don't have your input data):
import matplotlib.pyplot as plt
from netCDF4 import Dataset as netcdf_dataset
import numpy as np
from cartopy import config
import cartopy.crs as ccrs
from cartopy.util import add_cyclic_point
dataset = netcdf_dataset(datapath_1 +
                         '/PREC.E_2000_CAM5_1850aero.cam.mean.40-100.nc')
precip = dataset.variables['PREC'][0, :, :]
lats = dataset.variables['lat'][:]
lons = dataset.variables['lon'][:]
precip_c, lons_c = add_cyclic_point(precip, coord=lons)
fig = plt.figure(figsize=(20,5))
ax = plt.axes(projection=ccrs.Robinson())
ax.contourf(lons_c, lats, precip_c, 100, transform=ccrs.PlateCarree())
ax.coastlines()
plt.show()
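Since the original netCDF file isn't available, here is a self-contained demonstration of the same fix on synthetic data; the hypothetical longitude grid stops short of 360 degrees, which is exactly what produces the seam when no cyclic point is added:

import matplotlib.pyplot as plt
import numpy as np
import cartopy.crs as ccrs
from cartopy.util import add_cyclic_point

lons = np.arange(0, 360, 2.5)   # 0 ... 357.5, no wrap-around column
lats = np.linspace(-90, 90, 73)
data = np.cos(np.radians(lats))[:, None] * np.sin(np.radians(lons))[None, :]

# add_cyclic_point appends a copy of the first longitude column at 360.
data_c, lons_c = add_cyclic_point(data, coord=lons)

ax = plt.axes(projection=ccrs.Robinson())
ax.contourf(lons_c, lats, data_c, 100, transform=ccrs.PlateCarree())
ax.coastlines()
plt.show()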

unexpected constant color using matplotlib surface_plot and facecolors

I am plotting a function on the surface of a sphere. To test my code, I simply plot the spherical coordinate phi divided by pi. Unexpectedly, half of the sphere comes out in a single color, and the colors on the other half aren't correct (at phi=pi, I should get 1, not 2). If I divide the data array by 2, the problem disappears. Can someone explain to me what is happening?
Here is the code I use:
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
# prepare the sphere surface
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
phi = np.linspace(0,2*np.pi, 50)
theta = np.linspace(0, np.pi, 25)
x=np.outer(np.cos(phi), np.sin(theta))
y=np.outer(np.sin(phi), np.sin(theta))
z=np.outer(np.ones(np.size(phi)), np.cos(theta))
# prepare function to plot
PHI=np.outer(phi,np.ones(np.size(theta)))
THETA=np.outer(np.ones(np.size(phi)),theta)
data = PHI/np.pi
# plot
surface = ax.plot_surface(x, y, z, cstride=1, rstride=1,
                          facecolors=cm.jet(data), cmap=plt.get_cmap('jet'))
# add colorbar
m = cm.ScalarMappable(cmap=surface.cmap,norm=surface.norm)
m.set_array(data)
plt.colorbar(m)
plt.show()
There is a little bit of chaos in the code.
When specifying facecolors, there is no reason to supply a colormap, because the facecolors do not need to be retrieved from a colormap.
Colormaps range from 0 to 1. Your data ranges from 0 to 2. Hence half of the facecolors are just the same. So you first need to normalize the data to the (0,1)-range, e.g. using a Normalize instance, then you can apply the colormap.
norm = plt.Normalize(vmin=data.min(), vmax=data.max())
surface = ax.plot_surface(x, y, z, cstride=1, rstride=1,
                          facecolors=cm.jet(norm(data)))
For the colorbar you should then use the same colormap and the same normalization as for the plot itself.
m = cm.ScalarMappable(cmap=cm.jet,norm=norm)
m.set_array(data)
Complete code:
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
# prepare the sphere surface
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
phi = np.linspace(0,2*np.pi, 50)
theta = np.linspace(0, np.pi, 25)
x=np.outer(np.cos(phi), np.sin(theta))
y=np.outer(np.sin(phi), np.sin(theta))
z=np.outer(np.ones(np.size(phi)), np.cos(theta))
# prepare function to plot
PHI=np.outer(phi,np.ones(np.size(theta)))
THETA=np.outer(np.ones(np.size(phi)),theta)
data = PHI/np.pi
# plot
norm = plt.Normalize(vmin=data.min(), vmax=data.max())
surface = ax.plot_surface(x, y, z, cstride=1, rstride=1,
                          facecolors=cm.jet(norm(data)))
# add colorbar
m = cm.ScalarMappable(cmap=cm.jet,norm=norm)
m.set_array(data)
plt.colorbar(m)
plt.show()
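One caveat: on recent Matplotlib versions (roughly 3.6 and later), plt.colorbar(m) with a bare ScalarMappable can fail with "Unable to determine Axes to steal space for Colorbar"; passing the parent axes explicitly avoids this:

plt.colorbar(m, ax=ax)  # explicit parent Axes, needed on newer Matplotlib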

Plotting 3D Decision Boundary From Linear SVM

I've fit a 3-feature data set using sklearn.svm.SVC(). I can plot the point for each observation using matplotlib and Axes3D. I want to plot the decision boundary to see the fit. I've tried adapting the 2D examples for plotting the decision boundary, to no avail. I understand that clf.coef_ is a vector normal to the decision boundary. How can I plot this to see where it divides the points?
Here is an example on a toy dataset. Note that plotting in 3D is funky with matplotlib. Sometimes points that are behind the plane might appear as though they are in front of it, so you may have to fiddle with rotating the plot to ascertain what's going on.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.svm import SVC
rs = np.random.RandomState(1234)
# Generate some fake data.
n_samples = 200
# X is the input features by row. Note the integer division (//) so the
# slices and sizes stay ints on Python 3.
X = np.zeros((n_samples, 3))
X[:n_samples // 2] = rs.multivariate_normal( np.ones(3), np.eye(3), size=n_samples // 2)
X[n_samples // 2:] = rs.multivariate_normal(-np.ones(3), np.eye(3), size=n_samples // 2)
# Y is the class labels for each row of X.
Y = np.zeros(n_samples)
Y[n_samples // 2:] = 1
# Fit the data with an svm
svc = SVC(kernel='linear')
svc.fit(X,Y)
# The equation of the separating plane is given by all x in R^3 such that:
# np.dot(svc.coef_[0], x) + b = 0. We should solve for the last coordinate
# to plot the plane in terms of x and y.
z = lambda x,y: (-svc.intercept_[0]-svc.coef_[0][0]*x-svc.coef_[0][1]*y) / svc.coef_[0][2]
tmp = np.linspace(-2,2,51)
x,y = np.meshgrid(tmp,tmp)
# Plot stuff.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x, y, z(x,y))
ax.plot3D(X[Y==0,0], X[Y==0,1], X[Y==0,2],'ob')
ax.plot3D(X[Y==1,0], X[Y==1,1], X[Y==1,2],'sr')
plt.show()
EDIT (Key Mathematical Linear Algebra Statement In Comment Above):
# The equation of the separating plane is given by all x in R^3 such that:
# np.dot(coefficients, x_vector) + intercept_value = 0.
# We should solve for the last coordinate: x_vector[2] == z
# to plot the plane in terms of x and y.
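As a quick sanity check of that statement (a sketch reusing the fitted svc and X from above), the plane equation should reproduce scikit-learn's decision_function up to floating-point error:

# Evaluate np.dot(coef, x) + intercept for every row of X and compare
# against the library's own decision values.
d_manual = X @ svc.coef_[0] + svc.intercept_[0]
print(np.allclose(d_manual, svc.decision_function(X)))  # expected: True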
You cannot visualize the decision surface when there are many features: the dimensionality is too high, and there is no way to draw an N-dimensional surface.
However, you can use 2 features and plot nice decision surfaces as follows.
I have also written an article about this here:
https://towardsdatascience.com/support-vector-machines-svm-clearly-explained-a-python-tutorial-for-classification-problems-29c539f3ad8?source=friends_link&sk=80f72ab272550d76a0cc3730d7c8af35
Case 1: 2D plot for 2 features and using the iris dataset
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets

iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features
y = iris.target
def make_meshgrid(x, y, h=.02):
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    return xx, yy

def plot_contours(ax, clf, xx, yy, **params):
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out
model = svm.SVC(kernel='linear')
clf = model.fit(X, y)
fig, ax = plt.subplots()
# title for the plots
title = ('Decision surface of linear SVC ')
# Set-up grid for plotting.
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
ax.set_ylabel('y label here')
ax.set_xlabel('x label here')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
ax.legend()
plt.show()
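If you are on scikit-learn 1.1 or later, the meshgrid-and-contourf steps above are wrapped by a built-in helper; a rough equivalent of the same plot (assuming that version is available):

from sklearn.inspection import DecisionBoundaryDisplay

fig, ax = plt.subplots()
# from_estimator builds the grid, predicts over it, and draws the contours.
DecisionBoundaryDisplay.from_estimator(clf, X, response_method="predict",
                                       cmap=plt.cm.coolwarm, alpha=0.8, ax=ax)
ax.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
plt.show()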
Case 2: 3D plot for 2 features and using the iris dataset
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (enables the 3d projection on older Matplotlib)

iris = datasets.load_iris()
X = iris.data[:, :3]  # we only take the first three features
Y = iris.target

# Make it a binary classification problem.
X = X[np.logical_or(Y == 0, Y == 1)]
Y = Y[np.logical_or(Y == 0, Y == 1)]
model = svm.SVC(kernel='linear')
clf = model.fit(X, Y)
# The equation of the separating plane is given by all x so that
# np.dot(clf.coef_[0], x) + clf.intercept_[0] = 0. Solve for w3 (z):
z = lambda x, y: (-clf.intercept_[0] - clf.coef_[0][0] * x - clf.coef_[0][1] * y) / clf.coef_[0][2]
tmp = np.linspace(-5,5,30)
x,y = np.meshgrid(tmp,tmp)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot3D(X[Y==0,0], X[Y==0,1], X[Y==0,2],'ob')
ax.plot3D(X[Y==1,0], X[Y==1,1], X[Y==1,2],'sr')
ax.plot_surface(x, y, z(x,y))
ax.view_init(30, 60)
plt.show()