I used the make_classification function and MLPClassifier from sklearn. However, I could not get my points separated as in this screenshot, and this screenshot is what my plot actually shows. Could you help me separate the points, or explain what the problem is?
My code is:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
X,y=make_classification(n_samples=550, n_features=10, n_informative=2,random_state=0)
from sklearn.model_selection import train_test_split
X_train,X_test , y_train , y_test = train_test_split(X, y, test_size=0.3, random_state=1)
from sklearn.neural_network import MLPClassifier
mlp= MLPClassifier(hidden_layer_sizes=(), max_iter=300, random_state=0)
clf = mlp.fit(X_test, y_test)
z = lambda x,y: (-clf.intercepts_[0]-clf.coefs_[0][0]*x -clf.coefs_[0][1]*y) / clf.coefs_[0][2]
tmp = np.linspace(-5,5,30)
x,y = np.meshgrid(tmp,tmp)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X_test[y_test==0,0]+2, X_test[y_test==0,1]-2, X_test[y_test==0,2]-5, c='b', marker='^')
ax.scatter(X_test[y_test==1,0]-2, X_test[y_test==1,1]+2, X_test[y_test==1,2]+5, c='r', marker='o')
ax.plot_surface(x, y, z(x,y))
ax.view_init(30, 60)
plt.show()
There is nothing (seriously) wrong with your code. You just need to set a few parameters to get what you want. First, you may need a larger figure.
# To set figure size other than the default value, specify width and height
fig = plt.figure( figsize=(8,8) )
Second, for the size of the markers in the scatter() function:
# To set marker size, use `s=number` as an option
ax.scatter(X_test[y_test==0,0]+2, X_test[y_test==0,1]-2, X_test[y_test==0,2]-5,
           c='b', marker='^', s=3)
ax.scatter(X_test[y_test==1,0]-2, X_test[y_test==1,1]+2, X_test[y_test==1,2]+5,
           c='r', marker='o', s=3)
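Putting both adjustments together, the plotting part of your script would look roughly like this (a sketch based on your code above):
fig = plt.figure(figsize=(8, 8))                 # larger figure
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X_test[y_test==0,0]+2, X_test[y_test==0,1]-2, X_test[y_test==0,2]-5,
           c='b', marker='^', s=3)               # smaller markers
ax.scatter(X_test[y_test==1,0]-2, X_test[y_test==1,1]+2, X_test[y_test==1,2]+5,
           c='r', marker='o', s=3)
ax.plot_surface(x, y, z(x, y))
ax.view_init(30, 60)
plt.show()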
The plot should be similar to this:
I am working on an imbalanced classification problem. I want to add the g-mean and accuracy to my decision boundary plots, so I can see how these scoring metrics differ between models. I don't see any option to compute these scores within the decision boundary plot itself. Is there a way I can add this extra information to my decision boundary plot? I appreciate your time. Thanks!
import numpy as np
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_decision_regions
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.metrics import make_scorer
from imblearn.metrics import geometric_mean_score
import matplotlib.gridspec as gridspec
import itertools
gmean = make_scorer(geometric_mean_score, greater_is_better=True)
scoring = {'G-mean': gmean, 'Accuracy':'accuracy'}
X, y = make_blobs(n_samples=[1000, 10],centers=[[0.0, 0.0], [2.0, 2.0]],cluster_std= [1.5, 0.5],random_state=0, shuffle=False)
clf1 = LogisticRegression(max_iter=100000)
clf2 = LogisticRegression(class_weight="balanced",max_iter=100000)
gs = gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(10,8))
labels = ['Logistic Regression', 'Weighted Logistic Regression']
for clf, lab, grd in zip([clf1, clf2],
                         labels,
                         itertools.product([0, 1], repeat=2)):
    clf.fit(X, y)
    ax = plt.subplot(gs[grd[0], grd[1]])
    fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2)
    plt.title(lab)
plt.show()
You can use plt.text() (or ax.text()) to add the g-mean and accuracy to your decision boundary plot.
For example:
gs = gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(15, 8))
labels = ['Logistic Regression', 'Weighted Logistic Regression']
for clf, lab, grd in zip([clf1, clf2],
                         labels,
                         itertools.product([0, 1], repeat=2)):
    clf.fit(X, y)
    ax = plt.subplot(gs[grd[0], grd[1]])
    ax.text(6, 4, "gmean : ", fontsize=10)
    ax.text(6, 2, "accuracy : ", fontsize=10)
    fig = plot_decision_regions(X=X, y=y, clf=clf, legend=2)
    plt.title(lab)
plt.show()
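If you want the actual numbers rather than placeholders, you can compute the scores before calling ax.text(). Here is a minimal sketch, assuming you evaluate on the same X, y used for fitting (as in your loop); swap in a held-out set if you have one:
from sklearn.metrics import accuracy_score
from imblearn.metrics import geometric_mean_score

gs = gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(15, 8))
labels = ['Logistic Regression', 'Weighted Logistic Regression']
for clf, lab, grd in zip([clf1, clf2],
                         labels,
                         itertools.product([0, 1], repeat=2)):
    clf.fit(X, y)
    y_pred = clf.predict(X)                       # predictions used for both scores
    gmean_val = geometric_mean_score(y, y_pred)
    acc_val = accuracy_score(y, y_pred)
    ax = plt.subplot(gs[grd[0], grd[1]])
    plot_decision_regions(X=X, y=y, clf=clf, legend=2)
    # ax.text() uses data coordinates; adjust the positions to your axis limits
    ax.text(6, 4, f"gmean: {gmean_val:.3f}", fontsize=10)
    ax.text(6, 2, f"accuracy: {acc_val:.3f}", fontsize=10)
    plt.title(lab)
plt.show()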
The code below classifies the three Iris classes with a DecisionTreeClassifier.
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.tree import DecisionTreeClassifier
iris = datasets.load_iris()
dataset = pd.DataFrame(iris['data'], columns=iris['feature_names'])
dataset['target'] = iris['target']
X=dataset[[dataset.columns[1], dataset.columns[2]]]
y=dataset['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
model = DecisionTreeClassifier(max_depth=3)
model.fit(X_train, y_train)
And for plotting this classification we can use these lines of code:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
X_set, y_set = X_test.values, y_test.values
X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
plt.contourf(X1, X2, model.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha=0.75, cmap=ListedColormap(('red', 'green', 'blue')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c=ListedColormap(('red', 'green', 'blue'))(i), label=j)
plt.title('Classifier (Test set)')
plt.xlabel('sepal width (cm)')
plt.ylabel('petal length (cm)')
plt.legend()
plt.show()
The result would be like the one below:
Visualising the Test set results
But when I wanted to use more than two features for training,
X=dataset[[dataset.columns[1], dataset.columns[2], dataset.columns[3]]]
y=dataset['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
I couldn't visualize the results like the picture above! Could someone please explain to me how I can visualize the results?
Thank you
Since you have 3 features and a corresponding label for each sample, you can only show them in a 3D plot.
I've tried to do that in the following code:
%matplotlib notebook
from sklearn.linear_model import Ridge
X_set, y_set = X_test.values, y_test.values
X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
model = Ridge()
model.fit(np.array([X_set[:, 0],X_set[:, 1]]).T,X_set[:,2])
X3=model.predict(np.array([X1.flatten(),X2.flatten()]).T)
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
Dict={0:'red',1:'blue',2:'purple'}
ax.plot_surface(X1, X2, X3.reshape(X1.shape), cmap="YlGn", linewidth=0, antialiased=False, alpha=0.5)
for Id in range(X_set.shape[0]):
    ax.scatter3D(*X_set[Id, :], color=Dict[y_set[Id]], linewidths=10)
ax.set_xlabel("Data_1")
ax.set_ylabel('Data_2')
ax.set_zlabel("Data_3")
plt.show()
Also, since ax.plot_surface requires X1.shape == X2.shape == X3.shape, I predicted the X3 values with a linear model (a tree model would give a surface of a different shape).
One could ask why we don't build a meshgrid over all three features and create a 3D plot from it. The reason is that matplotlib's plot_surface (and the 3D contour functions) only accept 2D arrays, while a meshgrid over three features returns a 3D array for each feature, as the small check below shows.
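A minimal standalone check of those shapes (not part of the code above):
import numpy as np

a = np.linspace(0, 1, 4)
X1, X2 = np.meshgrid(a, a)           # two inputs -> two 2D arrays
print(X1.shape, X2.shape)            # (4, 4) (4, 4) -- what plot_surface expects
G1, G2, G3 = np.meshgrid(a, a, a)    # three inputs -> three 3D arrays
print(G1.shape)                      # (4, 4, 4) -- plot_surface cannot take this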
Hope that answers your question.
How can I fix this strange, illegible number format in the confusion matrix so that it shows only plain numbers?
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import plot_confusion_matrix
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
NBC = GaussianNB()
LRE = LogisticRegression(solver='lbfgs')
GBC = GradientBoostingClassifier()
RFC = RandomForestClassifier()
LGBM = LGBMClassifier()
CBC = CatBoostClassifier(verbose=0, n_estimators=100)
classifiers = [NBC,LRE,GBC,RFC,LGBM,CBC]
for cls in classifiers:
    cls.fit(X_train, y_train)
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(10,6))
target_names = ['0','1']
for cls, ax in zip(classifiers, axes.flatten()):
    plot_confusion_matrix(cls,
                          X_test,
                          y_test,
                          ax=ax,
                          cmap='Reds',
                          display_labels=target_names)
    ax.title.set_text(type(cls).__name__)
plt.tight_layout()
plt.show()
Try passing a blank values_format as an argument to plot_confusion_matrix. The docs state:
values_format : str, default=None
Format specification for values in confusion matrix. If None, the format specification is 'd' or '.2g', whichever is shorter.
plot_confusion_matrix(cls, X_test, y_test, ax=ax, cmap='Reds',
                      display_labels=target_names,
                      values_format='')  # <--------- passed here
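As a side note, plot_confusion_matrix is deprecated and removed in newer scikit-learn releases; if your version no longer ships it, a roughly equivalent sketch uses ConfusionMatrixDisplay.from_estimator, which accepts the same values_format argument:
from sklearn.metrics import ConfusionMatrixDisplay

fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(10, 6))
for cls, ax in zip(classifiers, axes.flatten()):
    ConfusionMatrixDisplay.from_estimator(cls, X_test, y_test,
                                          ax=ax, cmap='Reds',
                                          display_labels=target_names,
                                          values_format='d')   # plain integer counts
    ax.title.set_text(type(cls).__name__)
plt.tight_layout()
plt.show()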
I am plotting a function on the surface of a sphere. To test my code, I simply plot the spherical coordinate phi divided by pi, and I get the plot below. Unexpectedly, half of the sphere is the same color, and the colors on the other half aren't correct (at phi = pi, I should get 1, not 2). If I divide the data array by 2, the problem disappears. Can someone explain to me what is happening?
Here is the code I use:
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
# prepare the sphere surface
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
phi = np.linspace(0,2*np.pi, 50)
theta = np.linspace(0, np.pi, 25)
x=np.outer(np.cos(phi), np.sin(theta))
y=np.outer(np.sin(phi), np.sin(theta))
z=np.outer(np.ones(np.size(phi)), np.cos(theta))
# prepare function to plot
PHI=np.outer(phi,np.ones(np.size(theta)))
THETA=np.outer(np.ones(np.size(phi)),theta)
data = PHI/np.pi
# plot
surface = ax.plot_surface(x, y, z, cstride=1, rstride=1,
                          facecolors=cm.jet(data), cmap=plt.get_cmap('jet'))
# add colorbar
m = cm.ScalarMappable(cmap=surface.cmap,norm=surface.norm)
m.set_array(data)
plt.colorbar(m)
plt.show()
There is a little bit of chaos in the code.
When specifying facecolors, there is no reason to supply a colormap, because the facecolors do not need to be retrieved from a colormap.
Colormaps range from 0 to 1. Your data ranges from 0 to 2. Hence half of the facecolors are just the same. So you first need to normalize the data to the (0,1)-range, e.g. using a Normalize instance, then you can apply the colormap.
norm = plt.Normalize(vmin=data.min(), vmax=data.max())
surface = ax.plot_surface(x, y, z, cstride=1, rstride=1,
                          facecolors=cm.jet(norm(data)))
For the colorbar you should then use the same colormap and the same normalization as for the plot itself.
m = cm.ScalarMappable(cmap=cm.jet,norm=norm)
m.set_array(data)
Complete code:
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
# prepare the sphere surface
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
phi = np.linspace(0,2*np.pi, 50)
theta = np.linspace(0, np.pi, 25)
x=np.outer(np.cos(phi), np.sin(theta))
y=np.outer(np.sin(phi), np.sin(theta))
z=np.outer(np.ones(np.size(phi)), np.cos(theta))
# prepare function to plot
PHI=np.outer(phi,np.ones(np.size(theta)))
THETA=np.outer(np.ones(np.size(phi)),theta)
data = PHI/np.pi
# plot
norm = plt.Normalize(vmin=data.min(), vmax=data.max())
surface = ax.plot_surface(x, y, z, cstride=1, rstride=1,
                          facecolors=cm.jet(norm(data)))
# add colorbar
m = cm.ScalarMappable(cmap=cm.jet,norm=norm)
m.set_array(data)
plt.colorbar(m)
plt.show()
I've fit a 3-feature data set using sklearn.svm.SVC(). I can plot the point for each observation using matplotlib and Axes3D. I want to plot the decision boundary to see the fit. I've tried adapting the 2D examples for plotting the decision boundary, to no avail. I understand that clf.coef_ is a vector normal to the decision boundary. How can I plot this to see where it divides the points?
Here is an example on a toy dataset. Note that plotting in 3D is funky with matplotlib. Sometimes points that are behind the plane might appear as though they are in front of it, so you may have to fiddle with rotating the plot to ascertain what's going on.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.svm import SVC
rs = np.random.RandomState(1234)
# Generate some fake data.
n_samples = 200
# X is the input features by row.
X = np.zeros((n_samples, 3))
X[:n_samples//2] = rs.multivariate_normal( np.ones(3), np.eye(3), size=n_samples//2)
X[n_samples//2:] = rs.multivariate_normal(-np.ones(3), np.eye(3), size=n_samples//2)
# Y is the class labels for each row of X.
Y = np.zeros(n_samples); Y[n_samples//2:] = 1
# Fit the data with an svm
svc = SVC(kernel='linear')
svc.fit(X,Y)
# The equation of the separating plane is given by all x in R^3 such that:
# np.dot(svc.coef_[0], x) + b = 0. We should solve for the last coordinate
# to plot the plane in terms of x and y.
z = lambda x,y: (-svc.intercept_[0]-svc.coef_[0][0]*x-svc.coef_[0][1]*y) / svc.coef_[0][2]
tmp = np.linspace(-2,2,51)
x,y = np.meshgrid(tmp,tmp)
# Plot stuff.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x, y, z(x,y))
ax.plot3D(X[Y==0,0], X[Y==0,1], X[Y==0,2],'ob')
ax.plot3D(X[Y==1,0], X[Y==1,1], X[Y==1,2],'sr')
plt.show()
Output:
EDIT (Key Mathematical Linear Algebra Statement In Comment Above):
# The equation of the separating plane is given by all x in R^3 such that:
# np.dot(coefficients, x_vector) + intercept_value = 0.
# We should solve for the last coordinate: x_vector[2] == z
# to plot the plane in terms of x and y.
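Written out explicitly, with w = svc.coef_[0] and b = svc.intercept_[0], the plane is w[0]*x + w[1]*y + w[2]*z + b = 0, so solving for the last coordinate gives z = (-b - w[0]*x - w[1]*y) / w[2], which is exactly the lambda used above.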
You cannot visualize the decision surface for many features: the dimensionality is too high, and there is no way to draw an N-dimensional surface.
However, you can use 2 features and plot nice decision surfaces as follows.
I have also written an article about this here:
https://towardsdatascience.com/support-vector-machines-svm-clearly-explained-a-python-tutorial-for-classification-problems-29c539f3ad8?source=friends_link&sk=80f72ab272550d76a0cc3730d7c8af35
Case 1: 2D plot for 2 features and using the iris dataset
from sklearn.svm import SVC
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
def make_meshgrid(x, y, h=.02):
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    return xx, yy

def plot_contours(ax, clf, xx, yy, **params):
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out
model = svm.SVC(kernel='linear')
clf = model.fit(X, y)
fig, ax = plt.subplots()
# title for the plots
title = ('Decision surface of linear SVC ')
# Set-up grid for plotting.
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
ax.set_ylabel('y label here')
ax.set_xlabel('x label here')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
ax.legend()
plt.show()
Case 2: 3D plot for 2 features and using the iris dataset
from sklearn.svm import SVC
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from mpl_toolkits.mplot3d import Axes3D
iris = datasets.load_iris()
X = iris.data[:, :3] # we only take the first three features.
Y = iris.target
#make it binary classification problem
X = X[np.logical_or(Y==0,Y==1)]
Y = Y[np.logical_or(Y==0,Y==1)]
model = svm.SVC(kernel='linear')
clf = model.fit(X, Y)
# The equation of the separating plane is given by all x so that np.dot(svc.coef_[0], x) + b = 0.
# Solve for w3 (z)
z = lambda x,y: (-clf.intercept_[0]-clf.coef_[0][0]*x -clf.coef_[0][1]*y) / clf.coef_[0][2]
tmp = np.linspace(-5,5,30)
x,y = np.meshgrid(tmp,tmp)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot3D(X[Y==0,0], X[Y==0,1], X[Y==0,2],'ob')
ax.plot3D(X[Y==1,0], X[Y==1,1], X[Y==1,2],'sr')
ax.plot_surface(x, y, z(x,y))
ax.view_init(30, 60)
plt.show()