Why does this error appear during fit when creating a Decision Tree Classifier? - pandas

Hi, I am trying a Decision Tree Classifier by following the video "Hello World - Machine Learning Recipes #1" by Google Developers.
Here is my code:
#Import the Pandas library
import pandas as pd
#Load the train and test datasets to create two DataFrames
train_url = "http://s3.amazonaws.com/assets.datacamp.com/course/Kaggle/train.csv" train = pd.read_csv(train_url)
#Print the head of the train and test dataframes
train.head()
test_url = "http://s3.amazonaws.com/assets.datacamp.com/course/Kaggle/test.csv" test = pd.read_csv(test_url)
#Print the head of the train and test dataframes
test.head()
#Import the decision tree module from scikit-learn
from sklearn import tree
#find the best feature to predict Survival rate
#define X_features and Y_labels
col_names=['Pclass','Age','SibSp','Parch']
X_features= train[col_names]
#assign Survived to the labels
Y_labels= train.Survived
#create a decision tree classifier
clf=tree.DecisionTreeClassifier()
#fit (find patterns in Data)
clf=clf.fit(X_features, Y_labels)
clf.predict(test[col_names])
I am getting this error:
ValueError                                Traceback (most recent call last)
<ipython-input> in <module>()
     13 #Y_train_sparse=Y_labels.to_sparse()
     14 # fit (find patterns in Data)
---> 15 clf=clf.fit(X_features, Y_labels)
     16 #clf.predict(test[col_names])

C:\Users\nitinahu\AppData\Local\Continuum\Anaconda3\lib\site-packages\sklearn\tree\tree.py in fit(self, X, y, sample_weight, check_input, X_idx_sorted)
    152         random_state = check_random_state(self.random_state)
    153         if check_input:
--> 154             X = check_array(X, dtype=DTYPE, accept_sparse="csc")
    155         if issparse(X):
    156             X.sort_indices()

C:\Users\nitinahu\AppData\Local\Continuum\Anaconda3\lib\site-packages\sklearn\utils\validation.py in check_array(array, accept_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, warn_on_dtype, estimator)
    396                              % (array.ndim, estimator_name))
    397     if force_all_finite:
--> 398         _assert_all_finite(array)
    399
    400     shape_repr = _shape_repr(array.shape)

C:\Users\nitinahu\AppData\Local\Continuum\Anaconda3\lib\site-packages\sklearn\utils\validation.py in _assert_all_finite(X)
     52             and not np.isfinite(X).all()):
     53         raise ValueError("Input contains NaN, infinity"
---> 54                          " or a value too large for %r." % X.dtype)
     55
     56

ValueError: Input contains NaN, infinity or a value too large for dtype('float32').

Check the values you are actually passing to fit. The error message is literal: the input contains NaN, infinity, or a value too large for dtype('float32'). In the Titanic training data the Age column has missing entries, so X_features contains NaN, which scikit-learn estimators cannot handle; fill or drop those values before fitting.
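A minimal sketch of one fix, assuming the NaNs come from the Age column (train[col_names].isnull().sum() will confirm which columns are affected):
#Fill missing values with the training-set median before fitting
X_features = train[col_names].fillna(train[col_names].median())
Y_labels = train.Survived
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_features, Y_labels)
#Apply the same imputation to the test set before predicting
print(clf.predict(test[col_names].fillna(train[col_names].median())))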

Related

Stacking with neural network as baseline model: 'History' object has no attribute 'predict'

I have a problem including a neural network as one of three baseline models in my stacking algorithm.
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
rf = RandomForestRegressor()
xgb = XGBRegressor()
nn = Sequential([
    Dense(neurons, input_dim=input_dim, use_bias=False),
    Dense(neurons),
    Dense(1)
])
nn.compile(optimizer='adam', loss='mse', metrics=['mse', 'mae'])
# STACKING CODE
# prepare the data
X = train_x.values
y = train_y.values
# initialize 3 models to be stacked
model_1 = rf
model_2 = xgb
model_3 = nn
# generate cross-val predictions with rf, xgb and nn using TimeSeriesSplit
cross_val_predict = np.row_stack([
    np.column_stack([
        model_1.fit(X[id_train], y[id_train]).predict(X[id_test]),
        model_2.fit(X[id_train], y[id_train]).predict(X[id_test]),
        model_3.fit(X[id_train], y[id_train]).predict(X[id_test]),
        y[id_test]  # we add in the last position the corresponding fold labels
    ])
    for id_train, id_test in TimeSeriesSplit(n_splits=3).split(X)
])  # (test_size*n_splits, n_models_to_stack+1)
After I run this, for the neural network part it says that 'History' object has no attribute 'predict'.
AttributeError Traceback (most recent call last)
<ipython-input-76-5c269db6c559> in <module>
12
13 # generate cross-val-prediction with rf and gb using TimeSeriesSplit
---> 14 cross_val_predict = np.row_stack([
15 np.column_stack([
16 model_1.fit(X[id_train], y[id_train]).predict(X[id_test]),
<ipython-input-76-5c269db6c559> in <listcomp>(.0)
16 model_1.fit(X[id_train], y[id_train]).predict(X[id_test]),
17 model_2.fit(X[id_train], y[id_train]).predict(X[id_test]),
---> 18 model_3.fit(X[id_train], y[id_train]).predict(X[id_test]),
19 y[id_test] # we add in the last position the corresponding fold labels
20 ])
AttributeError: 'History' object has no attribute 'predict'
I get that you can't call predict straight after fitting the neural model, since fitting it returns a History object rather than the model object, so I changed the code to:
cross_val_predict = np.row_stack([
    np.column_stack([
        model_1.fit(X[id_train], y[id_train]),
        model_1.predict(X[id_test]),
        model_2.fit(X[id_train], y[id_train]),
        model_2.predict(X[id_test]),
        model_3.fit(X[id_train], y[id_train]),
        model_3.predict(X[id_test]),
        y[id_test]  # we add in the last position the corresponding fold labels
    ])
    for id_train, id_test in TimeSeriesSplit(n_splits=3).split(X)
])  # (test_size*n_splits, n_models_to_stack+1)
However, now I get a new error:
1/1 [==============================] - 2s 2s/step - loss: 64.2233 - mse: 64.2233 - mae: 1.5694
1/1 [==============================] - 0s 187ms/step
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-98-cd6a2aea778e> in <module>
12
13 # generate cross-val-prediction with rf and gb using TimeSeriesSplit
---> 14 cross_val_predict = np.row_stack([
15 np.column_stack([
16 model_1.fit(X[id_train], y[id_train]), model_1.predict(X[id_test]),
<ipython-input-98-cd6a2aea778e> in <listcomp>(.0)
13 # generate cross-val-prediction with rf and gb using TimeSeriesSplit
14 cross_val_predict = np.row_stack([
---> 15 np.column_stack([
16 model_1.fit(X[id_train], y[id_train]), model_1.predict(X[id_test]),
17 model_2.fit(X[id_train], y[id_train]), model_2.predict(X[id_test]),
<__array_function__ internals> in column_stack(*args, **kwargs)
~\Anaconda3\lib\site-packages\numpy\lib\shape_base.py in column_stack(tup)
654 arr = array(arr, copy=False, subok=True, ndmin=2).T
655 arrays.append(arr)
--> 656 return _nx.concatenate(arrays, 1)
657
658
<__array_function__ internals> in concatenate(*args, **kwargs)
ValueError: all the input array dimensions for the concatenation axis must match exactly, but along dimension 0, the array at index 0 has size 1850 and the array at index 1 has size 75
It seems like I somehow have to preserve the original structure of the code while still calling fit and predict on the neural network. Can anyone help, please?
As stated in the error,
all the input array dimensions for the concatenation axis must match exactly, but along dimension 0, the array at index 0 has size 1850 and the array at index 1 has size 75
NumPy is essentially telling you that the shapes of the concatenated matrices/arrays must be aligned along the concatenation axis. For example, we can column-concatenate a 4x5 matrix with a 4x4 matrix to create a 4x9 matrix, because both have 4 rows.
The error here reports that the axes are not aligned: we cannot concatenate a 4x5 matrix with a 10x10 matrix. In your second version the return values of fit (the fitted estimator for sklearn, the History object for Keras) are themselves placed inside np.column_stack next to the prediction arrays, which is why arrays of incompatible sizes (1850 vs. 75 along dimension 0) end up being stacked. You can use the np.reshape function to modify the shape of one of the arrays so that they can be concatenated.
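Beyond reshaping, one way to keep the original loop structure while coping with the fact that Keras's fit returns a History object is a small helper that fits in place and returns only the flattened predictions. A sketch, assuming the models and data defined in the question:
import numpy as np
from sklearn.model_selection import TimeSeriesSplit

def fit_predict(model, X_train, y_train, X_test):
    # fit in place; ignore the return value (History for Keras, self for sklearn)
    model.fit(X_train, y_train)
    # ravel so Keras's (n, 1) output becomes 1-D, matching the sklearn models
    return np.ravel(model.predict(X_test))

cross_val_predict = np.row_stack([
    np.column_stack([
        fit_predict(model_1, X[id_train], y[id_train], X[id_test]),
        fit_predict(model_2, X[id_train], y[id_train], X[id_test]),
        fit_predict(model_3, X[id_train], y[id_train], X[id_test]),
        y[id_test]  # fold labels in the last column
    ])
    for id_train, id_test in TimeSeriesSplit(n_splits=3).split(X)
])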
Kindly refer to this for more information. Thank you!

Python Sklearn "ValueError: Classification metrics can't handle a mix of multiclass-multioutput and binary targets" error

I have already visited this answer but didn't understand it.
I don't get this error when I use the train_test_split function to split the same dataset into training and testing sets.
But when I try to use different csv files for testing and training, I get this error.
link to titanic kaggle competition
Can someone please explain why I am getting this error?
from sklearn.linear_model import LogisticRegression
logreg=LogisticRegression()
logreg.fit(df,survived_df)
predictions=logreg.predict(test)
from sklearn.metrics import accuracy_score
accuracy=accuracy_score(test_survived,predictions) # error here: ValueError: Classification metrics can't handle a mix of multiclass-multioutput and binary targets
print(accuracy)
Full Error
ValueError Traceback (most recent call last)
<ipython-input-243-89c8ae1a928d> in <module>
----> 1 logreg.score(test,test_survived)
2
~/mldl/kaggle_practice/titanic_pilot/venv/lib64/python3.8/site-packages/sklearn/base.py in score(self, X, y, sample_weight)
497 """
498 from .metrics import accuracy_score
--> 499 return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
500
501 def _more_tags(self):
~/mldl/kaggle_practice/titanic_pilot/venv/lib64/python3.8/site-packages/sklearn/utils/validation.py in inner_f(*args, **kwargs)
70 FutureWarning)
71 kwargs.update({k: arg for k, arg in zip(sig.parameters, args)})
---> 72 return f(**kwargs)
73 return inner_f
74
~/mldl/kaggle_practice/titanic_pilot/venv/lib64/python3.8/site-packages/sklearn/metrics/_classification.py in accuracy_score(y_true, y_pred, normalize, sample_weight)
185
186 # Compute accuracy for each possible representation
--> 187 y_type, y_true, y_pred = _check_targets(y_true, y_pred)
188 check_consistent_length(y_true, y_pred, sample_weight)
189 if y_type.startswith('multilabel'):
~/mldl/kaggle_practice/titanic_pilot/venv/lib64/python3.8/site-packages/sklearn/metrics/_classification.py in _check_targets(y_true, y_pred)
88
89 if len(y_type) > 1:
---> 90 raise ValueError("Classification metrics can't handle a mix of {0} "
91 "and {1} targets".format(type_true, type_pred))
92
ValueError: Classification metrics can't handle a mix of multiclass-multioutput and binary targets
Full Code
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df=pd.read_csv('data/train.csv')
test=pd.read_csv('data/test.csv')
test_survived=pd.read_csv('data/gender_submission.csv')
plt.figure(5)
df=df.drop(columns=['Name','SibSp','Ticket','Cabin','Parch','Embarked'])
test=test.drop(columns=['Name','SibSp','Ticket','Cabin','Parch','Embarked'])
sns.heatmap(df.isnull())
plt.figure(2)
sns.boxplot(data=df,y='Age')
# from the boxplot the 75th percentile seems to be 38 and the 25th percentile seems to be 20,
# so multiplying by 1.5 at both ends, Age(10,57) seems good; let's treat values outside this range as outliers.
# also using this age range when calculating the mean used to replace na values of Age.
df=df.loc[df['Age'].between(9,58),]
# test=test.loc[test['Age'].between(9,58),]
df=df.reset_index(drop=True,)
class_3_age=df.loc[df['Pclass']==3].Age.mean()
class_2_age=df.loc[df['Pclass']==2].Age.mean()
class_1_age=df.loc[df['Pclass']==1].Age.mean()
def remove_null_age(data):
    agee=data[0]
    pclasss=data[1]
    if pd.isnull(agee):
        if pclasss==1:
            return class_1_age
        elif pclasss==2:
            return class_2_age
        else:
            return class_3_age
    return agee
df['Age']=df[["Age","Pclass"]].apply(remove_null_age,axis=1)
test['Age']=test[["Age","Pclass"]].apply(remove_null_age,axis=1)
sex=pd.get_dummies(df['Sex'],drop_first=True)
test_sex=pd.get_dummies(test['Sex'],drop_first=True)
sex=sex.reset_index(drop=True)
test_sex=test_sex.reset_index(drop=True)
df=df.drop(columns=['Sex'])
test=test.drop(columns=['Sex'])
df=pd.concat([df,sex],axis=1)
test=test.reset_index(drop=True)
df=df.reset_index(drop=True)
test=pd.concat([test,test_sex],axis=1)
survived_df=df["Survived"]
df=df.drop(columns='Survived')
test["Age"]=test['Age'].round(1)
test.at[152,'Fare']=30
from sklearn.linear_model import LogisticRegression
logreg=LogisticRegression()
logreg.fit(df,survived_df)
predictions=logreg.predict(test)
from sklearn.metrics import accuracy_score
accuracy=accuracy_score(test_survived,predictions)
print(accuracy)
You probably want to compute the accuracy of the predictions against the Survived column of the test_survived dataframe:
from sklearn.metrics import accuracy_score
accuracy=accuracy_score(test_survived['Survived'],predictions)
print(accuracy)
Your error occurred because accuracy_score() takes two 1-dimensional arrays, one with the ground-truth labels and the other with the predicted labels. But you provided a 2-dimensional "array" (the whole dataframe) together with the 1-dimensional predictions, hence it assumed that your first input is a multiclass-multioutput target.
The documentation is also a helpful resource for this.
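A quick way to see the mismatch, assuming the standard Kaggle gender_submission.csv layout with a PassengerId and a Survived column:
print(test_survived.shape)               # e.g. (418, 2) -> 2-D, read as multiclass-multioutput
print(test_survived['Survived'].shape)   # (418,)        -> 1-D binary target
print(predictions.shape)                 # (418,)        -> 1-D binary predictions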

From numpy array of sentences to array of embeddings

I'm learning to use tensorflow and trying to classify text. I have a dataset where each text is associated with a label, 0 or 1. My goal is to use a sentence embedding to do the classification. First I created an embedding of the whole text using the precompiled GNews Swivel embedding:
embedding = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(embedding, input_shape=[2], dtype=tf.string,
                           trainable=True, output_shape=[None, 20])
Now I'd like to try something else (similar to this method http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/) and I wanted to:
Separate each text into sentences.
Create an array of embeddings for each text, one per sentence.
Use that as input for my model.
I'm able to separate the texts into sentences. Each text is an array of sentences saved as:
[array(['AITA - Getting Hugged At The Bar .',
'This all happened less than an hour ago..',
'I was at a bar I frequent and talking to some people I know, suddenly I feel someone from behind me hugging and starting to grind against me.',
"I know a lot of people at the bar, and assume it's a friend of mine, but when I look down at the shoes I do not recognize them.",
'I look back and I see a dude I do not know, nor have I ever seen.',
"He looks back at me, with horror in his eyes, because I'm a dude too...",
'I feel an urge of rage inside me and shove him in the chest with my elbow so I can get away..',
'He goes to his table and I go back to mine.',
'I was with my roommate and his girlfriend.',
'They asked what happened and I told them, then I see the guy who hugged me looking around for me.',
'Him and two of his friends come up to us and he says: .',
'"I just wanted to apologize, I thought you were someone else.".',
'I respond, "I understand, just check before you hug people.',
'Now, please fuck off".',
'He repeats his last statement, so do I.',
'This happens one more time and at this point his friends have surrounded me, my roommate is on his feet and I have left my beer at the table.',
'His friend goes in my face and says.', '.',
'"He just wanted to apologize, you really shouldn\'t be yelling at us" and starts waiving his finger at me.. We are at a rock bar, it\'s loud, I was speaking louder just to be sure I am heard..',
'The manager knows me so he comes asking me what happened.',
'I explain the situation and he speaks with them then he tells me.',
'.', '"They want to say sorry, can you guys shake hand?', '".',
'"Yeah sure, I just want them to leave me alone."', '.',
"Honestly I didn't even want to touch the guy, but whatever.",
"We shake hands and they go away.. Me and my roommate look at their table and there's no one that looks anything like me.",
'So, reddit, did I overreact?', 'Am I The Asshole here?'],
dtype='<U190')
array(["AITA if i don't want to pay my friend 5 dollars for a slice of pizzaSo, my friend bought herself, our other friend and I a pizza to eat for lunch.",
'Me and other friend ate 1 slice of pizza from an extra large pizza.',
'Other friend has already paid my friend that bought the pizza 5 dollars..',
'I am trying to save money wherever i can, but she really wants me to pay her 5 dollars "so its fair".. AITA?'],
dtype='<U146')
Now when I try to create an embedding from one element of the array it works. Here is my embedding function:
def embedding_f(test):
    print("test shape:", test.shape)
    # a = tf.constant(test)
    embedding = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
    hub_layer = hub.KerasLayer(embedding, input_shape=[], dtype=tf.string,
                               trainable=True, output_shape=[None, 20])
    ret = hub_layer(test)
    # print(ret)
    return ret.numpy()
# Works
emb = cnn.embedding_f(train_data[0])
But if I try to input a batch of data (as will be done later in the pipeline), the program crashes:
# Crashes
emb = cnn.embedding_f(train_data[0:2])
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-76f4f9171cad> in <module>
----> 1 emb = cnn.embedding_f(train_data[0:2])
~/AITA/aita/cnn.py in embedding_f(test)
22 hub_layer = hub.KerasLayer(embedding, input_shape=[2], dtype=tf.string,
23 trainable=True, output_shape=[None, 20])
---> 24 ret = hub_layer(test)
25 # print(ret)
26 return ret.numpy()
/usr/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
817 return ops.convert_to_tensor_v2(x)
818 return x
--> 819 inputs = nest.map_structure(_convert_non_tensor, inputs)
820 input_list = nest.flatten(inputs)
821
/usr/lib/python3.8/site-packages/tensorflow/python/util/nest.py in map_structure(func, *structure, **kwargs)
615
616 return pack_sequence_as(
--> 617 structure[0], [func(*x) for x in entries],
618 expand_composites=expand_composites)
619
/usr/lib/python3.8/site-packages/tensorflow/python/util/nest.py in <listcomp>(.0)
615
616 return pack_sequence_as(
--> 617 structure[0], [func(*x) for x in entries],
618 expand_composites=expand_composites)
619
/usr/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in _convert_non_tensor(x)
815 # `SparseTensors` can't be converted to `Tensor`.
816 if isinstance(x, (np.ndarray, float, int)):
--> 817 return ops.convert_to_tensor_v2(x)
818 return x
819 inputs = nest.map_structure(_convert_non_tensor, inputs)
/usr/lib/python3.8/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor_v2(value, dtype, dtype_hint, name)
1276 ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
1277 """
-> 1278 return convert_to_tensor(
1279 value=value,
1280 dtype=dtype,
/usr/lib/python3.8/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
1339
1340 if ret is None:
-> 1341 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
1342
1343 if ret is NotImplemented:
/usr/lib/python3.8/site-packages/tensorflow/python/framework/tensor_conversion_registry.py in _default_conversion_function(***failed resolving arguments***)
50 def _default_conversion_function(value, dtype, name, as_ref):
51 del as_ref # Unused.
---> 52 return constant_op.constant(value, dtype, name=name)
53
54
/usr/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name)
259 ValueError: if called on a symbolic tensor.
260 """
--> 261 return _constant_impl(value, dtype, shape, name, verify_shape=False,
262 allow_broadcast=True)
263
/usr/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
268 ctx = context.context()
269 if ctx.executing_eagerly():
--> 270 t = convert_to_eager_tensor(value, ctx, dtype)
271 if shape is None:
272 return t
/usr/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
94 dtype = dtypes.as_dtype(dtype).as_datatype_enum
95 ctx.ensure_initialized()
---> 96 return ops.EagerTensor(value, ctx.device_name, dtype)
97
98
ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type numpy.ndarray).
The error states that it's not possible to convert a NumPy array to a Tensor. I've tried changing the input_shape parameter of the KerasLayer to no avail. The only solution I see is to calculate the embedding for each text by looping through all of them one by one before feeding the result to the rest of the network, but that seems highly inefficient (and requires too much memory for my laptop). The examples I have seen with word embeddings do it this way, however.
What is the correct way to go about getting a list of embedding from multiple sentences?
I think your output_shape should be set to [20] (from https://www.tensorflow.org/hub/api_docs/python/hub/KerasLayer):
hub.KerasLayer("/tmp/text_embedding_model",
output_shape=[20], # Outputs a tensor with shape [batch_size, 20].
input_shape=[], # Expects a tensor of shape [batch_size] as input.
dtype=tf.string) # Expects a tf.string input tensor.
Using TF 2.4.1 and tensorflow_hub 0.11.0, this works for me:
data = np.array(['AITA - Getting Hugged At The Bar .', 'This all happened less than an hour ago..'])
model_url = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
embedding = hub.KerasLayer(model_url, input_shape=[], dtype=tf.string,
                           trainable=True, output_shape=[20])(data)
If you don't want to add layers on top of the KerasLayer, you can also just call
model = hub.load(model_url)
embedding = model(data)
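Note that the crash in the question comes from passing an array of arrays: each text has a different number of sentences, so train_data[0:2] is a ragged object array that cannot be converted to a rectangular string tensor. A minimal sketch of one workaround, assuming train_data holds one array of sentences per text: embed each text's sentences separately and reduce them (here by averaging) to one fixed-size vector per text:
import numpy as np
import tensorflow_hub as hub

model = hub.load("https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1")

def text_vector(sentences):
    # sentences: 1-D array of strings; model(...) returns shape (n_sentences, 20)
    return model(sentences).numpy().mean(axis=0)

text_vectors = np.stack([text_vector(t) for t in train_data[0:2]])
print(text_vectors.shape)  # (2, 20)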

Linear Regression

My Problem Statement is :
The following data set shows the result of recently conducted study on the correlation of the number of hours spent driving with the risk of developing acute back pain. Find the equation of the best fit line for this data.
Data set is as below :
x y
10 95
9 80
2 10
15 50
10 45
16 98
11 38
16 93
Machine spec : Linux Ubuntu 18.10 64bit
I am getting this error:
python LR.py
Accuracy :
43.70948145101002
[6.01607946]
Enter the no of hours10
y :
0.095271*10.000000+5.063367
Risk Score : 6.016079463451905
Traceback (most recent call last):
  File "LR.py", line 30, in <module>
    plt.plot(X,y,'o')
  File "/home/sumeet/anaconda3/lib/python3.6/site-packages/matplotlib/pyplot.py", line 3358, in plot
    ret = ax.plot(*args, **kwargs)
  File "/home/sumeet/anaconda3/lib/python3.6/site-packages/matplotlib/__init__.py", line 1855, in inner
    return func(ax, *args, **kwargs)
  File "/home/sumeet/anaconda3/lib/python3.6/site-packages/matplotlib/axes/_axes.py", line 1527, in plot
    for line in self._get_lines(*args, **kwargs):
  File "/home/sumeet/anaconda3/lib/python3.6/site-packages/matplotlib/axes/_base.py", line 406, in _grab_next_args
    for seg in self._plot_args(this, kwargs):
  File "/home/sumeet/anaconda3/lib/python3.6/site-packages/matplotlib/axes/_base.py", line 383, in _plot_args
    x, y = self._xy_from_xy(x, y)
  File "/home/sumeet/anaconda3/lib/python3.6/site-packages/matplotlib/axes/_base.py", line 242, in _xy_from_xy
    "have shapes {} and {}".format(x.shape, y.shape))
ValueError: x and y must have same first dimension, but have shapes (8, 1) and (1,)
The code is as below:
import matplotlib.pyplot as plt
import pandas as pd
# Read Dataset
dataset=pd.read_csv("hours.csv")
X=dataset.iloc[:,:-1].values
y=dataset.iloc[:,1].values
# Import the Linear Regression and Create object of it
from sklearn.linear_model import LinearRegression
regressor=LinearRegression()
regressor.fit(X,y)
Accuracy=regressor.score(X, y)*100
print("Accuracy :")
print(Accuracy)
# Predict the value using Regressor Object
y_pred=regressor.predict([[10]])
print(y_pred)
# Take user input
hours=int(input('Enter the no of hours'))
#calculate the value of y
eq=regressor.coef_*hours+regressor.intercept_
y='%f*%f+%f' %(regressor.coef_,hours,regressor.intercept_)
print("y :")
print(y)
print("Risk Score : ", eq[0])
plt.plot(X,y,'o')
plt.plot(X,regressor.predict(X));
plt.show()
In the beginning of your code, you define the y which you probably want to plot:
y=dataset.iloc[:,1].values
but further down you re-define (and thus overwrite) it as
y='%f*%f+%f' %(regressor.coef_,hours,regressor.intercept_)
which causes the error, as this last y is a string and not an array with 8 elements like X (and like your initial y).
Change it to something else, e.g. Y, on the relevant lines at the end:
Y='%f*%f+%f' %(regressor.coef_,hours,regressor.intercept_)
print("Y :")
print(Y)
so as to keep your y as initially defined, and you should be fine.
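For completeness, a sketch of the corrected tail of the script under that renaming:
eq=regressor.coef_*hours+regressor.intercept_
Y='%f*%f+%f' %(regressor.coef_,hours,regressor.intercept_)
print("Y :")
print(Y)
print("Risk Score : ", eq[0])
plt.plot(X,y,'o')   # y is still the original target array, same length as X
plt.plot(X,regressor.predict(X))
plt.show()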

How to do cross validation for multiclass data?

I was able to use the following method to do cross validation on binary data, but it does not seem to work for multiclass data:
> cross_validation.cross_val_score(alg, X, y, cv=cv_folds, scoring='roc_auc')
/home/ubuntu/anaconda3/lib/python3.6/site-packages/sklearn/metrics/scorer.py in __call__(self, clf, X, y, sample_weight)
169 y_type = type_of_target(y)
170 if y_type not in ("binary", "multilabel-indicator"):
--> 171 raise ValueError("{0} format is not supported".format(y_type))
172
173 if is_regressor(clf):
ValueError: multiclass format is not supported
> y.head()
0 10
1 6
2 12
3 6
4 10
Name: rank, dtype: int64
> type(y)
pandas.core.series.Series
I also tried changing roc_auc to f1 but I still get an error:
/home/ubuntu/anaconda3/lib/python3.6/site-packages/sklearn/metrics/classification.py in precision_recall_fscore_support(y_true, y_pred, beta, labels, pos_label, average, warn_for, sample_weight)
1016 else:
1017 raise ValueError("Target is %s but average='binary'. Please "
-> 1018 "choose another average setting." % y_type)
1019 elif pos_label not in (None, 1):
1020 warnings.warn("Note that pos_label (set to %r) is ignored when "
ValueError: Target is multiclass but average='binary'. Please choose another average setting.
Is there any method I can use to do cross validation for such type of data?
As pointed out in the comment by Vivek Kumar, sklearn metrics support multiclass averaging for both the F1 score and the ROC computation, albeit with some limitations when the data is unbalanced. So you can manually construct the scorer with the corresponding average parameter or use one of the predefined ones (e.g. 'f1_micro', 'f1_macro', 'f1_weighted').
If multiple scores are needed, then instead of cross_val_score use cross_validate (available since sklearn 0.19 in the module sklearn.model_selection).
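A minimal sketch, assuming alg, X, y and cv_folds are as defined in the question and a recent sklearn (>= 0.19) where these helpers live in sklearn.model_selection:
from sklearn.model_selection import cross_val_score, cross_validate
from sklearn.metrics import make_scorer, f1_score

# predefined multiclass-aware scorer string
scores = cross_val_score(alg, X, y, cv=cv_folds, scoring='f1_macro')

# or build the scorer manually with an explicit average parameter
macro_f1 = make_scorer(f1_score, average='macro')
scores = cross_val_score(alg, X, y, cv=cv_folds, scoring=macro_f1)

# several metrics at once with cross_validate
results = cross_validate(alg, X, y, cv=cv_folds,
                         scoring=['f1_micro', 'f1_macro', 'f1_weighted'])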