ValueError: Input 0 of layer sequential_2 is incompatible with the layer: - tensorflow

I have an error in the following code. The error occurs in the second part of the code; in the first part I am declaring my dataset, layers, etc.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
data=pd.read_excel('/content/dataset.xlsx')
data.head()
data.plot(kind='scatter', x='fiyat', y='yil', alpha=0.5, color='red')
plt.xlabel('price')
plt.ylabel('year')
plt.title('fiyat vs. yil scatter plot')
data.plot(kind='scatter', x='fiyat', y='km', alpha=0.5, color='grey')
plt.xlabel('price')
plt.ylabel('km')
plt.title('fiyat vs. km scatter plot')
data.plot(kind='scatter', x='fiyat', y='motor_gucu_hp', alpha=0.5, color='green')
plt.xlabel('price')
plt.ylabel('engine power (hp)')
plt.title('fiyat vs. motor_gucu_hp scatter plot')
# Importing the dataset
X = data.iloc[:, data.columns != 'fiyat']
y = data.fiyat
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.preprocessing import StandardScaler
from matplotlib import pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
# define base model
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(30, input_dim=120, kernel_initializer='normal', activation='relu'))
    model.add(Dense(120, activation='relu'))
    model.add(Dense(120, activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    # Compile model
    model.compile(loss='mse',
                  optimizer='adam',
                  metrics=['mae'])
    return model
model = baseline_model()
model.summary()
And I am getting the error here, at the model.fit call:
import tensorflow as tf
from tensorflow import keras
import numpy as np
# Display training progress by printing a single dot for each completed epoch
EPOCHS = 500
# Store training stats
history = model.fit(X_train, y_train, epochs=EPOCHS,
                    batch_size=16, verbose=0)
ValueError: Input 0 of layer sequential_2 is incompatible with the layer: expected axis -1 of input shape to have value 120 but received input with shape (None, 47)
The error is as above. Can you help? What can I do?
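For what it's worth, the error message already points at the mismatch: the first Dense layer hard-codes input_dim=120, while the scaled training matrix has 47 columns. A minimal sketch of a fix, assuming the X_train produced above, derives the input size from the data instead of hard-coding it:

n_features = X_train.shape[1]  # 47 here

def baseline_model():
    model = Sequential()
    model.add(Dense(30, input_dim=n_features, kernel_initializer='normal', activation='relu'))
    model.add(Dense(120, activation='relu'))
    model.add(Dense(120, activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    model.compile(loss='mse', optimizer='adam', metrics=['mae'])
    return model

This way the first layer always matches however many feature columns survive the preprocessing.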

Related

How to match dimensions in CNN

I'm trying to build a CNN whose goal is to predict the label from 3 features, but it is giving a dimension error.
Could someone help me?
Updated after comments from @M.Innat.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
from tensorflow.keras.models import Sequential, load_model
from sklearn.metrics import accuracy_score, f1_score, mean_absolute_error
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import Adam
from sklearn import metrics
import tensorflow as tf
import random
# Create data
n = 8500
l = [2, 3, 4, 5,6]
k = int(np.ceil(n/len(l)))
labels = [item for item in l for i in range(k)]
random.shuffle(labels,random.random)
labels =np.array(labels)
label_unique = np.unique(labels)
x = np.linspace(613000, 615000, num=n) + np.random.uniform(-5, 5, size=n)
y = np.linspace(7763800, 7765800, num=n) + np.random.uniform(-5, 5, size=n)
z = np.linspace(1230, 1260, num=n) + np.random.uniform(-5, 5, size=n)
X = np.column_stack((x,y,z))
Y = labels
# Split the dataset into training and testing.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=1234)
seq_len=len(X_train)
n_features=len(X_train[0])
droprate=0.1
exit_un=len(label_unique)
print('n_features: {} \n seq_len: {} \n exit_un: {}'.format(n_features,seq_len,exit_un))
X_train = X_train[..., None][None, ...] # add channel axis + batch axis
Y_train = pd.get_dummies(Y_train) # transform to one-hot encoded
drop_prob = 0.5
my_model = Sequential()
my_model.add(Conv2D(input_shape=(seq_len,n_features,1),filters=32,kernel_size=(3,3),padding='same',activation="relu")) # 1 channel of grayscale.
my_model.add(MaxPooling2D(pool_size=(2,1)))
my_model.add(Conv2D(filters=64,kernel_size=(5,5), padding='same',activation="relu"))
my_model.add(MaxPooling2D(pool_size=(2,1)))
my_model.add(Flatten())
my_model.add(Dense(units = 1024, activation="relu"))
my_model.add(Dropout(rate=drop_prob))
my_model.add(Dense(units = exit_un, activation="softmax"))
n_epochs = 100
batch_size = 10
learn_rate = 0.005
# Define the optimizer and then compile.
my_optimizer=Adam(lr=learn_rate)
my_model.compile(loss = "categorical_crossentropy", optimizer = my_optimizer, metrics=['categorical_crossentropy','accuracy'])
my_summary = my_model.fit(X_train, Y_train, epochs=n_epochs, batch_size = batch_size, verbose = 1)
The error I have is:
ValueError: Data cardinality is ambiguous:
x sizes: 1
y sizes: 5950
Make sure all arrays contain the same number of samples.
You're passing the input samples without the channel axis and also without the batch axis. Also, according to your loss function, you should transform your integer labels to one-hot encoding.
exit_un=len(label_unique)
drop_prob = 0.5
X_train = X_train[..., None][None, ...] # add channel axis + batch axis
X_train = np.repeat(X_train, repeats=100, axis=0) # batch-ing
Y_train = np.repeat(Y_train, repeats=100, axis=0) # batch-ing
Y_train = pd.get_dummies(Y_train) # transform to one-hot encoded
print(X_train.shape, Y_train.shape)
my_model = Sequential()
...
Update
Based on the discussion, it seems like you need the Conv1D operation at modeling time and need to reshape your sample as mentioned in the comment. Here is the Colab; it should work now.
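For reference, a minimal Conv1D sketch along those lines, assuming X_train is the raw (samples, 3) array straight out of train_test_split (before the channel/batch reshaping above):

import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, Flatten, Dense, Dropout

X_train_seq = X_train[..., None]      # (samples, 3, 1): features as a length-3 sequence
Y_train_oh = pd.get_dummies(Y_train)  # one-hot labels for categorical_crossentropy

conv1d_model = Sequential([
    Conv1D(32, kernel_size=2, padding='same', activation='relu',
           input_shape=(X_train_seq.shape[1], 1)),
    Flatten(),
    Dense(64, activation='relu'),
    Dropout(0.5),
    Dense(Y_train_oh.shape[1], activation='softmax'),
])
conv1d_model.compile(loss='categorical_crossentropy', optimizer='adam',
                     metrics=['accuracy'])
conv1d_model.fit(X_train_seq, Y_train_oh, epochs=10, batch_size=32, verbose=1)

Here each sample keeps its own row in the batch, so the cardinality of x and y match.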

Model.predict throwing TypeError: 'numpy.ndarray' object is not callable

I am new to Python and facing a few issues while implementing neural networks on an earthquake prediction problem.
There is very little material available online on solving this with neural networks, so I got stuck.
Please help.
Model.predict is throwing TypeError: 'numpy.ndarray' object is not callable.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
train_data = pd.read_csv("C:\\Users\\rjraj\\Desktop\\mma\\ML & AI\\Project\\train_values.csv")
train_labels = pd.read_csv("C:\\Users\\rjraj\\Desktop\\mma\\ML & AI\\Project\\train_labels.csv")
test_labels = pd.read_csv("C:\\Users\\rjraj\\Desktop\\mma\\ML & AI\\Project\\test_values.csv")
X_tr = train_data
X_te = test_labels
y_tr = train_labels['damage_grade'].values
# label encoding the categorical variables
label_encoding_columns = ['land_surface_condition', 'foundation_type', 'roof_type',
                          'ground_floor_type', 'other_floor_type', 'position',
                          'plan_configuration', 'legal_ownership_status']
# label encoding categorical columns in train dataset
for i in label_encoding_columns:
    X_tr[i] = X_tr[i].astype("category")
    X_tr[i] = X_tr[i].cat.codes
# label encoding categorical columns in test dataset
for j in label_encoding_columns:
    X_te[j] = X_te[j].astype("category")
    X_te[j] = X_te[j].cat.codes
from sklearn.model_selection import train_test_split
X_train, X_test,y_train, y_test = train_test_split(X_tr,y_tr,test_size = 0.3,random_state = 42)
X_train.shape
(182420, 39)
X_test.shape
(78181, 39)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(X_train)
MinMaxScaler()
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
#from tensorflow.keras.optimizers import Adam
model = Sequential()
model.add(Dense(4, activation = 'relu'))
model.add(Dense(4, activation = 'relu'))
model.add(Dense(1))
model.compile(loss='binary_crossentropy', optimizer='rmsprop')
model.fit(x=X_train, y=y_train, epochs=30)
model.evaluate(X_test,y_test, verbose = 0)
model.evaluate(X_train,y_train, verbose = 0)
test_pred = model.predict(X_test)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-74-82e9029ecb43> in <module>
----> 1 test_pred = model.predict(X_test)
TypeError: 'numpy.ndarray' object is not callable
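For context, that traceback means the name model.predict is no longer bound to the method: by the time this cell runs it refers to a NumPy array, which typically happens in a notebook when an earlier cell assigned to it. A hypothetical sketch of the trap and the fix:

test_pred = model.predict(X_test)        # works the first time
# model.predict = model.predict(X_test)  # an accidental assignment like this replaces
#                                        # the bound method with its ndarray result, so
#                                        # every later model.predict(...) call raises
#                                        # TypeError: 'numpy.ndarray' object is not callable

# Fix: keep predictions in ordinary variables, and re-run the cell that builds
# and fits the model (or restart the kernel) to restore the shadowed method.
test_pred = model.predict(X_test)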

What's the reason for shapes not being compatible and how can I fix this?

Context: I have not coded before, but now I'm hoping to build a facial recognition system from fer2013. I encountered a problem when building my CNN: the shape of the labels, (None, 7), and the shape of the model output, (23, 23, 7), are not compatible. I don't know where I went wrong. Below is the code.
import pandas as pd
df = pd.read_csv('/content/drive/My Drive/Colab Notebooks/fer2013.csv')
from csv import reader
import numpy as np
from numpy import genfromtxt
import cv2
from tensorflow import keras
import matplotlib.pyplot as plt
pixels = df['pixels'].tolist() #1
faces = []
for pixel_sequence in pixels: # this lists down each row
    face = [int(pixel) for pixel in pixel_sequence.split()] # this lists down each integer separately
    face = np.asarray(face).reshape(48, 48)
    face = cv2.resize(face.astype('uint8'), (48, 48))
    faces.append(face.astype('float32')) #### <-- this line
    face = face / 255.0
faces = np.asarray(faces)
faces = np.expand_dims(faces, -1)
emotions = np.array(pd.get_dummies(df['emotion']))
import sklearn.model_selection
from sklearn.model_selection import train_test_split
from keras.callbacks import ReduceLROnPlateau
X_train, X_test, y_train, y_test = train_test_split(faces,emotions, test_size=0.2, random_state=10)
model = keras.Sequential([
    keras.layers.Conv2D(128, 3, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.MaxPool2D(2, 2),
    keras.layers.Dense(7, activation='softmax')
])
model.compile(optimizer='adam',
              loss=keras.losses.CategoricalCrossentropy(),
              metrics='accuracy')
model.fit(X_train, y_train, epochs = 5, batch_size=300)
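The shapes are incompatible because Dense(7) is applied directly to the 4D pooled feature map, so the network outputs (batch, 23, 23, 7) while the labels are (batch, 7). A minimal sketch of a fix, assuming the faces/emotions arrays built above, is to flatten before the classifier and declare the input shape:

model = keras.Sequential([
    keras.layers.Conv2D(128, 3, activation='relu', input_shape=(48, 48, 1)),
    keras.layers.Dropout(0.5),
    keras.layers.MaxPool2D(2, 2),
    keras.layers.Flatten(),                      # (23, 23, 128) -> one vector per image
    keras.layers.Dense(7, activation='softmax')  # now outputs (batch, 7)
])
model.compile(optimizer='adam',
              loss=keras.losses.CategoricalCrossentropy(),
              metrics=['accuracy'])
model.fit(X_train, y_train, epochs=5, batch_size=300)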

Getting non-brodcastable error in my LSTM

So, I have been trying to apply an LSTM to this CSV file (the CSV file that I'm trying to train on).
It seems to train, but after the training it causes an issue with my test file: either Error 1, or, if I modify it a little bit, another error that says "ValueError: cannot reshape array of size 1047835 into shape".
Here is the code I'm implementing:
import math
import matplotlib.pyplot as plt
import keras
import pandas as pd
import numpy as np
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # Had to use the CPU because the GPU's compute capability was only 3.0
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import *
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping
df=pd.read_csv(r'C:\Users\LambertThePrick\Desktop\Databysir\LSTM.csv')
print(df.shape)
print(df.head(5))
#df.head(5)
TrainPart=df.iloc[:800,1:3].values
test_set=df.iloc[800:,1:3].values
scaler=MinMaxScaler(feature_range=(0,1))
Trainpart_scaled=scaler.fit_transform(TrainPart)
print(Trainpart_scaled)
X_Train=[]
Y_Train=[]
for i in range(60, 800):
    X_Train.append(Trainpart_scaled[i-60:i, 0])
    Y_Train.append(Trainpart_scaled[i, 0])
X_Train,Y_Train=np.array(X_Train),np.array(Y_Train)
X_Train = np.reshape(X_Train, (X_Train.shape[0], X_Train.shape[1], 1))
# print(X_train = np.reshape(X_Train, (X_Train.shape[0], X_Train.shape[1], 1)))
#(740, 60, 1)
model = Sequential()
#Adding the first LSTM layer and some Dropout regularisation
model.add(LSTM(units = 50, return_sequences = True, input_shape = (X_Train.shape[1], 1)))
model.add(Dropout(0.2))
# Adding a second LSTM layer and some Dropout regularisation
model.add(LSTM(units = 50, return_sequences = True))
model.add(Dropout(0.2))
# Adding a third LSTM layer and some Dropout regularisation
model.add(LSTM(units = 50, return_sequences = True))
model.add(Dropout(0.2))
# Adding a fourth LSTM layer and some Dropout regularisation
model.add(LSTM(units = 50))
model.add(Dropout(0.2))
# Adding the output layer
model.add(Dense(units = 1))
# Compiling the RNN
model.compile(optimizer = 'adam', loss = 'mean_squared_error')
# Fitting the RNN to the Training set
model.fit(X_Train, Y_Train, epochs = 100, batch_size = 32)
#THIS IS EXPT AFTER THIS
dataset_train = df.iloc[:800, 1:3]
dataset_test = df.iloc[800:, 1:3]
dataset_total = pd.concat((dataset_train, dataset_test), axis = 0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = scaler.transform(inputs)
X_Test = []
for i in range(60, 800):
    X_Test.append(inputs[i-60:i, 0])
X_Test = np.array(X_Test)
X_Test = np.reshape(X_Test, (X_Test.shape[0], X_Test.shape[1], 1))
print(X_Test.shape)
predicted_stock_price = model.predict(X_Test)
predicted_stock_price = scaler.inverse_transform(predicted_stock_price)
plt.plot(df.loc[800:, 'Date'],dataset_test.values, color = 'red', label = 'Real ASTL Stock Price')
plt.plot(df.loc[800:, 'Date'],predicted_stock_price, color = 'blue', label = 'Predicted ASTL Stock Price')
plt.xticks(np.arange(0,459,50))
plt.title('ASTL Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('ASTL Stock Price')
plt.legend()
plt.show()
At some point in your reshaping you end up with a non-integer division. Take this example:
import numpy as np
data = np.zeros(3936)
out = data.reshape((-1,1,24,2))
This works because 3936/24/2 results in an integer, 82.
But in this example
import numpy as np
data = np.zeros(34345)
out = data.reshape((-1,1,24,2))
you end up with the error message ValueError: cannot reshape array of size 34345 into shape (1,24,2), because the division does not result in an integer.
So, looping the way you do is bound to produce errors of that type.
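A cheap guard against that is to check divisibility before reshaping. A sketch, assuming the same (-1, 1, 24, 2) target shape:

import numpy as np

def safe_reshape(data, shape=(-1, 1, 24, 2)):
    # product of the fixed dimensions (the -1 wildcard is excluded)
    fixed = int(np.prod([d for d in shape if d != -1]))
    if data.size % fixed != 0:
        raise ValueError(f"size {data.size} is not divisible by {fixed}; "
                         f"trim or pad the array before reshaping")
    return data.reshape(shape)

safe_reshape(np.zeros(3936))    # fine: 3936 / 48 == 82
# safe_reshape(np.zeros(34345)) # raises the explicit ValueError above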

Keras neural network prediction not working

I have created a project using Keras and TensorFlow. I used the NSL-KDD dataset and coded my project in Python. I also used the SGD optimizer.
I would like to fit a model, then evaluate it and check its accuracy (so I can compare it to the machine learning results).
Here is my complete code; please review it.
import tensorflow as tf
from keras import backend as K
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants, signature_constants, signature_def_utils_impl
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
import numpy as np
sess = tf.Session()
K.set_session(sess)
K.set_learning_phase(0)
model_version = "2"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('KDD_Dataset.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 41:42].values
# Encoding categorical data X
from sklearn.preprocessing import LabelEncoder
labelencoder_X = LabelEncoder()
X[:,0] = labelencoder_X.fit_transform(X[:,0])
X[:,1] = labelencoder_X.fit_transform(X[:,1])
X[:,2] = labelencoder_X.fit_transform(X[:,2])
#
from sklearn.preprocessing import OneHotEncoder
onehotencoder_0 = OneHotEncoder(categorical_features=[0])
onehotencoder_1 = OneHotEncoder(categorical_features=[1])
onehotencoder_2 = OneHotEncoder(categorical_features=[2])
X = onehotencoder_0.fit_transform(X).toarray()
X = onehotencoder_1.fit_transform(X).toarray()
X = onehotencoder_2.fit_transform(X).toarray()
# Encoding categorical data y
from sklearn.preprocessing import LabelEncoder
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
max(y)
# Splitting the dataset into the Training set and Test set
#from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.2,
                                                    random_state=0)
# create the model
model = Sequential()
model.add(Dense(41, input_dim=8, init='uniform', activation='relu'))
model.add(Dense(20, init='uniform', activation='relu'))
model.add(Dense(1, init='uniform', activation='sigmoid'))
# compile the model
model.compile(loss='binary_crossentropy', optimizer=sgd,metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=200, batch_size=5, verbose=0)
See Dense(41, input_dim=8, init='uniform', activation='relu').
The model you defined expects 8 input features, but your inputs have 45 features. They do not match. You have to either build your model with enough input features to match the data, or cut the input down to 8 features to match your model.
Change the line
model.add(Dense(41, input_dim=8, init='uniform', activation='relu'))
to
model.add(Dense(42, input_dim=42, init='uniform', activation='relu'))
and change optimizer=sgd to optimizer='sgd'.
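More robust than hard-coding either number is to take the width from the encoded feature matrix itself; a sketch, assuming the X_train produced above (and keeping the old init= argument this code base uses):

sgd = SGD(lr=0.01)             # or simply optimizer='sgd' in compile
n_features = X_train.shape[1]  # width after one-hot encoding

model = Sequential()
model.add(Dense(41, input_dim=n_features, init='uniform', activation='relu'))
model.add(Dense(20, init='uniform', activation='relu'))
model.add(Dense(1, init='uniform', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])

That way the first layer always matches however many columns the one-hot encoders produce.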