Model.predict throwing TypeError: 'numpy.ndarray' object is not callable - numpy

I am new to Python and facing a few issues while implementing a neural network for an earthquake prediction problem.
There is very little material available online on solving this problem with neural networks, so I am stuck.
Please help.
model.predict is throwing TypeError: 'numpy.ndarray' object is not callable.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
train_data = pd.read_csv("C:\\Users\\rjraj\\Desktop\\mma\\ML & AI\\Project\\train_values.csv")
train_labels = pd.read_csv("C:\\Users\\rjraj\\Desktop\\mma\\ML & AI\\Project\\train_labels.csv")
test_labels = pd.read_csv("C:\\Users\\rjraj\\Desktop\\mma\\ML & AI\\Project\\test_values.csv")  # holds the test feature values
X_tr = train_data
X_te = test_labels
y_tr = train_labels['damage_grade'].values
# label encoding the categorical variables
label_encoding_columns = ['land_surface_condition', 'foundation_type', 'roof_type',
                          'ground_floor_type', 'other_floor_type', 'position',
                          'plan_configuration', 'legal_ownership_status']
# label encoding categorical columns in train dataset
for i in label_encoding_columns:
    X_tr[i] = X_tr[i].astype("category")
    X_tr[i] = X_tr[i].cat.codes
# label encoding categorical columns in test dataset
for j in label_encoding_columns:
    X_te[j] = X_te[j].astype("category")
    X_te[j] = X_te[j].cat.codes
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_tr, y_tr, test_size=0.3, random_state=42)
X_train.shape   # (182420, 39)
X_test.shape    # (78181, 39)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(X_train)   # returns MinMaxScaler()
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
#from tensorflow.keras.optimizers import Adam
model = Sequential()
model.add(Dense(4, activation='relu'))
model.add(Dense(4, activation='relu'))
model.add(Dense(1))
model.compile(loss='binary_crossentropy', optimizer='rmsprop')
model.fit(x=X_train, y=y_train, epochs=30)
model.evaluate(X_test, y_test, verbose=0)
model.evaluate(X_train, y_train, verbose=0)
test_pred = model.predict(X_test)
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-74-82e9029ecb43> in <module>
----> 1 test_pred = model.predict(X_test)
TypeError: 'numpy.ndarray' object is not callable
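For context, this exact TypeError is what Python raises whenever a NumPy array is called like a function. A common cause in notebooks (an assumption here, since the full notebook state is not shown) is an earlier cell rebinding model, predict, or model.predict to an array. A minimal sketch reproducing the message:

import numpy as np

# calling a NumPy array as if it were a function raises the same error
preds = np.zeros(3)
preds(0)   # TypeError: 'numpy.ndarray' object is not callable

# the same thing happens if an earlier cell ran something like
# model.predict = model.predict(X_test)   # rebinds predict to an ndarray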

Related

Why is the use of return_sequences giving different results across different environments?

When I use return_sequences=True for an LSTM layer before adding a Dense layer, it sometimes results in an error depending on the environment. I believe it mainly depends on the versions of TensorFlow and Keras. If I am using tensorflow 2.1.0 and Keras 2.3.0, I get the following error (raised from standardize_input_data):
ValueError: Error when checking target: expected dense_2 to have 3 dimensions, but got array with shape (7000, 1)
However, if I use tensorflow 2.9.1 and keras 2.9.0, I do not get any error.
Here is some minimal working sample code -
import numpy as np
import keras
from keras.layers import Dense, Dropout, LSTM, BatchNormalization, Input
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
epochs = 10
batch_size = 64
X, y = make_classification(n_samples=10000, n_features=3, n_classes=3, n_informative=3, n_redundant=0, n_repeated=0 ,weights=[0.5,0.5,0.5])
X = X.reshape(X.shape[0], 1, 3)
y = y.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
inputs = Input(shape=(X_train.shape[1:]))
outputs = LSTM(128, input_shape=(X_train.shape[1:]), return_sequences=True)(inputs)
outputs = Dropout(0.2)(outputs)
outputs = BatchNormalization()(outputs)
outputs = LSTM(128, input_shape=(X_train.shape[1:]), return_sequences=True)(outputs)
outputs = Dropout(0.2)(outputs)
outputs = BatchNormalization()(outputs)
outputs = Dense(32, activation="relu", kernel_initializer="glorot_uniform")(outputs)
outputs = Dropout(0.2)(outputs)
outputs = Dense(3, activation="softmax", kernel_initializer="glorot_uniform")(outputs)
model = keras.Model(inputs, outputs)
opt = keras.optimizers.Adam(lr=0.0001, decay=1e-6)
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
model.fit(X_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(X_test, y_test))
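For what it's worth, the shape mechanics behind the version difference: with return_sequences=True the last LSTM emits one vector per timestep, so the closing Dense produces a 3-D output while the targets are 2-D, and Keras 2.3.0 validated the target rank strictly at fit time. A minimal sketch (assuming TensorFlow 2.x in eager mode) comparing the two output ranks:

import tensorflow as tf
from tensorflow.keras import layers

x = tf.zeros((4, 1, 3))                            # (batch, timesteps, features)
seq = layers.LSTM(8, return_sequences=True)(x)     # rank 3: (4, 1, 8)
last = layers.LSTM(8, return_sequences=False)(x)   # rank 2: (4, 8)
print(seq.shape, last.shape)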

ValueError: Input 0 of layer sequential_2 is incompatible with the layer:

I have an error in the following code. The error is in the second part of the code; in the first part I am declaring my dataset, layers, etc.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
data=pd.read_excel('/content/dataset.xlsx')
data.head()
data.plot(kind='scatter', x='fiyat', y='yil',alpha = 0.5,color = 'red')
plt.xlabel('price') # label = name of label
plt.ylabel('year')
plt.title('Fiyat ve yil Scatter Plot')
data.plot(kind='scatter', x='fiyat', y='km',alpha = 0.5,color = 'grey')
plt.xlabel('price') # label = name of label
plt.ylabel('km')
plt.title('Fiyat ve km Scatter Plot')
data.plot(kind='scatter', x='fiyat', y='motor_gucu_hp',alpha = 0.5,color = 'green')
plt.xlabel('price') # label = name of label
plt.ylabel('machine power')
plt.title('fiyat ve motor_gucu_hp Scatter Plot')
# Importing the dataset
X = data.iloc[:, data.columns != 'fiyat']
y = data.fiyat
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.preprocessing import StandardScaler
from matplotlib import pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
# define base model
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(30, input_dim=120, kernel_initializer='normal', activation='relu'))
    model.add(Dense(120, activation='relu'))
    model.add(Dense(120, activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    # Compile model
    model.compile(loss='mse',
                  optimizer='adam',
                  metrics=['mae'])
    return model
model = baseline_model()
model.summary()
And I am getting the error here, at the model.fit call:
import tensorflow as tf
from tensorflow import keras
import numpy as np
# Display training progress by printing a single dot for each completed epoch
EPOCHS = 500
# Store training stats
history = model.fit(X_train, y_train, epochs=EPOCHS,
                    batch_size=16, verbose=0)
ValueError: Input 0 of layer sequential_2 is incompatible with the layer: expected axis -1 of input shape to have value 120 but received input with shape (None, 47)
I get an error like this. Can you help? What can I do?
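Not a definitive fix, but the error message itself points at the mismatch: the first Dense layer hard-codes input_dim=120 while the preprocessed X_train has 47 columns. A sketch of baseline_model that derives the input size from the data instead (reusing the names from the question; n_features is a hypothetical parameter added for illustration):

def baseline_model(n_features):
    model = Sequential()
    model.add(Dense(30, input_dim=n_features, kernel_initializer='normal', activation='relu'))
    model.add(Dense(120, activation='relu'))
    model.add(Dense(120, activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    model.compile(loss='mse', optimizer='adam', metrics=['mae'])
    return model

model = baseline_model(X_train.shape[1])   # 47 in the reported run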

Getting a non-broadcastable error in my LSTM

So, I have been trying to apply an LSTM to this CSV file: CSV file that I'm trying to train on.
However, it seems to train, but after the training it causes an issue on my test file with either
Error 1
or, if I modify it a little bit, I get another error which says "ValueError: cannot reshape array of size 1047835 into shape".
Here is the code I'm implementing:
import math
import matplotlib.pyplot as plt
import keras
import pandas as pd
import numpy as np
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # Had to use the CPU because the GPU's compute capability was only 3.0
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import *
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping
df=pd.read_csv(r'C:\Users\LambertThePrick\Desktop\Databysir\LSTM.csv')
print(df.shape)
print(df.head(5))
#df.head(5)
TrainPart=df.iloc[:800,1:3].values
test_set=df.iloc[800:,1:3].values
scaler=MinMaxScaler(feature_range=(0,1))
Trainpart_scaled=scaler.fit_transform(TrainPart)
print(Trainpart_scaled)
X_Train=[]
Y_Train=[]
for i in range(60, 800):
    X_Train.append(Trainpart_scaled[i-60:i, 0])
    Y_Train.append(Trainpart_scaled[i, 0])
X_Train,Y_Train=np.array(X_Train),np.array(Y_Train)
X_Train = np.reshape(X_Train, (X_Train.shape[0], X_Train.shape[1], 1))
# print(X_train = np.reshape(X_Train, (X_Train.shape[0], X_Train.shape[1], 1)))
#(740, 60, 1)
model = Sequential()
#Adding the first LSTM layer and some Dropout regularisation
model.add(LSTM(units=50, return_sequences=True, input_shape=(X_Train.shape[1], 1)))
model.add(Dropout(0.2))
# Adding a second LSTM layer and some Dropout regularisation
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
# Adding a third LSTM layer and some Dropout regularisation
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
# Adding a fourth LSTM layer and some Dropout regularisation
model.add(LSTM(units=50))
model.add(Dropout(0.2))
# Adding the output layer
model.add(Dense(units=1))
# Compiling the RNN
model.compile(optimizer='adam', loss='mean_squared_error')
# Fitting the RNN to the Training set
model.fit(X_Train, Y_Train, epochs=100, batch_size=32)
#THIS IS EXPT AFTER THIS
dataset_train = df.iloc[:800, 1:3]
dataset_test = df.iloc[800:, 1:3]
dataset_total = pd.concat((dataset_train, dataset_test), axis = 0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = scaler.transform(inputs)
X_Test = []
for i in range(60, 800):
    X_Test.append(inputs[i-60:i, 0])
X_Test = np.array(X_Test)
X_Test = np.reshape(X_Test, (X_Test.shape[0], X_Test.shape[1], 1))
print(X_Test.shape)
predicted_stock_price = model.predict(X_Test)
predicted_stock_price = scaler.inverse_transform(predicted_stock_price)
plt.plot(df.loc[800:, 'Date'],dataset_test.values, color = 'red', label = 'Real ASTL Stock Price')
plt.plot(df.loc[800:, 'Date'],predicted_stock_price, color = 'blue', label = 'Predicted ASTL Stock Price')
plt.xticks(np.arange(0,459,50))
plt.title('ASTL Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('ASTL Stock Price')
plt.legend()
plt.show()
You have a point in your reshaping where you end up with a non-integer division. Take this example:
import numpy as np
data = np.zeros(3936)
out = data.reshape((-1,1,24,2))
This works because 3936/24/2 results in an integer, 82.
But in this example
import numpy as np
data = np.zeros(34345)
out = data.reshape((-1,1,24,2))
you end up with the error message ValueError: cannot reshape array of size 34345 into shape (1,24,2), because the division does not result in an integer.
So, looping the way you do is bound to produce arrays whose sizes trigger errors of that type.
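If the incoming length cannot be guaranteed, one option (a sketch, not the only fix) is to trim the array to the largest multiple of the block size before reshaping:

import numpy as np

data = np.zeros(34345)
block = 1 * 24 * 2                         # product of the fixed target dimensions
usable = (data.size // block) * block      # largest length divisible by the block
out = data[:usable].reshape((-1, 1, 24, 2))
print(out.shape)                           # (715, 1, 24, 2)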

ValueError: Input 0 is incompatible with layer conv1d_1: expected ndim=3, found ndim=2

When I try to feed the output of the ELMo embedding layer to a Conv1D layer input, it gives the error
ValueError: Input 0 is incompatible with layer conv1d_1: expected ndim=3, found ndim=2
I want to add a convolution layer on top of the output of the ELMo embedding layer.
import tensorflow as tf
import tensorflow_hub as hub
import keras.backend as K
from keras import Model
from keras.layers import Input, Lambda, Conv1D, Flatten, Dense
from keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
import pandas as pd
from sklearn.model_selection import train_test_split
df = pd.read_csv("/home/raju/Desktop/spam.csv", encoding='latin-1')
X = df['v2']
Y = df['v1']
le = LabelEncoder()
le.fit(Y)
Y = le.transform(Y)
Y = to_categorical(Y)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.25)
elmo = hub.Module('/home/raju/models/elmo')
def embeddings(x):
    return elmo(tf.squeeze(tf.cast(x, dtype=tf.string)), signature='default', as_dict=True)['default']
input_layer = Input(shape=(1,), dtype=tf.string)
embed_layer = Lambda(embeddings, output_shape=(1024,))(input_layer)
conv_layer = Conv1D(4, 2, activation='relu')(embed_layer)
fcc_layer = Flatten()(conv_layer)
output_layer = Dense(2, activation='softmax')(fcc_layer)
model = Model(inputs=[input_layer], outputs=output_layer)
A Conv1D layer expects input of the shape (batch, steps, channels). The channels dimension is missing in your case, and you need to include it even if it is equal to 1. So the output shape of your elmo module should be (1024, 1) (this does not include the batch size). You can add a dimension to the output of the elmo module with tf.expand_dims(x, axis=-1).
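Applied to the question's code, that suggestion could look like the sketch below (it keeps the question's TF1-style tensorflow_hub API; the output_shape=(1024, 1) is an assumption to match the new rank):

def embeddings(x):
    emb = elmo(tf.squeeze(tf.cast(x, dtype=tf.string)), signature='default', as_dict=True)['default']
    return tf.expand_dims(emb, axis=-1)          # (batch, 1024, 1): add the missing channels dim

input_layer = Input(shape=(1,), dtype=tf.string)
embed_layer = Lambda(embeddings, output_shape=(1024, 1))(input_layer)
conv_layer = Conv1D(4, 2, activation='relu')(embed_layer)   # now receives ndim=3 input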

How to plot training loss and accuracy curves for a MLP model in Keras?

I am modeling a neural network using Keras and I am trying to evaluate it with a plot of acc and val_acc. I have 3 errors in the following lines of code:
In print(history.keys()) the error is: 'function' object has no attribute 'keys'
In y_pred = classifier.predict(X_test) the error is: name 'classifier' is not defined
In plt.plot(history.history['acc']) the error is: 'History' object is not subscriptable
I'm also trying to graph the ROC curve; how could I do it?
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import keras
from keras.models import Sequential
from keras.layers import Dense
from matplotlib import pyplot
from keras.utils import plot_model
dataset = pd.read_csv('Data_BP.csv')
X = dataset.iloc[:, 0:11].values
y = dataset.iloc[:, -1].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
def Model():
    classifier = Sequential()
    classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu', input_dim=11))
    classifier.add(Dense(units=8, kernel_initializer='uniform', activation='relu'))
    classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
    classifier.compile(optimizer='adam', loss='mean_squared_error', metrics=['mse', 'acc'])
    return classifier
classifier = Model()
history = classifier.fit(X_train, y_train, validation_split=0.25, batch_size = 10, epochs = 5)
print('\n', history.history.keys())
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
from sklearn.metrics import confusion_matrix, recall_score, classification_report, auc, roc_curve
cm = confusion_matrix(y_test, y_pred)
print(cm)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
What functions should be added?
Change history to classifier in those lines (the History object is actually the return value of the fit method called on the Model object), like this:
classifier = Model()
history = classifier.fit(...)
Don't confuse the return value of the fit method with your model. The History object, as its name suggests, only contains the history of training. Your model, however, is classifier, and it is the one that has methods like fit(), predict(), evaluate(), compile(), etc.
Plus, the History object has an attribute called history, which is a dictionary containing the values of the loss and metrics during training. Therefore you need to use print(history.history.keys()) instead.
Now, if you would like, for example, to plot the loss curve during training (i.e. the loss at the end of each epoch), you can do it like this:
loss_values = history.history['loss']
epochs = range(1, len(loss_values)+1)
plt.plot(epochs, loss_values, label='Training Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
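As for the ROC-curve part of the question, a minimal sketch with scikit-learn (assuming a binary 0/1 target): feed roc_curve the raw predicted probabilities, not the thresholded y_pred > 0.5 values.

from sklearn.metrics import roc_curve, auc

y_score = classifier.predict(X_test).ravel()   # sigmoid probabilities
fpr, tpr, _ = roc_curve(y_test, y_score)
plt.plot(fpr, tpr, label='ROC curve (AUC = %.3f)' % auc(fpr, tpr))
plt.plot([0, 1], [0, 1], linestyle='--', label='Chance')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.show()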