My dataset consists of satellite observations and includes a lot of zeroes, which strongly affect my final simulation results.
I have two sets of input data: dynamic ones (X_dynamic_LSTM.shape = (95931, 1, 5)), which change through the time series, and static ones (X_static_MLP.shape = (95931, 10)), which do not change. For the dynamic inputs I use an LSTM and for the static ones an MLP. I concatenate the two and get the final result from another MLP.
Can you suggest how I should ignore these zero values in my prediction dataframe? I know about Masking and Embedding but don't know how to add them to my code!
from tensorflow.keras.layers import Input, LSTM, Dense, Concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Masking
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Embedding
lstm_input = Input(shape=(X_dynamic_LSTM.shape[1], X_dynamic_LSTM.shape[2]))
x = Masking(mask_value=0.)(lstm_input)
x = LSTM(70, activation='tanh', return_sequences=True)(x)
x = Dropout(0.3)(x)
x = LSTM(35)(x)
x = Dropout(0.3)(x)
x = Dense(1, activation='tanh')(x)
#mlp input with additional 3 variables at t=t
mlp_input = Input(shape=(X_static_MLP.shape[1],))  # shape must be a tuple, hence the trailing comma
mlp = Dense(30, activation='relu')(mlp_input)
mlp = Dense(20, activation='relu')(mlp)
merge = Concatenate()([x, mlp])
hidden1 = Dense(5, activation='relu')(merge)
mlp_out = Dense(1, activation='relu')(hidden1)
model = Model(inputs=[lstm_input, mlp_input],outputs=mlp_out)
#compile the model
model.compile(loss='mae', optimizer='adam')
#fit the model
model.fit([X_dynamic_LSTM, X_static_MLP], y_train, batch_size=40,
epochs=10, validation_split=0.2)
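For what it's worth, the Masking layer already present in this code does skip fully-zero rows: a timestep is masked whenever every feature at that timestep equals mask_value, and the LSTM then ignores it. A quick eager-mode check (a minimal sketch, assuming TF2):
import numpy as np
demo = np.array([[[0., 0., 0., 0., 0.]]])   # one sample, one timestep, all five features zero
masked = Masking(mask_value=0.)(demo)       # Masking is imported above
print(masked._keras_mask)                   # tf.Tensor([[False]]) -> this timestep is skipped downstream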
Use an Embedding layer as your first layer; the Keras documentation for Embedding shows the required arguments:
>>> import tensorflow as tf
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Embedding(input_dim=1000, output_dim=64, mask_zero=True))  # input_dim and output_dim are required; the values here are placeholders
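One caveat worth adding (not stated in the answer above): Embedding with mask_zero=True applies to integer token indices, not to continuous features like the question's satellite variables; for those, the Masking layer in the question is the more direct fit. Continuing the sketch, with a hypothetical downstream LSTM:
>>> import numpy as np
>>> model.add(tf.keras.layers.LSTM(32))      # receives the mask produced by Embedding
>>> model(np.array([[3, 7, 0, 0]])).shape    # the zero indices are masked out
TensorShape([1, 32])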
Say I have a model called other_model, a pre-trained model:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Concatenate
features = Concatenate()([other_model.layers[12].output, other_model.layers[11].output])
X = Dense(90, activation='relu')(features)
X = Dense(50, activation='relu')(X)
X = Dense(40, activation='relu')(X)
discriminator = Dense(n_classes, activation='softmax', name='discriminator')(X)
discriminator_full = Model(inputs=other_model.input, outputs=[discriminator] + other_model.outputs)
I first freeze other_model and train the discriminator. Then I unfreeze other_model and train them both.
What I struggle with is defining the other_model loss as other_model_loss = other_model_loss - discriminator_loss. Is it possible to do this with this API?
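For reference, the freeze-then-unfreeze protocol described above can be sketched as follows (a minimal sketch; the optimizer and loss choices are placeholders, and the fit calls are left commented since the data is not shown):
# Phase 1: freeze the pre-trained model so only the discriminator head learns.
for layer in other_model.layers:
    layer.trainable = False
discriminator_full.compile(optimizer='adam',
                           loss=['categorical_crossentropy'] * len(discriminator_full.outputs))
# discriminator_full.fit(x, y_list, epochs=...)  # one target array per model output

# Phase 2: unfreeze and fine-tune everything; recompile after toggling trainable.
for layer in other_model.layers:
    layer.trainable = True
discriminator_full.compile(optimizer='adam',
                           loss=['categorical_crossentropy'] * len(discriminator_full.outputs))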
I am trying to do transfer learning; for that purpose I want to remove the last two layers of the neural network and add another two layers. Below is example code that produces the same error.
from keras.models import Sequential
from keras.layers import Input,Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.core import Dropout, Activation
from keras.layers.pooling import GlobalAveragePooling2D
from keras.models import Model
in_img = Input(shape=(3, 32, 32))
x = Convolution2D(12, 3, 3, subsample=(2, 2), border_mode='valid', name='conv1')(in_img)
x = Activation('relu', name='relu_conv1')(x)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)
x = Convolution2D(3, 1, 1, border_mode='valid', name='conv2')(x)
x = Activation('relu', name='relu_conv2')(x)
x = GlobalAveragePooling2D()(x)
o = Activation('softmax', name='loss')(x)
model = Model(input=in_img, output=[o])
model.compile(loss="categorical_crossentropy", optimizer="adam")
#model.load_weights('model_weights.h5', by_name=True)
model.summary()
model.layers.pop()
model.layers.pop()
model.summary()
model.add(MaxPooling2D())
model.add(Activation('sigmoid', name='loss'))
I removed the layers using pop(), but when I tried to add new ones it output this error:
AttributeError: 'Model' object has no attribute 'add'
I know the most probable reason for the error is improper use of model.add(). What other syntax should I use?
EDIT:
I tried to remove/add layers in Keras, but it's not allowing them to be added after loading external weights.
from keras.models import Sequential
from keras.layers import Input,Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.core import Dropout, Activation
from keras.layers.pooling import GlobalAveragePooling2D
from keras.models import Model
in_img = Input(shape=(3, 32, 32))
def gen_model():
in_img = Input(shape=(3, 32, 32))
x = Convolution2D(12, 3, 3, subsample=(2, 2), border_mode='valid', name='conv1')(in_img)
x = Activation('relu', name='relu_conv1')(x)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)
x = Convolution2D(3, 1, 1, border_mode='valid', name='conv2')(x)
x = Activation('relu', name='relu_conv2')(x)
x = GlobalAveragePooling2D()(x)
o = Activation('softmax', name='loss')(x)
model = Model(input=in_img, output=[o])
return model
#parent model
model=gen_model()
model.compile(loss="categorical_crossentropy", optimizer="adam")
model.summary()
#saving model weights
model.save('model_weights.h5')
#loading weights to second model
model2=gen_model()
model2.compile(loss="categorical_crossentropy", optimizer="adam")
model2.load_weights('model_weights.h5', by_name=True)
model2.layers.pop()
model2.layers.pop()
model2.summary()
#editing layers in the second model and saving as third model
x = MaxPooling2D()(model2.layers[-1].output)
o = Activation('sigmoid', name='loss')(x)
model3 = Model(input=in_img, output=[o])
It's showing this error:
RuntimeError: Graph disconnected: cannot obtain value for tensor input_4 at layer "input_4". The following previous layers were accessed without issue: []
You can take the output of the last model and create a new model. The lower layers remain the same.
model.summary()
model.layers.pop()
model.layers.pop()
model.summary()
x = MaxPooling2D()(model.layers[-1].output)
o = Activation('sigmoid', name='loss')(x)
model2 = Model(inputs=in_img, outputs=[o])
model2.summary()
Check How to use models from keras.applications for transfer learning?
Update on Edit:
The new error is because you are trying to create the new model on the global in_img, which is actually not used in the previous model creation: there you define a local in_img, so the global one is not connected to the upper layers in the symbolic graph. It has nothing to do with loading weights.
To resolve this properly, you should instead use model.input to reference the input.
model3 = Model(input=model2.input, output=[o])
Another way to do it
from keras.models import Model
layer_name = 'relu_conv2'
model2= Model(inputs=model1.input, outputs=model1.get_layer(layer_name).output)
As of Keras 2.3.1 and TensorFlow 2.0, model.layers.pop() is not working as intended (see issue here). They suggested two options to do this.
One option is to recreate the model and copy the layers. For instance, if you want to remove the last layer and add another one, you can do:
model = Sequential()
for layer in source_model.layers[:-1]:  # go through until the last layer
    model.add(layer)
model.add(Dense(3, activation='softmax'))
model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy')
Another option is to use the functional model:
predictions = Dense(3, activation='softmax')(source_model.layers[-2].output)
model = Model(inputs=source_model.input, outputs=predictions)  # reuse the source model's input
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.layers[-1].output means the last layer's output, which is the final output; so in your code you actually didn't remove any layers, you added another head/path.
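Put differently, to actually drop the last two layers you rebuild the head from the output of the layer before them, e.g. (a minimal sketch reusing the layer types from the question):
x = MaxPooling2D()(model.layers[-3].output)   # -3 steps past the two layers being dropped
o = Activation('sigmoid', name='loss')(x)
model2 = Model(inputs=model.input, outputs=[o])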
As an alternative to Wesam Na's answer, if you don't know the layer names you can simply cut off the last layer via:
from keras.models import Model
model2= Model(inputs=model1.input, outputs=model1.layers[-2].output)
I am learning about autoencoders. I want to know how combining two Sequential models into one Sequential model differs from just using a single Sequential model. The architecture of both models is the same, but the difference is that we have to provide input shapes for the two sub-models in the combined model.
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(2, input_shape = [3]),
    tf.keras.layers.Dense(3)
])
encoder = tf.keras.models.Sequential([tf.keras.layers.Dense(2,input_shape = [3])])
decoder = tf.keras.models.Sequential([tf.keras.layers.Dense(3,input_shape = [2])])
autoencoder = tf.keras.models.Sequential([encoder,decoder])
You can use tf.keras.layers.concatenate to merge TensorFlow Keras models.
Sample code
from tensorflow.keras.layers import concatenate
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, Input
sequential_model1_in = Input(shape=(28, 28, 1))
sequential_model1_out = Dense(64, activation='relu', name='layer_1')(sequential_model1_in)  # input_dim is unnecessary here; the shape comes from the Input tensor
sequential_model1 = Model(sequential_model1_in, sequential_model1_out)
sequential_model2_in = Input(shape=(28, 28, 1))
sequential_model2_out = Dense(64, activation='relu', name='layer_2')(sequential_model2_in)
sequential_model2 = Model(sequential_model2_in, sequential_model2_out)
concatenated = concatenate([sequential_model1_out, sequential_model2_out])
out = Dense(1, activation='sigmoid', name='output_layer')(concatenated)  # softmax over a single unit is constant; sigmoid is the usual single-unit choice
merged_model = Model([sequential_model1_in, sequential_model2_in], out)
Take a look at the TensorFlow Intro to Autoencoders tutorial.
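As for the original comparison: the nested and the flat construction define the same computation, and the nesting merely groups the layers. A quick check (a minimal sketch using the question's models):
model.summary()        # flat model: two Dense layers, 17 trainable parameters
autoencoder.summary()  # nested model: the same layers grouped into two sub-models, same 17 parameters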
I built a sentiment classifier using Keras to predict whether a sentence has a sentiment score of 1, 2, 3, 4 or 5. However, I am getting some strange results. I will first show my code:
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import EarlyStopping
import pandas as pd
import numpy as np
# the data only reflects the structure of the actual data
# the real data has way larger text and more entries
X_train = ['i am glad i heard about that', 'that is one ugly bike']
y_train = pd.Series(np.array([1, 4])) # pandas series
X_test = ['that hurted me']
y_test = pd.Series(np.array([1])) # pandas series; one label to match the single test sentence
# tokenizing
tokenizer = Tokenizer(num_words = 5)
tokenizer.fit_on_texts(X_train)
X_train_seq = tokenizer.texts_to_sequences(X_train)
X_test_seq = tokenizer.texts_to_sequences(X_test)
# performing some padding
padding_len = 4
X_train_seq_padded = pad_sequences(X_train_seq, maxlen = padding_len)
X_test_seq_padded = pad_sequences(X_test_seq, maxlen = padding_len)
# building the model
model = Sequential()
model.add(Dense(16, input_dim = padding_len, activation = 'relu', name = 'hidden-1'))
model.add(Dense(16, activation = 'relu', name = 'hidden-2'))
model.add(Dense(16, activation = 'relu', name = 'hidden-3'))
model.add(Dense(6, activation='softmax', name = 'output_layer'))
# compiling the model
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics=['accuracy'])
# training the model
callbacks = [EarlyStopping(monitor = 'accuracy', patience = 5, mode = 'max')]
my_model = model.fit(X_train_seq_padded, to_categorical(y_train, num_classes = 6),
                     epochs = 100, batch_size = 1000, callbacks = callbacks,
                     validation_data = (X_test_seq_padded, to_categorical(y_test, num_classes = 6)))  # validate on the padded sequences; num_classes matches the 6-unit output layer
Using the actual data I keep getting results around 0.67xx (xx being random digits), which are reached after one or two epochs, no matter what changes I introduce to the code (and some are extreme).
I tried changing the padding to 1, 10, 100, 1000.
I tried removing the layer hidden-2 and hidden-3.
I tried adding stop word removal before tokenizing.
I tried using the tanh activation function in the hidden layers.
I used the sgd optimizer.
Example output of one setup (screenshot omitted): the accuracy settles around 0.67 within the first couple of epochs.
Now my question is: is there something wrong with my code, or are these actually possible results?
I am using a model on the MovieLens dataset. I wanted to combine two Sequential models with a Keras dot product. However, I got the following error:
Layer dot_1 was called with an input that isn't a symbolic tensor. Received
type: <class 'keras.engine.sequential.Sequential'>. Full input:
[<keras.engine.sequential.Sequential object at 0x00000282DAFCC710>,
<keras.engine.sequential.Sequential object at 0x00000282DB172C18>]. All
inputs to the layer should be tensors.
The code below shows how the model is built. The error comes from the line with:
merged = dot([P, Q], axes = 1, normalize = True)
max_userid, max_movieid and K_FACTORS are already defined. Can somebody help me with this error?
import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, Embedding, Reshape, Concatenate, dot
from keras import Input
from keras.optimizers import Adagrad
# Define model
# P is the embedding layer that creates a User by latent factors matrix.
# If the input is a user_id, P returns the latent factor vector for that user.
P = Sequential()
P.add(Embedding(max_userid, K_FACTORS, input_length=1))
P.add(Reshape((K_FACTORS,)))
# Q is the embedding layer that creates a Movie by latent factors matrix.
# If the input is a movie_id, Q returns the latent factor vector for that movie.
Q = Sequential()
Q.add(Embedding(max_movieid, K_FACTORS, input_length=1))
Q.add(Reshape((K_FACTORS,)))
mergedModel = Sequential()
merged = dot([P, Q], axes = 1, normalize = True)
mergedModel.add(merged)
ada_grad = Adagrad(lr=0.1, epsilon=1e-08, decay=0.0)
The Keras functional API provides a more flexible way of defining such models.
from keras.layers import Input
input_1 = Input(shape=(1,))
input_2 = Input(shape=(1,))
P = Reshape((K_FACTORS,))(Embedding(max_userid, K_FACTORS, input_length=1)(input_1))
Q = Reshape((K_FACTORS,))(Embedding(max_movieid, K_FACTORS, input_length=1)(input_2))  # note: max_movieid here, not max_userid
P_dot_Q = dot([P, Q], axes = 1, normalize = True)
model = Model(inputs=[input_1,input_2], outputs=P_dot_Q)
#print(model.summary())
#model.compile(loss = 'MSE', optimizer='adam',metrics = ['accuracy'])
#model.fit([np.array([1]), np.array([1])],[1])