I'm trying to train an agent with TensorFlow and keras-rl2 to play the Gym environment CartPole-v1, and I'm working in Google Colaboratory. This is my implementation:
!pip install gym[classic_control]
!pip install keras-rl2
import tensorflow as tf
from tensorflow import keras as k
import numpy as np
import gym
import random
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import Adam
env = gym.make('CartPole-v1')
states = env.observation_space.shape[0]
actions = env.action_space.n #actions = 2
def build_model(states, actions):
    model = Sequential()
    model.add(Flatten(input_shape=(1, states)))
    model.add(Dense(24, activation='relu'))
    model.add(Dense(24, activation='relu'))
    model.add(Dense(actions, activation='linear'))
    return model
model = build_model(states, actions)
model.summary()
from rl.agents import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
def build_agent(model, actions):
    policy = BoltzmannQPolicy()
    memory = SequentialMemory(limit=50000, window_length=1)
    dqn = DQNAgent(model=model, memory=memory, policy=policy,
                   nb_actions=actions, nb_steps_warmup=10, target_model_update=1e-2)
    return dqn
dqn = build_agent(model, actions)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])  # this is the line that raises the error
dqn.fit(env, nb_steps=50000, visualize=False, verbose=1)
I get this error when I try to compile my DQN agent:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-35-08c9e140f6bc> in <module>
1 dqn = build_agent(model, actions)
----> 2 dqn.compile(Adam(lr=1e-3), metrics=['mae'])
3 dqn.fit(env, nb_steps=50000, visualize=False, verbose=1)
2 frames
/usr/local/lib/python3.8/dist-packages/keras/engine/training_v1.py in get_weights(self)
154 """
155 strategy = (self._distribution_strategy or
--> 156 self._compile_time_distribution_strategy)
157 if strategy:
158 with strategy.scope():
AttributeError: 'Sequential' object has no attribute '_compile_time_distribution_strategy'
I tried searching for a solution on the internet but couldn't find one. I did find someone with a similar problem, but he was building the model before importing the dependencies, and I didn't think that was the problem in my case, as you can see in my code above.
This will occur when you construct your model and then import from rl.* afterwards, which is exactly what your code does: build_model is called before the rl imports. Reverse the order so that all of the imports come first, and it will work:
!pip install gym[classic_control]
!pip install keras-rl2
import tensorflow as tf
from tensorflow import keras as k
import numpy as np
import gym
import random
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import Adam
from rl.agents import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
env = gym.make('CartPole-v1')
states = env.observation_space.shape[0]
actions = env.action_space.n #actions = 2
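The rest of the script is unchanged from the question; once the rl imports precede model construction, compile works:

def build_model(states, actions):
    model = Sequential()
    model.add(Flatten(input_shape=(1, states)))
    model.add(Dense(24, activation='relu'))
    model.add(Dense(24, activation='relu'))
    model.add(Dense(actions, activation='linear'))
    return model

def build_agent(model, actions):
    policy = BoltzmannQPolicy()
    memory = SequentialMemory(limit=50000, window_length=1)
    dqn = DQNAgent(model=model, memory=memory, policy=policy,
                   nb_actions=actions, nb_steps_warmup=10, target_model_update=1e-2)
    return dqn

model = build_model(states, actions)
dqn = build_agent(model, actions)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])  # no longer raises
dqn.fit(env, nb_steps=50000, visualize=False, verbose=1)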
Related
I am working on TensorFlow Federated. I have the following imports:
!pip install --quiet tensorflow-federated==0.20.0 # The latest version of tensorflow-federated is not working with the colab python version
!pip install --quiet --upgrade nest-asyncio
import nest_asyncio
nest_asyncio.apply()
%load_ext tensorboard
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import tensorflow_federated as tff
import collections
import os
import random
import math
import time
import numpy as np
from numpy import sqrt
from numpy.fft import fft, ifft
from numpy.random import rand
import inspect
import tensorflow_probability as tfp
from matplotlib import pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import concatenate
from tensorflow.keras import initializers
from keras import layers, initializers
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
Consider the following model:
def create_model():
    x_1 = tf.Variable(24)
    bias_initializer = tf.keras.initializers.HeNormal()
    model = Sequential()
    model.add(Conv2D(2, (5, 5), input_shape=(28, 28, 1), activation="relu", name='conv2d_1', use_bias=True, bias_initializer=bias_initializer))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(1, (5, 5), activation="relu", name='conv2d_2', use_bias=True, bias_initializer=bias_initializer))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(2, name='dense_1', activation="relu", use_bias=True, bias_initializer=bias_initializer))
    model.add(Dense(10, name='dense_2', activation="softmax", use_bias=True, bias_initializer=bias_initializer))
    a = model.weights[0]
    b = model.weights[1]
    c = model.weights[2]
    d = model.weights[3]
    e = model.weights[4]
    f = model.weights[5]
    g = model.weights[6]
    h = model.weights[7]
    print(h)
    print(type(a))
    L1, B1, L2, B2, L3, B3, L4, B4 = processing_work(a, b, c, d, e, f, g, h, x_1)
    print('L1 is', L1)
    print(type(L1))
    print(type(h))
    kk = resource_variable_ops.ResourceVariable(L1)
    print(type(kk))
    KB = resource_variable_ops.ResourceVariable(B1)
    print(type(KB))
    L1 = tf.Variable(L1, dtype='float32')  # , name='conv2d_1/kernel:0')
    B1 = tf.Variable(B1, dtype='float32')  # , name='conv2d_1/bias:0')
    L2 = tf.Variable(L2, dtype='float32')  # , name='conv2d_2/kernel:0')
    B2 = tf.Variable(B2, dtype='float32')  # , name='conv2d_2/bias:0')
    L3 = tf.Variable(L3, dtype='float32')  # , name='dense_1/kernel:0')
    B3 = tf.Variable(B3, dtype='float32')  # , name='dense_1/bias:0')
    L4 = tf.Variable(L4, dtype='float32')  # , name='dense_2/kernel:0')
    B4 = tf.Variable(B4, dtype='float32')  # , name='dense_2/bias:0')
    model.get_layer('conv2d_1').set_weights([L1, B1])
    model.get_layer('conv2d_2').set_weights([L2, B2])
    model.get_layer('dense_1').set_weights([L3, B3])
    model.get_layer('dense_2').set_weights([L4, B4])
    return model
What I am doing in this model is extracting the weights and biases of all the layers, performing various operations on them, and re-assigning the processed/modified weights and biases to their respective layers.
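Outside of TFF, the extract-modify-reassign pattern itself works fine in eager mode. As a minimal sketch of what I mean (with a hypothetical scale_weights standing in for my processing_work):

def scale_weights(arrays, factor=0.5):
    # hypothetical stand-in for processing_work: any transform of the weight arrays
    return [a * factor for a in arrays]

demo = tf.keras.Sequential([
    tf.keras.layers.Dense(4, activation='relu', input_shape=(8,), name='dense_1'),
    tf.keras.layers.Dense(2, activation='softmax', name='dense_2'),
])

w, b = demo.get_layer('dense_1').get_weights()  # plain NumPy arrays in eager mode
w, b = scale_weights([w, b])
demo.get_layer('dense_1').set_weights([w, b])   # reassignment succeeds eagerly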
I make an instance of the model here:
def model_fn():
    # We _must_ create a new model here, and _not_ capture it from an external
    # scope. TFF will call this within different graph contexts.
    local_model = create_model()
    return tff.learning.from_keras_model(
        local_model,
        input_spec=preprocessed_example_dataset.element_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
I then call the following algorithm:
iterative_process = tff.learning.algorithms.build_weighted_fed_avg(
    model_fn,
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
    server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.00))
However, I get the following error.
<tf.Variable 'dense_2/bias:0' shape=(10,) dtype=float32>
<class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'>
<function reshape at 0x7efde367c830>
L1 is Tensor("Reshape_41:0", shape=(5, 5, 1, 2), dtype=float32)
<class 'tensorflow.python.framework.ops.Tensor'>
<class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'>
<class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'>
<class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'>
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
<ipython-input-33-e5ea47468ee2> in <module>
1 iterative_process = tff.learning.algorithms.build_weighted_fed_avg(
2 model_fn, client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
----> 3 server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.00))
8 frames
/usr/local/lib/python3.7/dist-packages/keras/backend.py in batch_set_value(tuples)
4024 feed_dict = {}
4025 for x, value in tuples:
-> 4026 value = np.asarray(value, dtype=dtype_numpy(x))
4027 tf_dtype = tf.as_dtype(x.dtype.name.split('_')[0])
4028 if hasattr(x, '_assign_placeholder'):
NotImplementedError: numpy() is only available when eager execution is enabled.
I have tried both types, i.e., L1 and B1 as well as kk and KB, in
model.get_layer('conv2d_1').set_weights([L1, B1])
but I am getting the same error. Also, at the start of the notebook I added the following:
tf.compat.v1.enable_eager_execution()
What might be causing this error?
I am using TensorFlow Federated with the following imports.
import tensorflow as tf
import tensorflow_federated as tff
import collections
import os
import random
import math
import time
import numpy as np
from numpy import sqrt
from numpy.fft import fft, ifft
from numpy.random import rand
import inspect
import tensorflow_probability as tfp
from matplotlib import pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization, AveragePooling2D, MaxPooling2D, Conv2D, Activation, Dropout,Flatten,Input,Dense,concatenate
from tensorflow.keras import layers, initializers
from tensorflow.python.eager import backprop, context, function
from tensorflow.python.framework import constant_op, dtypes, indexed_slices, ops
from tensorflow.python.ops import embedding_ops, math_ops, resource_variable_ops, resources, variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
Consider the following Keras model:
def create_keras_model():
    return tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(filters=64, kernel_size=[5, 5], name='conv2d_1', activation=tf.nn.relu, use_bias=True, bias_initializer=tf.initializers.lecun_normal(seed=137), input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2),
        tf.keras.layers.Conv2D(filters=32, kernel_size=[5, 5], name='conv2d_2', activation=tf.nn.relu, use_bias=True, bias_initializer=tf.initializers.lecun_normal(seed=137)),
        tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2),
        tf.keras.layers.Reshape(target_shape=(4 * 4 * 32,)),
        tf.keras.layers.Dense(units=150, activation=tf.nn.relu, use_bias=True, bias_initializer=tf.initializers.lecun_normal(seed=137), name='dense_1'),
        tf.keras.layers.Dense(units=10, use_bias=True, bias_initializer=tf.initializers.lecun_normal(seed=137), activation=tf.nn.softmax, name='dense_2'),
    ])
I make an instance of create_keras_model:
net_1 = create_keras_model()
I then call the following function
def model_fn():
    # We _must_ create a new model here, and _not_ capture it from an external
    # scope. TFF will call this within different graph contexts.
    global_model = create_keras_model()
    global_model.set_weights(net_1.get_weights())
    return tff.learning.from_keras_model(
        global_model,
        input_spec=preprocessed_example_dataset.element_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
Following that, I invoke the iterative process:
iterative_process = tff.learning.algorithms.build_weighted_fed_avg(
    model_fn,
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
    server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.00))
This gives the following error:
AttributeError Traceback (most recent call last)
<ipython-input-31-777247538e22> in <module>
2 model_fn,
3 client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
----> 4 server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.00))
5 frames
/usr/local/lib/python3.7/dist-packages/keras/engine/training_v1.py in get_weights(self)
155 """
156 strategy = (self._distribution_strategy or
--> 157 self._compile_time_distribution_strategy)
158 if strategy:
159 with strategy.scope():
AttributeError: 'Sequential' object has no attribute '_compile_time_distribution_strategy'
Any suggestions for removing this error?
I'm using a StackingClassifier on five scikit-learn classifiers and a Keras one. However, it doesn't seem to recognize the Keras one as a classifier.
Relevant code:
from tensorflow.keras import layers
from tensorflow import keras
from keras.constraints import maxnorm
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Input
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras import metrics
import joblib
from joblib import parallel_backend
np.random.seed(42)
from sklearn.model_selection import GridSearchCV
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
import sklearn
from sklearn.ensemble import StackingClassifier
def create_model():
    # create model
    model = Sequential()
    model.add(Dense(best_neurons, input_shape=(X_train.shape[1],), kernel_initializer=best_init_mode, activation='relu',
                    kernel_constraint=maxnorm(best_weight_constraint)))
    model.add(Dropout(best_dropout_rate))
    model.add(Flatten())
    optimizer = tf.keras.optimizers.RMSprop(lr=best_learn_rate)
    model.add(Dense(units=1, kernel_initializer=best_init_mode, activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=[keras.metrics.AUC(), 'accuracy'])
    return model
NN_clf = KerasClassifier(build_fn=create_model, epochs=best_epochs, batch_size=best_batch_size)
RF_clf = RandomForestClassifier(max_depth=best_max_depth_rf, n_estimators=best_n_estimators_rf,
                                min_samples_leaf=best_min_samples_leaf_rf, max_features=best_max_features_rf,
                                class_weight=best_class_weight_rf, max_samples=best_max_samples_rf,
                                random_state=42, oob_score=True)
KN_clf = KNeighborsClassifier(n_neighbors=best_n_neighbors, p=best_p, leaf_size=best_leaf_size)
#DT_clf = DecisionTreeClassifier(max_depth=best_max_depth_dt, min_samples_leaf=best_min_samples_leaf_dt)
SV_clf = SVC(gamma=best_gamma_sv, C=best_c_sv, kernel=best_kernel_sv, random_state=42, probability=True)
GBC_clf = xgb.XGBClassifier(learning_rate=best_learning_rate_gbc, random_state=42, colsample_bytree=best_colsample_bytree_gbc,
                            max_depth=best_max_depth_gbc, n_estimators=best_n_estimators_gbc,
                            gamma=best_gamma_gbc, subsample=best_subsample_gbc)
EX_clf = ExtraTreesClassifier(max_depth=best_max_depth_ex, n_estimators=best_n_estimators_ex,
                              min_samples_leaf=best_min_samples_leaf_ex, max_features=best_max_features_ex,
                              warm_start=False, oob_score=True, bootstrap=True, random_state=42)
LR_clf = LogisticRegression(random_state=42, solver=best_solver, penalty=best_penalty, class_weight=best_class_weight, C=best_log_C)
estimators = [('RF', RF_clf), ('GBC', GBC_clf), ('EX', EX_clf), ('LR', LR_clf), ('KN', KN_clf),
              ('SV', SV_clf), ('NN', NN_clf)]
clf.fit(X_train, y_train.values.ravel())
print("Stacking model score: %.3f" % clf.score(X_test, y_test.values.ravel()))
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-41-272df6aa838e> in <module>
2 ('SV', SV_clf), ('NN', NN_clf) ]
3 clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression(), n_jobs=-1)
----> 4 clf.fit(X_train, y_train.values.ravel())
5 print("Stacking model score: %.3f" % clf.score(X_test, y_test.values.ravel()))
~\Anaconda3\lib\site-packages\sklearn\ensemble\_stacking.py in fit(self, X, y, sample_weight)
411 self._le = LabelEncoder().fit(y)
412 self.classes_ = self._le.classes_
--> 413 return super().fit(X, self._le.transform(y), sample_weight)
414
415 @if_delegate_has_method(delegate='final_estimator_')
~\Anaconda3\lib\site-packages\sklearn\ensemble\_stacking.py in fit(self, X, y, sample_weight)
129 # all_estimators contains all estimators, the one to be fitted and the
130 # 'drop' string.
--> 131 names, all_estimators = self._validate_estimators()
132 self._validate_final_estimator()
133
~\Anaconda3\lib\site-packages\sklearn\ensemble\_base.py in _validate_estimators(self)
247 raise ValueError(
248 "The estimator {} should be a {}.".format(
--> 249 est.__class__.__name__, is_estimator_type.__name__[3:]
250 )
251 )
ValueError: The estimator KerasClassifier should be a classifier.
I am using scikit-learn 0.22 and TF 2.x. I've seen a similar error here, but I didn't want to rewrite my code to use the mlxtend library.
This problem is caused by the same issue reported here for VotingClassifier.
The solution is simply to set _estimator_type = 'classifier' on the KerasClassifier instance.
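Concretely, the only change needed is on the Keras wrapper (this is also the key line in the full reproduction below):

NN_clf = KerasClassifier(build_fn=create_model, epochs=15, batch_size=32)
NN_clf._estimator_type = "classifier"  # satisfies sklearn's is_classifier() check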
Note: please provide just the minimum code to reproduce your issue.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
import numpy as np
from tensorflow.keras import layers
from tensorflow import keras
from keras.constraints import maxnorm
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Input
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras import metrics
import joblib
from joblib import parallel_backend
np.random.seed(42)
from sklearn.model_selection import GridSearchCV
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
import sklearn
from sklearn.ensemble import StackingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
def create_model():
    # create model
    model = Sequential()
    model.add(Dense(20, input_dim=20, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Flatten())
    optimizer = keras.optimizers.RMSprop(lr=0.001)
    model.add(Dense(units=1, activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer, metrics=[keras.metrics.AUC(), 'accuracy'])
    return model
NN_clf = KerasClassifier(build_fn=create_model, epochs=15, batch_size=32)
NN_clf._estimator_type = "classifier"
RF_clf = RandomForestClassifier(random_state=42, oob_score=True)
KN_clf = KNeighborsClassifier()
SV_clf = SVC(random_state=42, probability=True)
EX_clf = ExtraTreesClassifier(random_state=42)
LR_clf = LogisticRegression(random_state=42)
estimators = [('RF', RF_clf), ('EX', EX_clf), ('LR', LR_clf), ('KN', KN_clf),
              ('SV', SV_clf), ('NN', NN_clf)]
clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression())
X, y = make_classification()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
clf.fit(X_train, y_train)
print("Stacking model score: %.3f" % clf.score(X_test, y_test))
# Stacking model score: 0.967
I am trying to train EfficientNetB1 on Google Colab and keep running into issues with the correct import statements from Keras or tensorflow.keras. Currently, this is how my imports look:
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import pickle
import cv2
import os
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import efficientnet.keras as enet
from tensorflow.keras.layers import Dense, Dropout, Activation, BatchNormalization, Flatten, Input
and this is how my model looks:
# load the EfficientNetB1 network, ensuring the head FC layer sets are left off
baseModel = enet.EfficientNetB1(weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3)), pooling='avg')
# Adding 2 fully-connected layers to B0.
x = baseModel.output
x = BatchNormalization()(x)
x = Dropout(0.7)(x)
x = Dense(512)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(0.5)(x)
x = Dense(512)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
# Output layer
predictions = Dense(len(lb.classes_), activation="softmax")(x)
model = Model(inputs = baseModel.input, outputs = predictions)
# loop over all layers in the base model and freeze them so they will
# *not* be updated during the training process
for layer in baseModel.layers:
    layer.trainable = False
But for the life of me, I can't figure out why I am getting the error below:
AttributeError Traceback (most recent call last)
<ipython-input-19-269fe6fc6f99> in <module>()
----> 1 baseModel = enet.EfficientNetB1(weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3)), pooling='avg')
2
3 # Adding 2 fully-connected layers to B0.
4 x = baseModel.output
5 x = BatchNormalization()(x)
5 frames
/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py in _collect_previous_mask(input_tensors)
1439 inbound_layer, node_index, tensor_index = x._keras_history
1440 node = inbound_layer._inbound_nodes[node_index]
-> 1441 mask = node.output_masks[tensor_index]
1442 masks.append(mask)
1443 else:
AttributeError: 'Node' object has no attribute 'output_masks'
The problem is the way you import EfficientNet: you import it from the keras package and not from the tensorflow.keras package. Change your EfficientNet import to
import efficientnet.tfkeras as enet
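With the tfkeras variant, the same constructor call from the question should then work unchanged:

import efficientnet.tfkeras as enet
from tensorflow.keras.layers import Input

baseModel = enet.EfficientNetB1(weights="imagenet", include_top=False,
                                input_tensor=Input(shape=(224, 224, 3)), pooling='avg')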
Not sure, but this error may be caused by a wrong TF version. Google Colab currently comes with TF 1.x by default. Try this (in a fresh runtime, before importing TensorFlow) to change the TF version and see if it resolves the issue:
try:
    %tensorflow_version 2.x
except:
    print("Failed to load")
I am using Keras 1.2 with the TensorFlow 1.0.0 backend.
I have a function that loads a pre-calibrated model from JSON and then loads its weights from an HDF5 file.
def load():
    model = model_from_json(open(model_path).read())
    model.load_weights(model_weights_path)
This function, more precisely the call to load_weights, results in the following exception:
RuntimeError: The Session graph is empty. Add operations to the graph before calling run()
I wonder if this is due to these lines that I put at the beginning of my module to set the TensorFlow seed for reproducibility:
tf.set_random_seed(123) # To set Tensorflow seed
sess = tf.Session()
keras.backend.set_session(sess)
It seems the Keras session does not automatically set the loaded model's graph as the graph associated with the session, hence failing to initialize the weights.
Is there any explanation for this, and a workaround to avoid the exception?
I am using pretty much the same code as you, and it works for me.
from keras.models import Sequential
from keras.models import Model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, GlobalAveragePooling2D
from keras.optimizers import RMSprop
from keras.utils import np_utils
from keras.models import model_from_json
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.pooling import AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import ZeroPadding2D
from keras.engine.topology import Merge
from keras.layers import merge
from keras.optimizers import Adam
from keras import backend as K
from keras.layers.pooling import MaxPooling2D
from keras.layers.convolutional import ZeroPadding2D
import PIL
import inception
import tensorflow as tf
import keras
import glob
import pandas as pd
import pickle
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("model.h5")
print("Loaded model from disk")
model.summary()
model.compile(Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
score = model.predict(transfer_values_test)
Indeed, it seems that Keras doesn't respect the session set by set_session when loading models.
Try forcing Keras to use a particular session via TensorFlow's context manager:
def load():
    with sess.as_default():
        model = model_from_json(open(model_path).read())
        model.load_weights(model_weights_path)
If Keras still complains, predefine a graph (graph=tf.Graph()) and force model.load_weights to use it by introducing an additional with statement:
def load():
    with graph.as_default():
        with sess.as_default():
            model = model_from_json(open(model_path).read())
            model.load_weights(model_weights_path)
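For completeness, here is a sketch (assuming the TF 1.x / Keras 1.2 API from the question) of how graph and sess can be defined at the top of the module so that the seeding and the loading code share one graph:

import tensorflow as tf
import keras

graph = tf.Graph()
with graph.as_default():
    tf.set_random_seed(123)          # seed the graph the model will be loaded into
    sess = tf.Session(graph=graph)
    keras.backend.set_session(sess)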