This is my first time building a TFX pipeline: I went through the tutorials and am now trying to build one with my own dataset. I could use some advice on the Transform code I wrote and some help understanding it better; I'd appreciate your time, and thanks in advance.
I've got the ExampleGen, StatisticsGen, SchemaGen, ExampleValidator, and Transform components working, and I'm hitting an error in the Trainer component.
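For context, the Trainer is wired into the pipeline roughly the way the TFX tutorials do it. The snippet below is only a sketch: the argument names follow the pre-1.0 TFX tutorials and may differ between versions, and transform / schema_gen stand for whatever your component instances are called.
from tfx.components import Trainer
from tfx.proto import trainer_pb2

# sketch: connect the Trainer to the module file and the upstream components
trainer = Trainer(
    module_file=module_file,  # the .py file the error below complains about
    examples=transform.outputs['transformed_examples'],  # some versions call this parameter transformed_examples=
    schema=schema_gen.outputs['schema'],
    transform_graph=transform.outputs['transform_graph'],
    train_args=trainer_pb2.TrainArgs(num_steps=1000),
    eval_args=trainer_pb2.EvalArgs(num_steps=500))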
ERROR:
c:\lib\site-packages\tfx\orchestration\launcher\in_process_component_launcher.py in _run_executor(self, execution_id, input_dict, output_dict, exec_properties)
65 executor_context) # type: ignore
66
---> 67 executor.Do(input_dict, output_dict, exec_properties)
c:\lib\site-packages\tfx\components\trainer\executor.py in Do(self, input_dict, output_dict, exec_properties)
317
318 fn_args = self._GetFnArgs(input_dict, output_dict, exec_properties)
--> 319 trainer_fn = self._GetFn(exec_properties, 'trainer_fn')
320
321 schema = io_utils.parse_pbtxt_file(fn_args.schema_file, schema_pb2.Schema())
c:\lib\site-packages\tfx\components\trainer\executor.py in _GetFn(self, exec_properties, fn_name)
128 if has_module_file:
129 return import_utils.import_func_from_source(
--> 130 exec_properties['module_file'], fn_name)
131
132 fn_path_split = exec_properties[fn_name].split('.')
c:\lib\site-packages\tfx\utils\import_utils.py in import_func_from_source(source_path, fn_name)
66 user_module = types.ModuleType(loader.name)
67 loader.exec_module(user_module)
---> 68 return getattr(user_module, fn_name)
69
70 except IOError:
AttributeError: module 'user_module' has no attribute 'trainer_fn'
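Reading the traceback, the executor imported my module file successfully and then failed to find a function named trainer_fn inside it, so whatever else is wrong, the module file has to expose that exact name (newer TFX versions instead look for a run_fn when the generic executor is used). A minimal skeleton of what the default executor expects, with the body elided, would be something like:
# inside the file passed as module_file= to the Trainer (sketch only, body elided)
def trainer_fn(trainer_fn_args, schema):
    # build the model here; a Keras model can be wrapped with
    # tf.keras.estimator.model_to_estimator(keras_model=get_model())
    ...
    return {
        'estimator': estimator,
        'train_spec': train_spec,
        'eval_spec': eval_spec,
        'eval_input_receiver_fn': eval_input_receiver_fn,
    }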
CODE:
def get_model(show_summary=True):
    # one-hot categorical features
    num_A = 4,
    num_B = 3,
    num_C = 2,
    num_D = 8,
    num_E = 12,
    num_F = 4,
    num_G = 16,
    num_H = 26
    input_A = tf.keras.Input(shape=(num_A,), name="A_xf")
    input_B = tf.keras.Input(shape=(num_B,), name="B_xf")
    input_C = tf.keras.Input(shape=(num_C,), name="C_xf")
    input_D = tf.keras.Input(shape=(num_D,), name="D_xf")
    input_E = tf.keras.Input(shape=(num_E,), name="E_xf")
    input_F = tf.keras.Input(shape=(num_F,), name="F_xf")
    input_G = tf.keras.Input(shape=(num_G,), name="G_xf")
    input_H = tf.keras.Input(shape=(num_H,), name="H_xf")
    fl = keras.Input(shape=(75,))
    dense = layers.Dense(35, activation="relu")
    x = dense(fl)
    x = layers.Dense(15, activation="relu")(x)
    outputs = layers.Dense(1, activation="sigmoid")(x)
    _inputs = [input_A, input_B, input_C, input_D, input_E, input_F, input_G, input_H]
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    if show_summary:
        model.summary()
    return model
We had the same problem and were able to solve it by migrating from the tf.keras Sequential model API to the functional API. You can read about the different model APIs here. You'll notice in the tutorials that the wide-and-deep model is implemented with the functional API.
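To make the difference concrete, here is a toy contrast (nothing to do with the trainer model below): the same two-layer network written with the Sequential API and with the functional API.
import tensorflow as tf

# Sequential style: the layer stack is implicit
seq_model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(1, activation='sigmoid')])

# functional style: inputs and outputs are explicit tensors, which is what the TFX trainer code expects
inputs = tf.keras.Input(shape=(4,), name='some_feature_xf')
x = tf.keras.layers.Dense(8, activation='relu')(inputs)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)
func_model = tf.keras.Model(inputs=inputs, outputs=outputs)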
As an example, here is a sample of a keras model for the trainer component:
def _build_functional_test_model():
    colname = 'feature_string_xf'
    feature_columns = [
        tf.feature_column.numeric_column(colname, shape=_MAX_REQUEST_LEN, dtype=tf.dtypes.int64, default_value=0)]
    input_layers = {
        colname: tf.keras.layers.Input(name=colname, shape=_MAX_REQUEST_LEN, dtype=tf.int64)
    }
    input_dense_layer = tf.keras.layers.DenseFeatures(feature_columns)(input_layers)
    embedding = tf.keras.layers.Embedding(_N_UNIQUE_WORDS,
                                          _N_EMBED,
                                          input_length=_MAX_REQUEST_LEN)(input_dense_layer)
    embedding_dropout = tf.keras.layers.SpatialDropout1D(_DROPOUT_EMBEDDING)(embedding)
    conv_1 = tf.keras.layers.Conv1D(_N_CONV_1,
                                    _K_CONV_1,
                                    activation='linear',
                                    activity_regularizer=tf.keras.regularizers.l1(_REGULARIZE_L1_CONV))(embedding_dropout)
    activation_1 = tf.keras.layers.Activation('relu')(conv_1)
    dropout_1 = tf.keras.layers.Dropout(_DROPOUT_CONV_1)(activation_1)
    conv_2 = tf.keras.layers.Conv1D(_N_CONV_1,
                                    _K_CONV_2,
                                    activation='linear',
                                    activity_regularizer=tf.keras.regularizers.l1(_REGULARIZE_L1_CONV))(dropout_1)
    activation_2 = tf.keras.layers.Activation('relu')(conv_2)
    dropout_2 = tf.keras.layers.Dropout(_DROPOUT_CONV_1)(activation_2)
    conv_3 = tf.keras.layers.Conv1D(_N_CONV_3,
                                    _K_CONV_3,
                                    activation='linear',
                                    activity_regularizer=tf.keras.regularizers.l1(_REGULARIZE_L1_CONV))(dropout_2)
    activation_3 = tf.keras.layers.Activation('relu')(conv_3)
    dropout_3 = tf.keras.layers.Dropout(_DROPOUT_CONV_1)(activation_3)
    conv_4 = tf.keras.layers.Conv1D(_N_CONV_4,
                                    _K_CONV_4,
                                    activation='linear',
                                    activity_regularizer=tf.keras.regularizers.l1(_REGULARIZE_L1_CONV))(dropout_3)
    activation_4 = tf.keras.layers.Activation('relu')(conv_4)
    dropout_4 = tf.keras.layers.Dropout(_DROPOUT_CONV_1)(activation_4)
    max_pool_5 = tf.keras.layers.GlobalMaxPooling1D()(dropout_4)
    dense_6 = tf.keras.layers.Dense(_N_DENSE_1,
                                    activation='linear',
                                    # activity_regularizer=keras.regularizers.l1(_REGULARIZE_L1_DENSE_1)
                                    )(max_pool_5)
    activation_6 = tf.keras.layers.Activation('relu')(dense_6)
    dropout_6 = tf.keras.layers.Dropout(_REGULARIZE_L1_DENSE_1)(activation_6)
    dense_7 = tf.keras.layers.Dense(_N_DENSE_2,
                                    activation='linear',
                                    # activity_regularizer=keras.regularizers.l1(_REGULARIZE_L1_DENSE_1)
                                    )(dropout_6)
    activation_7 = tf.keras.layers.Activation('relu')(dense_7)
    dropout_7 = tf.keras.layers.Dropout(_REGULARIZE_L1_DENSE_1)(activation_7)
    output = tf.keras.layers.Dense(1, activation='sigmoid')(dropout_7)
    model = tf.keras.Model(input_layers, output)
    model.compile(loss='binary_crossentropy', optimizer=_OPTIMIZER, metrics=['accuracy'])
    return model
Following the directions and the comment by Jason, I've changed the model code, since TFX doesn't support the Sequential model, only the Keras functional API.
def get_model(show_summary=True):
    # one-hot categorical features (widths produced by the Transform component)
    num_A = 4
    num_B = 3
    num_C = 2
    num_D = 8
    num_E = 12
    num_F = 4
    num_G = 16
    num_H = 26
    input_A = tf.keras.Input(shape=(num_A,), name="A_xf")
    input_B = tf.keras.Input(shape=(num_B,), name="B_xf")
    input_C = tf.keras.Input(shape=(num_C,), name="C_xf")
    input_D = tf.keras.Input(shape=(num_D,), name="D_xf")
    input_E = tf.keras.Input(shape=(num_E,), name="E_xf")
    input_F = tf.keras.Input(shape=(num_F,), name="F_xf")
    input_G = tf.keras.Input(shape=(num_G,), name="G_xf")
    input_H = tf.keras.Input(shape=(num_H,), name="H_xf")
    inputs_con = tf.keras.layers.concatenate([
        input_A,
        input_B,
        input_C,
        input_D,
        input_E,
        input_F,
        input_G,
        input_H])
    dense_1 = tf.keras.layers.Dense(50, activation='relu')(inputs_con)
    dense_2 = tf.keras.layers.Dense(25, activation='relu')(dense_1)
    output = tf.keras.layers.Dense(1, activation='sigmoid')(dense_2)
    _inputs = [
        input_A,
        input_B,
        input_C,
        input_D,
        input_E,
        input_F,
        input_G,
        input_H]
    model = tf.keras.models.Model(inputs=_inputs, outputs=output)
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    if show_summary:
        model.summary()
    return model
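As a quick sanity check outside the pipeline, the rewritten model can be fed one dummy batch per named input to confirm everything is wired up; this is purely illustrative and just reuses the feature widths from the function above.
import numpy as np

model = get_model(show_summary=False)
dims = {"A_xf": 4, "B_xf": 3, "C_xf": 2, "D_xf": 8,
        "E_xf": 12, "F_xf": 4, "G_xf": 16, "H_xf": 26}
dummy = {name: np.zeros((1, dim), dtype="float32") for name, dim in dims.items()}
print(model.predict(dummy).shape)  # expected: (1, 1)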
Related
I have the following base network with some important parameters (the error is coming from these; please assume every other parameter is defined):
maxlen = 250
model_dense = 256
Base model:
def build_base_model(inputs):
    inputs = layers.Input(shape=(maxlen,), name='base_input')
    embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)
    x = embedding_layer(inputs)
    transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim, trans_drop1, trans_drop2, trans_reg1, trans_reg2)
    x = transformer_block(x)
    x = layers.GlobalAveragePooling1D()(x)
    x = layers.Dropout(model_drop1)(x)
    outputs = layers.Dense(model_dense)(x)
    base_model = keras.Model(inputs=inputs, outputs=outputs)
    return base_model
and my Siamese network is:
base_model = build_base_model()
input_text1 = layers.Input(shape=(maxlen,))
input_text2 = layers.Input(shape=(maxlen,))
emb1 = base_model(input_text1)
emb2 = base_model(input_text2)
distance = layers.Lambda(euclidean_distance)([emb1, emb2])
outputs = layers.Dense(1, activation="sigmoid")(distance)
model = keras.Model(inputs=[emb1, emb2], outputs=outputs)
model.compile(
optimizer="adam", metrics = ["accuracy",], loss= 'binary_crossentropy')
history = model.fit(
train_X, train_y, batch_size=batch_size, epochs = 50, validation_split = 0.15, callbacks = callbacks, verbose = 1,
)
It gives me an error as:
ValueError: Input 0 of layer "model_11" is incompatible with the layer: expected shape=(None, 256), found shape=(None, 250)
What am I doing wrong?
The base Transformer model tutorial is taken from this; the Siamese model structure, cosine distance, and make_pairs are from this.
UPDATE: I have built the new network in a different manner and it is up and running. Can someone please confirm whether it is correct?
inputs1 = layers.Input(shape=(maxlen,),name='inp_1')
inputs2 = layers.Input(shape=(maxlen,),name='inp_2')
embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)
transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim, trans_drop1, trans_drop2, trans_reg1, trans_reg2)
pooling = layers.GlobalAveragePooling1D()
drop_layer = layers.Dropout(model_drop1)
out_dense = layers.Dense(model_dense)
x1 = embedding_layer(inputs1)
x2 = embedding_layer(inputs2)
x1 = transformer_block(x1)
x2 = transformer_block(x2)
x1 = pooling(x1)
x2 = pooling(x2)
x1 = drop_layer(x1)
x2 = drop_layer(x2)
vec_x1 = out_dense(x1)
vec_x2 = out_dense(x2)
distance = layers.Lambda(euclidean_distance)([vec_x1, vec_x2])
outputs = layers.Dense(1, activation="sigmoid")(distance)
model = keras.Model(inputs=[inputs1, inputs2], outputs=outputs)
In the line model = keras.Model(inputs=[emb1, emb2], outputs=outputs), I suspect you meant to write model = keras.Model(inputs=[input_text1, input_text2], outputs=outputs).
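Concretely, keeping the rest of the code from the question, the wiring would become something like the sketch below (it also assumes build_base_model is changed to take no argument, since its inputs parameter is overwritten inside the function anyway):
base_model = build_base_model()
input_text1 = layers.Input(shape=(maxlen,))
input_text2 = layers.Input(shape=(maxlen,))
emb1 = base_model(input_text1)
emb2 = base_model(input_text2)
distance = layers.Lambda(euclidean_distance)([emb1, emb2])
outputs = layers.Dense(1, activation="sigmoid")(distance)
# the Model's inputs must be the Input placeholders, not the embeddings computed from them
model = keras.Model(inputs=[input_text1, input_text2], outputs=outputs)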
Consider the following model:
class FractalNeuralNetwork(tf.keras.Model):
    def __init__(self, class_number):
        super(FractalNeuralNetwork, self).__init__()
        self.box_counting_patches = [BoxCountingPatch(box_size) for box_size in range(3, 41 + 1, 2)]
        self.chebyshev = ChebyshevBinaryPatch()
        self.euclidean = EuclideanBinaryPatch()
        self.manhattan = ManhattanBinaryPatch()
        self.percolation_c = PercolationC()
        self.percolation_m = PercolationM()
        self.percolation_q = PercolationQ()
        self.probability = ProbabilityMatrix()
        self.fractal_dimension = FractalDimension()
        self.lacunarity = Lacunarity()
        self.assemble = AssembleFractalImage()
        self.resize = tf.keras.layers.Resizing(width=224, height=224)
        self.rescale = tf.keras.layers.Rescaling(scale=1./255)
        self.mobilenet_v2 = hub.KerasLayer("https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4",
                                           output_shape=[1280],
                                           trainable=False)
        self.combine = tf.keras.layers.Add()
        self.score = tf.keras.layers.Dense(class_number, activation='softmax')

    def call(self, inputs):
        inputs = tf.ensure_shape(inputs, self.ensure_input_shape)
        box_counting_patches = [box_counting_patch(inputs) for box_counting_patch in self.box_counting_patches]
        chebyshev = self.chebyshev(inputs=box_counting_patches)
        euclidean = self.euclidean(inputs=box_counting_patches)
        manhattan = self.manhattan(inputs=box_counting_patches)
        percolation_c = self.percolation_c(inputs=[chebyshev, euclidean, manhattan])
        percolation_m = self.percolation_m(inputs=[chebyshev, euclidean, manhattan])
        percolation_q = self.percolation_q(inputs=[chebyshev, euclidean, manhattan])
        probability = self.probability(inputs=[chebyshev, euclidean, manhattan])
        fractal_dimension = self.fractal_dimension(inputs=probability)
        lacunarity = self.lacunarity(inputs=probability)
        fractal_output = self.assemble(
            inputs=[
                fractal_dimension,
                lacunarity,
                percolation_c,
                percolation_m,
                percolation_q
            ]
        )
        fractal_output = self.resize(fractal_output)
        fractal_output = self.rescale(fractal_output)
        fractal_output = self.mobilenet_v2(fractal_output)
        original_output = self.rescale(inputs)
        original_output = self.mobilenet_v2(original_output)
        combined_output = self.combine([fractal_output, original_output])
        output = self.score(combined_output)
        return output
None of the custom layers here are trainable; they just perform calculations, extracting fractal features from the images.
The model is trained with the following code:
model = FractalNeuralNetwork(
class_number=CLASS_NUMBER
)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(
training_set,
validation_data=validation_set,
epochs=1
)
The first batch comes through with the expected shape (None, 224, 224, 3), but the second has shape (None, None, None, None), and that breaks my model. Why does this happen?
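One way to narrow this down (a sketch only) is to compare the static shapes Keras traces against, via element_spec, with the runtime shapes seen inside the model, for example with a pass-through probe layer:
import tensorflow as tf

print(training_set.element_spec)  # static shapes Keras uses when tracing

class ShapeProbe(tf.keras.layers.Layer):
    """Pass-through layer that logs static vs. runtime shape of its input."""
    def call(self, x):
        print("static shape while tracing:", x.shape)
        tf.print("runtime shape:", tf.shape(x))
        return x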
I'm trying to run a classification simulation in tff, but I'm getting this error:
TypeError: Unable to interpret an argument of type tensorflow.python.data.ops.dataset_ops.PrefetchDataset as a TFF value.
Here is the code I'm using
client_lr = 1e-3
server_lr = 1e-1
NUM_ROUNDS = 200
NUM_EPOCHS = 5
BATCH_SIZE = 2048
EPOCHS = 400
TH = 0.5
def base_model():
    return Sequential([
        Dense(256, activation='relu', input_shape=(x_train.shape[-1],)),
        Dropout(0.5),
        Dense(256, activation='relu'),
        Dropout(0.5),
        Dense(256, activation='relu'),
        Dropout(0.5),
        Dense(1, activation='sigmoid'),
    ])
client_train_dataset = collections.OrderedDict()
for i in range(1, total_clients+1):
    client_name = "client_" + str(i)
    start = samples_per_set * (i-1)
    end = samples_per_set * i
    data = collections.OrderedDict((('y', y_train[start:end]), ('x', x_train[start:end])))
    client_train_dataset[client_name] = data
train_dataset = tff.simulation.FromTensorSlicesClientData(client_train_dataset)
sample_dataset = train_dataset.create_tf_dataset_for_client(train_dataset.client_ids[0])
sample_element = next(iter(sample_dataset))
PREFETCH_BUFFER = 10
SHUFFLE_BUFFER = samples_per_set
def preprocess(dataset):
    def batch_format_fn(element):
        return collections.OrderedDict(
            x=element['x'],
            y=tf.reshape(element['y'], [-1, 1]))
    return dataset.repeat(NUM_EPOCHS).shuffle(SHUFFLE_BUFFER).batch(BATCH_SIZE).map(batch_format_fn).prefetch(PREFETCH_BUFFER)
preprocessed_sample_dataset = preprocess(sample_dataset)
sample_batch = tf.nest.map_structure(lambda x: x.numpy(), next(iter(preprocessed_sample_dataset)))
def make_federated_data(client_data, client_ids):
    return [preprocess(client_data.create_tf_dataset_for_client(x)) for x in client_ids]
federated_train_data = make_federated_data(train_dataset, train_dataset.client_ids)
def model_tff():
    model = base_model()
    return tff.learning.from_keras_model(
        model,
        input_spec=preprocessed_sample_dataset.element_spec,
        loss=tf.keras.losses.BinaryCrossentropy(),
        metrics=[
            tfa.metrics.F1Score(num_classes=1, threshold=TH),
            keras.metrics.Precision(name="precision", thresholds=TH),
            keras.metrics.Recall(name="recall", thresholds=TH)
        ])
iterative_process = tff.learning.build_federated_averaging_process(
model_tff,
client_optimizer_fn=lambda: optimizers.Adam(learning_rate=client_lr),
server_optimizer_fn=lambda: optimizers.SGD(learning_rate=server_lr))
state = iterative_process.initialize()
federated_model = None
for round_num in range(1, NUM_ROUNDS+1):
    state, tff_metrics = iterative_process.next(state, federated_train_data)  # THE ERROR IS HERE
    federated_model = base_model()
    federated_model.compile(optimizer=optimizers.Adam(learning_rate=client_lr),
                            loss=tf.keras.losses.BinaryCrossentropy(),
                            metrics=[
                                tfa.metrics.F1Score(num_classes=1, threshold=TH),
                                keras.metrics.Precision(name="precision", thresholds=TH),
                                keras.metrics.Recall(name="recall", thresholds=TH)
                            ])
    state.model.assign_weights_to(model=federated_model)
    federated_result = federated_model.evaluate(x_val, y_val, verbose=1, return_dict=True)
    federated_test = federated_model.evaluate(x_test, y_test, verbose=1, return_dict=True)
I'm using this creditcard dataset: https://www.kaggle.com/mlg-ulb/creditcardfraud
federated_train_data is a list of <PrefetchDataset shapes: OrderedDict([(x, (None, 29)), (y, (None, 1))]), types: OrderedDict([(x, tf.float64), (y, tf.int64)])>, just like in the Federated Learning for Image Classification tutorial on the TensorFlow Federated website.
This might be issue #918. Does it only occur when running in Google Colab? Which version of TFF is being used?
Commit 4e57386 is believed to have fixed this, and the fix is now part of the tensorflow-federated-nightly pip package.
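To confirm which version is installed (and to pick up a build that contains that commit), something along these lines should work:
import tensorflow_federated as tff
print(tff.__version__)

# if the stable release still shows the problem, the nightly build can be tried:
#   pip uninstall tensorflow-federated
#   pip install --upgrade tensorflow-federated-nightly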
I've attempted to build a simple multi-input, single-output model with LSTM layers and generated some data to fit it on. Basically, I have a reference corpus where each document has length 100, a question corpus where each document has length 25, and answers of length 5.
TEXT_VOCAB_SIZE, QUESTION_VOCAB_SIZE = 10000, 25
ANSWER_VOCAB_SIZE = 500
max_samples, max_length, max_qn_length = 1000, 100, 25
max_ans_length = 5
text_corpus = np.random.randint(1, TEXT_VOCAB_SIZE,
size=(max_samples, max_length))
questions_corpus = np.random.randint(1, QUESTION_VOCAB_SIZE,
size=(max_samples, max_qn_length))
answers_corpus = np.random.randint(1,ANSWER_VOCAB_SIZE,
size=(max_samples, max_ans_length))
backend.clear_session()
m31_corpus_input = Input(shape=(max_length,), dtype='int32')
m31_qn_input = Input(shape=(max_qn_length,), dtype='int32')
m31_corpus_emb = layers.Embedding(64, TEXT_VOCAB_SIZE)(m31_corpus_input)
m31_qn_emb = layers.Embedding(64, QUESTION_VOCAB_SIZE)(m31_qn_input)
m31_corpus_lstm = layers.LSTM(32)(m31_corpus_emb)
m31_qn_lstm = layers.LSTM(32)(m31_qn_emb)
m31_concat = layers.concatenate([m31_corpus_lstm, m31_qn_lstm], axis=-1)
# m31_concat = layers.Concatenate()([m31_corpus_lstm, m31_qn_lstm])
m31_ans = layers.Dense(ANSWER_VOCAB_SIZE, activation='softmax')(m31_concat)
m31 = models.Model(inputs=[m31_corpus_input, m31_qn_input], outputs=m31_ans)
print(m31.summary())
m31.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['acc'])
m31.fit([text_corpus, questions_corpus],
answers_corpus, epochs=10, batch_size=64,
validation_split=0.2)
When running the code I get the following error:
ValueError: Shapes (None, 5) and (None, 500) are incompatible
I've been tweaking the different values in this model but still can't work out why this is not correct.
I've solved it:
TEXT_VOCAB_SIZE, QUESTION_VOCAB_SIZE, ANSWER_VOCAB_SIZE = 10000, 25, 500
max_length, max_qn_length, max_ans_length = 100, 25, 5
max_samples = 1000
text_corpus = np.random.randint(1, TEXT_VOCAB_SIZE,
size=(max_samples, max_length))
questions_corpus = np.random.randint(1, QUESTION_VOCAB_SIZE,
size=(max_samples, max_qn_length))
answers_corpus = np.random.randint(0,ANSWER_VOCAB_SIZE,
size=(max_samples,))
answers_corpus = to_categorical(answers_corpus)
backend.clear_session()
m31_corpus_input = Input(shape=(max_length,), dtype='int32')
m31_qn_input = Input(shape=(max_qn_length,), dtype='int32')
m31_corpus_emb = layers.Embedding(TEXT_VOCAB_SIZE, 64)(m31_corpus_input)
m31_qn_emb = layers.Embedding(QUESTION_VOCAB_SIZE, 64)(m31_qn_input)
m31_corpus_lstm = layers.LSTM(32)(m31_corpus_emb)
m31_qn_lstm = layers.LSTM(32)(m31_qn_emb)
m31_concat = layers.Concatenate()([m31_corpus_lstm, m31_qn_lstm])
m31_ans = layers.Dense(ANSWER_VOCAB_SIZE, activation='softmax')(m31_concat)
m31 = models.Model(inputs=[m31_corpus_input, m31_qn_input], outputs=m31_ans)
print(m31.summary())
m31.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['acc'])
m31.fit([text_corpus, questions_corpus], answers_corpus,
epochs=10, batch_size=128)
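Two things changed: the Embedding layers now get their arguments in the correct (input_dim, output_dim) order, and the targets became a single integer class per sample, one-hot encoded with to_categorical so they match the (None, 500) softmax output that categorical_crossentropy expects. If you would rather skip the one-hot step, sparse_categorical_crossentropy accepts the integer labels directly; a small variant of the code above (same variables, just a different loss and target array):
answers_sparse = np.random.randint(0, ANSWER_VOCAB_SIZE, size=(max_samples,))

m31.compile(optimizer='rmsprop',
            loss='sparse_categorical_crossentropy',
            metrics=['acc'])
m31.fit([text_corpus, questions_corpus], answers_sparse,
        epochs=10, batch_size=128)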
I am trying to create hierarchical attention in TensorFlow 2.0 using the AdditiveAttention Keras layer. The error I get:
ValueError: Graph disconnected: cannot obtain value for tensor Tensor("question_input:0", shape=(None, None), dtype=float32) at layer "question_input". The following previous layers were accessed without issue: []
Can someone please tell me what I am doing wrong?
def get_text_model(self, embedding):
    print("Text Input")
    text_input = Input(shape=(None,), name="text_input")
    text_embedding = embedding(text_input)
    cnn_1d = Conv1D(128, 4, padding="same", activation="relu", strides=1)(text_embedding)
    output = cnn_1d
    model = Model(text_input, output)
    return model

def get_sentence_attention_model(self, sentence_input, encoded_question, sentence_model):
    encoded_sentence = sentence_model(sentence_input)
    sentence_attention = AdditiveAttention()([encoded_sentence, encoded_question])
    output = Concatenate()([sentence_attention, encoded_question])
    model = Model(sentence_input, output)
    return model

def get_section_model(self, encoded_question, sentence_model):
    section_input = Input(shape=(None, None), name="section_input")
    section_encoded = TimeDistributed(sentence_model)([self.question_input, section_input])
    cnn_1d = Conv1D(128, 4, padding="same", activation="relu", strides=1)(section_encoded)
    output = cnn_1d
    section_attention_output = AdditiveAttention()([output, encoded_question])
    model = Model(section_input, section_attention_output)
    return model

def get_document_model(self, encoded_question, section_model):
    document_input = Input(shape=(None, None, None), name="document_input")
    document_encoded = TimeDistributed(section_model)(document_input)
    cnn_1d = Conv1D(128, 4, padding="same", activation="relu", strides=1)(document_encoded)
    document_attention = AdditiveAttention()([cnn_1d, encoded_question])
    model = Model(document_input, document_attention)
    return model

def get_model(self):
    self.vocabulary_size = self.vectorizer.get_vocabulary_size()
    self.embedding_matrix = self.vectorizer.get_embedding_matrix()
    embedding = Embedding(self.vocabulary_size, self.embedding_size, mask_zero=True, trainable=True,
                          weights=None if self.embedding_matrix is None else [self.embedding_matrix])
    self.question_input = Input(shape=(None,), name="question_input")
    self.sentence_input = Input(shape=(None,), name="sentence_input")
    self.question_model = self.get_text_model(embedding)
    self.sentence_model = self.get_text_model(embedding)
    self.encoded_question = self.question_model(self.question_input)
    self.sentence_attention_model = self.get_sentence_attention_model(self.sentence_input, self.encoded_question, self.sentence_model)
    self.section_model = self.get_section_model(self.encoded_question, self.sentence_attention_model)
    self.document_model = self.get_document_model(self.encoded_question, self.section_model)
    optimizer = Adadelta()
    loss_metrics = "binary_crossentropy"
    self.document_model.compile(loss=loss_metrics, optimizer=optimizer, metrics=[loss_metrics])
    self.document_model.summary()
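For what it's worth, Keras raises "Graph disconnected" when a Model's outputs depend on an Input tensor that is not listed among that Model's inputs, which looks like what happens in get_section_model: the output depends on encoded_question (and therefore on question_input), but only section_input is declared as the model's input. A minimal, self-contained reproduction of the error (toy example, unrelated to the code above):
import tensorflow as tf
from tensorflow.keras import layers, Model

a = tf.keras.Input(shape=(4,), name="a")
b = tf.keras.Input(shape=(4,), name="b")
summed = layers.Add()([a, b])

try:
    broken = Model(inputs=a, outputs=summed)  # the output also needs `b`, so this raises Graph disconnected
except ValueError as err:
    print(err)

ok = Model(inputs=[a, b], outputs=summed)  # fine: every Input the output depends on is listed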