I have trained an image multi-class classification model based on MobileNet-V2 (only a Dense layer was added on top), carried out full integer quantization (INT8), and then exported a model.tflite file, which I call on OpenMV using tf.classify().
Here is my code to quantize it:
import tensorflow as tf
import numpy as np
import pathlib

def representative_dataset():
    for _ in range(100):
        data = np.random.rand(1, 96, 96, 3)  # random tensor for test
        yield [data.astype(np.float32)]

converter = tf.lite.TFLiteConverter.from_saved_model('saved_model/my_model')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
tflite_quant_model = converter.convert()

tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_quant_file = tflite_models_dir / "mnist_model_quant.tflite"
tflite_model_quant_file.write_bytes(tflite_quant_model)
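(Note that the snippet above, with only DEFAULT optimizations and a representative dataset, still leaves float input/output and allows float fallback. A fully integer INT8 conversion usually also pins the supported ops and the inference input/output types; below is a minimal sketch of those extra converter settings, reusing the converter from above. Calibration is also normally done with real training images rather than random tensors.)

# Sketch: additional settings for a fully integer (INT8 in/out) conversion,
# applied to the same `converter` before calling convert().
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8   # or tf.uint8, depending on what the runtime expects
converter.inference_output_type = tf.int8  # or tf.uint8
tflite_quant_model = converter.convert()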
The accuracy of this model was quite good when tested during training. However, when tested on OpenMV, the same label is output for every object (although the probabilities differ slightly).
I looked up some materials; one of them mentioned that tf.classify() has offset and scale parameters, which relate to how the RGB values are compressed to [-1, 1] or [0, 1] during training, but these parameters do not appear in the official API documentation.
for obj in tf.classify(self.net, img1, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
    print("**********\nTop 1 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
    sorted_list = sorted(zip(self.labels, obj.output()), key=lambda x: x[1], reverse=True)
    for i in range(1):
        print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
    return sorted_list[i][0]
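As a side note, the input scale and zero point that a fully quantized model expects can be read back from the .tflite file with the TFLite Interpreter in Python; this helps verify that the on-device preprocessing matches the scaling used during training. A minimal sketch, using the file written by the converter above:

# Sketch: inspect the quantization parameters of the exported model.
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="/tmp/mnist_tflite_models/mnist_model_quant.tflite")
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]
print("input dtype:", inp["dtype"], "(scale, zero_point):", inp["quantization"])
print("output dtype:", out["dtype"], "(scale, zero_point):", out["quantization"])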
So, are there any examples of the full workflow from training a TensorFlow model to deployment on OpenMV?
I am learning this new ONNX framework that allows us to deploy deep learning (and other) models into production.
However, there is one thing I am missing. I thought that the main reason for having such a framework is inference: when we have a trained model and want to use it in a different venv (where, for example, we cannot have PyTorch), the model can still be used.
I have prepared a "from scratch" example here:
# Modules
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
import torchvision
import onnx
import onnxruntime
import matplotlib.pyplot as plt
import numpy as np
# %config Completer.use_jedi = False
# MNIST Example dataset
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST(
        'data', train=True, download=True,
        transform=torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
        ])),
    batch_size=800)
# Take data and labels "by hand"
inputs_batch, labels_batch = next(iter(train_loader))
# Simple Model
class CNN(nn.Module):
    def __init__(self, in_channels, num_classes):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=10,
                               kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.conv2 = nn.Conv2d(in_channels=10, out_channels=16,
                               kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.fc1 = nn.Linear(16 * 7 * 7, num_classes)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = x.reshape(x.shape[0], -1)
        x = self.fc1(x)
        return x
# Training setting
device = 'cpu'
batch_size = 64
learning_rate = 0.001
n_epochs = 10
# Dataset prep
dataset = TensorDataset(inputs_batch, labels_batch)
TRAIN_DF = DataLoader(dataset = dataset, batch_size = batch_size, shuffle = True)
# Model Init
model = CNN(in_channels=1, num_classes=10)
optimizer = optim.Adam(model.parameters(), lr = learning_rate)
# Training Loop
for epoch in range(n_epochs):
    for data, labels in TRAIN_DF:
        model.train()
        # Send data to the device
        data = data.to(device)
        labels = labels.to(device)
        # data = data.reshape(data.shape[0], -1)
        # Forward
        pred = model(data)
        loss = F.cross_entropy(pred, labels)
        # Backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
# Check Accuracy
def check_accuracy(loader, model):
    num_correct = 0
    num_total = 0
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device)
            y = y.to(device)
            # x = x.reshape(x.shape[0], -1)
            scores = model(x)
            _, pred = scores.max(1)
            num_correct += (pred == y).sum()
            num_total += pred.size(0)
    print(f"Got {num_correct} / {num_total} with accuracy {float(num_correct)/float(num_total)*100:.2f}")
check_accuracy(TRAIN_DF, model)
# Inference with ONNX
# Create Artifical data of the same size
img_size = 28
dummy_data = torch.randn(1, img_size, img_size)
dummy_input = torch.autograd.Variable(dummy_data).unsqueeze(0)
input_name = "input"
output_name = "output"
model_eval = model.eval()
torch.onnx.export(
    model_eval,
    dummy_input,
    "model_CNN.onnx",
    input_names=["input"],
    output_names=["output"],
)
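As an optional sanity check after exporting (onnx is already imported above), the file can be validated with the ONNX checker:

# Sketch: load the exported graph and verify that it is well formed.
onnx_model = onnx.load("model_CNN.onnx")
onnx.checker.check_model(onnx_model)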
# Take Random Image from Training Data
X_pred = data[4].unsqueeze(0)
# Convert the Tensor image to PURE numpy and pretend we are working in venv where we only have numpy - NO PYTORCH
X_pred_np = X_pred.numpy()
X_pred_np = np.array(X_pred_np)
IMG_Rando = np.random.rand(1, 1, 28, 28)
np.shape(X_pred_np) == np.shape(IMG_Rando)
ort_session = onnxruntime.InferenceSession("model_CNN.onnx")
def to_numpy(tensor):
    return (
        tensor.detach().cpu().numpy()
        if tensor.requires_grad
        else tensor.cpu().numpy()
    )
# compute ONNX Runtime output prediction
# WORKS
# ort_inputs = {ort_session.get_inputs()[0].name: X_pred_np}
# DOES NOT WORK
ort_inputs = {ort_session.get_inputs()[0].name: IMG_Rando}
# WORKS
# ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(X_pred)}
ort_outs = ort_session.run(None, ort_inputs)
ort_outs
Firstly, we create a simple model and train it on the MNIST dataset.
Then we export the trained model using the ONNX framework.
Now, when I want to classify an image using X_pred_np, it works even though it is "pure" NumPy, which is what I want.
However, I suspect that this particular case works only because it has been derived from the PyTorch tensor object, and thus "under the hood" it still has PyTorch attributes.
While when I try to inference on the random "pure" NumPy object IMG_Rando, there seems to be a problem:
Unexpected input data type. Actual: (tensor(double)) , expected: (tensor(float)).
This suggests that the PyTorch format is needed.
Is there a way to use only NumPy images for the ONNX predictions, so that inference can be performed in a separate venv where no PyTorch is installed?
Secondly, is there a way that ONNX would remember the actual classes?
In this particular case, the index corresponds to the label of the image. However, in animal classification, ONNX would not provide us with the "DOG" and "CAT" and other labels, but only the index of the predicted label, which we would need to run through our own "prediction dictionary" to know that the fifth label is associated with "cat", the sixth with "dog", and so on.
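(A hedged side note on this second point: ONNX itself does not attach class names to the output tensor, but one common workaround is to store the label list in the model's metadata at export time and read it back through onnxruntime. A minimal sketch with a hypothetical label list:)

# Sketch: store class names in the ONNX file's metadata at export time...
import json
import onnx

labels = ["cat", "dog"]  # hypothetical label list
onnx_model = onnx.load("model_CNN.onnx")
entry = onnx_model.metadata_props.add()
entry.key = "labels"
entry.value = json.dumps(labels)
onnx.save(onnx_model, "model_CNN.onnx")

# ...and read them back in the PyTorch-free environment:
import onnxruntime
sess = onnxruntime.InferenceSession("model_CNN.onnx")
labels = json.loads(sess.get_modelmeta().custom_metadata_map["labels"])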
NumPy defaults to float64 while PyTorch defaults to float32. Cast the input to float32 before inference:
IMG_Rando = np.random.rand(1, 1, 28, 28).astype(np.float32)
double is short for double-precision floating-point format, which is a floating point number representation on 64 bits, while float refers to a floating point number on 32 bits.
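A quick way to see the mismatch is to compare the session's declared input type with the array's dtype (a small sketch reusing ort_session from the question):

# The model declares float32 ('tensor(float)'), while np.random.rand returns float64.
print(ort_session.get_inputs()[0].type)    # e.g. 'tensor(float)'
print(np.random.rand(1, 1, 28, 28).dtype)  # float64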
As an improvement to the accepted answer, the idiomatic way to generate random numbers in Numpy is now by using a Generator. This offers the benefit of being able to create the array in the right type directly, rather than using the expensive astype operation, which copies the array (as in the accepted answer). Thus, the improved solution would look like:
rng = np.random.default_rng() # set seed if desired
IMG_Rando = rng.random((1, 1, 28, 28), dtype=np.float32)
Problem Description
I'm using tensorflow Estimator API, and have encountered a weird phenomenon.
I'm passing the exact same input_fn to both training and evaluation, and for some reason the images which are provided to the network are not identical.
They seem similar, but after taking a closer look, it seems that evaluation images are ok, but train images are somewhat distorted.
After loading them both, I noticed that for some reason the training images go through some kind of ReLU. I confirmed this with the following code, which operates on mat_eval and mat_train, the tensors that input_fn provides in evaluation and train mode:
special_relu = lambda mat: ((mat - 0.5) / 0.5) * ((mat - 0.5) / 0.5 > 0)
np.allclose(mat_train, special_relu(mat_eval))
>>> True
What I thought and tried
My initial thought was that it is some form of BatchNormalization. But BatchNormalization is supposed to happen within the network, and not as some preprocess, shouldn't it?
What I recorded (using tf.summary.image) was the features['image'] object, passed to my model_fn. And if I understand correctly, the features object is passed to model_fn by the input_fn called by the Estimator object.
Regardless, I tried to remove the parts of the code which are supposed to call BatchNormalization. This had no effect. Of course, I might not have done that in the right way, but as I said, I don't really think it is BatchNormalization.
Code
from datetime import datetime
from pathlib import Path
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.python.platform import tf_logging as logging
from dcnn import modeling
from dcnn.dv_constants import BATCH_SIZE, BATCHES_PER_EPOCH
from dcnn.variant_io import get_input_fn, num_variants_in_ds
logging.set_verbosity(logging.INFO)
new_checkpoint_name = lambda: f'./train_dir/' \
f'{datetime.now().strftime("%d-%m %H:%M:%S")}'
if __name__ == '__main__':
    model_name = 'small_inception'
    start_from_checkpoint = ''
    # start_from_checkpoint = '/home/yonatan/Desktop/yonas_code/dcnn/train_dir' \
    #                         '/2111132905/model.ckpt-256'
    model_dir = str(Path(start_from_checkpoint).parent) if \
        start_from_checkpoint else new_checkpoint_name()
    test = False
    train = True
    predict = False
    epochs = 1
    train_dataset_name = 'same_example'
    val_dataset_name = 'same_example'
    test_dataset_name = 'same_example'
    predict_dataset_name = 'same_example'
    model = modeling.get_model(model_name=model_name)
    estimator = model.make_estimator(
        batch_size=BATCH_SIZE,
        model_dir=model_dir,
        params=dict(batches_per_epoch=BATCHES_PER_EPOCH),
        use_tpu=False,
        master='',
        # The target of the TensorFlow standard server to use. Can be the empty string to run locally using an in-process server.
        start_from_checkpoint=start_from_checkpoint)
    if train:
        train_input_fn = get_input_fn(train_dataset_name, repeat=True)
        val_input_fn = get_input_fn(val_dataset_name, repeat=False)
        steps = (epochs * num_variants_in_ds(train_dataset_name)) / \
            BATCH_SIZE
        train_spec = tf.estimator.TrainSpec(input_fn=val_input_fn,
                                            max_steps=steps)
        eval_spec = tf.estimator.EvalSpec(input_fn=val_input_fn,
                                          throttle_secs=1)
        metrics = tf.estimator.train_and_evaluate(estimator, train_spec,
                                                  eval_spec)
        print(metrics)
I have plenty more code to share, but I tried to be concise. If anyone has any idea why this behavior happens, or needs more information, let me know.
I made a script in tensorflow 2.x but I had to downconvert it to tensorflow 1.x (tested in 1.14 and 1.15). However, the tf1 version performs very differently (10% accuracy lower on the test set). See also the plot for train and validation performance (diagram is attached below).
Looking at the operations needed for the migration from tf1 to tf2, it seems that only the Adam learning rate may be a problem, but I'm defining it explicitly (tensorflow migration).
I've reproduced the same behavior both locally on GPU and CPU and on colab. The keras used was the one built into tensorflow (tf.keras). I've used the following functions (for train, validation and test), using sparse categorization (integers):
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    horizontal_flip=horizontal_flip,
    # rescale=None,  # not needed for resnet50
    preprocessing_function=None,
    validation_split=None)

train_dataset = train_datagen.flow_from_directory(
    directory=train_dir,
    target_size=image_size,
    class_mode='sparse',
    batch_size=batch_size,
    shuffle=True)
And the model is a simple resnet50 with a new layer on top:
IMG_SHAPE = img_size + (3,)
inputs = Input(shape=IMG_SHAPE, name='image_input', dtype=tf.uint8)
x = tf.cast(inputs, tf.float32)
# not working in this version of keras. inserted in imageGenerator
x = preprocess_input_resnet50(x)
base_model = tf.keras.applications.ResNet50(
    include_top=False,
    input_shape=IMG_SHAPE,
    pooling=None,
    weights='imagenet')
# Freeze the pretrained weights
base_model.trainable = False
x = base_model(x)
# Rebuild top
x = GlobalAveragePooling2D(data_format='channels_last', name="avg_pool")(x)
top_dropout_rate = 0.2
x = Dropout(top_dropout_rate, name="top_dropout")(x)
outputs = Dense(num_classes, activation="softmax", name="pred_out")(x)
model = Model(inputs=inputs, outputs=outputs, name="ResNet50_comp")
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss="sparse_categorical_crossentropy",
              metrics=['accuracy'])
And then I'm calling the fit function:
history = model.fit_generator(train_dataset,
                              steps_per_epoch=n_train_batches,
                              validation_data=validation_dataset,
                              validation_steps=n_val_batches,
                              epochs=initial_epochs,
                              verbose=1,
                              callbacks=[stopping])
I've reproduced the same behavior, for example, with the following full script (applied to my dataset, changed to Adam, and with the intermediate dense layer before the final one removed):
deep learning sandbox
The easiest way to replicate this behavior is to enable or disable the following line in a tf2 environment running the same script. However, I've also tested on tf1 environments (1.14 and 1.15):
tf.compat.v1.disable_v2_behavior()
Sadly I cannot provide the dataset.
Update 26/11/2020
For full reproducibility, I've obtained similar behaviour with the food101 (101 categories) dataset, enabling tf1 behaviour with tf.compat.v1.disable_v2_behavior(). The following is the script, executed with tensorflow-gpu 2.2.0:
#%% ref https://medium.com/deeplearningsandbox/how-to-use-transfer-learning-and-fine-tuning-in-keras-and-tensorflow-to-build-an-image-recognition-94b0b02444f2
import os
import sys
import glob
import argparse
import matplotlib.pyplot as plt
import tensorflow as tf
# enable and disable this to obtain tf1 behaviour
tf.compat.v1.disable_v2_behavior()
from tensorflow.keras import __version__
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.optimizers import Adam
# since i'm using resnet50 weights from imagenet, i'm using food101 for
# similar but different categorization tasks
# pip install tensorflow-datasets if tensorflow_dataset not found
import tensorflow_datasets as tfds
(train_ds,validation_ds),info= tfds.load('food101', split=['train','validation'], shuffle_files=True, with_info=True)
assert isinstance(train_ds, tf.data.Dataset)
print(train_ds)
#%%
IM_WIDTH, IM_HEIGHT = 224, 224
NB_EPOCHS = 10
BAT_SIZE = 32
def get_nb_files(directory):
    """Get number of files by searching directory recursively"""
    if not os.path.exists(directory):
        return 0
    cnt = 0
    for r, dirs, files in os.walk(directory):
        for dr in dirs:
            cnt += len(glob.glob(os.path.join(r, dr + "/*")))
    return cnt

def setup_to_transfer_learn(model, base_model):
    """Freeze all layers and compile the model"""
    for layer in base_model.layers:
        layer.trainable = False
    model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

def add_new_last_layer(base_model, nb_classes):
    """Add last layer to the convnet
    Args:
        base_model: keras model excluding top
        nb_classes: # of classes
    Returns:
        new keras model with last layer
    """
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    #x = Dense(FC_SIZE, activation='relu')(x)  # new FC layer, random init
    predictions = Dense(nb_classes, activation='softmax')(x)  # new softmax layer
    model = Model(inputs=base_model.input, outputs=predictions)
    return model

def train(nb_epoch, batch_size):
    """Use transfer learning and fine-tuning to train a network on a new dataset"""
    #nb_train_samples = train_ds.cardinality().numpy()
    nb_train_samples = info.splits['train'].num_examples
    nb_classes = info.features['label'].num_classes
    classes_names = info.features['label'].names
    #nb_val_samples = validation_ds.cardinality().numpy()
    nb_val_samples = info.splits['validation'].num_examples
    #nb_epoch = int(args.nb_epoch)
    #batch_size = int(args.batch_size)

    def preprocess(features):
        #print(features['image'], features['label'])
        image = tf.image.resize(features['image'], [224, 224])
        #image = tf.divide(image, 255)
        #print(image)
        # data augmentation
        image = tf.image.random_flip_left_right(image)
        image = preprocess_input(image)
        label = features['label']
        # for categorical crossentropy
        #label = tf.one_hot(label, 101, axis=-1)
        #return image, tf.cast(label, tf.float32)
        return image, label

    # pre-processing the dataset to fit a specific image size and 2D labelling
    train_generator = train_ds.map(preprocess).batch(batch_size).repeat()
    validation_generator = validation_ds.map(preprocess).batch(batch_size).repeat()
    #train_generator = train_ds
    #validation_generator = validation_ds
    #fig = tfds.show_examples(validation_generator, info)

    # setup model
    base_model = ResNet50(weights='imagenet', include_top=False)  # include_top=False excludes final FC layer
    model = add_new_last_layer(base_model, nb_classes)
    # transfer learning
    setup_to_transfer_learn(model, base_model)
    history = model.fit(
        train_generator,
        epochs=nb_epoch,
        steps_per_epoch=nb_train_samples // BAT_SIZE,
        validation_data=validation_generator,
        validation_steps=nb_val_samples // BAT_SIZE)
        #class_weight='auto')
    return history

#execute
history = train(nb_epoch=NB_EPOCHS, batch_size=BAT_SIZE)
And the performance on food101 dataset:
Update 27/11/2020
The discrepancy is also visible on the much smaller oxford_flowers102 dataset:
(train_ds, validation_ds, test_ds), info = tfds.load('oxford_flowers102', split=['train', 'validation', 'test'], shuffle_files=True, with_info=True)
NB: the above plot shows confidences obtained by running the same training multiple times and evaluating the mean and std, to check for the effects of random weight initialization and data augmentation.
Moreover, I've tried some hyperparameter tuning on tf2, resulting in the following picture:
changing the optimizer (adam and rmsprop)
not applying horizontal flipping augmentation
deactivating keras resnet50 preprocess_input
Thanks in advance for every suggestion. Here are the accuracy and validation performance on tf1 and tf2 on my dataset:
Update 14/12/2020
I'm sharing the colab for reproducibility on oxford_flowers at the click of a button:
colab script
I came across something similar, when doing the opposite migration (from TF1+Keras to TF2).
Running this code below:
# using TF2
import numpy as np
from tensorflow.keras.applications.resnet50 import ResNet50
fe = ResNet50(include_top=False, pooling="avg")
out = fe.predict(np.ones((1,224,224,3))).flatten()
sum(out)
>>> 212.3205274187726
# using TF1+Keras
import numpy as np
from keras.applications.resnet50 import ResNet50
fe = ResNet50(include_top=False, pooling="avg")
out = fe.predict(np.ones((1,224,224,3))).flatten()
sum(out)
>>> 187.23898954353717
You can see that the same model from the same library on different versions does not return the same value (using sum as a quick check). I found the explanation for this mysterious behavior in this other SO answer: ResNet model in keras and tf.keras give different output for the same image
Another recommendation I'd give you: try using the pooling argument of the applications.resnet50.ResNet50 class instead of the additional layer in your function, for simplicity and to remove possible sources of problems :)
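A minimal sketch of that suggestion, under the same assumptions as the question's script (TF2, food101 with 101 classes, preprocess_input kept in the tf.data pipeline as in the question's preprocess() function):

# Sketch: let ResNet50 do the global average pooling internally via pooling='avg'
# instead of adding a separate GlobalAveragePooling2D layer on top.
import tensorflow as tf

base_model = tf.keras.applications.ResNet50(
    include_top=False,
    pooling='avg',   # output is already (batch, 2048)
    weights='imagenet')
base_model.trainable = False

inputs = tf.keras.Input(shape=(224, 224, 3))
x = base_model(inputs)
outputs = tf.keras.layers.Dense(101, activation='softmax')(x)  # 101 = food101 classes
model = tf.keras.Model(inputs, outputs)
model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['accuracy'])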
I have a TF Estimator that uses Feature Columns at its input layer. One of these is an EmbeddingColumn, which I have been initializing randomly (the default behaviour).
Now I would like to pre-train my embeddings in gensim and transfer the learned embeddings into my TF model. The embedding_column accepts an initializer argument which expects a callable that can be created using tf.contrib.framework.load_embedding_initializer.
However, that function expects a saved TF checkpoint, which I don't have, because I trained my embeddings in gensim.
The question is: how do I save gensim word vectors (which are numpy arrays) as a tensor in the TF checkpoint format so that I can use that to initialize my embedding column?
Figured it out! This worked in Tensorflow 1.14.0.
You first need to turn the embedding vectors into a tf.Variable. Then use tf.train.Saver to save it in a checkpoint.
import tensorflow as tf
import numpy as np

ckpt_name = 'gensim_embeddings'
vocab_file = 'vocab.txt'
tensor_name = 'embeddings_tensor'

vocab = ['A', 'B', 'C']
embedding_vectors = np.array([
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 9]
], dtype=np.float32)

embeddings = tf.Variable(initial_value=embedding_vectors)
init_op = tf.global_variables_initializer()
saver = tf.train.Saver({tensor_name: embeddings})

with tf.Session() as sess:
    sess.run(init_op)
    saver.save(sess, ckpt_name)

# writing vocab file
with open(vocab_file, 'w') as f:
    f.write('\n'.join(vocab))
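(In a real use case, the vocab list and embedding_vectors above would come from the trained gensim model rather than being hard-coded. A minimal sketch of that extraction, assuming a gensim 4.x Word2Vec model named w2v, which is hypothetical here:)

# Sketch (hypothetical `w2v`): pull the vocabulary and the embedding matrix
# out of a trained gensim 4.x Word2Vec model's KeyedVectors.
vocab = list(w2v.wv.index_to_key)                      # tokens in index order
embedding_vectors = w2v.wv.vectors.astype(np.float32)  # shape: (vocab_size, embedding_dim)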
To use this checkpoint to initialize an embedding feature column:
cat = tf.feature_column.categorical_column_with_vocabulary_file(
    key='cat', vocabulary_file=vocab_file)

embedding_initializer = tf.contrib.framework.load_embedding_initializer(
    ckpt_path=ckpt_name,
    embedding_tensor_name='embeddings_tensor',
    new_vocab_size=3,
    embedding_dim=3,
    old_vocab_file=vocab_file,
    new_vocab_file=vocab_file
)

emb = tf.feature_column.embedding_column(cat, dimension=3, initializer=embedding_initializer, trainable=False)
And we can test to make sure it has been initialized properly:
def test_embedding(feature_column, sample):
    feature_layer = tf.keras.layers.DenseFeatures(feature_column)
    print(feature_layer(sample).numpy())

tf.enable_eager_execution()
sample = {'cat': tf.constant(['B', 'A'], dtype=tf.string)}
test_embedding(emb, sample)
The output, as expected, is:
[[4. 5. 6.]
[1. 2. 3.]]
These are the embeddings for 'B' and 'A', respectively.
I want to use a feature extractor (such as ResNet101) and add layers after that which use the output of the feature extractor layer. However, I can't seem to figure out how. I have only found solutions online where an entire network is used without adding additional layers.
I am inexperienced with Tensorflow.
In the code below you can see what I have tried. I can run the code properly without the additional convolutional layer; however, my goal is to add more layers after the ResNet.
With this attempt at adding the extra conv layer, this type error is returned:
TypeError: Expected float32, got OrderedDict([('resnet_v1_101/conv1', ...
Once I have added more layers, I would like to start training on a very small test set to see if my model can overfit.
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
import matplotlib.pyplot as plt
numclasses = 17
from google.colab import drive
drive.mount('/content/gdrive')
def decode_text(filename):
    img = tf.io.decode_jpeg(tf.io.read_file(filename))
    img = tf.image.resize_bilinear(tf.expand_dims(img, 0), [224, 224])
    img = tf.squeeze(img, 0)
    img.set_shape((None, None, 3))
    return img
dataset = tf.data.TextLineDataset(tf.cast('gdrive/My Drive/5LSM0collab/filenames.txt', tf.string))
dataset = dataset.map(decode_text)
dataset = dataset.batch(2, drop_remainder=True)
img_1 = dataset.make_one_shot_iterator().get_next()
net = resnet_v1.resnet_v1_101(img_1, 2048, is_training=False, global_pool=False, output_stride=8)
net = slim.conv2d(net, numclasses, 1)
sess = tf.Session()
global_init = tf.global_variables_initializer()
local_init = tf.local_variables_initializer()
sess.run(global_init)
sess.run(local_init)
img_out, conv_out = sess.run((img_1, net))
resnet_v1.resnet_v1_101 does not return just net, but instead returns a tuple net, end_points. The second element is a dictionary, which is presumably why you are getting this particular error message.
From the documentation of this function:
Returns:
  net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
    If global_pool is False, then height_out and width_out are reduced by a
    factor of output_stride compared to the respective height_in and width_in,
    else both height_out and width_out equal one. If num_classes is 0 or None,
    then net is the output of the last ResNet block, potentially after global
    average pooling. If num_classes is a non-zero integer, net contains the
    pre-softmax activations.
  end_points: A dictionary from components of the network to the corresponding
    activation.
So you can write for example:
net, _ = resnet_v1.resnet_v1_101(img_1, 2048, is_training=False, global_pool=False, output_stride=8)
net = slim.conv2d(net, numclasses, 1)
You can also choose an intermediate layer, e.g.:
_, end_points = resnet_v1.resnet_v1_101(img_1, 2048, is_training=False, global_pool=False, output_stride=8)
net = slim.conv2d(end_points["main_Scope/resnet_v1_101/block3"], numclasses, 1)
(You can look into end_points to find the names of the endpoints; your scope name will be different from main_Scope.)
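For example, since end_points is an OrderedDict keyed by endpoint name, the available names can be listed right after building the network:

for name in end_points:
    print(name)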