I'm using Keras 2.2.0 and am trying to do something like the following:
import tensorflow as tf
import keras.backend as K
K.clear_session()
sess = tf.Session()
K.set_session(sess)
...
with K.get_session() as sess:
However, I get errors saying AttributeError: 'module' object has no attribute 'clear_session'. So it seems this functionality is no longer in keras.backend?
For instance, if I do dir(keras.backend), I get:
['Function', 'NAME_SCOPE_STACK', 'Print', 'RandomStreams', 'T', 'T_softsign', '_BACKEND', '__builtins__', '__doc__', '__file__', '__name__', '__package__', '__path__', '_backend', '_config', '_config_path', '_epsilon', '_floatx', '_image_data_format', '_keras_base_dir', '_keras_dir', 'abs', 'absolute_import', 'all', 'any', 'arange', 'argmax', 'argmin', 'backend', 'batch_dot', 'batch_flatten', 'batch_get_value', 'batch_normalization', 'batch_set_value', 'bias_add', 'binary_crossentropy', 'cast', 'cast_to_floatx', 'categorical_crossentropy', 'clip', 'common', 'concatenate', 'constant', 'contextmanager', 'conv1d', 'conv2d', 'conv2d_transpose', 'conv3d', 'conv3d_transpose', 'cos', 'count_params', 'ctc_batch_cost', 'ctc_cost', 'ctc_create_skip_idxs', 'ctc_interleave_blanks', 'ctc_path_probs', 'ctc_update_log_p', 'cumprod', 'cumsum', 'defaultdict', 'depthwise_conv2d', 'division', 'dot', 'dropout', 'dtype', 'elu', 'epsilon', 'equal', 'eval', 'exp', 'expand_dims', 'eye', 'f', 'flatten', 'floatx', 'foldl', 'foldr', 'function', 'gather', 'get_uid', 'get_value', 'get_variable_shape', 'gradients', 'greater', 'greater_equal', 'hard_sigmoid', 'has_arg', 'identity', 'ifelse', 'image_data_format', 'image_dim_ordering', 'importlib', 'in_test_phase', 'in_top_k', 'in_train_phase', 'int_shape', 'is_keras_tensor', 'is_placeholder', 'is_sparse', 'is_tensor', 'json', 'l2_normalize', 'learning_phase', 'less', 'less_equal', 'local_conv1d', 'local_conv2d', 'log', 'logsumexp', 'map_fn', 'max', 'maximum', 'mean', 'min', 'minimum', 'moving_average_update', 'name_scope', 'ndim', 'normalize_batch_in_training', 'not_equal', 'np', 'one_hot', 'ones', 'ones_like', 'os', 'pattern_broadcast', 'permute_dimensions', 'placeholder', 'pool', 'pool2d', 'pool3d', 'pow', 'print_function', 'print_tensor', 'prod', 'py_all', 'py_any', 'py_slice', 'py_sum', 'random_binomial', 'random_normal', 'random_normal_variable', 'random_uniform', 'random_uniform_variable', 'relu', 'repeat', 'repeat_elements', 'reset_uids', 'reshape', 'resize_images', 'resize_volumes', 'reverse', 'rnn', 'round', 'separable_conv1d', 'separable_conv2d', 'set_epsilon', 'set_floatx', 'set_image_data_format', 'set_image_dim_ordering', 'set_learning_phase', 'set_value', 'shape', 'sigmoid', 'sign', 'sin', 'slice', 'softmax', 'softplus', 'softsign', 'sparse_categorical_crossentropy', 'spatial_2d_padding', 'spatial_3d_padding', 'sqrt', 'square', 'squeeze', 'stack', 'std', 'stop_gradient', 'sum', 'switch', 'sys', 'tanh', 'temporal_padding', 'th_sparse_module', 'theano', 'theano_backend', 'tile', 'to_dense', 'transpose', 'truncated_normal', 'update', 'update_add', 'update_sub', 'var', 'variable', 'zeros', 'zeros_like']
and I don't see any of those three functions (clear_session, set_session, get_session) in there.
How should I be writing this code in modern Keras?
Thanks!
EDIT: https://github.com/keras-team/keras/issues/11015
It seems this is not available and I may have to downgrade.
It might be that your backend is set to Theano; judging by the dir() output (theano, theano_backend, RandomStreams), that is the case, and I believe clear_session is only available through the TensorFlow backend in Keras. You can change the backend in your keras.json to "tensorflow", and clear_session should then be available to you.
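For a quick check without touching your config files, the backend can also be selected with the KERAS_BACKEND environment variable before Keras is imported (a minimal sketch; the persistent fix is setting "backend": "tensorflow" in ~/.keras/keras.json):
import os
os.environ["KERAS_BACKEND"] = "tensorflow"  # must be set before keras is imported

import tensorflow as tf
import keras.backend as K

print(K.backend())           # should now report "tensorflow"
K.clear_session()            # available on the TensorFlow backend
K.set_session(tf.Session())  # works with Keras 2.x / TF 1.x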
I am currently trying to use H2O from Python, and I am running into problems with XGBoost on macOS.
It seems like H2O cannot find it anywhere.
More precisely, the following simple snippet
import pandas as pd
import h2o
data = [['2015-01-01', '2490.925806' , '-0.41'],
['2015-01-02', '2412.623113' , '-0.48'],
['2015-01-03', '2365.611276' , '-0.55']]
df = pd.DataFrame(data, columns=["time", "base", "target"]).set_index("time", drop=True)
h2o.init(nthreads=-1)
estimator = h2o.estimators.H2OXGBoostEstimator()
training_frame = h2o.H2OFrame(df)
estimator.train(["base"], "target", training_frame)
gives me the error:
H2OResponseError: Server error water.exceptions.H2ONotFoundArgumentException:
Error: POST /3/ModelBuilders/xgboost not found
Request: POST /3/ModelBuilders/xgboost
data: {'training_frame': 'Key_Frame__upload_893634781f588299bbd20d51c98d43a9.hex', 'nfolds': '0', 'keep_cross_validation_models': 'True', 'keep_cross_validation_predictions': 'False', 'keep_cross_validation_fold_assignment': 'False', 'score_each_iteration': 'False', 'fold_assignment': 'auto', 'response_column': 'target', 'ignore_const_cols': 'True', 'stopping_rounds': '0', 'stopping_metric': 'auto', 'stopping_tolerance': '0.001', 'max_runtime_secs': '0.0', 'seed': '-1', 'distribution': 'auto', 'tweedie_power': '1.5', 'categorical_encoding': 'auto', 'quiet_mode': 'True', 'ntrees': '50', 'max_depth': '6', 'min_rows': '1.0', 'min_child_weight': '1.0', 'learn_rate': '0.3', 'eta': '0.3', 'sample_rate': '1.0', 'subsample': '1.0', 'col_sample_rate': '1.0', 'colsample_bylevel': '1.0', 'col_sample_rate_per_tree': '1.0', 'colsample_bytree': '1.0', 'colsample_bynode': '1.0', 'max_abs_leafnode_pred': '0.0', 'max_delta_step': '0.0', 'score_tree_interval': '0', 'min_split_improvement': '0.0', 'gamma': '0.0', 'nthread': '-1', 'build_tree_one_node': 'False', 'calibrate_model': 'False', 'max_bins': '256', 'max_leaves': '0', 'sample_type': 'uniform', 'normalize_type': 'tree', 'rate_drop': '0.0', 'one_drop': 'False', 'skip_drop': '0.0', 'tree_method': 'auto', 'grow_policy': 'depthwise', 'booster': 'gbtree', 'reg_lambda': '1.0', 'reg_alpha': '0.0', 'dmatrix_type': 'auto', 'backend': 'auto', 'gainslift_bins': '-1', 'auc_type': 'auto', 'scale_pos_weight': '1.0'}
More information about my setup:
OS: Monterey 12.3
Processor: Apple M1
Python: 3.9.10
H2O: 3.36.0.3
I suspect the Apple M1 is the cause of the error, but is that really the case?
I am sorry, but XGBoost is not supported on the Apple M1 processor yet.
https://h2oai.atlassian.net/browse/PUBDEV-8482
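If you want to verify this from Python before training, the H2O client exposes an availability check (a minimal sketch; it assumes an H2O cluster has been started with h2o.init(), and the method is worth double-checking against your h2o version):
import h2o
from h2o.estimators import H2OXGBoostEstimator

h2o.init(nthreads=-1)

# Returns False when the running H2O backend was built without XGBoost support,
# which is currently the case on Apple M1.
print(H2OXGBoostEstimator.available())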
I'm trying to run the following code in a Jupyter notebook, but it just keeps running endlessly with no output. I'm following this tutorial: https://data-flair.training/blogs/stock-price-prediction-machine-learning-project-in-python/
The code is from stock_app.py, which doesn't seem to be working:
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
from dash.dependencies import Input, Output
from keras.models import load_model
from sklearn.preprocessing import MinMaxScaler
import numpy as np
app = dash.Dash()
server = app.server
scaler=MinMaxScaler(feature_range=(0,1))
df_nse = pd.read_csv("./NSE-TATA.csv")
df_nse["Date"]=pd.to_datetime(df_nse.Date,format="%Y-%m-%d")
df_nse.index=df_nse['Date']
data=df_nse.sort_index(ascending=True,axis=0)
new_data=pd.DataFrame(index=range(0,len(df_nse)),columns=['Date','Close'])
for i in range(0,len(data)):
    new_data["Date"][i]=data['Date'][i]
    new_data["Close"][i]=data["Close"][i]
new_data.index=new_data.Date
new_data.drop("Date",axis=1,inplace=True)
dataset=new_data.values
train=dataset[0:987,:]
valid=dataset[987:,:]
scaler=MinMaxScaler(feature_range=(0,1))
scaled_data=scaler.fit_transform(dataset)
x_train,y_train=[],[]
for i in range(60,len(train)):
    x_train.append(scaled_data[i-60:i,0])
    y_train.append(scaled_data[i,0])
x_train,y_train=np.array(x_train),np.array(y_train)
x_train=np.reshape(x_train,(x_train.shape[0],x_train.shape[1],1))
model=load_model("saved_ltsm_model.h5")
inputs=new_data[len(new_data)-len(valid)-60:].values
inputs=inputs.reshape(-1,1)
inputs=scaler.transform(inputs)
X_test=[]
for i in range(60,inputs.shape[0]):
    X_test.append(inputs[i-60:i,0])
X_test=np.array(X_test)
X_test=np.reshape(X_test,(X_test.shape[0],X_test.shape[1],1))
closing_price=model.predict(X_test)
closing_price=scaler.inverse_transform(closing_price)
train=new_data[:987]
valid=new_data[987:]
valid['Predictions']=closing_price
df= pd.read_csv("./stock_data.csv")
app.layout = html.Div([
html.H1("Stock Price Analysis Dashboard", style={"textAlign": "center"}),
dcc.Tabs(id="tabs", children=[
dcc.Tab(label='NSE-TATAGLOBAL Stock Data',children=[
html.Div([
html.H2("Actual closing price",style={"textAlign": "center"}),
dcc.Graph(
id="Actual Data",
figure={
"data":[
go.Scatter(
x=train.index,
y=valid["Close"],
mode='markers'
)
],
"layout":go.Layout(
title='scatter plot',
xaxis={'title':'Date'},
yaxis={'title':'Closing Rate'}
)
}
),
html.H2("LSTM Predicted closing price",style={"textAlign": "center"}),
dcc.Graph(
id="Predicted Data",
figure={
"data":[
go.Scatter(
x=valid.index,
y=valid["Predictions"],
mode='markers'
)
],
"layout":go.Layout(
title='scatter plot',
xaxis={'title':'Date'},
yaxis={'title':'Closing Rate'}
)
}
)
])
]),
dcc.Tab(label='Facebook Stock Data', children=[
html.Div([
html.H1("Facebook Stocks High vs Lows",
style={'textAlign': 'center'}),
dcc.Dropdown(id='my-dropdown',
options=[{'label': 'Tesla', 'value': 'TSLA'},
{'label': 'Apple','value': 'AAPL'},
{'label': 'Facebook', 'value': 'FB'},
{'label': 'Microsoft','value': 'MSFT'}],
multi=True,value=['FB'],
style={"display": "block", "margin-left": "auto",
"margin-right": "auto", "width": "60%"}),
dcc.Graph(id='highlow'),
html.H1("Facebook Market Volume", style={'textAlign': 'center'}),
dcc.Dropdown(id='my-dropdown2',
options=[{'label': 'Tesla', 'value': 'TSLA'},
{'label': 'Apple','value': 'AAPL'},
{'label': 'Facebook', 'value': 'FB'},
{'label': 'Microsoft','value': 'MSFT'}],
multi=True,value=['FB'],
style={"display": "block", "margin-left": "auto",
"margin-right": "auto", "width": "60%"}),
dcc.Graph(id='volume')
], className="container"),
])
])
])
@app.callback(Output('highlow', 'figure'),
              [Input('my-dropdown', 'value')])
def update_graph(selected_dropdown):
    dropdown = {"TSLA": "Tesla", "AAPL": "Apple", "FB": "Facebook", "MSFT": "Microsoft"}
    trace1 = []
    trace2 = []
    for stock in selected_dropdown:
        trace1.append(
            go.Scatter(x=df[df["Stock"] == stock]["Date"],
                       y=df[df["Stock"] == stock]["High"],
                       mode='lines', opacity=0.7,
                       name=f'High {dropdown[stock]}', textposition='bottom center'))
        trace2.append(
            go.Scatter(x=df[df["Stock"] == stock]["Date"],
                       y=df[df["Stock"] == stock]["Low"],
                       mode='lines', opacity=0.6,
                       name=f'Low {dropdown[stock]}', textposition='bottom center'))
    traces = [trace1, trace2]
    data = [val for sublist in traces for val in sublist]
    figure = {'data': data,
              'layout': go.Layout(colorway=["#5E0DAC", '#FF4F00', '#375CB1',
                                            '#FF7400', '#FFF400', '#FF0056'],
                                  height=600,
                                  title=f"High and Low Prices for {', '.join(str(dropdown[i]) for i in selected_dropdown)} Over Time",
                                  xaxis={"title": "Date",
                                         'rangeselector': {'buttons': list([{'count': 1, 'label': '1M',
                                                                             'step': 'month',
                                                                             'stepmode': 'backward'},
                                                                            {'count': 6, 'label': '6M',
                                                                             'step': 'month',
                                                                             'stepmode': 'backward'},
                                                                            {'step': 'all'}])},
                                         'rangeslider': {'visible': True}, 'type': 'date'},
                                  yaxis={"title": "Price (USD)"})}
    return figure
@app.callback(Output('volume', 'figure'),
              [Input('my-dropdown2', 'value')])
def update_graph(selected_dropdown_value):
    dropdown = {"TSLA": "Tesla", "AAPL": "Apple", "FB": "Facebook", "MSFT": "Microsoft"}
    trace1 = []
    for stock in selected_dropdown_value:
        trace1.append(
            go.Scatter(x=df[df["Stock"] == stock]["Date"],
                       y=df[df["Stock"] == stock]["Volume"],
                       mode='lines', opacity=0.7,
                       name=f'Volume {dropdown[stock]}', textposition='bottom center'))
    traces = [trace1]
    data = [val for sublist in traces for val in sublist]
    figure = {'data': data,
              'layout': go.Layout(colorway=["#5E0DAC", '#FF4F00', '#375CB1',
                                            '#FF7400', '#FFF400', '#FF0056'],
                                  height=600,
                                  title=f"Market Volume for {', '.join(str(dropdown[i]) for i in selected_dropdown_value)} Over Time",
                                  xaxis={"title": "Date",
                                         'rangeselector': {'buttons': list([{'count': 1, 'label': '1M',
                                                                             'step': 'month',
                                                                             'stepmode': 'backward'},
                                                                            {'count': 6, 'label': '6M',
                                                                             'step': 'month',
                                                                             'stepmode': 'backward'},
                                                                            {'step': 'all'}])},
                                         'rangeslider': {'visible': True}, 'type': 'date'},
                                  yaxis={"title": "Transactions Volume"})}
    return figure
if __name__=='__main__':
    app.run_server(debug=True)
I trained a custom spaCy NER model following https://towardsdatascience.com/train-ner-with-custom-training-data-using-spacy-525ce748fab7 and https://spacy.io/usage/processing-pipelines, using a sample test dataset, to detect currency codes in a given text.
Example dataset:
TRAIN_DATA = [('This is AFN currency', {'entities': [(8, 11, 'CUR')]}),
('I have EUR european currency', {'entities': [(7, 10, 'CUR')]}),
('let as have ALL money', {'entities': [(12, 15, 'CUR')]}),
('DZD is a dollar', {'entities': [(0, 3, 'CUR')]}),
('money USD united states', {'entities': [(6, 9, 'CUR')]})
]
I trained the model successfully and named it 'currency'. It predicts well on the training data with the proper label, but on unseen text it often assigns the label incorrectly.
Input test line: 'I have AZWSQTS lot LOT of Indian MZW currency USD INR'
output:
AZWSQTS - CUR , LOT - CUR, MZW - CUR, USD - CUR, INR - CUR
Here, 'AZWSQTS' and 'LOT' are not currencies, but the model still tags them as CUR; this is the problem I am getting.
Complete code:
from __future__ import unicode_literals, print_function
import random
from pathlib import Path
import spacy
from tqdm import tqdm
from spacy.training import Example
def spacy_train_model():
    ''' Sample training dataset format'''
    '''list of currency'''
    currency_list = ['AFN', 'EUR', 'EUR', 'ALL', 'DZD', 'USD', 'EUR', 'AOA', 'XCD', 'XCD', 'ARS',
'AMD', 'AWG', 'SHP', 'AUD', 'EUR', 'AZN', '', 'BSD', 'BHD', 'BDT', 'BBD', 'BYN', 'EUR', 'BZD',
'XOF', 'BMD', 'BTN', 'BOB', 'USD', 'BAM', 'BWP', 'BRL', 'USD', 'USD', 'BND', 'BGN', 'XOF', 'BIF',
'CVE', 'KHR', 'XAF', 'CAD', 'USD', 'KYD', 'XAF', 'XAF', 'NZD', 'CLP', 'CNY', 'AUD', 'AUD', 'COP',
'KMF', 'CDF', 'XAF', 'none', 'CRC', 'XOF', 'HRK', 'CUP', 'ANG', 'EUR', 'CZK', '', 'DKK', 'DJF',
'XCD', 'DOP', '', 'USD', 'EGP', 'USD', 'XAF', 'ERN', 'EUR', 'SZL', 'ETB', '', 'FKP', 'FJD',
'EUR', 'EUR', 'EUR', 'XPF', '', 'XAF', 'GMD', 'GEL', 'EUR', 'GHS', 'GIP', 'EUR', 'DKK', 'XCD',
'EUR', 'USD', 'GTQ', 'GGP', 'GNF', 'XOF', 'GYD', '', 'HTG', 'HNL', 'HKD', 'HUF', 'ISK', 'INR',
'IDR', 'XDR', 'IRR', 'IQD', 'EUR', 'IMP', 'ILS', 'EUR', '', 'JMD', 'JPY', 'JEP', 'JOD',
'KZT', 'KES', 'AUD', 'EUR', 'KWD', 'KGS', '', 'LAK', 'EUR', 'LBP', 'LSL', 'LRD', 'LYD', 'CHF',
'EUR', 'EUR', '', 'MOP', 'MGA', 'MWK', 'MYR', 'MVR', 'XOF', 'EUR', 'USD', 'EUR', 'MRU', 'MUR',
'EUR', 'MXN', 'USD', 'MDL', 'EUR', 'MNT', 'EUR', 'XCD', 'MAD', 'MZN', 'MMK', '', 'NAD', 'AUD',
'NPR', 'EUR', 'XPF', 'NZD', 'NIO', 'XOF', 'NGN', 'NZD', 'AUD', 'USD', 'KPW', 'MKD', 'NOK',
'OMR','PKR', 'USD', 'ILS', 'USD', 'PGK', 'PYG', 'PEN', 'PHP', 'NZD', 'PLN', 'EUR', 'USD','QAR',
'EUR', 'RON', 'RUB', 'RWF', '', 'USD', 'EUR', 'SHP', 'XCD', 'XCD', 'EUR', 'EUR', 'XCD', 'WST',
'EUR', 'STN', 'SAR', 'XOF', 'RSD', 'SCR', 'SLL', 'SGD', 'USD', 'ANG', 'EUR', 'EUR', 'SBD', 'SOS',
'ZAR', 'GBP', 'KRW', 'SSP', 'EUR', 'LKR', 'SDG', 'SRD', 'NOK', 'SEK', 'CHF', 'SYP', '', 'TWD',
'TJS', 'TZS', 'THB', 'USD', 'XOF', 'NZD', 'TOP', 'TTD', 'GBP', 'TND', 'TRY', 'TMT', 'USD', 'AUD',
'UGX', 'UAH', 'AED', 'GBP', 'USD', 'UYU', 'USD', 'UZS', '', 'VUV', 'EUR', 'VES', 'VND', '',
'USD', 'XPF', 'YER', 'ZMW', 'USD']
    TRAIN_DATA = [('This is AFN currency', {'entities': [(8, 11, 'CUR')]}),
                  ('I have EUR european currency', {'entities': [(7, 10, 'CUR')]}),
                  ('let as have ALL money', {'entities': [(12, 15, 'CUR')]}),
                  ('DZD is a dollar', {'entities': [(0, 3, 'CUR')]}),
                  ('money USD united states', {'entities': [(6, 9, 'CUR')]})
                  ]
# model = "en_core_web_lg"
model = None
output_dir=Path(r"D:\currency") # Path to save training model - create new empty directory
n_iter=100
#load the model
if model is not None:
nlp = spacy.load(model)
optimise = nlp.create_optimizer()
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank('en')
optimise = nlp.begin_training()
print("Created blank 'en' model")
#set up the pipeline
if 'ner' not in nlp.pipe_names:
ner = nlp.create_pipe('ner')
nlp.add_pipe('ner', last=True)
else:
ner = nlp.get_pipe('ner')
for _, annotations in TRAIN_DATA:
for ent in annotations.get('entities'):
ner.add_label(ent[2])
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
with nlp.disable_pipes(*other_pipes): # only train NER
optimizer = nlp.initialize()
# optimizer = optimise
for itn in range(n_iter):
random.shuffle(TRAIN_DATA)
losses = {}
for text, annotations in tqdm(TRAIN_DATA):
doc = nlp.make_doc(text)
example = Example.from_dict(doc, annotations)
nlp.update(
[example],
drop=0.5,
sgd=optimizer,
losses=losses)
print(losses)
for text, _ in TRAIN_DATA:
doc = nlp(text)
print('Entities', [(ent.text, ent.label_) for ent in doc.ents])
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
def test_model(text):
    nlp = spacy.load(r'D:\currency')
    for tex in text.split('\n'):
        doc = nlp(tex)
        for token in doc.ents:
            print(token.text, token.label_)
spacy_train_model() #Training the model
test_model('text') #Testing the model
Couple of thoughts here...
You can't train a model with only five examples. Maybe this is just example code and you have more, but you generally need hundreds of examples.
If you only need to recognize currency names like USD or GBP, use spaCy's rule-based matchers. You would only need an NER model if these are ambiguous somehow. Like if ALL is a currency, but you don't want to recognize it in "I ate ALL the donuts", an NER model can help, but that's a pretty hard distinction to learn, so you'll need hundreds of examples.
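For example, here is a minimal sketch of the rule-based route, assuming spaCy v3 (the currency_codes list below is just an illustrative subset; you would use your full list):
import spacy

# Match known ISO currency codes with an EntityRuler instead of a trained NER model.
nlp = spacy.blank("en")
ruler = nlp.add_pipe("entity_ruler")
currency_codes = ["AFN", "EUR", "ALL", "DZD", "USD", "INR"]
ruler.add_patterns([{"label": "CUR", "pattern": code} for code in currency_codes])

doc = nlp("I have AZWSQTS lot LOT of Indian MZW currency USD INR")
print([(ent.text, ent.label_) for ent in doc.ents])  # only USD and INR are tagged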
What is probably happening in your example is that the NER model has learned that any all-caps token is a currency. If you want to fix that with an NER model, you'll need to give it examples where an all-caps token isn't a currency to learn from.
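For instance, you could mix hypothetical negative examples into your training data, in the same annotation format you already use, where all-caps tokens are deliberately left unannotated:
# Hypothetical negative examples: all-caps tokens with no entity annotation,
# so the model learns that capitalization alone does not imply a currency.
TRAIN_DATA += [
    ('I ate ALL the donuts', {'entities': []}),
    ('Please send the report ASAP', {'entities': []}),
]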
I want to perform multi-GPU inference using TensorFlow/Keras.
This is my prediction code:
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
# Load a random image from the images folder
file_names = next(os.walk(IMAGE_DIR))[2]
image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))
# Run detection
results = model.detect([image], verbose=1)
# Visualize results
r = results[0]
Is there a way to run this model on multiple gpus?
Thanks in advance.
Increase the GPU_COUNT as per the number of GPUs in the system and pass the new config when creating the model using modellib.MaskRCNN.
class InferenceConfig(coco.CocoConfig):
    GPU_COUNT = 1  # increase the GPU count based on the number of GPUs
    IMAGES_PER_GPU = 1

config = InferenceConfig()
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
https://github.com/matterport/Mask_RCNN/blob/master/samples/demo.ipynb
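For example, a sketch assuming a machine with two GPUs and hypothetical images image_a and image_b: in this codebase the effective batch size is GPU_COUNT * IMAGES_PER_GPU, and model.detect() expects exactly that many images per call.
class InferenceConfig(coco.CocoConfig):
    GPU_COUNT = 2        # two GPUs available
    IMAGES_PER_GPU = 1   # effective batch size = GPU_COUNT * IMAGES_PER_GPU = 2

config = InferenceConfig()
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
model.load_weights(COCO_MODEL_PATH, by_name=True)

# detect() takes a list whose length equals the batch size and
# returns one result dict per input image
results = model.detect([image_a, image_b], verbose=1)
r0, r1 = results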
I want to use Lucid to analyze the feature extraction of a detection model I trained with the TensorFlow Object Detection API on my own dataset. The model used is one from the TensorFlow Object Detection Zoo, namely faster_rcnn_resnet101.
I followed the Lucid tutorial to import my own model and saved a frozen graph of the model with the node /all_class_predictions_with_background as output_node.
I'm having trouble finding the input node of the graph to make Lucid run on it.
Furthermore, I don't really think I have the right approach. Maybe I should first extract the classification part of the detection model and freeze a new graph with only that part before going to Lucid.
Or maybe I should just import a resnet_101 classification model and copy the correct weights from the detection model onto it?
But I don't really know how to do those kinds of things.
Can someone help me? I really want to try running Lucid on my detection network.
Yes, you should export an inference (frozen) graph to work with in Lucid.
I use the following script to export a graph from the training checkpoint files.
Useful information about the nodes in the exported file is logged to the console.
training_model="ssd_mnet_v2_ppn_512x288.config"
model_signature="eb_13_v09_ppmn2_13_256_adam_512x288_tf_1.14_200k"
# the specific checkpoint to export from
checkpoint_path="/TRAIN/models/model/train/model.ckpt-200000"
# directory to export into
output_path="/XYZ/graphs/${model_signature}"
# ensure these graph nodes are exported, and everything in between
additional_output_tensor_names="Preprocessor/sub,concat_1"
#
python export_inference_graph.py \
--input_type=image_tensor \
--pipeline_config_path /TRAIN/models/model/$training_model \
--trained_checkpoint_prefix=$checkpoint_path \
--output_directory=$output_path \
--additional_output_tensor_names=$additional_output_tensor_names
I found it convenient to make my own Lucid Model class, after reviewing the examples in the Lucid model zoo.
You have to examine your graph carefully as you need to specify the input node, and provide a list of layers that Lucid can work with.
from lucid.modelzoo.vision_base import Model, _layers_from_list_of_dicts
# the input node "Preprocessor/sub" is appropriate for image injection
class SSD_Mnet2_PPN( Model ):
    def __init__(self, image_shape=None, graph_path=None, labels_path=None ):
        self.model_path = graph_path
        self.labels_path = labels_path
        self.image_shape = image_shape
        self.image_value_range = (-1, 1)
        self.input_name = "Preprocessor/sub"
        super().__init__()
# a hand-crafted list of layers - by inspection of the graph
SSD_Mnet2_PPN.layers = _layers_from_list_of_dicts(SSD_Mnet2_PPN, [
{ 'id': 0, 'tags': ['conv'], 'name': 'FeatureExtractor/MobilenetV2/expanded_conv_2/add', 'depth': 24, 'shape': [ 1, 72, 128, 24 ], 'transform_id': 2 },
{ 'id': 2, 'tags': ['conv'], 'name': 'FeatureExtractor/MobilenetV2/expanded_conv_5/add', 'depth': 32, 'shape': [ 1, 36, 64, 32 ], 'transform_id': 2 },
{ 'id': 5, 'tags': ['conv'], 'name': 'FeatureExtractor/MobilenetV2/expanded_conv_9/add', 'depth': 64, 'shape': [ 1, 18, 32, 64 ], 'transform_id': 2 },
{ 'id': 7, 'tags': ['conv'], 'name': 'FeatureExtractor/MobilenetV2/expanded_conv_12/add', 'depth': 96, 'shape': [ 1, 18, 32, 96 ], 'transform_id': 2 },
{ 'id': 9, 'tags': ['conv'], 'name': 'FeatureExtractor/MobilenetV2/expanded_conv_15/add', 'depth': 160, 'shape': [ 1, 9, 16, 160 ], 'transform_id': 2 },
{ 'id': 11, 'tags': ['concat'], 'name': 'concat_1', 'depth': 13, 'shape': [ 1, 1212, 13 ], 'transform_id': 4 },
])
def model_for_version( version=None, path=None ):
    if "320x180" in version:
        return SSD_Mnet2_PPN( graph_path=path, image_shape=[ 320, 180, 3 ] )
    if "480x270" in version:
        return SSD_Mnet2_PPN( graph_path=path, image_shape=[ 480, 270, 3 ] )
    if "512x288" in version:
        return SSD_Mnet2_PPN( graph_path=path, image_shape=[ 512, 288, 3 ] )
    if "720x405" in version:
        return SSD_Mnet2_PPN( graph_path=path, image_shape=[ 720, 405, 3 ] )
    raise ValueError( "No model for graph_version: {}".format( version ) )
Then you can write code as follows:
from lucid.optvis import render
model = model_for_version(
version = "eb_13_v09_ppmn2_13_256_adam_512x288_tf_1.14",
path = "/XYZ/graphs/eb_13_v09_ppmn2_13_256_adam_512x288_tf_1.14_200k/frozen_inference_graph.pb"
)
model.load_graphdef()
_ = render.render_vis( model, "FeatureExtractor/MobilenetV2/expanded_conv_15/add:17", thresholds=( 32, 256, 1024 ) )
Inevitably, one has to experiment quite a bit.
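To find the input node and candidate layers in the first place, it can help to dump the op names from the frozen graph; a minimal sketch, assuming TensorFlow 1.x and that the path points at your exported frozen_inference_graph.pb:
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile("frozen_inference_graph.pb", "rb") as f:
    graph_def.ParseFromString(f.read())

# Print likely input placeholders, convolutions and concat nodes to inspect.
for node in graph_def.node:
    if node.op in ("Placeholder", "Conv2D", "ConcatV2"):
        print(node.op, node.name)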