TypeError: sample_chain() got an unexpected keyword argument 'seed' - TensorFlow 2.0

Error with TensorFlow 2.0 using MCMC on macOS 10.13.6
The error on the console:
2020-12-27 22:06:48.253835: I tensorflow/core/platform/cpu_feature_guard.cc:145] This TensorFlow binary is optimized with Intel(R) MKL-DNN to use the following CPU instructions in performance critical operations: SSE4.1 SSE4.2 AVX
To enable them in non-MKL-DNN operations, rebuild TensorFlow with the appropriate compiler flags.
2020-12-27 22:06:48.254353: I tensorflow/core/common_runtime/process_util.cc:115] Creating new thread pool with default inter op setting: 4. Tune using inter_op_parallelism_threads for best performance.
objc[69111]: Class zmAppHelper is implemented in both /Library/ScriptingAdditions/zOLPluginInjection.osax/Contents/MacOS/zOLPluginInjection (0x1a48eaf4f0) and /Library/Application Support/Microsoft/ZoomOutlookPlugin/zOutlookPlugin64.bundle/Contents/MacOS/zOutlookPlugin64 (0x1a490e0518). One of the two will be used. Which one is undefined.
objc[69111]: class `ERCalendarEventEditorWindowController' not linked into application
Traceback (most recent call last):
File "dc7.py", line 131, in <module>
chains, kernel_results = run_chain(initial_state)
File "/Users/ram/opt/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py", line 457, in __call__
result = self._call(*args, **kwds)
File "/Users/ram/opt/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py", line 503, in _call
self._initialize(args, kwds, add_initializers_to=initializer_map)
File "/Users/ram/opt/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py", line 408, in _initialize
*args, **kwds))
File "/Users/ram/opt/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py", line 1848, in _get_concrete_function_internal_garbage_collected
graph_function, _, _ = self._maybe_define_function(args, kwargs)
File "/Users/ram/opt/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py", line 2150, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/Users/ram/opt/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py", line 2041, in _create_graph_function
capture_by_value=self._capture_by_value),
File "/Users/ram/opt/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/framework/func_graph.py", line 915, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/Users/ram/opt/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py", line 358, in wrapped_fn
return weak_wrapped_fn().__wrapped__(*args, **kwds)
File "/Users/ram/opt/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/framework/func_graph.py", line 905, in wrapper
raise e.ag_error_metadata.to_exception(e)
TypeError: in converted code:
dc7.py:117 run_chain *
return tfp.mcmc.sample_chain(
TypeError: sample_chain() got an unexpected keyword argument 'seed'
Versions
MacOS 10.13.6 High Sierra
tensorflow 2.0.0 mkl_py37hda344b4_0
tensorflow-base 2.0.0 mkl_py37h66b1bf0_0
tensorflow-estimator 2.0.0 pyh2649769_0
tensorflow-probability 0.8.0 py_0 conda-forge
jupyter_client 6.1.7 py_0
jupyter_core 4.7.0 py37hecd8cb5_0
jupyterlab_pygments 0.1.2 py_0
ipython 7.19.0 py37h01d92e1_0
ipython_genutils 0.2.0 pyhd3eb1b0_1
python 3.7.9 h26836e1_0
python-dateutil 2.8.1 py_0
python_abi 3.7 1_cp37m conda-forge
The source code:
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
from pprint import pprint
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
#import tensorflow as tf
#print(tf.__version__)
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import tensorflow_probability as tfp
sns.reset_defaults()
sns.set_context(context = 'talk', font_scale = 0.7)
plt.rcParams['image.cmap'] = 'viridis'
#%matplotlib inline
tfd = tfp.distributions
tfb = tfp.bijectors
#### ============================================
#@title Utils { display-mode: "form" }
def print_subclasses_from_module(module, base_class, maxwidth=80):
    import functools, inspect, sys
    subclasses = [name for name, obj in inspect.getmembers(module)
                  if inspect.isclass(obj) and issubclass(obj, base_class)]
    def red(acc, x):
        if not acc or len(acc[-1]) + len(x) + 2 > maxwidth:
            acc.append(x)
        else:
            acc[-1] += ", " + x
        return acc
    print('\n'.join(functools.reduce(red, subclasses, [])))
# Generate some data
def f(x, w):
    # Pad x with 1's so we can add bias via matmul
    x = tf.pad(x, [[1, 0], [0, 0]], constant_values=1)
    linop = tf.linalg.LinearOperatorFullMatrix(w[..., np.newaxis])
    result = linop.matmul(x, adjoint=True)
    return result[..., 0, :]
num_features = 2
num_examples = 50
noise_scale = .5
true_w = np.array([-1., 2., 3.])
xs = np.random.uniform(-1., 1., [num_features, num_examples])
ys = f(xs, true_w) + np.random.normal(0., noise_scale, size=num_examples)
# Visualize the data set
plt.scatter(*xs, c=ys, s=100, linewidths=0)
grid = np.meshgrid(*([np.linspace(-1, 1, 100)] * 2))
xs_grid = np.stack(grid, axis=0)
fs_grid = f(xs_grid.reshape([num_features, -1]), true_w)
fs_grid = np.reshape(fs_grid, [100, 100])
plt.colorbar()
plt.contour(xs_grid[0, ...], xs_grid[1, ...], fs_grid, 20, linewidths=1)
plt.show()
### Sampling the noise scale
# Define the joint_log_prob function, and our unnormalized posterior.
def joint_log_prob(w, sigma, x, y):
    # Our model in maths is
    #   w ~ MVN([0, 0, 0], diag([1, 1, 1]))
    #   y_i ~ Normal(w @ x_i, noise_scale), i=1..N
    rv_w = tfd.MultivariateNormalDiag(
        loc=np.zeros(num_features + 1),
        scale_diag=np.ones(num_features + 1))
    rv_sigma = tfd.LogNormal(np.float64(1.), np.float64(5.))
    rv_y = tfd.Normal(f(x, w), sigma[..., np.newaxis])
    return (rv_w.log_prob(w) +
            rv_sigma.log_prob(sigma) +
            tf.reduce_sum(rv_y.log_prob(y), axis=-1))
# Create our unnormalized target density by currying x and y from the joint.
def unnormalized_posterior(w, sigma):
    return joint_log_prob(w, sigma, xs, ys)
# Create an HMC TransitionKernel
hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
    target_log_prob_fn=unnormalized_posterior,
    step_size=np.float64(.1),
    num_leapfrog_steps=4)
# Create a TransformedTransitionKernel
transformed_kernel = tfp.mcmc.TransformedTransitionKernel(
    inner_kernel=hmc_kernel,
    bijector=[tfb.Identity(),              # w
              tfb.Invert(tfb.Softplus())]) # sigma
# Apply a simple step size adaptation during burnin
@tf.function
def run_chain(initial_state, num_results=1000, num_burnin_steps=500):
    adaptive_kernel = tfp.mcmc.SimpleStepSizeAdaptation(
        transformed_kernel,
        num_adaptation_steps=int(.8 * num_burnin_steps),
        target_accept_prob=np.float64(.75))
    return tfp.mcmc.sample_chain(
        num_results=num_results,
        num_burnin_steps=num_burnin_steps,
        current_state=initial_state,
        kernel=adaptive_kernel,
        seed=(0, 1),
        trace_fn=lambda cs, kr: kr)
# Instead of a single set of initial w's, we create a batch of 8.
num_chains = 8
initial_state = [np.zeros([num_chains, num_features + 1]),
                 .54 * np.ones([num_chains], dtype=np.float64)]
chains, kernel_results = run_chain(initial_state)
r_hat = tfp.mcmc.potential_scale_reduction(chains)
print("Acceptance rate:", kernel_results.inner_results.inner_results.is_accepted.numpy().mean())
print("R-hat diagnostic (per w variable):", r_hat[0].numpy())
print("R-hat diagnostic (sigma):", r_hat[1].numpy())
w_chains, sigma_chains = chains

I was using incompatible versions of TensorFlow and TensorFlow Probability.
With the following versions, the above TypeError went away:
ipython 7.19.0 pypi_0 pypi
ipython-genutils 0.2.0 pypi_0 pypi
python 3.7.9 h26836e1_0
python-dateutil 2.8.1 pypi_0 pypi
tensorboard 2.4.0 pypi_0 pypi
tensorboard-plugin-wit 1.7.0 pypi_0 pypi
tensorflow 2.3.0 pypi_0 pypi
tensorflow-estimator 2.3.0 pypi_0 pypi
tensorflow-probability 0.11.0 pypi_0 pypi
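For reference, tfp.mcmc.sample_chain in tensorflow-probability 0.8.0 simply has no seed keyword argument; it was added in later releases, which is why upgrading makes the error disappear. If upgrading were not an option, a minimal workaround sketch (not the fix used above) would be to drop seed= from the call and seed the run globally instead:

# Workaround sketch for TFP 0.8: sample_chain takes no seed=, so seed globally.
tf.random.set_seed(0)

@tf.function
def run_chain(initial_state, num_results=1000, num_burnin_steps=500):
    adaptive_kernel = tfp.mcmc.SimpleStepSizeAdaptation(
        transformed_kernel,
        num_adaptation_steps=int(.8 * num_burnin_steps),
        target_accept_prob=np.float64(.75))
    return tfp.mcmc.sample_chain(
        num_results=num_results,
        num_burnin_steps=num_burnin_steps,
        current_state=initial_state,
        kernel=adaptive_kernel,   # no seed= here on TFP 0.8
        trace_fn=lambda cs, kr: kr)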

Related

AttributeError: 'DataFrame' object has no attribute '_data' [Not a duplicate]

I was trying to run main.py but it threw an AttributeError.
My Python version is Python 3.5. I am using the CNTK Docker release 2.6-cpu-python3.5. I cannot update the Python version because of CNTK. It only supports Python 3.5 and will only run in Ubuntu 16.04.
Pandas version: pandas==0.25.3
The Error
Traceback (most recent call last):
File "/workspace/main.py", line 5, in <module>
from model import extract_patches, score_patch, del_cache
File "/workspace/model.py", line 2, in <module>
from regressionModel import extract_features, predict_label
File "/workspace/regressionModel.py", line 26, in <module>
regression_model = read_model['model'][0]
File "/usr/local/lib/python3.5/dist-packages/pandas/core/frame.py", line 2898, in __getitem__
if self.columns.is_unique and key in self.columns:
File "/usr/local/lib/python3.5/dist-packages/pandas/core/generic.py", line 5063, in __getattr__
return object.__getattribute__(self, name)
File "pandas/_libs/properties.pyx", line 65, in pandas._libs.properties.AxisProperty.__get__
File "/usr/local/lib/python3.5/dist-packages/pandas/core/generic.py", line 5063, in __getattr__
return object.__getattribute__(self, name)
AttributeError: 'DataFrame' object has no attribute '_data'
main.py
import os
import flask
import numpy as np
from flask import jsonify, request
from model import extract_patches, score_patch, del_cache
app = flask.Flask(__name__)
@app.route('/url/<path:argument>')
def url(argument):
    # create a patch folder
    patch_path = './patches'
    if not os.path.exists(patch_path):
        os.mkdir(patch_path)
    # get image url from the query string
    imageURL = request.url.split('=', 1)[1]
    # extract patches from imageURL
    dimension, face_loc, image_dim = extract_patches(imageURL)
    # score each patch
    patch_score = score_patch(patch_path)
    # delete the downloaded image and the patches from local
    del_cache(patch_path)
    if os.path.exists('temp.jpg'):
        os.remove('temp.jpg')
    data = dict()
    data['patch_score'] = []
    for key in dimension:
        tmp = []
        tmp[:] = dimension[key]
        tmp.append(patch_score[key])
        data['patch_score'].append(tmp)
    data['image_score'] = round(np.mean(list(patch_score.values())), 2)
    data['face_loc'] = face_loc['face_loc']
    data['img_dim'] = image_dim
    return jsonify(patch_score=str(data['patch_score']), image_score=str(data['image_score']), face_loc=str(data['face_loc']), image_dim=str(data['img_dim']))
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=9580)  # port number can be changed in your case
model.py
import getPatches
from regressionModel import extract_features, predict_label
import os
import shutil
def extract_patches(imageURL):
    patch_path = './patches'
    dimension_dict = dict()
    face_dict = dict()
    image_dim = []
    try:
        dim, face, img = getPatches.extract_patches(imageURL, dimension_dict, face_dict, image_dim, patch_path)
        print("extract patches pass")
    except:
        print('cannot extract patches from the image')
    return dim, face, img
def score_patch(patch_path):
    patch_score = dict()
    for file in next(os.walk(patch_path))[2]:
        file_path = os.path.join(patch_path, file)
        score_features = extract_features(file_path)[0].flatten()  # extract features from CNTK pretrained model
        pred_score_label = predict_label(score_features)  # score the extracted features using trained regression model
        patch_score[file.split('.')[0]] = float("{0:.2f}".format(pred_score_label[0]))
    return patch_score
def infer_label(patch_score, label_mapping):
    max_score_name, max_score_value = max(patch_score.items(), key=lambda x: x[1])
    pred_label = label_mapping[round(max_score_value)-1]
    return pred_label
def del_cache(patch_folder):
    shutil.rmtree(patch_folder)
    return
regressionModel.py
import numpy as np
import pandas as pd
import cntk as C
from PIL import Image
import pickle
from cntk import load_model, combine
import cntk.io.transforms as xforms
from cntk.logging import graph
from cntk.logging.graph import get_node_outputs
pretrained_model = 'ResNet152_ImageNet_Caffe.model'
pretrained_node_name = 'pool5'
regression_model = 'cntk_regression.dat'
image_width = 224
image_height = 224
# load CNTK pretrained model
#model_file = os.path.join(pretrained_model_path, pretrained_model_name)
loaded_model = load_model(pretrained_model) # a full path is required
node_in_graph = loaded_model.find_by_name(pretrained_node_name)
output_nodes = combine([node_in_graph.owner])
# load the stored regression model
read_model = pd.read_pickle(regression_model)
regression_model = read_model['model'][0]
train_regression = pickle.loads(regression_model)
def extract_features(image_path):
    img = Image.open(image_path)
    resized = img.resize((image_width, image_height), Image.ANTIALIAS)
    bgr_image = np.asarray(resized, dtype=np.float32)[..., [2, 1, 0]]
    hwc_format = np.ascontiguousarray(np.rollaxis(bgr_image, 2))
    arguments = {loaded_model.arguments[0]: [hwc_format]}
    output = output_nodes.eval(arguments)
    return output
def predict_label(features):
    return train_regression.predict(features.reshape(1, -1))
https://pypi.org/project/cntk/#files has CNTK 2.7 for Python 3.6. Still an obsolete version, but not quite as obsolete.
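A note on the pandas error itself: 'DataFrame' object has no attribute '_data' typically means cntk_regression.dat was pickled by a different pandas version than the 0.25.3 used to read it, so the unpickled DataFrame's internals do not match what the installed pandas expects. A minimal workaround sketch, assuming you can still open the file with the pandas version that created it (the output file name below is just an example), is to re-export the embedded regression model without the DataFrame wrapper:

# Sketch: run this with the pandas version that originally wrote cntk_regression.dat.
import pandas as pd

read_model = pd.read_pickle('cntk_regression.dat')
with open('cntk_regression_plain.pkl', 'wb') as f:
    f.write(read_model['model'][0])   # the stored cell already holds pickled model bytes

regressionModel.py could then read those bytes back with open('cntk_regression_plain.pkl', 'rb').read() and pass them to pickle.loads as before, with no DataFrame (and no pandas version dependency) involved.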

raise ValueError('Image with id {} already added.'.format(image_id)) in Tensorflow object detection api

Image training works fine with ssd_mobilenet_v1_coco in the TensorFlow Object Detection API.
I get this error while testing:
File "/home/hipstudents/anaconda3/envs/tensorflow_gpuenv/lib/python3.6/site-packages/object_detection-0.1-py3.6.egg/object_detection/utils/object_detection_evaluation.py", line 203, in add_single_ground_truth_image_info
raise ValueError('Image with id {} already added.'.format(image_id))
Please help.
System Info:
What is the top-level directory of the model you are using: ~/
Have I written custom code (as opposed to using a stock example script provided in TensorFlow): Yes, written scripts to convert .xml files to tf record
OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Linux Ubuntu 16.04
TensorFlow installed from (source or binary): Compiled from source
TensorFlow version (use command below): 1.11.0
Bazel version (if compiling from source): 0.16.1
CUDA/cuDNN version: 9.0.176, cuDNN: 9.0
GPU model and memory: GeForce GTX1080Ti, 11GB
Exact command to reproduce: python eval.py --logtostderr --pipeline_config_path=training/ssd_mobilenet_v1_coco.config --checkpoint_dir=training/ --eval_dir=eval/
I created the dataset manually, then labeled it using labelImg. After labeling, I created a CSV file with the image annotations and file names, and then generated the TF records. I followed this tutorial: https://towardsdatascience.com/how-to-train-your-own-object-detector-with-tensorflows-object-detector-api-bec72ecfe1d9
My TFRecord generator for the training and test images:
"""
Usage:
# From tensorflow/models/
# Create train data:
python generate_tfrecord.py --csv_input=data/train_labels.csv --output_path=train.record
# Create test data:
python generate_tfrecord.py --csv_input=data/test_labels.csv --output_path=test.record
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import io
import pandas as pd
import tensorflow as tf
from PIL import Image
from object_detection.utils import dataset_util
from collections import namedtuple, OrderedDict
flags = tf.app.flags
flags.DEFINE_string('csv_input', '', 'Path to the CSV input')
flags.DEFINE_string('output_path', '', 'Path to output TFRecord')
FLAGS = flags.FLAGS
# TO-DO replace this with label map
def class_text_to_int(row_label):
    if row_label == 'Field':
        return 1
    else:
        None
def split(df, group):
    data = namedtuple('data', ['filename', 'object'])
    gb = df.groupby(group)
    return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]
def create_tf_example(group, path):
    with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = Image.open(encoded_jpg_io)
    width, height = image.size
    filename = group.filename.encode('utf8')
    image_format = b'jpg'
    xmins = []
    xmaxs = []
    ymins = []
    ymaxs = []
    classes_text = []
    classes = []
    for index, row in group.object.iterrows():
        xmins.append(row['xmin'] / width)
        xmaxs.append(row['xmax'] / width)
        ymins.append(row['ymin'] / height)
        ymaxs.append(row['ymax'] / height)
        classes_text.append(row['class'].encode('utf8'))
        classes.append(class_text_to_int(row['class']))
    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(filename),
        'image/source_id': dataset_util.bytes_feature(filename),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature(image_format),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
    }))
    return tf_example
def main(_):
    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
    path = os.path.join(os.getcwd(), 'Images')
    examples = pd.read_csv(FLAGS.csv_input)
    grouped = split(examples, 'filename')
    for group in grouped:
        tf_example = create_tf_example(group, path)
        writer.write(tf_example.SerializeToString())
    writer.close()
    output_path = os.path.join(os.getcwd(), FLAGS.output_path)
    print('Successfully created the TFRecords: {}'.format(output_path))
if __name__ == '__main__':
    tf.app.run()
In the ssd_mobilenet_v1_coco.config file, num_examples was set to 8000, but my test dataset only has 121 samples. I forgot to update that value and got a kind of error I couldn't find anything about on the Internet. As it is a silly mistake, probably very few people have made it, but this answer might help someone who makes the same one. I changed the following in the config file and the error was resolved:
eval_config: {
  # num of test images. In my case 121. Previously it was 8000.
  num_examples: 121
  # Note: The below line limits the evaluation process to 10 evaluations.
  # Remove the below line to evaluate indefinitely.
  max_evals: 10
}
In my case, the problem was that I'd included images multiple times when constructing tfrecord files. Though now obvious, I hadn't noticed that many categories of the Open Images Dataset share the same images (which would have the same id in the evaluation, thus the error...). Once I'd corrected the algorithm creating the tfrecords, the error was gone.
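If you suspect the same cause, a quick way to confirm it is to count how often each image/source_id occurs in the generated record file. A small sketch, using the TF 1.x API from the question (the record file name is just an example):

# Sketch: count duplicate image/source_id entries in a TFRecord.
import tensorflow as tf
from collections import Counter

ids = Counter()
for record in tf.python_io.tf_record_iterator('test.record'):
    example = tf.train.Example.FromString(record)
    ids[example.features.feature['image/source_id'].bytes_list.value[0]] += 1

print({k: v for k, v in ids.items() if v > 1})   # any entry here triggers the error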
I solved the problem with the help of this article: https://www.coder.work/article/3120495
by just adding two lines:
eval_config {
  num_examples: 50
  use_moving_averages: false
  metrics_set: "coco_detection_metrics"
}

The iris tutorial on TensorFlow's website does not work

The code is shown below, and the error message follows it:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import urllib.request
import tensorflow as tf
import numpy as np
IRIS_TRAINING = "iris_training.csv"
IRIS_TRAINING_URL = "http://download.tensorflow.org/data/iris_training.csv"
IRIS_TEST = "iris_test.csv"
IRIS_TEST_RRL = "http://download.tensorflow.org/data/iris_test.csv"
if not os.path.exists(IRIS_TRAINING):
    raw = urllib.request.urlopen(IRIS_TRAINING_URL).read()
    with open(IRIS_TRAINING, 'w') as f:
        f.write(raw)
if not os.path.exists(IRIS_TEST):
    raw = urllib.request.urlopen(IRIS_TEST_RRL).read()
    with open(IRIS_TEST, 'w') as f:
        f.write(raw)
# Load datasets.
training_set = tf.contrib.learn.datasets.base.load_csv_without_header(
    filename=IRIS_TRAINING,
    target_dtype=np.int,
    features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_without_header(
    filename=IRIS_TEST,
    target_dtype=np.int,
    features_dtype=np.float32
)
# Specify that all features have real_valued data
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]
# Build 3 layers DNN with 10, 20, 10 units respectively.
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
                                            hidden_units=[10, 20, 30],
                                            n_class=3,
                                            model_dir="/tem/iris_model")
# Define the training inputs
def get_train_inputs():
    x = tf.constant(training_set.data)
    y = tf.constant(training_set.target)
    return x, y
# Fit model
classifier.fit(input_fn=get_train_inputs(), steps=2000)
# Define the test inputs
def get_test_inputs():
    x = tf.constant(test_set.data)
    y = tf.constant(test_set.target)
    return x, y
# Evaluate accuracy
accuracy_score = classifier.evaluate(input_fn=get_test_inputs(), steps=1)["accuracy"]
print("\nTest Accuracy: {0:f}\n".format(accuracy_score))
This prints the following stack-trace:
Traceback (most recent call last):
File "/home/skyfacon/PycharmProjects/LinearFitting/IrisClassification.py", line 35, in <module>
features_dtype=np.float32
File "/home/skyfacon/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py", line 69, in load_csv_without_header
data.append(np.asarray(row, dtype=features_dtype))
File "/home/skyfacon/anaconda3/envs/tensorflow/lib/python3.6/site-packages/numpy/core/numeric.py", line 531, in asarray
return array(a, dtype, copy=False, order=order)
ValueError: could not convert string to float: 'setosa'
Process finished with exit code 1
I would like to know which page you are using as the tutorial, because the first page that comes up when searching Google is this:
https://www.tensorflow.org/get_started/tflearn
The difference between that page and what you posted is tf.contrib.learn.datasets.base.load_csv_with_header versus tf.contrib.learn.datasets.base.load_csv_without_header.
The iris data at the URL you specified contains a header row, and you are trying to load it as a file without a header. Hence the strings in the header cannot be converted to float, which causes the error.
Change your code to:
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
    filename=IRIS_TRAINING,
    target_dtype=np.int,
    features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
    filename=IRIS_TEST,
    target_dtype=np.int,
    features_dtype=np.float32)
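Unrelated to the header issue: urllib.request.urlopen(...).read() returns bytes, so the download block in the question will fail once it actually runs, because the files are opened in text mode. A small sketch of the same step with the files opened in binary mode:

import os
import urllib.request

IRIS_TRAINING = "iris_training.csv"
IRIS_TRAINING_URL = "http://download.tensorflow.org/data/iris_training.csv"

if not os.path.exists(IRIS_TRAINING):
    raw = urllib.request.urlopen(IRIS_TRAINING_URL).read()   # bytes, not str
    with open(IRIS_TRAINING, 'wb') as f:                     # so open in binary mode
        f.write(raw)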

Tflearn KeyError [] not in index

I have this code. I think I'm doing something wrong with how I feed the data into the neural net (the neural net is only provisional).
Here is my code:
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import tflearn
data = pd.read_csv('winequality-white.csv', sep=';')
X = data[['fixed acidity',
'volatile acidity',
'citric acid',
'residual sugar',
'chlorides',
'free sulfur dioxide',
'total sulfur dioxide',
'density',
'pH',
'sulphates',
'alcohol']]
y = data['quality']
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.1)
# Build neural network
net = tflearn.input_data(shape=[None, 11])
net = tflearn.fully_connected(net, 110)
net = tflearn.regression(net)
# Define model
model = tflearn.DNN(net)
# Start training (apply gradient descent algorithm)
model.fit(X_train, y_train, n_epoch=10, batch_size=1, show_metric=True)
The data I use looks like this:
"fixed acidity";"volatile acidity";"citric acid";"residual sugar";"chlorides";"free sulfur dioxide";"total sulfur dioxide";"density";"pH";"sulphates";"alcohol";"quality"
7;0.27;0.36;20.7;0.045;45;170;1.001;3;0.45;8.8;6
6.3;0.3;0.34;1.6;0.049;14;132;0.994;3.3;0.49;9.5;6
8.1;0.28;0.4;6.9;0.05;30;97;0.9951;3.26;0.44;10.1;6
7.2;0.23;0.32;8.5;0.058;47;186;0.9956;3.19;0.4;9.9;6
7.2;0.23;0.32;8.5;0.058;47;186;0.9956;3.19;0.4;9.9;6
8.1;0.28;0.4;6.9;0.05;30;97;0.9951;3.26;0.44;10.1;6
6.2;0.32;0.16;7;0.045;30;136;0.9949;3.18;0.47;9.6;6
...
This is the error I get:
=== RESTART: /home/tux/Schreibtisch/Projects/Wineclassifier/classifier.py ===
hdf5 is not supported on this machine (please install/reinstall h5py for optimal experience)
---------------------------------
Run id: Q6AJQP
Log directory: /tmp/tflearn_logs/
---------------------------------Exception in thread Thread-3:
Traceback (most recent call last):
File "/usr/lib/python3.5/threading.py", line 914, in _bootstrap_inner
self.run()
File "/usr/lib/python3.5/threading.py", line 862, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.5/dist-packages/tflearn/data_flow.py", line 187, in fill_feed_dict_queue
data = self.retrieve_data(batch_ids)
File "/usr/local/lib/python3.5/dist-packages/tflearn/data_flow.py", line 222, in retrieve_data
utils.slice_array(self.feed_dict[key], batch_ids)
File "/usr/local/lib/python3.5/dist-packages/tflearn/utils.py", line 187, in slice_array
return X[start]
File "/usr/local/lib/python3.5/dist-packages/pandas/core/frame.py", line 2056, in __getitem__
return self._getitem_array(key)
File "/usr/local/lib/python3.5/dist-packages/pandas/core/frame.py", line 2100, in _getitem_array
indexer = self.loc._convert_to_indexer(key, axis=1)
File "/usr/local/lib/python3.5/dist-packages/pandas/core/indexing.py", line 1231, in _convert_to_indexer
raise KeyError('%s not in index' % objarr[mask])
KeyError: '[3374] not in index'
Training samples: 4408
Validation samples: 0
--
Does somebody have a clue what's wrong?
Place the following code at the beginning of your script to reset the graph. If you run the code interactively in an IPython environment, the TensorFlow graph can get really messy.
import tensorflow as tf
tf.reset_default_graph()
Many people are in the same shoes as you. Refer to this GitHub issue for more detail.
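The KeyError itself comes from tflearn slicing the training data by integer position, which the pandas DataFrame/Series returned by train_test_split does not support; converting the inputs to NumPy arrays before calling fit avoids it. A minimal sketch, assuming the final layer is reduced to a single output so it matches the 1-D quality target:

# Sketch: feed NumPy arrays to tflearn and use a single-output final layer.
import pandas as pd
import tflearn
from sklearn.model_selection import train_test_split

data = pd.read_csv('winequality-white.csv', sep=';')
X = data.drop('quality', axis=1).values.astype('float32')
y = data['quality'].values.reshape(-1, 1).astype('float32')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)

net = tflearn.input_data(shape=[None, 11])
net = tflearn.fully_connected(net, 110, activation='relu')
net = tflearn.fully_connected(net, 1)      # single regression output
net = tflearn.regression(net)
model = tflearn.DNN(net)
model.fit(X_train, y_train, n_epoch=10, batch_size=16, show_metric=True)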

Installing distributed TensorFlow on Mac

Could someone please provide the link which shows how to do the above? I tried all the relevant places, but I could not find the procedure. If I just install TF for Mac as described on the TF website, does that give me the distributed version by default?
---After installing the GPU version of TF and running the test script given in the answer---
(tensorflow) acbc32a44fc1:~ z001jly$ python test.py
I tensorflow/stream_executor/dso_loader.cc:108] successfully opened CUDA library libcublas.dylib locally
I tensorflow/stream_executor/dso_loader.cc:108] successfully opened CUDA library libcudnn.dylib locally
I tensorflow/stream_executor/dso_loader.cc:108] successfully opened CUDA library libcufft.dylib locally
I tensorflow/stream_executor/dso_loader.cc:108] successfully opened CUDA library libcuda.1.dylib locally
I tensorflow/stream_executor/dso_loader.cc:108] successfully opened CUDA library libcurand.dylib locally
Traceback (most recent call last):
File "test.py", line 2, in <module>
import tensorflow as tf
File "/Users/z001jly/anaconda/lib/python2.7/site-packages/tensorflow/__init__.py", line 23, in <module>
from tensorflow.python import *
File "/Users/z001jly/anaconda/lib/python2.7/site-packages/tensorflow/python/__init__.py", line 48, in <module>
from tensorflow.python import pywrap_tensorflow
File "/Users/z001jly/anaconda/lib/python2.7/site-packages/tensorflow/python/pywrap_tensorflow.py", line 28, in <module>
_pywrap_tensorflow = swig_import_helper()
File "/Users/z001jly/anaconda/lib/python2.7/site-packages/tensorflow/python/pywrap_tensorflow.py", line 24, in swig_import_helper_mod = imp.load_module('_pywrap_tensorflow', fp, pathname, description)
ImportError: dlopen(/Users/z001jly/anaconda/lib/python2.7/site-packages/tensorflow/python/_pywrap_tensorflow.so, 10): Library not loaded: #rpath/libcudart.7.5.dylib
Referenced from: /Users/z001jly/anaconda/lib/python2.7/site-packages/tensorflow/python/_pywrap_tensorflow.so
Reason: image not found
The script runs successfully if I use it with the CPU version of TF.
Distributed support is part of the official TensorFlow binaries. You could run the script below to check whether it works; you should see "Success".
import subprocess
import tensorflow as tf
import time
import sys
flags = tf.flags
flags.DEFINE_string("port1", "12222", "port of worker1")
flags.DEFINE_string("port2", "12223", "port of worker2")
flags.DEFINE_string("task", "", "internal use")
FLAGS = flags.FLAGS
# setup local cluster from flags
host = "127.0.0.1:"
cluster = {"worker": [host+FLAGS.port1, host+FLAGS.port2]}
clusterspec = tf.train.ClusterSpec(cluster).as_cluster_def()
def run():
    dtype = tf.int32
    params_size = 1
    with tf.device("/job:worker/task:0"):
        params = tf.get_variable("params", [params_size], dtype,
                                 initializer=tf.zeros_initializer)
    with tf.device("/job:worker/task:1"):
        update_variable = tf.get_variable("update_variable", [params_size], dtype,
                                          initializer=tf.ones_initializer)
        add_op = params.assign_add(update_variable)
    init_op = tf.initialize_all_variables()
    # launch distributed service
    def runcmd(cmd): subprocess.Popen(cmd, shell=True, stderr=subprocess.STDOUT)
    runcmd("python "+sys.argv[0]+" --task=0")
    runcmd("python "+sys.argv[0]+" --task=1")
    time.sleep(1)
    sess = tf.Session("grpc://"+host+FLAGS.port1)
    sess.run(init_op)
    print("Adding 1 on %s to variable on %s"%(update_variable.device,
                                              params.device))
    result = sess.run(add_op)
    if result == [1]:
        print("Success")
if __name__ == '__main__':
    if not FLAGS.task:
        run()
    else:  # Launch TensorFlow server
        server = tf.train.Server(clusterspec,
                                 job_name="worker",
                                 task_index=int(FLAGS.task),
                                 config=tf.ConfigProto(log_device_placement=True))
        server.join()
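As for the separate dlopen failure above (Library not loaded: @rpath/libcudart.7.5.dylib): it means the GPU build cannot locate the CUDA runtime dylibs, so the import itself fails. Before debugging library paths, a quick sanity check (a sketch, assuming a TensorFlow build that exposes device_lib) is to list which devices the installed binary can actually see:

# Sketch: list local devices; a CPU-only wheel will show only the CPU device,
# while the GPU wheel needs the CUDA dylibs findable at load time just to import.
from tensorflow.python.client import device_lib

print([d.name for d in device_lib.list_local_devices()])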