Custom keras loss function using scipy - tensorflow

I would like to implement a custom loss function, and I am using TensorFlow with the Keras backend.
In my loss function, for each training sample (a 2D matrix of size 2048x192) I would like to add a bandpassed version of the corresponding training sample as a constant (non-trainable) value.
I implemented the bandpass filter based on How to implement band-pass Butterworth filter with Scipy.signal.butter:
from scipy.signal import butter, sosfiltfilt

def butter_bandpass_sos(lowcut, highcut, fs, order=5):
    nyq = 0.5 * fs
    low = lowcut / nyq
    high = highcut / nyq
    sos = butter(order, [low, high], analog=False, btype='band', output='sos')
    return sos

def butter_bandpass_filter_sos(data, lowcut, highcut, fs, order=5):
    sos = butter_bandpass_sos(lowcut, highcut, fs, order=order)
    y = sosfiltfilt(sos, data)
    return y
and for the loss function, based on Adding a constant to Loss function in Tensorflow, I implemented:
from tensorflow.python.keras import backend
import tensorflow as tf

lowcut = 2.5e6
highcut = 7.5e6
order = 5
fs = 40e6

def HP_func(mat):
    for i in range(0, 192):
        RF_ch = mat[:, i]
        y = butter_bandpass_filter_sos(RF_ch, lowcut, highcut, fs, order=order)
        mat_band_sos[:, i] = y
    return mat_band_sos

def my_custom_loss_HF(y_true, y_pred):
    HF_mat = HP_func(y_true)
    loss = backend.sqrt(tf.keras.losses.mean_squared_error(y_true, y_pred)) + HF_mat
    return loss
I have three branches and therefore three losses:
model.compile(loss=['mean_squared_error', my_custom_loss_HF, 'mean_squared_error'],
              loss_weights=[1.0, 1.0, 1.0],
              optimizer='Adam',
              metrics=['mae', rmse])
but I am getting this error:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-19-01efc0717223>", line 6, in <module>
metrics=['mae', rmse])
File "/home/z003zpjj/venv/lib/python3.6/site-packages/tensorflow/python/training/tracking/base.py", line 457, in _method_wrapper
result = method(self, *args, **kwargs)
File "/home/z003zpjj/venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 337, in compile
self._compile_weights_loss_and_weighted_metrics()
File "/home/z003zpjj/venv/lib/python3.6/site-packages/tensorflow/python/training/tracking/base.py", line 457, in _method_wrapper
result = method(self, *args, **kwargs)
File "/home/z003zpjj/venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 1710, in _compile_weights_loss_and_weighted_metrics
self.total_loss = self._prepare_total_loss(masks)
File "/home/z003zpjj/venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 1770, in _prepare_total_loss
per_sample_losses = loss_fn.call(y_true, y_pred)
File "/home/z003zpjj/venv/lib/python3.6/site-packages/tensorflow/python/keras/losses.py", line 215, in call
return self.fn(y_true, y_pred, **self._fn_kwargs)
File "<ipython-input-18-a4f1cf924d3f>", line 3, in my_custom_loss_HF
HF_mat = HP_func(y_true)
File "<ipython-input-17-74a2f0e736b9>", line 19, in HP_func
y = butter_bandpass_filter_sos(RF_ch, lowcut, highcut, fs, order=order)
File "<ipython-input-2-4e34aa35b4cd>", line 69, in butter_bandpass_filter_sos
y = sosfiltfilt(sos, data)
File "/home/z003zpjj/venv/lib/python3.6/site-packages/scipy/signal/signaltools.py", line 4131, in sosfiltfilt
x = _validate_x(x)
File "/home/z003zpjj/venv/lib/python3.6/site-packages/scipy/signal/signaltools.py", line 3926, in _validate_x
raise ValueError('x must be at least 1D')
ValueError: x must be at least 1D
How can I use the scipy function in my loss function?

You need to rewrite the function to work with tensors; otherwise it has no gradient and the network can't be trained.
You can only use functions that operate on tensors, like the Keras backend ops or TensorFlow ops.
Your function even contains a Python for loop; keep that kind of logic out of a loss function.
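One possible way around this (a sketch, not a definitive implementation): design the band-pass filter once with scipy outside the graph, then apply it inside the loss with differentiable tensor ops. This swaps the zero-phase IIR sosfiltfilt for a linear-phase FIR approximation applied via tf.nn.conv1d; num_taps and the way the filtered signal is reduced to match the RMSE term are illustrative assumptions.

import tensorflow as tf
from scipy.signal import firwin

lowcut, highcut, fs = 2.5e6, 7.5e6, 40e6
num_taps = 129  # assumption: FIR length chosen for illustration
nyq = 0.5 * fs

# Design a linear-phase FIR band-pass once with scipy (plain numpy,
# outside the graph) and bake it into the graph as a constant kernel.
taps = firwin(num_taps, [lowcut / nyq, highcut / nyq], pass_zero=False)
kernel = tf.constant(taps.reshape(num_taps, 1, 1), dtype=tf.float32)

def bandpass_tf(mat):
    # mat: (batch, 2048, 192); filter each channel along the sample axis.
    x = tf.transpose(mat, [0, 2, 1])   # (batch, 192, 2048)
    x = tf.reshape(x, [-1, 2048, 1])   # (batch*192, 2048, 1)
    y = tf.nn.conv1d(x, kernel, stride=1, padding='SAME')
    y = tf.reshape(y, [-1, 192, 2048])
    return tf.transpose(y, [0, 2, 1])  # back to (batch, 2048, 192)

def my_custom_loss_HF(y_true, y_pred):
    rmse = tf.sqrt(tf.keras.losses.mean_squared_error(y_true, y_pred))
    hf = bandpass_tf(y_true)
    # assumption: average the filtered channels so the shapes match the RMSE term
    return rmse + tf.reduce_mean(tf.abs(hf), axis=2)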


What do I need in order to save animation videos from matplotlib in mp3 format?

I am using python3.8 on Linux Mint 19.3, and I am trying to save an animation created by a cellular automata model in matplotlib. My actual code for the model is private, but it uses the same code for saving the animation as the code shown below, which is a slight modification of one of the examples shown in the official matplotlib documentation:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

def f(x, y):
    return np.sin(x) + np.cos(y)

x = np.linspace(0, 2 * np.pi, 120)
y = np.linspace(0, 2 * np.pi, 100).reshape(-1, 1)

fig, ax = plt.subplots()
ims = []
for i in range(60):
    x += np.pi / 15.
    y += np.pi / 20.
    im = ax.imshow(f(x, y), animated=True)
    if i == 0:
        ax.imshow(f(x, y))  # show an initial one first
    ims.append([im])

ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,
                                repeat_delay=1000)

# To save the animation, use e.g.
#
# ani.save("movie.mp4")
#
# or
#
writer = animation.FFMpegWriter(fps=15, metadata=dict(artist='Me'), bitrate=1800)
ani.save("movie.mp3", writer=writer)
When executed, the code produces this error:
MovieWriter stderr:
Output file #0 does not contain any stream
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/matplotlib/animation.py", line 234, in saving
yield self
File "/usr/local/lib/python3.8/dist-packages/matplotlib/animation.py", line 1093, in save
writer.grab_frame(**savefig_kwargs)
File "/usr/local/lib/python3.8/dist-packages/matplotlib/animation.py", line 351, in grab_frame
self.fig.savefig(self._proc.stdin, format=self.frame_format,
File "/usr/local/lib/python3.8/dist-packages/matplotlib/figure.py", line 3046, in savefig
self.canvas.print_figure(fname, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/matplotlib/backend_bases.py", line 2319, in print_figure
result = print_method(
File "/usr/local/lib/python3.8/dist-packages/matplotlib/backend_bases.py", line 1648, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/matplotlib/_api/deprecation.py", line 415, in wrapper
return func(*inner_args, **inner_kwargs)
File "/usr/local/lib/python3.8/dist-packages/matplotlib/backends/backend_agg.py", line 486, in print_raw
fh.write(renderer.buffer_rgba())
BrokenPipeError: [Errno 32] Broken pipe
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/justin/animation_test.py", line 36, in <module>
ani.save("movie.mp3", writer=writer)
File "/usr/local/lib/python3.8/dist-packages/matplotlib/animation.py", line 1093, in save
writer.grab_frame(**savefig_kwargs)
File "/usr/lib/python3.8/contextlib.py", line 131, in __exit__
self.gen.throw(type, value, traceback)
File "/usr/local/lib/python3.8/dist-packages/matplotlib/animation.py", line 236, in saving
self.finish()
File "/usr/local/lib/python3.8/dist-packages/matplotlib/animation.py", line 342, in finish
self._cleanup() # Inline _cleanup() once cleanup() is removed.
File "/usr/local/lib/python3.8/dist-packages/matplotlib/animation.py", line 373, in _cleanup
raise subprocess.CalledProcessError(
subprocess.CalledProcessError: Command '['ffmpeg', '-f', 'rawvideo', '-vcodec', 'rawvideo', '-s', '640x480', '-pix_fmt', 'rgba', '-r', '15', '-loglevel', 'error', '-i', 'pipe:', '-vcodec', 'h264', '-pix_fmt', 'yuv420p', '-b', '1800k', '-metadata', 'artist=Me', '-y', 'movie.mp3']' returned non-zero exit status 1.
I have looked at posts on similar queries concerning matplotlib animations, but none have specifically included the error Output file #0 does not contain any stream. I have little experience with ffmpeg, so I am wondering what might be missing.
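A likely culprit, for what it's worth: .mp3 is an audio-only container, so ffmpeg has no video stream to write into it, which is exactly what Output file #0 does not contain any stream is saying. Saving to a video container such as .mp4 should work with the same writer; a minimal sketch:

writer = animation.FFMpegWriter(fps=15, metadata=dict(artist='Me'), bitrate=1800)
ani.save("movie.mp4", writer=writer)  # video container instead of audio-only .mp3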

How to avoid set_index on a pre-sorted DataFrame constructed with from_delayed?

I am trying to get the expression df.resample('1T', how='mean').sum() to work in Dask, but I am running into an issue where it seems like Dask needs me to explicitly call set_index on the DataFrame before performing resample. I get an error as below...
>>> c.gather(df).compute()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/site-packages/distributed/client.py", line 1508, in gather
asynchronous=asynchronous)
File "/usr/local/lib/python2.7/site-packages/distributed/client.py", line 615, in sync
return sync(self.loop, func, *args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/distributed/utils.py", line 253, in sync
six.reraise(*error[0])
File "/usr/local/lib/python2.7/site-packages/distributed/utils.py", line 238, in f
result[0] = yield make_coro()
File "/usr/local/lib64/python2.7/site-packages/tornado/gen.py", line 1055, in run
value = future.result()
File "/usr/local/lib64/python2.7/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "/usr/local/lib64/python2.7/site-packages/tornado/gen.py", line 1063, in run
yielded = self.gen.throw(*exc_info)
File "/usr/local/lib/python2.7/site-packages/distributed/client.py", line 1385, in _gather
traceback)
File "/usr/local/lib/python2.7/site-packages/dask/dataframe/core.py", line 1633, in resample
return _resample(self, rule, how=how, closed=closed, label=label)
File "/usr/local/lib/python2.7/site-packages/dask/dataframe/tseries/resample.py", line 33, in _resample
return getattr(resampler, how)()
File "/usr/local/lib/python2.7/site-packages/dask/dataframe/tseries/resample.py", line 151, in mean
return self._agg('mean')
File "/usr/local/lib/python2.7/site-packages/dask/dataframe/tseries/resample.py", line 126, in _agg
meta_r = self.obj._meta_nonempty.resample(self._rule, **self._kwargs)
File "/usr/local/lib64/python2.7/site-packages/pandas/core/generic.py", line 7104, in resample
base=base, key=on, level=level)
File "/usr/local/lib64/python2.7/site-packages/pandas/core/resample.py", line 1148, in resample
return tg._get_resampler(obj, kind=kind)
File "/usr/local/lib64/python2.7/site-packages/pandas/core/resample.py", line 1276, in _get_resampler
"but got an instance of %r" % type(ax).__name__)
TypeError: Only valid with DatetimeIndex, TimedeltaIndex or PeriodIndex, but got an instance of 'Index'
Below is the Python code I am using. Since the pandas DFs being returned by my delayed objects were already timestamp-indexed, my expectation was for Dask to infer/construct an index from those DFs' timestamp indices instead of me having to explicitly set one, although I am unsure how an explicit set_index could be called in this case (what arguments would be passed?). Setting a pd.DatetimeIndex on the meta dataframe (the commented line below) works. Is constructing the index by hand and feeding it to meta the only realistic way to do this? Am I missing something?
#! /usr/bin/env python
# Start dask scheduler and workers
# dask-scheduler &
# dask-worker --nthreads 1 --nprocs 6 --memory-limit 3GB localhost:8786 --local-directory /dev/shm &

from dask.distributed import Client
from dask.delayed import delayed
import pandas as pd
import numpy as np
import dask.dataframe as dd
import time

c = Client('127.0.0.1:8786')

def load(epoch):
    # 1525132800 - 1/5
    # 1527811200 - 1/6
    num_ts = 100
    idx = []
    for ts in range(0, 86400, 15):
        idx.append(epoch + ts)
    d = np.random.rand(86400/15, num_ts)
    ts = []
    for i in range(0, num_ts):
        # tsname = "ts_%s_%s" % (i, epoch)
        tsname = "ts_%s" % (i)
        ts.append(tsname)
        gts.append(tsname)
    res = pd.DataFrame(index=idx, data=d, columns=ts, dtype=np.float64)
    res.index = pd.to_datetime(arg=res.index, unit='s')
    return res

gts = []
load(1525132800)
print time.time()
i = pd.DatetimeIndex(start=1525132800, freq='15S', end=1527811185, dtype='datetime64[s]')
# meta = pd.DataFrame(index=i, data=[], columns=gts, dtype=np.float64)
meta = pd.DataFrame(index=[], data=[], columns=gts, dtype=np.float64)
dfs = [delayed(load)(fn) for fn in range(1525132800, 1527811200, 86400)]
print time.time()
df = dd.from_delayed(dfs, meta, 'sorted')
print time.time()
df.npartitions
df.divisions
print time.time()
df = c.submit(dd.DataFrame.resample, df, rule='1T', how='mean')
print time.time()
#df = c.submit(dd.DataFrame.sum, df, axis=1)
print time.time()
c.gather(df).compute()
print time.time()
#c.gather(df).visualize(filename='/usr/share/nginx/html/svg/df4.svg')
Dask uses the meta of a data-frame to infer the data types before computing any of the chunks of data. In your case, your chunks contain datetime indexes, but the meta doesn't. The meta should be a zero-length version of the data:
meta = pd.DataFrame(index=i[:0], data=[], columns=gts, dtype=np.float64)
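For reference, a minimal sketch of building such a zero-length meta from scratch (reusing the gts and dfs variables from the question's code):

import pandas as pd
import numpy as np
import dask.dataframe as dd

# Zero rows, but with a DatetimeIndex so it mirrors the real partitions.
empty_idx = pd.DatetimeIndex([])
meta = pd.DataFrame(index=empty_idx, columns=gts, dtype=np.float64)
df = dd.from_delayed(dfs, meta=meta)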

The iris tutorial on TensorFlow's website does not work well

The code is shown below, and the error message is also shown below:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import urllib.request

import tensorflow as tf
import numpy as np

IRIS_TRAINING = "iris_training.csv"
IRIS_TRAINING_URL = "http://download.tensorflow.org/data/iris_training.csv"
IRIS_TEST = "iris_test.csv"
IRIS_TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"

if not os.path.exists(IRIS_TRAINING):
    raw = urllib.request.urlopen(IRIS_TRAINING_URL).read()
    with open(IRIS_TRAINING, 'wb') as f:
        f.write(raw)

if not os.path.exists(IRIS_TEST):
    raw = urllib.request.urlopen(IRIS_TEST_URL).read()
    with open(IRIS_TEST, 'wb') as f:
        f.write(raw)

# Load datasets.
training_set = tf.contrib.learn.datasets.base.load_csv_without_header(
    filename=IRIS_TRAINING,
    target_dtype=np.int,
    features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_without_header(
    filename=IRIS_TEST,
    target_dtype=np.int,
    features_dtype=np.float32)

# Specify that all features have real-valued data.
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]

# Build a 3-layer DNN with 10, 20, 30 units respectively.
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
                                            hidden_units=[10, 20, 30],
                                            n_classes=3,
                                            model_dir="/tmp/iris_model")

# Define the training inputs.
def get_train_inputs():
    x = tf.constant(training_set.data)
    y = tf.constant(training_set.target)
    return x, y

# Fit model.
classifier.fit(input_fn=get_train_inputs, steps=2000)

# Define the test inputs.
def get_test_inputs():
    x = tf.constant(test_set.data)
    y = tf.constant(test_set.target)
    return x, y

# Evaluate accuracy.
accuracy_score = classifier.evaluate(input_fn=get_test_inputs, steps=1)["accuracy"]
print("\nTest Accuracy: {0:f}\n".format(accuracy_score))
This prints the following stack-trace:
Traceback (most recent call last):
File "/home/skyfacon/PycharmProjects/LinearFitting/IrisClassification.py", line 35, in <module>
features_dtype=np.float32
File "/home/skyfacon/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py", line 69, in load_csv_without_header
data.append(np.asarray(row, dtype=features_dtype))
File "/home/skyfacon/anaconda3/envs/tensorflow/lib/python3.6/site-packages/numpy/core/numeric.py", line 531, in asarray
return array(a, dtype, copy=False, order=order)
ValueError: could not convert string to float: 'setosa'
Process finished with exit code 1
I would like to know which page you are using as the tutorial for this, because the first page that comes up when searching Google is this:
https://www.tensorflow.org/get_started/tflearn
The difference between that page and what you posted is tf.contrib.learn.datasets.base.load_csv_with_header versus tf.contrib.learn.datasets.base.load_csv_without_header.
The iris data at the URL you specified contains a header row, and you are trying to load it as a file without a header. Hence the strings in the header cannot be converted to float, and you get the error.
Change your code to:
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
    filename=IRIS_TRAINING,
    target_dtype=np.int,
    features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
    filename=IRIS_TEST,
    target_dtype=np.int,
    features_dtype=np.float32)

Tflearn KeyError [] not in index

I have this code; I think I'm doing something wrong with the input of the data into the neural net (the neural net is only provisional).
Here is my code:
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import tflearn

data = pd.read_csv('winequality-white.csv', sep=';')
X = data[['fixed acidity',
          'volatile acidity',
          'citric acid',
          'residual sugar',
          'chlorides',
          'free sulfur dioxide',
          'total sulfur dioxide',
          'density',
          'pH',
          'sulphates',
          'alcohol']]
y = data['quality']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)

# Build neural network
net = tflearn.input_data(shape=[None, 11])
net = tflearn.fully_connected(net, 110)
net = tflearn.regression(net)

# Define model
model = tflearn.DNN(net)

# Start training (apply gradient descent algorithm)
model.fit(X_train, y_train, n_epoch=10, batch_size=1, show_metric=True)
The data I use looks like this:
"fixed acidity";"volatile acidity";"citric acid";"residual sugar";"chlorides";"free sulfur dioxide";"total sulfur dioxide";"density";"pH";"sulphates";"alcohol";"quality"
7;0.27;0.36;20.7;0.045;45;170;1.001;3;0.45;8.8;6
6.3;0.3;0.34;1.6;0.049;14;132;0.994;3.3;0.49;9.5;6
8.1;0.28;0.4;6.9;0.05;30;97;0.9951;3.26;0.44;10.1;6
7.2;0.23;0.32;8.5;0.058;47;186;0.9956;3.19;0.4;9.9;6
7.2;0.23;0.32;8.5;0.058;47;186;0.9956;3.19;0.4;9.9;6
8.1;0.28;0.4;6.9;0.05;30;97;0.9951;3.26;0.44;10.1;6
6.2;0.32;0.16;7;0.045;30;136;0.9949;3.18;0.47;9.6;6
...
This is the error I get:
=== RESTART: /home/tux/Schreibtisch/Projects/Wineclassifier/classifier.py ===
hdf5 is not supported on this machine (please install/reinstall h5py for optimal experience)
---------------------------------
Run id: Q6AJQP
Log directory: /tmp/tflearn_logs/
---------------------------------Exception in thread Thread-3:
Traceback (most recent call last):
File "/usr/lib/python3.5/threading.py", line 914, in _bootstrap_inner
self.run()
File "/usr/lib/python3.5/threading.py", line 862, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.5/dist-packages/tflearn/data_flow.py", line 187, in fill_feed_dict_queue
data = self.retrieve_data(batch_ids)
File "/usr/local/lib/python3.5/dist-packages/tflearn/data_flow.py", line 222, in retrieve_data
utils.slice_array(self.feed_dict[key], batch_ids)
File "/usr/local/lib/python3.5/dist-packages/tflearn/utils.py", line 187, in slice_array
return X[start]
File "/usr/local/lib/python3.5/dist-packages/pandas/core/frame.py", line 2056, in __getitem__
return self._getitem_array(key)
File "/usr/local/lib/python3.5/dist-packages/pandas/core/frame.py", line 2100, in _getitem_array
indexer = self.loc._convert_to_indexer(key, axis=1)
File "/usr/local/lib/python3.5/dist-packages/pandas/core/indexing.py", line 1231, in _convert_to_indexer
raise KeyError('%s not in index' % objarr[mask])
KeyError: '[3374] not in index'
Training samples: 4408
Validation samples: 0
--
Does somebody have a clue what's wrong?
Place the following code at the beginning of your script to reset the graph. If you run the code interactively in an IPython environment, the TensorFlow graph can get really messy.
import tensorflow as tf
tf.reset_default_graph()
Many people are in the same shoes. Refer to this GitHub issue for more detail.
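Separately, the traceback shows tflearn slicing the pandas DataFrame positionally (return X[start] inside slice_array), which is what raises the KeyError once train_test_split has shuffled the index. A sketch of a fix under that assumption, handing tflearn plain numpy arrays:

# Assumption: tflearn batches inputs by positional index, so pass numpy arrays.
X_train, X_test, y_train, y_test = train_test_split(
    X.values, y.values.reshape(-1, 1), test_size=0.1)
model.fit(X_train, y_train, n_epoch=10, batch_size=1, show_metric=True)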

while_loop in tensorflow returns type error

I am confused why the following code returns this error message:
Traceback (most recent call last):
File "/Users/Desktop/TestPython/tftest.py", line 46, in <module>
main(sys.argv[1:])
File "/Users/Desktop/TestPython/tftest.py", line 35, in main
result = tf.while_loop(Cond_f2, Body_f1, loop_vars=loopvars)
File "/Users/Desktop/HPC_LIB/TENSORFLOW/lib/python2.7/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2518, in while_loop
result = context.BuildLoop(cond, body, loop_vars, shape_invariants)
File "/Users/Desktop/HPC_LIB/TENSORFLOW/lib/python2.7/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2356, in BuildLoop
pred, body, original_loop_vars, loop_vars, shape_invariants)
File "/Users/Desktop/HPC_LIB/TENSORFLOW/lib/python2.7/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2292, in _BuildLoop
c = ops.convert_to_tensor(pred(*packed_vars))
File "/Users/Desktop/TestPython/tftest.py", line 18, in Cond_f2
boln = tf.less(tf.cast(tf.constant(ind), dtype=tf.int32), tf.cast(tf.constant(N), dtype=tf.int32))
File "/Users/Desktop/HPC_LIB/TENSORFLOW/lib/python2.7/site-packages/tensorflow/python/framework/constant_op.py", line 163, in constant
tensor_util.make_tensor_proto(value, dtype=dtype, shape=shape))
File "/Users/Desktop/HPC_LIB/TENSORFLOW/lib/python2.7/site-packages/tensorflow/python/framework/tensor_util.py", line 353, in make_tensor_proto
_AssertCompatible(values, dtype)
File "/Users/Desktop/HPC_LIB/TENSORFLOW/lib/python2.7/site-packages/tensorflow/python/framework/tensor_util.py", line 287, in _AssertCompatible
raise TypeError("List of Tensors when single Tensor expected")
TypeError: List of Tensors when single Tensor expected
I would appreciate if someone could help me fix this error. Thanks!
from math import *
import numpy as np
import sys
import tensorflow as tf

def Body_f1(n, ind, N, T):
    # Compute trace
    a = tf.trace(tf.random_normal(0.0, 1.0, (n, n)))
    # Update trace
    a = tf.cast(a, dtype=T.dtype)
    T = tf.scatter_update(T, ind, a)
    # Update index
    ind = ind + 1
    return n, ind, N, T

def Cond_f2(n, ind, N, T):
    boln = tf.less(tf.cast(tf.constant(ind), dtype=tf.int32), tf.cast(tf.constant(N), dtype=tf.int32))
    return boln

def main(argv):
    # Open tensorflow session
    sess = tf.Session()
    # Parameters
    N = 10
    T = tf.zeros((N), dtype=tf.float64)
    n = 4
    ind = 0
    # While loop
    loopvars = [n, ind, N, T]
    result = tf.while_loop(Cond_f2, Body_f1, loop_vars=loopvars, shape_invariants=None, \
                           parallel_iterations=1, back_prop=False, swap_memory=False, name=None)
    trace = result[3]
    trace = sess.run(trace)
    print trace
    print 'Done!'
    # Close tensorflow session
    if session == None:
        sess.close()

if __name__ == "__main__":
    main(sys.argv[1:])
Update: I have added the full error message. I am not sure why I get this error message. Does loop_vars expect a single tensor and not a list of tensors? I hope not.
tf.constant expects a non-Tensor value, like a Python list or a numpy array. You can get the same error by nesting tf.constant calls, as in tf.constant(tf.constant(5.)). Removing those calls fixes the first error. It's a very poor error message, so I would encourage you to file a bug on GitHub.
It also looks like the arguments to random_normal are a bit mixed up; keyword arguments are good for avoiding issues like that:
tf.random_normal(mean=0.0, stddev=1.0, shape=(n, n))
Finally, tf.scatter_update expects a variable. It looks like a TensorArray may be what you're looking for here (or one of the higher-level looping constructs which use a TensorArray implicitly).
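A minimal sketch of that TensorArray approach, in the same TF1-style graph code as the question (the loop structure is kept close to the original; the float32 dtype is an assumption):

import tensorflow as tf

N, n = 10, 4

def cond(ind, ta):
    return tf.less(ind, N)

def body(ind, ta):
    # Trace of a random n x n matrix, written at slot `ind`.
    a = tf.trace(tf.random_normal(shape=(n, n), mean=0.0, stddev=1.0))
    return ind + 1, ta.write(ind, a)

ta = tf.TensorArray(dtype=tf.float32, size=N)
_, result = tf.while_loop(cond, body, loop_vars=[tf.constant(0), ta])
trace = result.stack()  # shape (N,)

with tf.Session() as sess:
    print(sess.run(trace))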