Webcam Frame to model.predict_generator - Jupyter Notebook & CV2 - tensorflow

I have created this custom CNN, trained it, and now wish to pass frames from my webcam to it in real time to test its predictions.
The webcam playback captures frame by frame; however, I am unsure what to do to each frame to get it working with the CNN model.
Any advice would be appreciated. I have provided the full code of what I am trying to achieve below.
#imported necessities
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import cv2
from matplotlib.image import imread
from IPython.display import clear_output
import time
import PIL.Image
from io import StringIO
import IPython.display
import numpy as np
from io import BytesIO
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense, Conv2D, MaxPool2D, Dropout, Flatten, MaxPooling2D
from tensorflow.keras.callbacks import EarlyStopping
#Data Paths
data_dir = 'C:\\Users\\User\\Desktop\\DATAWeather'
test_path = data_dir+'\\Test\\'
train_path = data_dir+'\\Train\\'
#Variable to resize all of the images
image_shape = (224,224,3)  # 224*224*3 = 150,528 data points per image: that's why we train in batches
#Apply a generator so it does not always get the same format of picture (recognizes different things)
image_gen = ImageDataGenerator(rotation_range=20, width_shift_range=0.1, height_shift_range=0.1,
                               rescale=1/255, shear_range=0.1, zoom_range=0.1,
                               horizontal_flip=True, fill_mode='nearest')
#setting up a base convolutional layer
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3,3), input_shape=image_shape, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=64, kernel_size=(3,3), activation='relu'))  # input_shape only needed on the first layer
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=64, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(4))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])  # model.summary()
#Stop training early if the validation loss fails to improve for two consecutive epochs
early_stop = EarlyStopping(monitor='val_loss', patience=2)
#TRAINING MODEL - keep the batch size a power of two
batch_size = 32
#TWO generators
train_image_gen = image_gen.flow_from_directory(train_path, target_size=image_shape[:2], color_mode='rgb', batch_size = batch_size, class_mode='categorical', shuffle=True)
test_image_gen = image_gen.flow_from_directory(test_path, target_size=image_shape[:2], color_mode='rgb', batch_size = batch_size, class_mode='categorical', shuffle=False)
results = model.fit_generator(train_image_gen, epochs=1, validation_data=test_image_gen, callbacks=[early_stop])
def showarray(a, fmt='jpeg'):
    f = BytesIO()
    PIL.Image.fromarray(a).save(f, fmt)
    IPython.display.display(IPython.display.Image(data=f.getvalue()))

def get_frame(cam):
    # Capture frame-by-frame
    ret, frame = cam.read()
    # flip image for natural viewing
    frame = cv2.flip(frame, 1)
    return frame

cam = cv2.VideoCapture(0)

def make_1080p():
    cam.set(3, 224)
    cam.set(4, 224)

def change_res(width, height):
    cam.set(3, width)
    cam.set(4, height)

change_res(224, 224)

try:
    while True:
        t1 = time.time()
        frame = get_frame(cam)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        showarray(frame)
        t2 = time.time()
        print("%f FPS" % (1/(t2-t1)))
        # Display the frame until a new frame is available
        clear_output(wait=True)
        Weather_Prediction_Cell = (frame)
        #Weather_Prediction_Cell /= 255
        model.predict_generator(frame)
        #print(Weather_Prediction_Cell)
        print(pred)
except KeyboardInterrupt:
    cam.release()
    print("Stream stopped")

Keras models have a method called predict. It takes a single np.array (or a list of np.arrays) as input, which must have exactly the same shape as your network's input, including the batch dimension: (batch_count, width, height, channels), for example. You pass this to model.predict, and it returns the result as an np.array with the shape of your network's output layer. I'm not used to webcam applications with OpenCV, but if you can get the frame data as an np.array somehow, you can feed it to your neural network as well. Just be sure about its shape, and reshape it if needed.
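For the code above, a minimal sketch of that preprocessing (assuming the same 224x224 size and 1/255 rescaling used during training; class_names is a hypothetical list, recoverable from train_image_gen.class_indices):
import cv2
import numpy as np
class_names = ['cloudy', 'rain', 'shine', 'sunrise']  # hypothetical; see train_image_gen.class_indices
ret, frame = cam.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV gives BGR; the model was trained on RGB
frame = cv2.resize(frame, (224, 224))           # match the training target_size
x = frame.astype('float32') / 255.0             # match the generator's rescale=1/255
x = np.expand_dims(x, axis=0)                   # add the batch dimension: (1, 224, 224, 3)
pred = model.predict(x)                         # shape (1, 4)
print(class_names[int(np.argmax(pred[0]))])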

Related

How to feed grayscale images into a pretrained neural network model?

Noob here. I have been working with grayscale images for mitosis classification, and I have employed VGGNet to achieve this. I have some doubts about how my grayscale images should be fed into the neural network, since VGGNet's documentation says it was trained on colored images from ImageNet.
I read the images using cv2.imread() and squeeze them into an array, and I found the shape to be (227, 227, 3). Shouldn't it be (227, 227, 1) when I'm handling grayscale images? Model accuracy was also found to be only 50%. I'm wondering if something is wrong with the dataset itself, or whether VGGNet isn't suitable for this purpose. Or should I use some other method to read these images to get grayscale?
I have tried the solutions listed in similar questions. Am I reading the images in the right way?
I'm sharing my code here.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Display Image data
from PIL import Image
import cv2
from google.colab import drive
drive.mount('/content/drive')
mitotic_image = Image.open('/content/drive/MyDrive/Medical/actualmito1/trainactmito01.jpg')
nonmitotic_image=Image.open('/content/drive/MyDrive/Medical/actualnonmito1/trainactnonmito01.jpg')
# subplotting image data
fig = plt.figure(figsize=(15,9))
ax1 = fig.add_subplot(1, 2, 1)
img_plot = plt.imshow(nonmitotic_image, cmap = plt.cm.bone)
ax1.set_title("Non-Mitotic Image")
ax2 = fig.add_subplot(1, 2, 2)
img_plot = plt.imshow(mitotic_image, cmap = plt.cm.bone)
ax2.set_title("Mitotic Image")
plt.show()
import os
yes = os.listdir("/content/drive/MyDrive/Medical/actualmito1")
no = os.listdir("/content/drive/MyDrive/Medical/actualnonmito1")
data = np.concatenate([yes, no])
target_yes = np.full(len(yes), 1)
target_no = np.full(len(no), 0)
# Image Target
data_target = np.concatenate([target_yes, target_no])
# Generate Image Data
img = cv2.imread("/content/drive/MyDrive/Medical/actualmito1/trainactmito01.jpg")
mitosis = cv2.resize(img,(32,32))
plt.imshow(mitosis)
X_data = []
yes = os.listdir("/content/drive/MyDrive/Medical/actualmito1")
for file in yes:
    img = cv2.imread("/content/drive/MyDrive/Medical/actualmito1/" + file)
    # resize image data to 224x224
    img = cv2.resize(img, (224, 224))
    X_data.append(img)  # store every image in one list
no = os.listdir("/content/drive/MyDrive/Medical/actualnonmito1")
for file in no:
    img = cv2.imread("/content/drive/MyDrive/Medical/actualnonmito1/" + file)
    # resize image data to 224x224
    img = cv2.resize(img, (224, 224))
    X_data.append(img)  # store every image in one list
X = np.squeeze(X_data)
X.shape
# Image Pixel Normalization
X = X.astype('float32')
X /= 255
X.shape
# Train & Test Data
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, data_target, test_size=0.1,
                                                    random_state=3)
x_train2, x_val, y_train2, y_val = train_test_split(x_train, y_train, test_size=0.15,
                                                    random_state=3)
# VGG16 - Transfer Learning
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import (Conv2D, GlobalAveragePooling2D, Flatten,
                                     ZeroPadding2D, Dropout, BatchNormalization)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import VGG16
def build_model():
    # use ImageNet pre-trained weights (transfer learning)
    vgg16_model = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # Input Layer
    model = Sequential()
    # padding='same' = zero padding
    model.add(Conv2D(filters=3, kernel_size=(3, 3), padding='same', input_shape=(224, 224, 3)))
    # add transfer learning model
    model.add(vgg16_model)
    # Average Pooling Layer
    model.add(GlobalAveragePooling2D())
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    # Fully Connected Layer
    model.add(Dense(units=512, activation='relu'))
    model.add(Dropout(0.5))
    # Output Layer
    model.add(Dense(units=1, activation='sigmoid'))
    model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model
model = build_model()
model.summary()
from tensorflow.keras import callbacks
filepath = "/content/drive/MyDrive/BestModelMRI3.hdf5"
checkpoint = callbacks.ModelCheckpoint(filepath, monitor='val_loss', save_best_only=True,
                                       mode='min', verbose=1)
import datetime
import keras
import os
logdir = os.path.join("/content/drive/MyDrive/MRI_logs",
                      datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = keras.callbacks.TensorBoard(logdir)
history = model.fit(x_train2, y_train2, epochs=200, batch_size=32, shuffle=True,
                    validation_data=(x_val, y_val), callbacks=[checkpoint, tensorboard_callback], verbose=1)
model.load_weights("/content/drive/MyDrive/BestModelMRI3.hdf5")
model.evaluate(x_test, y_test)
predictions = model.predict(x_test)
yhat = np.round(predictions)
from sklearn.metrics import confusion_matrix, classification_report
confusion_matrix(y_test, yhat)
sns.heatmap(confusion_matrix(y_test, yhat), annot = True, cmap = 'RdPu')
print(classification_report(y_test, yhat))
The VGG model was trained on RGB images, while cv2 reads images in as BGR, so add
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
to convert BGR to RGB. Also, VGG expects the pixels to be scaled to the range -1 to +1, so scale with
X = X / 127.5 - 1
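Putting both points into the question's loading loop might look like this (a sketch under the answer's assumptions; the folder path is one of the question's two class directories):
import os
import cv2
import numpy as np
folder = "/content/drive/MyDrive/Medical/actualmito1"
X_data = []
for file in os.listdir(folder):
    img = cv2.imread(os.path.join(folder, file))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; the pretrained net saw RGB
    img = cv2.resize(img, (224, 224))
    X_data.append(img)
X = np.array(X_data).astype('float32')
X = X / 127.5 - 1  # scale pixels to [-1, +1] as suggested above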

Model predict giving same results after fitting

Hi, I am a beginner in ML and TensorFlow, so do pardon me for not understanding complex theories.
I was building an image classifier CNN as a form of practice. The model is built on MobileNetV2, and it's supposed to classify images of cats, dogs, and pandas. After training my model (with a decent accuracy of 92%), I tried using model.predict() to evaluate how it does on new images, but I noticed that all my outputs were 1. This happens even if I use the same training data. By the way, I used 2700 images (900 from each class) for training and 300 for validation.
Here is my code
%tensorflow_version 2.x # this line is not required unless you are in a notebook
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import PIL.Image
import tensorflow_datasets as tfds
import pathlib
from google.colab import drive
drive.mount('/content/gdrive')
IMAGE_SIZE=[150,150]
train_path = "/content/gdrive/MyDrive/Colab Notebooks/Cat_Dog_Panda_CNN/train"
test_path = "/content/gdrive/MyDrive/Colab Notebooks/Cat_Dog_Panda_CNN/test"
IMAGE_SHAPE=[150,150,3]
base_model = tf.keras.applications.MobileNetV2(input_shape=IMAGE_SHAPE,
                                               include_top=False,
                                               weights='imagenet')
base_model.trainable = False
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
prediction_layer = tf.keras.layers.Dense(3)
model = tf.keras.Sequential([
    base_model,
    global_average_layer,
    prediction_layer
])
model.summary()
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=base_learning_rate),
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])
# creates a data generator object that transforms images
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(train_path, target_size=IMAGE_SIZE, batch_size=32, class_mode='categorical')
testing_set = test_datagen.flow_from_directory(test_path, target_size=IMAGE_SIZE, batch_size=32, class_mode='categorical')
model.fit(
    training_set,
    epochs=3,
    validation_data=testing_set)
img = Image.open("/content/gdrive/MyDrive/Colab Notebooks/Cat_Dog_Panda_CNN/test/panda/panda_00094.jpg").convert('RGB').resize((150, 150), Image.ANTIALIAS)
img = np.array(img)
predictions = model.predict(img[None,:,:])
np.argmax(predictions[0])
In model.compile you have loss=tf.keras.losses.BinaryCrossentropy(from_logits=True). Since you have 3 classes, you should use
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True)
(keep from_logits=True, since your final Dense(3) layer has no softmax activation). Also, the MobileNet model was trained with pixel values scaled to the range -1 to +1, so your inputs should be scaled with x/127.5 - 1 rather than 1/255; note that ImageDataGenerator's rescale argument is only a multiplier, so the offset has to come from a preprocessing_function. Finally, when you feed an image into model.predict, you must perform the same preprocessing as you did for the training images, namely rescale it and resize it to the size you used in training, (150, 150).
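A sketch of matched train-time and predict-time preprocessing (assuming MobileNetV2's bundled preprocess_input, which maps pixels to [-1, 1]; the image path is the one from the question):
import numpy as np
from PIL import Image
import tensorflow as tf
preprocess = tf.keras.applications.mobilenet_v2.preprocess_input  # maps pixels to [-1, 1]
# Use the same preprocessing in the generators...
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    preprocessing_function=preprocess,
    rotation_range=40, horizontal_flip=True)
# ...and again at predict time
img = Image.open("/content/gdrive/MyDrive/Colab Notebooks/Cat_Dog_Panda_CNN/test/panda/panda_00094.jpg")
img = img.convert('RGB').resize((150, 150))
x = preprocess(np.array(img).astype('float32'))
pred = model.predict(x[None, ...])  # logits, shape (1, 3)
print(np.argmax(pred[0]))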

Keras predicting different output for same input image

I am working on a binary classification problem. I have finished training, and I am now testing the model on single images using the code below.
import warnings
import time
from urllib.request import urlopen
import os
import urllib.request

start_time = time.time()
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=FutureWarning)
    import numpy as np
    from keras.preprocessing.image import img_to_array, load_img
    from keras.models import Sequential
    from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
    from keras.applications.vgg16 import VGG16
    import tensorflow as tf
    import logging
    logging.getLogger('tensorflow').disabled = True

img_size = 224

class PersonPrediction:
    def __init__(self):
        self.class_dictionary = np.load(
            'class_indices_vgg.npy',
            allow_pickle=True).item()
        self.top_model_weights_path = 'v2/weights/bottleneck_fc_model_2020-10-10-05.h5'
        self.num_classes = len(self.class_dictionary)
        self.model = self.create_model(self.num_classes)
        self.graph = tf.compat.v1.get_default_graph()

    def create_model(self, num_of_cls):
        model = Sequential()
        vgg_model = VGG16(include_top=False, weights='imagenet', input_shape=(img_size, img_size, 3))
        for layer in vgg_model.layers[:-4]:
            layer.trainable = False
        model.add(vgg_model)
        model.add(GlobalAveragePooling2D())
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(1, activation='sigmoid'))
        return model

    def predict(self, path=None, file_name=None):
        if path:
            image_path = path
            path = self.url_to_image(image_path)
        else:
            path = os.path.join('imgs', file_name)
        print("[INFO] loading and preprocessing image...")
        image = load_img(path, target_size=(224, 224))
        image = img_to_array(image)
        # important! otherwise the predictions will be '0'
        image = image / 255
        image = np.expand_dims(image, axis=0)
        label_idx = self.model.predict_classes(image)[0][0]
        probability = self.model.predict(image)[0]
        inv_map = {v: k for k, v in self.class_dictionary.items()}
        label = inv_map[label_idx]
        return label, probability[0]

path = 'temp.jpg'
tax_model = PersonPrediction()
label, proba = tax_model.predict(
    file_name='frame303.jpg')
print(label, proba)
The problem is I keep getting changing predictions, for both the label and the probability, every time I rerun the code. I'm not sure what is causing that.
There are a number of sources of randomness when training a model. First, the weights are randomly initialized, so your model starts from a different point in N-space (N being the number of trainable parameters). Second, layers like dropout have randomness in which nodes get selected. Some GPU processes, particularly with multi-processing, can also have some degree of randomness. I have seen a number of posts on getting repeatable results in TensorFlow, but I have not found one that seems to really work. In general, though, the results should be reasonably close if your model is working correctly and you run enough epochs. Once the model is trained and you use it for predictions, as long as you use the same trained model, you should get identical prediction results.
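To make that last point concrete: the PersonPrediction class above defines self.top_model_weights_path but never appears to load it, so every run builds a freshly initialized head. A minimal sketch of the save-once, load-every-run pattern (using a hypothetical tiny model for brevity, not the VGG16 one above):
import numpy as np
import tensorflow as tf
# Train once and save the weights (hypothetical toy model and data)
model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation='sigmoid', input_shape=(4,))])
model.compile(optimizer='adam', loss='binary_crossentropy')
model.fit(np.random.rand(32, 4), np.random.randint(0, 2, 32), epochs=1, verbose=0)
model.save_weights('trained.h5')
# Every later run: rebuild the same architecture and load the saved weights
model.load_weights('trained.h5')
x = np.ones((1, 4), dtype='float32')
print(model.predict(x))  # identical output on every run for the same weights and input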

I am running out of 25gb ram on google colab

This is my code below. I am making a multilabel classification model using 8000 X-ray images; can someone here help me? Most of the RAM is used up while loading the images themselves, and only 10 epochs are able to run.
Can someone tell me what changes I need to make to this code for it to run and generate the model?
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import *
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import cv2
import os
# construct the argument parser and parse the arguments
# initialize the initial learning rate, number of epochs to train for,
# and batch size
INIT_LR = 1e-3
EPOCHS = 40
BS = 66
# grab the list of images in our dataset directory, then initialize
# the list of data (i.e., images) and class images
print("[INFO] loading images...")
imagePaths = list(paths.list_images('/content/drive/My Drive/testset/'))
data = []
labels = []
# loop over the image paths
for imagePath in imagePaths:
    # extract the class label from the filename
    label = imagePath.split(os.path.sep)[-2]
    # load the image, swap color channels, and resize it to a fixed
    # 224x224 pixels while ignoring aspect ratio
    image = cv2.imread(imagePath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (224, 224))
    # update the data and labels lists, respectively
    data.append(image)
    labels.append(label)
# convert the data and labels to NumPy arrays while scaling the pixel
# intensities to the range [0, 1]
data = np.array(data) / 255.0
labels = np.array(labels)
# perform one-hot encoding on the labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
# partition the data into training and testing splits using 80% of
# the data for training and the remaining 20% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
    test_size=0.20, stratify=labels, random_state=42)
# initialize the training data augmentation object
trainAug = ImageDataGenerator(
    rotation_range=15,
    fill_mode="nearest")
# load the VGG16 network, ensuring the head FC layer sets are left
# off
baseModel = VGG16(weights="imagenet", include_top=False,
    input_tensor=Input(shape=(224, 224, 3)))
# construct the head of the model that will be placed on top of
# the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(4, 4))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(64, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(3, activation="softmax")(headModel)
# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)
# loop over all layers in the base model and freeze them so they will
# *not* be updated during the first training process
for layer in baseModel.layers:
layer.trainable = False
# compile our model
print("[INFO] compiling model...")
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# train the head of the network
print("[INFO] training head...")
H = model.fit(
    trainAug.flow(trainX, trainY, batch_size=BS),
    steps_per_epoch=len(trainX) // BS,
    validation_data=(testX, testY),
    validation_steps=len(testX) // BS,
    epochs=EPOCHS)
# make predictions on the testing set
print("[INFO] evaluating network...")
predIdxs = model.predict(testX, batch_size=BS)
# for each image in the testing set we need to find the index of the
# label with corresponding largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)
# show a nicely formatted classification report
print(classification_report(testY.argmax(axis=1), predIdxs,
    target_names=lb.classes_))
# compute the confusion matrix and use it to derive the raw
# accuracy, sensitivity, and specificity
cm = confusion_matrix(testY.argmax(axis=1), predIdxs)
total = sum(sum(cm))
acc = (cm[0, 0] + cm[1, 1]) / total
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
# show the confusion matrix, accuracy, sensitivity, and specificity
print(cm)
print("acc: {:.4f}".format(acc))
print("sensitivity: {:.4f}".format(sensitivity))
print("specificity: {:.4f}".format(specificity))
# plot the training loss and accuracy
N = EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc",color='green')
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy on COVID-19 Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig('/content/drive/My Drive/setcovid/plot2.png')
# serialize the model to disk
print("[INFO] saving COVID-19 detector model...")
model.save('/content/drive/My Drive/setcovid/model2',save_format="h5" )
You can try generating TFRecords from this data, storing them on your Drive, and then feeding them into your model in batches instead of loading everything into memory at once. I would recommend the Hvass Laboratories YouTube channel (TensorFlow tutorials playlist, tutorial number 18, "TFRecords and Dataset API") and TensorFlow's official documentation for the Dataset API.
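A minimal sketch of that pipeline (assuming the images are already resized to 224x224 uint8 arrays with integer class labels; data_uint8, int_labels, and the file path are hypothetical placeholders):
import tensorflow as tf

def serialize_example(image, label):
    # image: uint8 NumPy array of shape (224, 224, 3); label: Python int
    feature = {
        'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image.tobytes()])),
        'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()

# Write once: stream every (image, label) pair into a TFRecord file on Drive
with tf.io.TFRecordWriter('/content/drive/My Drive/setcovid/xrays.tfrecord') as writer:
    for image, label in zip(data_uint8, int_labels):  # hypothetical in-memory lists
        writer.write(serialize_example(image, label))

# Read back: batches stream from disk, so the full dataset never sits in RAM
def parse_example(proto):
    desc = {'image': tf.io.FixedLenFeature([], tf.string),
            'label': tf.io.FixedLenFeature([], tf.int64)}
    parsed = tf.io.parse_single_example(proto, desc)
    image = tf.reshape(tf.io.decode_raw(parsed['image'], tf.uint8), (224, 224, 3))
    return tf.cast(image, tf.float32) / 255.0, parsed['label']

dataset = (tf.data.TFRecordDataset('/content/drive/My Drive/setcovid/xrays.tfrecord')
           .map(parse_example, num_parallel_calls=tf.data.AUTOTUNE)
           .shuffle(1024)
           .batch(BS)
           .prefetch(tf.data.AUTOTUNE))
# model.fit(dataset, epochs=EPOCHS)
With integer labels the model would be compiled with sparse_categorical_crossentropy; alternatively, one-hot encode inside parse_example to keep categorical_crossentropy.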

How does Tensorflow's DirectoryIterator work?

I am used to using something like model.fit(train_data, train_labels, epochs=10), where I use glob to read a folder full of images into RAM. I wanted to read directly from the HDD as the training happens. I found:
https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/DirectoryIterator
Only I do not know how exactly it works. I have scoured the internet for more help than the linked documentation, but I haven't found any. I have the labels and directories in the DirectoryIterator; I just don't know how to feed the DirectoryIterator into my model.
The code shows what I have done so far. I also tried to use a TensorFlow session and feed the DirectoryIterator as a feed_dict. The code is messy; I have just been trying this and that. In the code I try to use fit_generator to fit the DirectoryIterator.
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras as keras
import cv2 as ocv
import glob
import matplotlib.pyplot as plt
from tensorflow import image
import glob
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Plot inline
%matplotlib inline
# Load an image: 1 = color, 0 = grayscale, -1 = unchanged
img = ocv.imread('C:/Users/ew/Documents/Python Scripts/Noodles/my.png',1)
RGB_im = ocv.cvtColor(img, ocv.COLOR_BGR2RGB)
img.shape
plt.imshow(RGB_im)
cv_img = []
for img in glob.glob("C:\\Users\\EW\\pictures\\Noodles\\Banana\\*.jpg"):
    cv_img.append(img)
    #n = ocv.imread(img)
    #cv_img.append(n)
print(cv_img[1])
image_data_generator = keras.preprocessing.image.ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False, zca_epsilon=1e-06,
    rotation_range=0,
    width_shift_range=0.0,
    height_shift_range=0.0,
    brightness_range=None,
    shear_range=0.0,
    zoom_range=0.0,
    channel_shift_range=0.0,
    fill_mode='nearest',
    cval=0.0,
    horizontal_flip=False,
    vertical_flip=False,
    rescale=None,
    preprocessing_function=None,
    data_format='channels_last',
    validation_split=0.3,
    # interpolation_order=1,
    dtype='float32')
noodle_data = directory = "C:\\Users\\EW\\pictures\\Noodles\\"
image_set = keras.preprocessing.image.DirectoryIterator(
    directory,
    image_data_generator,
    target_size=(256, 256),
    color_mode='rgb',
    classes=None,
    class_mode='categorical',
    batch_size=32,
    shuffle=True,
    seed=None,
    data_format=None,
    save_to_dir=None,
    save_prefix='',
    save_format='png',
    follow_links=False,
    subset=None,
    interpolation='nearest',
    dtype=None)
model = Sequential()
#add model layers
model.add(Dense(10, activation='relu', input_shape=(256,256)))
model.add(Dense(10, activation='relu'))
model.add(Dense(1))
model.fit_generator(noodle_data, steps_per_epoch=16, validation_data=val_it, validation_steps=8)
This fails with:
---> 12 model.fit_generator(prawn_data , steps_per_epoch=16, validation_data=prawn_data, validation_steps=8)
AttributeError: 'str' object has no attribute 'shape'
I think you should write it like this:
image_data_generator = keras.preprocessing.image.ImageDataGenerator(featurewise_center=False,
                                                                    ...,
                                                                    dtype='float32')
directory = "C:\\Users\\EW\\pictures\\Noodles\\"
image_set = image_data_generator.flow_from_directory(directory,
                                                     target_size=(256, 256),
                                                     color_mode='rgb',
                                                     ...)
So, you create an instance of ImageDataGenerator() named image_data_generator and then call its flow_from_directory() method to read from the directory. You shouldn't pass image_data_generator in as a parameter the way DirectoryIterator requires; the generator builds the iterator for you. (Note that flow_from_directory does not take a dtype argument; that belongs to the generator's constructor.)
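A fuller sketch of the whole loop along those lines (the directory path is the question's own; the small Conv2D model is a hypothetical stand-in, since a Dense layer with input_shape=(256, 256) will not match the (256, 256, 3) batches the iterator yields):
import tensorflow.keras as keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

image_data_generator = keras.preprocessing.image.ImageDataGenerator(
    rescale=1./255, validation_split=0.3)

directory = "C:\\Users\\EW\\pictures\\Noodles\\"
train_set = image_data_generator.flow_from_directory(
    directory, target_size=(256, 256), class_mode='categorical',
    batch_size=32, subset='training')
val_set = image_data_generator.flow_from_directory(
    directory, target_size=(256, 256), class_mode='categorical',
    batch_size=32, subset='validation')

# Hypothetical minimal model; note the 3-channel input shape the iterator yields
model = Sequential([
    Conv2D(16, (3, 3), activation='relu', input_shape=(256, 256, 3)),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(train_set.num_classes, activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# The iterator plugs straight into fit (or fit_generator in older Keras)
model.fit(train_set, validation_data=val_set, epochs=10)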