
Predictions using CNN in Tensorflow

I'm trying to create a binary image classifier to detect whether someone is wearing a surgical mask, following the 'Cats vs Dogs' example on the TensorFlow website ( https://www.tensorflow.org/tutorials/images/classification ).

I've created a small dataset with images of people wearing surgical masks and people without them, trained my CNN, and got an accuracy of around 70%, which is fine for now. But the thing is, how do I make predictions? The 'Cats vs Dogs' example stops at augmentation.

Right now I'm not worried about accuracy, just wondering how I can get predictions from my model.

This is my code:

import sys
import time
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import random
import keras
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam  # use the tf.keras optimizer instead of mixing standalone keras with tensorflow.keras



IMG_SIZE = 100 # Image dimensions
batch_size = 100 # Number of images fed to the NN at a time
epochs = 1 # Number of times the data will be passed through the NN

training_data = []
#img_array = []
new_array = []

#######  DATASET LOCATION  #######
TRAIN_DIR = 'C:/Users/Alex/Google Drive/Colab Notebooks/MaskDetector/Train/' #Create variable to store the path of the training images directory 
VALIDATION_DIR = 'C:/Users/Alex/Google Drive/Colab Notebooks/MaskDetector/Validate/' #Create variable to store the path of the validation images directory 
TEST_DIR = 'C:/Users/Alex/Google Drive/Colab Notebooks/MaskDetector/Test/' #Create variable to store the path of the testing images directory 
CATEGORIES = ['MaskOn','MaskOff'] #Categories 'MaskOn' and 'MaskOff' same name as the folders

TRAIN_DIR_MASKON = os.path.join(TRAIN_DIR, 'MaskOn')  # Directory with pics of ppl with masks for training 
TRAIN_DIR_MASKOFF = os.path.join(TRAIN_DIR, 'MaskOff')  # Directory with pics of ppl without masks for training 
VALIDATION_DIR_MASKON = os.path.join(VALIDATION_DIR, 'MaskOn')  # Directory with pics of ppl with masks for validation
VALIDATION_DIR_MASKOFF = os.path.join(VALIDATION_DIR, 'MaskOff')  # Directory with pics of ppl without masks for validation
###########################################

#######     Shows the size of the dataset  #######
num_maskon_tr = len(os.listdir(TRAIN_DIR_MASKON))
num_maskoff_tr = len(os.listdir(TRAIN_DIR_MASKOFF))

num_maskon_val  = len(os.listdir(VALIDATION_DIR_MASKON))
num_maskoff_val = len(os.listdir(VALIDATION_DIR_MASKOFF))

total_train = num_maskon_tr + num_maskoff_tr
total_val = num_maskon_val + num_maskoff_val



###################### DATA AUGMENTATION  ######################

#########   FLIP   #########
image_gen = ImageDataGenerator(rescale=1./255, horizontal_flip=True)

train_data_gen = image_gen.flow_from_directory(batch_size=batch_size,
                                               directory=TRAIN_DIR,
                                               shuffle=True,
                                               target_size=(IMG_SIZE, IMG_SIZE))

augmented_images = [train_data_gen[0][0][0] for i in range(5)]
# (The tutorial re-uses a custom plotting function here to visualize the augmented training images)


#########   ROTATE 45°   #########
image_gen = ImageDataGenerator(rescale=1./255, rotation_range=45)
train_data_gen = image_gen.flow_from_directory(batch_size=batch_size,
                                               directory=TRAIN_DIR,
                                               shuffle=True,
                                               target_size=(IMG_SIZE, IMG_SIZE))

augmented_images = [train_data_gen[0][0][0] for i in range(5)]


#########   ZOOM FROM 0 TO 10%   #########
# zoom_range from 0 - 1 where 1 = 100%.
image_gen = ImageDataGenerator(rescale=1./255, zoom_range=0.5) #

train_data_gen = image_gen.flow_from_directory(batch_size=batch_size,
                                               directory=TRAIN_DIR,
                                               shuffle=True,
                                               target_size=(IMG_SIZE, IMG_SIZE))

augmented_images = [train_data_gen[0][0][0] for i in range(5)]
####################################################################################################

### PREPARES THE DATA TO BE FED INTO THE NN ###
train_image_generator = ImageDataGenerator(rescale=1./255)
image_gen_val = ImageDataGenerator(rescale=1./255)
test_data_generator = ImageDataGenerator(rescale=1./255) # Use this to make predictions? Don't know yet

val_data_gen = image_gen_val.flow_from_directory(batch_size=batch_size,
                                                 directory=VALIDATION_DIR,
                                                 target_size=(IMG_SIZE, IMG_SIZE),
                                                 class_mode='binary')
####################################################################################
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                           directory=TRAIN_DIR,
                                                           shuffle=True,
                                                           target_size=(IMG_SIZE, IMG_SIZE),
                                                           class_mode='binary')
#####################################################################################

test_generator = test_data_generator.flow_from_directory(TEST_DIR,
                                                         target_size=(IMG_SIZE, IMG_SIZE),
                                                         batch_size=batch_size,
                                                         class_mode="binary",
                                                         shuffle=True)

############   NN Model    ############
model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu',
           input_shape=(IMG_SIZE, IMG_SIZE ,3)),
    MaxPooling2D(),
    Dropout(0.2),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Dropout(0.2),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1)
])
#########################################################

############   COMPILES THE NN    ############
model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])


############   PERFORMS THE TRAINNING    ############
history = model.fit_generator(
    train_data_gen,
    steps_per_epoch=total_train // batch_size,
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=total_val // batch_size
)

You've clearly found model.compile() and model.fit_generator(); all you need to do is head over to the documentation and find the other methods. The Keras documentation explains how to use model.predict(). Use that for your predictions.
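For example, here is a minimal sketch of how predictions could look with the code from the question (it reuses the `model`, `test_generator`, and `IMG_SIZE` variables defined above; the single-image path is just a placeholder). Note that the final `Dense(1)` layer has no activation and the loss uses `from_logits=True`, so `model.predict()` returns raw logits that need a sigmoid before thresholding:

import numpy as np
import tensorflow as tf
import cv2

# --- Predict on the whole test set via the existing generator ---
# (Recent TF versions let model.predict() consume the generator directly;
#  older ones use model.predict_generator() instead.)
logits = model.predict(test_generator)          # shape (num_images, 1), raw logits
probs = tf.nn.sigmoid(logits).numpy().ravel()   # convert logits to probabilities
pred_classes = (probs > 0.5).astype(int)        # 0 or 1 per image

# flow_from_directory assigns labels alphabetically; check the mapping
print(test_generator.class_indices)             # e.g. {'MaskOff': 0, 'MaskOn': 1}

# --- Predict on a single image file (placeholder path) ---
img = cv2.imread('C:/path/to/some_image.jpg')          # hypothetical path
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)             # cv2 loads BGR; the generators used RGB
img = cv2.resize(img, (IMG_SIZE, IMG_SIZE)) / 255.0    # same resize + rescale as the generators
img = np.expand_dims(img, axis=0)                      # add the batch dimension
prob = tf.nn.sigmoid(model.predict(img)).numpy()[0][0]
print('probability of class 1:', prob)

If you want to line the batch predictions up with the file names (test_generator.filenames), create the test generator with shuffle=False, otherwise the order of the predictions won't match the order of the files.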
