
Deep learning image classifier architecture configuration

Hi, I'm new to machine learning and only know some basics and how things are supposed to work. I was following this Deep Learning with Python, TensorFlow, and Keras tutorial and ended up with this code:

import tensorflow as tf
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2

DATADIR = "C:/Users/Acer/imagerec/MRI"

CATEGORIES = ["yes", "no"]

for category in CATEGORIES:
    path = os.path.join(DATADIR,category)
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE)
        plt.imshow(img_array, cmap='gray')


        break
    break
print(img_array)
print(img_array.shape)

IMG_SIZE = 224

new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(new_array, cmap='gray')


training_data = []

def create_training_data():
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        class_num = CATEGORIES.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([new_array, class_num])
            except Exception as e:
                pass


create_training_data()

print(len(training_data))

import random

random.shuffle(training_data)
for sample in training_data[:10]:
    print(sample[1])

X = []
y = []
for features, label in training_data:
    X.append(features)
    y.append(label)

X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)

import pickle

pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)



pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)

X = X/255.0

model = Sequential()

model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())

model.add(Dense(64))
model.add(Activation('relu'))

model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X, y, batch_size=15, epochs=20, validation_split=0.1)

from sklearn.metrics import confusion_matrix
pred = model.predict(X)
pred = np.round(pred)

conf = confusion_matrix(y, pred)

import seaborn as sns
sns.heatmap(conf, annot=True)

plt.show()

Running this code gave me good results, with a validation accuracy of 76.9%. What I needed to do was change the model in this code to VGG16, VGG19, and MobileNet, but I didn't know how to import a pretrained model, so I decided to build my own model and train it. I looked at the VGG16 and VGG19 architectures, counted how many conv and max-pooling layers they use, and came up with this code:

import tensorflow as tf
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2

DATADIR = "C:/Users/Acer/imagerec/EDA"

CATEGORIES = ["yes", "no"]

for category in CATEGORIES:
    path = os.path.join(DATADIR,category)
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE)
        plt.imshow(img_array, cmap='gray')
        plt.show()

        break
    break
print(img_array)
print(img_array.shape)

IMG_SIZE = 224

new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(new_array, cmap='gray')
plt.show()

training_data = []

def create_training_data():
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        class_num = CATEGORIES.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([new_array, class_num])
            except Exception as e:
                pass


create_training_data()

print(len(training_data))

import random

random.shuffle(training_data)
for sample in training_data[:10]:
    print(sample[1])

X = []
y = []
for features, label in training_data:
    X.append(features)
    y.append(label)

X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)

import pickle

pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)



pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)

X = X/255.0

model = Sequential()

model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(MaxPooling2D(pool_size=(1, 1)))

model.add(Conv2D(128, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(128, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(MaxPooling2D(pool_size=(1, 1)))

model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(MaxPooling2D(pool_size=(1, 1)))

model.add(Conv2D(512, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(512, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(512, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(MaxPooling2D(pool_size=(1, 1)))

model.add(Conv2D(512, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(512, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(Conv2D(512, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
padding='same'
model.add(MaxPooling2D(pool_size=(1, 1)))

model.add(Flatten())

model.add(Dense(64))
model.add(Activation('relu'))

model.add(Dense(1))
model.add(Activation('sigmoid'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X, y, batch_size=15, epochs=1, validation_split=0.1)

from sklearn.metrics import confusion_matrix
pred = model.predict(X)
pred = np.round(pred)

conf = confusion_matrix(y, pred)

import seaborn as sns
sns.heatmap(conf, annot=True)

plt.show()

But running this always gives me a validation accuracy of 57.69% in every epoch. Am I doing something wrong, or did I do everything wrong?

Edit: so I used a pretrained model now:

import tensorflow as tf
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2

DATADIR = "C:/Users/Acer/imagerec/MRI"

CATEGORIES = ["yes", "no"]

for category in CATEGORIES:
    path = os.path.join(DATADIR,category)
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE)
        plt.imshow(img_array, cmap='gray')


        break
    break
print(img_array)
print(img_array.shape)

IMG_SIZE = 224

new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(new_array, cmap='gray')


training_data = []

def create_training_data():
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        class_num = CATEGORIES.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([new_array, class_num])
            except Exception as e:
                pass


create_training_data()

print(len(training_data))

import random

random.shuffle(training_data)
for sample in training_data[:10]:
    print(sample[1])

X = []
y = []
for features, label in training_data:
    X.append(features)
    y.append(label)

X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)

import pickle

pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)



pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)

X = X/255.0


def input_shape(args):
    pass


from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Dense

vgg = VGG16(include_top=False, weights='imagenet', input_shape=(), pooling='avg')
x = vgg.output
x = Dense(1, activation='sigmoid')(x)
model = Model(vgg.input, x)
model.summary()

from sklearn.metrics import confusion_matrix
pred = model.predict(X)
pred = np.round(pred)

conf = confusion_matrix(y, pred)

import seaborn as sns
sns.heatmap(conf, annot=True)

plt.show()

model.save('64x2-CNN.model')

and got this model summary followed by an error:

Model: "model_1"


Layer (type)                  Output Shape              Param #
================================================================
input_1 (InputLayer)          (None, None, None, 3)     0
block1_conv1 (Conv2D)         (None, None, None, 64)    1792
block1_conv2 (Conv2D)         (None, None, None, 64)    36928
block1_pool (MaxPooling2D)    (None, None, None, 64)    0
block2_conv1 (Conv2D)         (None, None, None, 128)   73856
block2_conv2 (Conv2D)         (None, None, None, 128)   147584
block2_pool (MaxPooling2D)    (None, None, None, 128)   0
block3_conv1 (Conv2D)         (None, None, None, 256)   295168
block3_conv2 (Conv2D)         (None, None, None, 256)   590080
block3_conv3 (Conv2D)         (None, None, None, 256)   590080
block3_pool (MaxPooling2D)    (None, None, None, 256)   0
block4_conv1 (Conv2D)         (None, None, None, 512)   1180160
block4_conv2 (Conv2D)         (None, None, None, 512)   2359808
block4_conv3 (Conv2D)         (None, None, None, 512)   2359808
block4_pool (MaxPooling2D)    (None, None, None, 512)   0
block5_conv1 (Conv2D)         (None, None, None, 512)   2359808
block5_conv2 (Conv2D)         (None, None, None, 512)   2359808
block5_conv3 (Conv2D)         (None, None, None, 512)   2359808
block5_pool (MaxPooling2D)    (None, None, None, 512)   0
global_average_pooling2d_1 (  (None, 512)               0
dense_1 (Dense)               (None, 1)                 513
================================================================
Total params: 14,715,201
Trainable params: 14,715,201
Non-trainable params: 0


Traceback (most recent call last):
  File "C:/Users/Acer/PycharmProjects/condas/UwU.py", line 95, in <module>
    pred = model.predict(X)
  File "C:\Users\Acer\Anaconda3\envs\condas\lib\site-packages\keras\engine\training.py", line 1441, in predict
    x, _, _ = self._standardize_user_data(x)
  File "C:\Users\Acer\Anaconda3\envs\condas\lib\site-packages\keras\engine\training.py", line 579, in _standardize_user_data
    exception_prefix='input')
  File "C:\Users\Acer\Anaconda3\envs\condas\lib\site-packages\keras\engine\training_utils.py", line 145, in standardize_input_data
    str(data_shape))
ValueError: Error when checking input: expected input_1 to have shape (None, None, 3) but got array with shape (50, 50, 1)

Process finished with exit code 1

In a Keras Sequential model, only the first layer needs to know the input_shape it should expect; in your case that's the first Conv2D layer. Also, there's no point in adding multiple Dense layers with sigmoid activation.

Refer to this:

model = Sequential([
    Conv2D(64, (3, 3), input_shape=input_shape, padding='same', activation='relu'),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    Conv2D(128, (3, 3), activation='relu', padding='same'),
    Conv2D(128, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    Conv2D(256, (3, 3), activation='relu', padding='same'),
    Conv2D(256, (3, 3), activation='relu', padding='same'),
    Conv2D(256, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    Flatten(),
    Dense(4096, activation='relu'),
    Dense(4096, activation='relu'),
    Dense(1, activation='sigmoid')
])
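
Here input_shape is a placeholder; for the grayscale arrays prepared in your script it would be X.shape[1:], i.e. (224, 224, 1). The model can then be compiled and trained exactly as in your original script; a minimal sketch reusing your own compile/fit settings:

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X, y, batch_size=15, epochs=20, validation_split=0.1)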

Alternatively, you could use a pretrained VGG model from keras.applications:

from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Dense

vgg = VGG16(include_top=False, weights='imagenet', input_shape=(), pooling='avg')
x = vgg.output
x = Dense(1, activation='sigmoid')(x)
model = Model(vgg.input, x)
model.summary()
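
As a follow-up note: the ValueError in the question comes from feeding single-channel grayscale arrays into VGG16, whose ImageNet weights expect 3-channel input. Below is a minimal sketch of one way to make the shapes match and actually train the transfer model, assuming the X and y arrays built earlier in the script; the channel-repeat trick, the layer freezing, and the epoch count are illustrative assumptions, not part of the original post.

import numpy as np

# Repeat the single grayscale channel three times: (N, H, W, 1) -> (N, H, W, 3)
X_rgb = np.repeat(X, 3, axis=-1)

# Build the base with an explicit 3-channel input shape that matches the data
h, w = X_rgb.shape[1], X_rgb.shape[2]
vgg = VGG16(include_top=False, weights='imagenet', input_shape=(h, w, 3), pooling='avg')

# Freeze the pretrained convolutional base and train only the new classification head
for layer in vgg.layers:
    layer.trainable = False

x = Dense(1, activation='sigmoid')(vgg.output)
model = Model(vgg.input, x)

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# epochs=5 is an arbitrary illustrative choice
model.fit(X_rgb, np.array(y), batch_size=15, epochs=5, validation_split=0.1)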
