
Matplotlib 'ValueError: x and y must have same first dimension, but have shapes (20,) and (1,)'

I'm running into an error while visualizing a trained TensorFlow model to compare the training loss and accuracy against the validation data, using the following block of code:

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(EPOCHS)

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.savefig('./foo.png')
plt.show()

But it's throwing the error: ValueError: x and y must have same first dimension, but have shapes (20,) and (1,)
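For reference, a quick way to see where the two shapes come from is to compare the lengths directly. A minimal check, reusing the variables from the block above:

# x passed to plt.plot is epochs_range, which has length EPOCHS (20 here);
# y is one of the history lists, which apparently holds only a single value.
print(len(epochs_range))                       # 20
print(len(history.history['accuracy']))        # length of the training accuracy list
print(len(history.history['val_accuracy']))    # length of the validation accuracy list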

The entire code:

import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator

import os
import matplotlib.pyplot as plt
import numpy as np

import logging
logger = tf.get_logger()
logger.setLevel(logging.ERROR)

URL = r'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'

zip_dir = tf.keras.utils.get_file('cats_and_dogs_filtered.zip', origin=URL, extract=True)

zip_dir_base = os.path.dirname(zip_dir)

base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered')
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')

train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')

num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))

num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))

total_train = num_cats_tr+num_dogs_tr
total_validation = num_cats_val+num_dogs_val

print(total_train)
print(total_validation)

BATCH_SIZE = 100
IMG_SHAPE = 150

train_image_generator = ImageDataGenerator(rescale=1./255)
validation_image_generator = ImageDataGenerator(rescale=1./255)

train_data_gen = train_image_generator.flow_from_directory(batch_size=BATCH_SIZE,
                                                            directory=train_dir,
                                                            shuffle=True,
                                                            target_size=(IMG_SHAPE, IMG_SHAPE),
                                                            class_mode='binary')

val_data_gen = validation_image_generator.flow_from_directory(batch_size=BATCH_SIZE,
                                                               directory=validation_dir,
                                                               shuffle=True,
                                                               target_size=(IMG_SHAPE, IMG_SHAPE),
                                                               class_mode='binary')

sample_training_images, _ = next(train_data_gen)

def plotImages(image_arr):
    fig, axes = plt.subplots(1, 5, figsize=(20,20))
    axes = axes.flatten()
    for img, ax in zip(image_arr, axes):
        ax.imshow(img)
    plt.tight_layout()
    plt.show()

plotImages(sample_training_images[:5])

model = tf.keras.models.Sequential([tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(150,150,3)),
                               tf.keras.layers.MaxPooling2D(2,2),
                               
                               tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
                               tf.keras.layers.MaxPooling2D(2,2),
                               
                               tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
                               tf.keras.layers.MaxPooling2D(2,2),
                               
                               tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
                               tf.keras.layers.MaxPooling2D(2,2),
                               
                               tf.keras.layers.Flatten(),
                               tf.keras.layers.Dense(512, activation='relu'),
                               tf.keras.layers.Dense(2)
                               ])

model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])

model.summary()

EPOCHS = 20

history = model.fit_generator(train_data_gen,
                              steps_per_epoch=int(np.ceil(total_train / float(BATCH_SIZE))),
                              epochs=EPOCHS,
                              validation_data=val_data_gen,
                              validation_steps=int(np.ceil(total_validation / float(BATCH_SIZE))))

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(EPOCHS)

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.savefig('./foo.png')
plt.show()

I had the same issue. In my repro, the problem was that history.history['val_accuracy'] contained only a single value, hence the error. To resolve it, I tried setting kernel_initializer='random_normal' on the Dense layer while building the model:

.
.
model.add(Dense(64, activation='relu', kernel_initializer='random_normal'))
.
.

With that change, the plotting code above produces the expected graphs with my test data.
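As a side note, not part of the original fix above: the plots can also be made robust to a short history by deriving the x axis from the history lists themselves instead of from range(EPOCHS). A minimal sketch of that idea, assuming history is the object returned by model.fit_generator in the question:

import matplotlib.pyplot as plt

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

plt.figure(figsize=(8, 8))

plt.subplot(1, 2, 1)
# Plot each series against its own length so x and y always match,
# even if fewer epochs were recorded than EPOCHS.
plt.plot(range(len(acc)), acc, label='Training Accuracy')
plt.plot(range(len(val_acc)), val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(range(len(loss)), loss, label='Training Loss')
plt.plot(range(len(val_loss)), val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()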
