简体   繁体   English

keras flow_from_dataframe 语义分割

[英]keras flow_from_dataframe semantic segmentation

I'm trying to use keras' flow_from_dataframe for semantic segmentation (the input is an image of dimension (height, width, 3), and the label is also an image of dimension (height, width)), but I can't get it to work.我正在尝试使用 keras 的 flow_from_dataframe 进行语义分割(输入是尺寸(高度,宽度,3)的图像,标签也是尺寸(高度,宽度)的图像,但无法使其工作。

As recommended here I (uninstalled the existing and) installed the latest keras preprocessing lib with按照这里的建议我(卸载了现有的并)安装了最新的 keras 预处理库

    pip install git+https://github.com/keras-team/keras-preprocessing.git

I'm getting the following error for the mini example below我收到以下迷你示例的以下错误

ValueError: Error when checking target: expected conv1 to have 4 dimensions, but got array with shape (1, 1) ValueError:检查目标时出错:预期 conv1 有 4 个维度,但得到形状为 (1, 1) 的数组

Using following versions in an anaconda virtual environment on Windows 7 in Pycharm在 Pycharm 的 Windows 7 上的 anaconda 虚拟环境中使用以下版本

  • tf 1.13.1 tf 1.13.1
  • keras-preprocessing 1.0.9 keras 预处理 1.0.9
  • keras 2.2.4 keras 2.2.4
  • keras-applications 1.0.7 keras 应用程序 1.0.7
  • keras-base 2.2.4 keras-base 2.2.4

I think the error lies in my usage of flow_from_dataframe, as I was able to write my own keras datagenerator following this blog .我认为错误在于我对 flow_from_dataframe 的使用,因为我能够按照此博客编写自己的 keras 数据生成器。

Any advice, how to correctly setup flow_from_dataframe?任何建议,如何正确设置flow_from_dataframe?

Fully working example which also generates the random training data.完全工作的例子,它也生成随机训练数据。

import numpy as np
import pandas as pd
import os
import scipy.misc

import keras
from keras.models import Model
from keras.layers import Input, Conv2D
from keras_preprocessing.image import ImageDataGenerator


def get_file_list(root_path):
    """Recursively collect the full path of every file under *root_path*.

    # Returns:
        file_list: _list_, list of full paths to all files found
    """
    return [
        os.path.join(dirpath, filename)
        for dirpath, _dirnames, filenames in os.walk(root_path)
        for filename in filenames
    ]


def gen_rand_img_labels(n_rand_imgs, path_img, path_label):
    """Write *n_rand_imgs* random RGB images and matching label maps to disk.

    Depends on the module-level globals ``img_dim`` (height, width, channels)
    and ``n_classes`` being defined by the calling script.

    NOTE(review): scipy.misc.toimage only exists in scipy <= 1.2; later
    versions removed it, as the post itself points out.
    """
    for idx in range(n_rand_imgs):
        fname = 'img{}.png'.format(idx)

        # Random RGB image with pixel values in [0, 255].
        rgb = np.random.randint(0, 256, size=img_dim)
        scipy.misc.toimage(rgb, cmin=0, cmax=255).save(os.path.join(path_img, fname))

        # Per-pixel class ids in [0, n_classes), same spatial size as the image.
        labels = np.random.randint(0, n_classes, size=(img_dim[0], img_dim[1]))
        print('label_rand.shape: ', labels.shape)
        scipy.misc.toimage(labels, cmin=0, cmax=255).save(os.path.join(path_label, fname))


if __name__ == "__main__":
    img_dim = (100, 200, 3)  # height, width, channels
    batch_size = 1
    nr_epochs = 1

    n_classes = 5  # number of distinct label values in the segmentation masks
    n_rand_imgs = 10  # number of random image/label pairs to generate
    savepath_img = r'/path/to/img'  # TODO: point at a real, writable directory
    savepath_label = r'/path/to/label'  # TODO: point at a real, writable directory

    # --- generate random images and random labels and save them to disk
    gen_rand_img_labels(n_rand_imgs, savepath_img, savepath_label)


    # --- build Data Generator
    # One row per image: 'path' is the image file path, 'label' holds the
    # full one-hot label array stored as an object in the DataFrame cell.
    train_df = pd.DataFrame(columns=['path', 'label'])

    list_img_names = get_file_list(savepath_img)

    for fname in list_img_names:
        fname_pure = os.path.split(fname)[1]

        # read in png label file as numpy array
        # NOTE(review): scipy.misc.imread was removed in scipy >= 1.2; this
        # line requires an old scipy (or a Pillow-based replacement).
        y = scipy.misc.imread(os.path.join(savepath_label, fname_pure))
        # One-hot encode per-pixel class ids: (h, w) -> (h, w, n_classes).
        y = keras.utils.to_categorical(y, n_classes)
        print('shape y: {}'.format(y.shape))
        train_df.loc[len(train_df)] = [fname, y]

    # validation_split reserves 25% of the rows for the "validation" subset.
    datagen = ImageDataGenerator(rescale=1/255.0, validation_split=0.25)
    # class_mode="raw" passes the 'label' column values through unchanged.
    train_generator = datagen.flow_from_dataframe(
        dataframe=train_df,
        x_col="path",
        y_col="label",
        subset="training",
        batch_size=batch_size,
        class_mode="raw",
        target_size=(img_dim[0], img_dim[1]))


    valid_generator = datagen.flow_from_dataframe(
        dataframe=train_df,
        x_col="path",
        y_col="label",
        subset="validation",
        batch_size=batch_size,
        class_mode="raw",
        target_size=(img_dim[0], img_dim[1]))

    # --- create the model and train it
    # Single conv layer emitting per-pixel class scores of shape
    # (batch, height, width, n_classes) -- the 4-D target the error refers to.
    input_ = Input(shape=img_dim)
    x = Conv2D(n_classes, (3, 3), activation='relu', padding='same', name='conv1')(input_)
    model = Model(inputs=input_, outputs=[x])
    model.summary()

    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=["accuracy"])

    # Train model
    STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size
    STEP_SIZE_VALID = valid_generator.n // valid_generator.batch_size
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=STEP_SIZE_TRAIN,
                        validation_data=valid_generator,
                        validation_steps=STEP_SIZE_VALID,
                        epochs=nr_epochs)

I have updated the code for use with TensorFlow 2.0我已经更新了用于 TensorFlow 2.0 的代码

NOTE: Use scipy 1.2.0, as scipy.misc.toimage is deprecated in later versions.注意:使用 scipy 1.2.0,因为scipy.misc.toimage在更高版本中已弃用。

import os

import numpy as np
import pandas as pd
import PIL.Image
import scipy.misc
import tensorflow as tf

def get_file_list(root_path):
    """Walk *root_path* and gather the full path of every file found.

    # Returns:
        file_list: _list_, list of full paths to all files found
    """
    collected = []
    for dirpath, _subdirs, filenames in os.walk(root_path):
        collected.extend(os.path.join(dirpath, name) for name in filenames)
    return collected

def gen_rand_img_labels(n_rand_imgs, path_img, path_label):
    """Create *n_rand_imgs* random RGB images and matching label maps on disk.

    Depends on the module-level globals ``img_dim`` (height, width, channels)
    and ``n_classes`` being defined by the calling script.

    Args:
        n_rand_imgs: number of image/label pairs to generate.
        path_img: directory receiving the RGB images ('img<i>.png').
        path_label: directory receiving the label maps ('img<i>.png').
    """
    for i in range(n_rand_imgs):
        # PIL.Image.fromarray takes no cmin/cmax keyword arguments (those
        # belonged to the removed scipy.misc.toimage) and rejects the int64
        # arrays np.random.randint returns, so cast to uint8 explicitly.
        img_rand = np.random.randint(0, 256, size=img_dim).astype(np.uint8)
        PIL.Image.fromarray(img_rand).save(os.path.join(path_img, 'img{}.png'.format(i)))

        label_rand = np.random.randint(0, n_classes, size=(img_dim[0], img_dim[1])).astype(np.uint8)
        print('label_rand.shape: ', label_rand.shape)
        PIL.Image.fromarray(label_rand).save(os.path.join(path_label, 'img{}.png'.format(i)))

if __name__ == "__main__":
    img_dim = (100, 200, 3)  # height, width, channels
    batch_size = 1
    nr_epochs = 1

    n_classes = 5  # number of distinct label values in the segmentation masks
    n_rand_imgs = 10  # number of random image/label pairs to generate
    savepath_img = r''  # TODO: set to a real, writable image directory
    savepath_label = r''  # TODO: set to a real, writable label directory

    # --- generate random images and random labels and save them to disk
    gen_rand_img_labels(n_rand_imgs, savepath_img, savepath_label)


    # --- build Data Generator
    # One row per image: 'path' is the image file path, 'label' holds the
    # full one-hot label array stored as an object in the DataFrame cell.
    train_df = pd.DataFrame(columns=['path', 'label'])

    list_img_names = get_file_list(savepath_img)

    for fname in list_img_names:
        fname_pure = os.path.split(fname)[1]

        # read in png label file as numpy array
        # NOTE(review): scipy.misc.imread was removed in scipy >= 1.2; despite
        # the post's "use scipy 1.2.0" note, this line needs a replacement
        # (e.g. PIL.Image.open + np.asarray) on modern scipy.
        y = scipy.misc.imread(os.path.join(savepath_label, fname_pure))
        # One-hot encode per-pixel class ids: (h, w) -> (h, w, n_classes).
        y = tf.keras.utils.to_categorical(y, n_classes)
        print('shape y: {}'.format(y.shape))
        train_df.loc[len(train_df)] = [fname, y]

    # validation_split reserves 25% of the rows for the "validation" subset.
    datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1/255.0, validation_split=0.25)
    # class_mode="raw" passes the 'label' column values through unchanged.
    train_generator = datagen.flow_from_dataframe(
        dataframe=train_df,
        x_col="path",
        y_col="label",
        subset="training",
        batch_size=batch_size,
        class_mode="raw",
        target_size=(img_dim[0], img_dim[1]))


    valid_generator = datagen.flow_from_dataframe(
        dataframe=train_df,
        x_col="path",
        y_col="label",
        subset="validation",
        batch_size=batch_size,
        class_mode="raw",
        target_size=(img_dim[0], img_dim[1]))

    # --- create the model and train it
    # Single conv layer emitting per-pixel class scores of shape
    # (batch, height, width, n_classes).
    input_ = tf.keras.Input(shape=img_dim)
    x = tf.keras.layers.Conv2D(n_classes, (3, 3), activation='relu', padding='same', name='conv1')(input_)
    model = tf.keras.Model(inputs=input_, outputs=[x])
    model.summary()

    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=["accuracy"])

    # Train model
    STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size
    STEP_SIZE_VALID = valid_generator.n // valid_generator.batch_size
    # NOTE(review): Model.fit_generator is deprecated in TF 2.x; Model.fit
    # accepts generators directly.
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=STEP_SIZE_TRAIN,
                        validation_data=valid_generator,
                        validation_steps=STEP_SIZE_VALID,
                        epochs=nr_epochs)

声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.

 
粤ICP备18138465号  © 2020-2024 STACKOOM.COM