
Can I augment images and masks in Keras just using flow() instead of flow_from_directory?

I have been trying to train a CNN with Keras and apply data augmentation to a set of images and their segmentation masks. The online examples say that, to do this, I should create two separate generators with flow_from_directory() and then zip them, roughly as sketched below.
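For context, a rough sketch of that flow_from_directory() approach (the directory paths are placeholders, not my actual data layout):

from keras.preprocessing.image import ImageDataGenerator

data_gen_args = dict(rotation_range=5,
                     width_shift_range=0.1,
                     height_shift_range=0.1)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)

seed = 1

# class_mode=None so each generator yields only the arrays themselves;
# the shared seed keeps the random transformations in sync.
image_generator = image_datagen.flow_from_directory('data/images',
                                                    class_mode=None, seed=seed)
mask_generator = mask_datagen.flow_from_directory('data/masks',
                                                  class_mode=None, seed=seed)

train_generator = zip(image_generator, mask_generator)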

But instead of doing that, could I just set up two numpy arrays for the images and masks and use the flow() function, like this:

# Create image generator
data_gen_args = dict(rotation_range=5,
                     width_shift_range=0.1,
                     height_shift_range=0.1,
                     validation_split=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)

seed = 1

# Create training and validation generators including masks
train_generator = image_datagen.flow(images, masks, seed=seed, subset='training')
val_train_generator = image_datagen.flow(images, masks, seed=seed, subset='validation')   

# Train model
model.fit_generator(train_generator, steps_per_epoch=50,
                validation_data = val_train_generator,
                validation_steps = 10, shuffle=True, epochs=20)

And if not, why not? It seems that if I iterate through the generator I only get the images back, not the masks, so I'm concerned it isn't doing what I want.

You need a custom generator that applies the same augmentation to both the images and the masks.

The Keras ImageDataGenerator takes two arguments (images, and labels or masks) and applies the transformations only to the first one (the images). You can use my generator below:

# Create image generator
data_gen_args = dict(rotation_range=5,
                     width_shift_range=0.1,
                     height_shift_range=0.1,
                     validation_split=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)

seed = 1

def XYaugmentGenerator(X1, y, seed, batch_size):
    # Two flows on the same ImageDataGenerator with the same seed receive
    # identical random transformations: the first yields augmented images,
    # the second (with the arguments swapped) yields the matching masks.
    genX1 = image_datagen.flow(X1, y, batch_size=batch_size, seed=seed)
    genX2 = image_datagen.flow(y, X1, batch_size=batch_size, seed=seed)
    while True:
        X1i = genX1.next()
        X2i = genX2.next()

        yield X1i[0], X2i[0]


# Train model
model.fit_generator(XYaugmentGenerator(images, masks, seed, batch_size),
                    steps_per_epoch=np.ceil(float(len(images)) / float(batch_size)),
                    validation_data=XYaugmentGenerator(images_valid, masks_valid, seed, batch_size),
                    validation_steps=np.ceil(float(len(images_valid)) / float(batch_size)),
                    shuffle=True, epochs=20)

Based on my experiments, you cannot just use zip(img_generator, mask_generator). Although no error is raised, it runs forever; it seems to return an infinite generator. To get around this, you can use while True: yield (img_generator.next(), mask_generator.next()), as in the sketch below.
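A minimal sketch of that wrapper, assuming img_generator and mask_generator are two flows created with the same augmentation settings and the same seed:

def combined_generator(img_generator, mask_generator):
    # Pull one batch from each flow at a time; with the same seed both
    # flows apply identical random transformations, so each image batch
    # stays paired with its mask batch.
    while True:
        yield (img_generator.next(), mask_generator.next())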

In deep learning, for segmentation problems, you can use a custom 'DataGenerator' class instead of the Keras ImageDataGenerator.

How to write a custom DataGenerator

Writing a custom DataGenerator is helpful when dealing with image segmentation problems.

Solution:

If your training and test images are in a folder and the masks and labels are in a CSV, use the following custom DataGenerator:

import numpy as np
import cv2
import keras
import albumentations as albu
# Note: np_resize and build_masks are helper functions from the original
# notebook (resizing an image array, and building mask arrays from
# run-length encodings); they are assumed to be defined elsewhere.

class DataGenerator(keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, df, target_df=None, mode='fit',
                 base_path='../train_images',
                 batch_size=16, dim=(1400, 2100), n_channels=3, reshape=None,
                 augment=False, n_classes=2, random_state=42, shuffle=True):
        self.dim = dim
        self.batch_size = batch_size
        self.df = df
        self.mode = mode
        self.base_path = base_path
        self.target_df = target_df
        self.list_IDs = list_IDs
        self.reshape = reshape
        self.n_channels = n_channels
        self.augment = augment
        self.n_classes = n_classes
        self.shuffle = shuffle
        self.random_state = random_state

        self.on_epoch_end()
        np.random.seed(self.random_state)

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        list_IDs_batch = [self.list_IDs[k] for k in indexes]

        X = self.__generate_X(list_IDs_batch)

        if self.mode == 'fit':
            y = self.__generate_y(list_IDs_batch)

            if self.augment:
                X, y = self.__augment_batch(X, y)

            return X, y

        elif self.mode == 'predict':
            return X

        else:
            raise AttributeError('The mode parameter should be set to "fit" or "predict".')

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.seed(self.random_state)
            np.random.shuffle(self.indexes)

    def __generate_X(self, list_IDs_batch):
        'Generates data containing batch_size samples'
        # Initialization
        if self.reshape is None:
            X = np.empty((self.batch_size, *self.dim, self.n_channels))
        else:
            X = np.empty((self.batch_size, *self.reshape, self.n_channels))

        # Generate data
        for i, ID in enumerate(list_IDs_batch):
            im_name = self.df['ImageId'].iloc[ID]
            img_path = f"{self.base_path}/{im_name}"
            img = self.__load_rgb(img_path)

            if self.reshape is not None:
                img = np_resize(img, self.reshape)

            # Store samples
            X[i,] = img

        return X

    def __generate_y(self, list_IDs_batch):
        if self.reshape is None:
            y = np.empty((self.batch_size, *self.dim, self.n_classes), dtype=int)
        else:
            y = np.empty((self.batch_size, *self.reshape, self.n_classes), dtype=int)

        for i, ID in enumerate(list_IDs_batch):
            im_name = self.df['ImageId'].iloc[ID]
            image_df = self.target_df[self.target_df['ImageId'] == im_name]

            rles = image_df['EncodedPixels'].values

            if self.reshape is not None:
                masks = build_masks(rles, input_shape=self.dim, reshape=self.reshape)
            else:
                masks = build_masks(rles, input_shape=self.dim)

            y[i, ] = masks

        return y

    def __load_grayscale(self, img_path):
        img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        img = img.astype(np.float32) / 255.
        img = np.expand_dims(img, axis=-1)

        return img

    def __load_rgb(self, img_path):
        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255.

        return img

    def __random_transform(self, img, masks):
        composition = albu.Compose([
            albu.HorizontalFlip(),
            albu.VerticalFlip(),
            albu.ShiftScaleRotate(rotate_limit=30, shift_limit=0.1)
            #albu.ShiftScaleRotate(rotate_limit=90, shift_limit=0.2)
        ])

        composed = composition(image=img, mask=masks)
        aug_img = composed['image']
        aug_masks = composed['mask']

        return aug_img, aug_masks

    def __augment_batch(self, img_batch, masks_batch):
        for i in range(img_batch.shape[0]):
            img_batch[i, ], masks_batch[i, ] = self.__random_transform(
                img_batch[i, ], masks_batch[i, ])

        return img_batch, masks_batch
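For completeness, a minimal usage sketch of this class. The DataFrames (train_df, mask_df), index lists (train_idx, val_idx) and the compiled model are placeholders for whatever your project defines, not part of the original answer:

# Hypothetical usage: train_df has an 'ImageId' column, mask_df holds the
# 'EncodedPixels' run-length encodings, train_idx/val_idx index into train_df.
train_generator = DataGenerator(train_idx, df=train_df, target_df=mask_df,
                                mode='fit', base_path='../train_images',
                                batch_size=16, reshape=(320, 480),
                                augment=True, n_classes=2)
val_generator = DataGenerator(val_idx, df=train_df, target_df=mask_df,
                              mode='fit', base_path='../train_images',
                              batch_size=16, reshape=(320, 480),
                              augment=False, n_classes=2)

# keras.utils.Sequence objects can be passed directly to fit_generator.
model.fit_generator(train_generator,
                    validation_data=val_generator,
                    epochs=20)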

Reference:

  1. http://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
