
Keras functional model produces subclassing error

I am trying to create an autoencoder in Keras using the functional API. Everything works fine; however, when I try to load the saved model, it throws an error related to the Model subclassing API. It also throws a signature-related error, which I believe is unrelated to the model loading problem.

I am using TensorFlow 2.1 from Anaconda on Windows 10 and running the code in Spyder 4.

My code, with dummy data:

import time
import pickle

import h5py
import numpy as np
import matplotlib.pyplot as plt

from tensorflow.keras import backend as K
from tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,
                                     Conv2DTranspose, Dense, Flatten, Input,
                                     LeakyReLU, Reshape)
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import Sequence

def build_autoencoder(width, height, depth, filters=[32], latentDim=64):
    # initialize the input shape to be "channels last" along with
    # the channels dimension itself
    inputShape = (height, width, depth)
    chanDim = -1

    # define the input to the encoder
    inputs = Input(shape=inputShape)
    x = inputs

    # loop over the number of filters
    for f in filters:
        # apply a CONV => RELU => BN operation
        x = Conv2D(f, (3, 3), strides=2, padding="same")(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = BatchNormalization(axis=chanDim)(x)

    # flatten the network and then construct our latent vector
    volumeSize = K.int_shape(x)
    x = Flatten()(x)
    latent = Dense(latentDim)(x)

    # build the encoder model
    encoder = Model(inputs, latent, name="encoder")

    # start building the decoder model which will accept the
    # output of the encoder as its inputs
    latentInputs = Input(shape=(latentDim,))
    x = Dense(np.prod(volumeSize[1:]))(latentInputs)
    x = Reshape((volumeSize[1], volumeSize[2], volumeSize[3]))(x)

    # loop over our number of filters again, but this time in
    # reverse order
    for f in filters[::-1]:
        # apply a CONV_TRANSPOSE => RELU => BN operation
        x = Conv2DTranspose(f, (3, 3), strides=2, padding="same")(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = BatchNormalization(axis=chanDim)(x)

    # apply a single CONV_TRANSPOSE layer used to recover the
    # original depth of the image
    x = Conv2DTranspose(depth, (3, 3), padding="same")(x)
    outputs = Activation("sigmoid")(x)

    # build the decoder model
    decoder = Model(latentInputs, outputs, name="decoder")

    # our autoencoder is the encoder + decoder
    autoencoder = Model(inputs, decoder(encoder(inputs)), name="autoencoder")

    # return a 3-tuple of the encoder, decoder, and autoencoder
    return (encoder, decoder, autoencoder)

class DataGenerator(Sequence):
    """Generates data for Keras
    Sequence based data generator. Suitable for building 
    data generator for training and prediction.
    """
    def __init__(self, indexes, data_path, dataset_name,
                 to_fit=True, batch_size=16, dim=(256, 256),
                 n_channels=3, shuffle=True):
        """Initialization
        :param num_samples: number of samples in dataset
        :param data_path: path to data file location        
        :param dataset_name: name of datset in datafile
        :param to_fit: True to return X and y, False to return X only
        :param batch_size: batch size at each iteration
        :param dim: tuple indicating image dimension
        :param n_channels: number of image channels
        :param shuffle: True to shuffle label indexes after every epoch
        """
        self.indexes = np.sort(indexes)
        self.data_path = data_path
        self.dataset_name = dataset_name
        self.to_fit = to_fit
        self.batch_size = batch_size
        self.dim = dim
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        """Denotes the number of batches per epoch
        :return: number of batches per epoch
        """
        return int(np.floor(len(self.indexes) / self.batch_size))

    def __getitem__(self, index):
        """Generate one batch of data
        :param index: index of the batch
        :return: X and y when fitting. X only when predicting
        """
        # Generate indexes of the batch
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]

        # Generate data
        X = self._generate_X(indexes)

        # normalise images
        X = np.divide(X, 255.0)

        if self.to_fit:
            return X, X
        else:
            return X

    def on_epoch_end(self):
        """Updates indexes after each epoch
        """
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def _generate_X(self, indexes):
        """Generates data containing batch_size images
        :param list_IDs_temp: list of label ids to load
        :return: batch of images
        """
        # Generate data
        with h5py.File(self.data_path, 'r') as f:
            indexes = np.sort(indexes)
            X = f[self.dataset_name][indexes, :, :, :]
        return X



DATA_PATH = "simulation_data.hdf5"
DATA_NAME = "visual_obs"
EPOCHS = 3
BATCH = 16
DIM = [256, 256]
CHANNELS = 3
NUM_SAMPLES = 100
# Dummy data
with h5py.File('simulation_data.hdf5', 'w') as f:
    vis_data = f.create_dataset('visual_obs', (NUM_SAMPLES, 256, 256, 3))
    vis_data[:, :, :, :] = np.random.rand(NUM_SAMPLES, 256, 256, 3)
# construct training data generator and validation generator
number_train_samples = int(np.floor(NUM_SAMPLES*0.7))
number_val_samples = int(np.floor(NUM_SAMPLES*0.2))
indexes = np.arange(NUM_SAMPLES)
np.random.shuffle(indexes)
train_indexes = indexes[:number_train_samples]
val_indexes = indexes[number_train_samples:number_train_samples+number_val_samples]
test_indexes = indexes[number_train_samples+number_val_samples:]


train_generator = DataGenerator(train_indexes, DATA_PATH, DATA_NAME,
                                to_fit=True, batch_size=BATCH, dim=DIM,
                                n_channels=CHANNELS, shuffle=True)

val_generator = DataGenerator(val_indexes, DATA_PATH, DATA_NAME,
                              to_fit=True, batch_size=BATCH, dim=DIM,
                              n_channels=CHANNELS, shuffle=True)


# construct our convolutional autoencoder
(encoder, decoder, autoencoder) = build_autoencoder(*DIM, CHANNELS)
opt = Adam(lr=1e-3)
autoencoder.compile(loss="mse", optimizer=opt)

# train the convolutional autoencoder
H = autoencoder.fit(train_generator,
    epochs=EPOCHS, validation_data=val_generator,
    workers=4, use_multiprocessing=False)
ts = time.time()
autoencoder.save("model_test", save_format="tf")
loaded_model = load_model("model_test")

The error:

WARNING: AutoGraph could not transform <function canonicalize_signatures.<locals>.signature_wrapper at 0x00000138CDBF2948> and will run it as-is.
Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output.
Cause: 
INFO:tensorflow:Assets written to: model_test\assets
Traceback (most recent call last):

  File "C:\Users\seano\Thesis\test.py", line 190, in <module>
    loaded_model = load_model("model_test")

  File "C:\Users\seano\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\saving\save.py", line 150, in load_model
    return saved_model_load.load(filepath, compile)

  File "C:\Users\seano\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\saving\saved_model\load.py", line 89, in load
    model = tf_load.load_internal(path, loader_cls=KerasObjectLoader)

  File "C:\Users\seano\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\saved_model\load.py", line 552, in load_internal
    export_dir)

  File "C:\Users\seano\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\saving\saved_model\load.py", line 119, in __init__
    self._finalize()

  File "C:\Users\seano\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\saving\saved_model\load.py", line 157, in _finalize
    created_layers={layer.name: layer for layer in node.layers})

  File "C:\Users\seano\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\engine\network.py", line 1903, in reconstruct_from_config
    process_node(layer, node_data)

  File "C:\Users\seano\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\engine\network.py", line 1851, in process_node
    output_tensors = layer(input_tensors, **kwargs)

  File "C:\Users\seano\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\engine\base_layer.py", line 773, in __call__
    outputs = call_fn(cast_inputs, *args, **kwargs)

  File "C:\Users\seano\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\engine\network.py", line 712, in call
    raise NotImplementedError('When subclassing the `Model` class, you should'

NotImplementedError: When subclassing the `Model` class, you should implement a `call` method.

I think the root cause of the problem is the use of DataGenerator with the model (these are referred to as custom_objects).

When a model involves custom_objects, loading it works slightly differently: you need to pass custom_objects to load_model, as shown below.

loaded_model = load_model('model_test', custom_objects={'DataGenerator': DataGenerator})
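
As a quick sanity check (a minimal sketch reusing the test_indexes split and the constants defined in the question; the test_generator name and the batch_size=1 choice are my own, the latter because the dummy test split only holds about ten samples), you can run the reloaded model on the held-out data:

# hypothetical sanity check: feed the held-out split through the reloaded model
# batch_size=1 because the dummy test split contains only ~10 samples
test_generator = DataGenerator(test_indexes, DATA_PATH, DATA_NAME,
                               to_fit=False, batch_size=1, dim=DIM,
                               n_channels=CHANNELS, shuffle=False)
reconstructions = loaded_model.predict(test_generator)
print(reconstructions.shape)  # expect (len(test_indexes), 256, 256, 3)

If load_model succeeds and the prediction comes back with the expected shape, the model round-tripped through saving and loading correctly.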

