
TypeError when saving TensorFlow Keras model as h5 file

I am trying to build an autoencoder in order to get a vector representation for images (x: 640, y: 480). However, when I try to call encoder.save("encoder.h5") I get the following error:

TypeError: ('Not JSON Serializable:', <tf.Variable 'batch_normalization/gamma:0' shape=(32,) dtype=float32, numpy=
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
       1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
      dtype=float32)>)
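For context, saving to h5 serializes the model config to JSON, and that step is what raises the error; the same failure can apparently be reproduced directly as a quick check:

print(encoder.to_json())  # presumably raises the same 'Not JSON Serializable' TypeError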

I'm fairly sure the model itself works: fit runs fine, and after fitting I can call encoder.predict() and get out the encoded vectors (length 60). My code is:

import os
import tensorflow as tf
from tensorflow.python.keras.layers import Input, UpSampling2D, Add, Conv2D, MaxPooling2D, LeakyReLU
import cv2
from tensorflow.python.keras import Model
from tensorflow.python.keras import layers, losses
import h5py

def get_encoder(shape=(640, 480, 3)):
    def res_block(x, n_features):
        _x = x
        x = tf.keras.layers.BatchNormalization()(x)
        x = LeakyReLU()(x)

        x = Conv2D(n_features, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
        x = Add()([_x, x])
        return x

    inp = Input(shape=shape)

    # 640 x 480
    x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same')(inp)
    x = tf.keras.layers.BatchNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = LeakyReLU()(x)

    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    # 320 x 240
    x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    for _ in range(2):
        x = res_block(x, 32)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    # 160 x 120
    x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    for _ in range(2):
        x = res_block(x, 32)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    # 80 x 60
    x = Conv2D(1, kernel_size=(1, 1), strides=(1, 1), padding='same')(x)
    return Model(inp, x)


def get_decoder(shape=(240, 180, 3)):
    inp = Input(shape=shape)

    # 60 x 80
    x = UpSampling2D((2, 2))(inp)
    x = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = LeakyReLU()(x)

    # 120 x 160
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = LeakyReLU()(x)

    # 240 x 320
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = LeakyReLU()(x)

    # 480 x 640
    x = Conv2D(1, kernel_size=(1, 1), strides=(1, 1), padding='same')(x)
    return Model(inp, x)


encoder = get_encoder((480, 640, 3))
decoder = get_decoder((60, 80, 1))
inp = Input((480, 640, 3))
e = encoder(inp)
d = decoder(e)
autoencoder = Model(inp, d)

autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())

batch_size = 8
SHAPE = (480, 640)
IMAGES = "pathToImages"
image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
train_gen = image_generator.flow_from_directory(
    os.path.join(IMAGES, "train"),
    class_mode="input", target_size=SHAPE, batch_size=batch_size,
)
val_gen = image_generator.flow_from_directory(
    os.path.join(IMAGES, "test"),
    class_mode="input", target_size=SHAPE, batch_size=batch_size,
)

for i in range(10):
    autoencoder.fit(train_gen, validation_data=val_gen, epochs=1, steps_per_epoch=10, validation_steps=5, batch_size=batch_size)
    encoder.save('encoder.h5')
    decoder.save('decoder.h5')
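
As an aside, the per-epoch saving in the loop could presumably also be done with a ModelCheckpoint callback set to save weights only (a rough sketch with a made-up filename; note it saves the full autoencoder's weights rather than the encoder and decoder separately):

ckpt = tf.keras.callbacks.ModelCheckpoint(
    'autoencoder_weights.h5',  # hypothetical filename
    save_weights_only=True,    # write variable values only; skips the JSON config dump
)
autoencoder.fit(train_gen, validation_data=val_gen, epochs=10,
                steps_per_epoch=10, validation_steps=5, callbacks=[ckpt])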

I based it on this notebook: https://www.kaggle.com/code/miklgr500/image2vec-autoencoder/notebook

Any ideas on how I can fix it, or alternative methods for saving the model weights?

Edit: using encoder.save_weights('encoder.h5') works.
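
For completeness, a minimal sketch of that workaround, assuming the same get_encoder definition as above (load_weights needs the identical architecture rebuilt first):

# save_weights writes only the variable values, so the model config
# never has to be JSON-serialized
encoder.save_weights('encoder.h5')

# to restore, rebuild the same architecture and load the weights back
restored_encoder = get_encoder((480, 640, 3))
restored_encoder.load_weights('encoder.h5')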
