
Matrix size-incompatible: In[0]: [16,1024], In[1]: [16384,1] in DCGAN

I'm trying to build a DCGAN.

I'm getting the following error:

InvalidArgumentError: Matrix size-incompatible: In[0]: [16,1024], In[1]: [16384,1]
     [[{{node model_69/dense_50/BiasAdd}}]]

I tried adding a Reshape layer in the discriminator, but that didn't help.

My images have shape (64, 64, 3).

#Generator
def generator(gen_inputs):
    # 4x4x1024
    inputs = Input(shape=(gen_inputs,))
    x = Dense(4 * 4 * 1024, activation='relu')(inputs)
    x = Reshape((4, 4, 1024))(x)
    x = BatchNormalization()(x)

    # 8x8x512
    x = UpSampling2D()(x)
    x = Conv2D(512, (5, 5), strides=(2, 2), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)

    # 16x16x256
    x = UpSampling2D()(x)
    x = Conv2D(256, (5, 5), strides=(2, 2), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)

    # 32x32x128
    x = UpSampling2D()(x)
    x = Conv2D(128, (5, 5), strides=(2, 2), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)

    # 64x64x3
    x = UpSampling2D()(x)
    out = Conv2D(3, (5, 5), strides=(2, 2), activation='tanh', padding='same')(x)

    return Model(inputs, out)

def discriminator(discr_inputs):

    # 32x32x128
    x = Conv2D(128, (5, 5), strides=(2, 2), padding='same')(discr_inputs)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)

    # 16x16x256
    x = Conv2D(256, (5, 5), strides=(2, 2), padding='same')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)

    # 8x8x512
    x = Conv2D(512, (5, 5), strides=(2, 2), padding='same')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)

    # 4x4x1024
    x = Conv2D(1024, (5, 5), strides=(2, 2), padding='same')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)

    x = Flatten()(x)
    #x = Reshape((-1,))(x)
    out = Dense(1, activation='sigmoid')(x)

    return Model(discr_inputs, out)


def build_gan(gen_inputs, discr_inputs, optimizer):
    # discriminator
    discr = discriminator(discr_inputs)
    discr.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    # generator
    gen = generator(gen_inputs)
    gen.compile(loss='binary_crossentropy',
                optimizer=optimizer)

    print('discriminator:', discr.summary())
    # setup    
    z = Input(shape=(100,))
    img = gen(z)

    discr.trainable = False
    discr_out = discr(img)

    print('discriminator_out:', discr_out)

    model = Model(z, discr_out)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer)

    print('gan modell:', model.summary())
    return model


def train(gen_inputs, discr_inputs, optimizer, epochs, image_path, batch_size, save_interval=50):

    gan = build_gan(gen_inputs, discr_inputs, optimizer)
    X_train = load_imgs()

    # Rescale images from -1 to 1
    X_train = (X_train.astype(np.float32) - 127.5) / 127.5

    half_batch = batch_size // 2

    for epoch in range(epochs):

        # Train Generator
        noise = np.random.normal(0, 1, (batch_size, 100))
        gen_loss = gan.train_on_batch(noise, np.ones((batch_size, 1)))

        # Train Discriminator
        idx = np.random.randint(0, X_train.shape[0], half_batch)
        imgs = X_train[idx]

        # Sample noise 
        noise = np.random.normal(0, 1, (half_batch, 100))
        gen_imgs = gen.predict(noise)

        # Train the discriminator 
        discr_loss_real = discr.train_on_batch(imgs, np.ones((half_batch, 1)))
        discr_loss_fake = discr.train_on_batch(gen_imgs, np.zeros((half_batch, 1)))
        discr_loss = 0.5 * np.add(discr_loss_real, discr_loss_fake)




gen_inputs = 100
discr_inputs = Input(shape=images.shape[1:])
batch_size = 16

discriminator:

_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_59 (InputLayer)        (None, 64, 64, 3)         0         
_________________________________________________________________
conv2d_201 (Conv2D)          (None, 32, 32, 128)       9728      
_________________________________________________________________
leaky_re_lu_99 (LeakyReLU)   (None, 32, 32, 128)       0         
_________________________________________________________________
batch_normalization_200 (Bat (None, 32, 32, 128)       512       
_________________________________________________________________
conv2d_202 (Conv2D)          (None, 16, 16, 256)       819456    
_________________________________________________________________
leaky_re_lu_100 (LeakyReLU)  (None, 16, 16, 256)       0         
_________________________________________________________________
batch_normalization_201 (Bat (None, 16, 16, 256)       1024      
_________________________________________________________________
conv2d_203 (Conv2D)          (None, 8, 8, 512)         3277312   
_________________________________________________________________
leaky_re_lu_101 (LeakyReLU)  (None, 8, 8, 512)         0         
_________________________________________________________________
batch_normalization_202 (Bat (None, 8, 8, 512)         2048      
_________________________________________________________________
conv2d_204 (Conv2D)          (None, 4, 4, 1024)        13108224  
_________________________________________________________________
leaky_re_lu_102 (LeakyReLU)  (None, 4, 4, 1024)        0         
_________________________________________________________________
batch_normalization_203 (Bat (None, 4, 4, 1024)        4096      
_________________________________________________________________
flatten_25 (Flatten)         (None, 16384)             0         
_________________________________________________________________
dense_50 (Dense)             (None, 1)                 16385     
=================================================================
Total params: 17,238,785
Trainable params: 17,234,945
Non-trainable params: 3,840


discriminator_out: Tensor("model_69/dense_50/Sigmoid:0", shape=(?, 1), dtype=float32)

gan modell:

Layer (type)                 Output Shape              Param #   
=================================================================
input_61 (InputLayer)        (None, 100)               0         
_________________________________________________________________
model_70 (Model)             (None, 4, 4, 3)           18876163  
_________________________________________________________________
model_69 (Model)             (None, 1)                 17238785  
=================================================================
Total params: 36,114,948
Trainable params: 18,872,323
Non-trainable params: 17,242,625

The output tensor of your generator has shape (None, 4, 4, 3) instead of the expected (None, 64, 64, 3). This is because each UpSampling2D doubles the spatial size, but the stride-2 convolution that follows it immediately halves it again, so the feature maps never grow beyond 4x4. When such a 4x4x3 image is passed to the discriminator, which was built for 64x64x3 inputs, the Flatten layer produces only 1024 values instead of the 16384 that dense_50 expects, which is exactly the mismatch in the error: [16, 1024] vs. [16384, 1].
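
A quick way to see this cancellation (a minimal sketch, assuming tf.keras imports; adapt to standalone Keras if that is what you use):

from tensorflow.keras.layers import Input, UpSampling2D, Conv2D
from tensorflow.keras.models import Model

inp = Input(shape=(4, 4, 1024))
x = UpSampling2D()(inp)                                     # doubles to (None, 8, 8, 1024)
x = Conv2D(512, (5, 5), strides=(2, 2), padding='same')(x)  # halves back to (None, 4, 4, 512)
print(Model(inp, x).output_shape)                           # (None, 4, 4, 512)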

The following generator produces images of size 64x64x3; the strides are removed from the Conv2D layers so that each UpSampling2D actually doubles the resolution:

def generator(gen_inputs):
    # 4x4x1024
    inputs = Input(shape=(gen_inputs,))
    x = Dense(4 * 4 * 1024, activation='relu')(inputs)
    x = Reshape((4, 4, 1024))(x)
    x = BatchNormalization()(x)

    # 8x8x512
    x = UpSampling2D()(x)
    x = Conv2D(512, (5, 5), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)

    # 16x16x256
    x = UpSampling2D()(x)
    x = Conv2D(256, (5, 5), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)

    # 32x32x128
    x = UpSampling2D()(x)
    x = Conv2D(128, (5, 5), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)

    # 64x64x3
    x = UpSampling2D()(x)
    out = Conv2D(3, (5, 5), activation='tanh', padding='same')(x)

    return Model(inputs, out)
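
A short sanity check (just a sketch, reusing the generator function above and assuming tf.keras) to confirm that the corrected generator emits 64x64x3 images, which is the shape the discriminator was built for:

import numpy as np

gen = generator(100)
print(gen.output_shape)                    # (None, 64, 64, 3)

noise = np.random.normal(0, 1, (16, 100))
fake_imgs = gen.predict(noise)
print(fake_imgs.shape)                     # (16, 64, 64, 3) -- matches the discriminator input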
