
InvalidArgumentError: Input to reshape is a tensor with 27000 values, but the requested shape has 810000 [Op:Reshape]

I get the following error message when setting up a 3D-GAN for ModelNet10:

InvalidArgumentError: Input to reshape is a tensor with 27000 values, but the requested shape has 810000 [Op:Reshape]

It seems to me that the batches are not being created correctly, which is why the tensor has an invalid shape. I have tried different things but could not get the batching set up. I would really appreciate any hints on how to clean up my code! Thanks in advance!

import time

import numpy as np
import tensorflow as tf
np.random.seed(1)

from tensorflow.keras import layers
from IPython import display

# Load the data
modelnet_path = '/modelnet10.npz'
data = np.load(modelnet_path)
X, Y = data['X_train'], data['y_train']
X_test, Y_test = data['X_test'], data['y_test']
X = X.reshape(X.shape[0], 30, 30, 30, 1).astype('float32')

#Hyperparameters
BUFFER_SIZE = 3991
BATCH_SIZE = 30
LEARNING_RATE = 4e-4
BETA_1 = 5e-1
EPOCHS = 100

#Random seed for image generation
n_examples = 16
noise_dim = 100
seed = tf.random.normal([n_examples, noise_dim])

train_dataset = tf.data.Dataset.from_tensor_slices(X).batch(BATCH_SIZE)

# Build the network
def make_discriminator_model():    
    model = tf.keras.Sequential()
    model.add(layers.Reshape((30, 30, 30, 1), input_shape=(30, 30, 30)))  
    model.add(layers.Conv3D(16, 6, strides=2, activation='relu'))
    model.add(layers.Conv3D(64, 5, strides=2, activation='relu'))
    model.add(layers.Conv3D(64, 5, strides=2, activation='relu'))
    model.add(layers.Flatten())
    model.add(layers.Dense(10))

    return model

discriminator = make_discriminator_model()

def make_generator_model():   
    model = tf.keras.Sequential()
    model.add(layers.Dense(15*15*15*128, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.ReLU())
    model.add(layers.Reshape((15,15,15,128)))    
    model.add(layers.Conv3DTranspose(64, (5,5,5), strides=(1,1,1), padding='valid', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.ReLU())  
    model.add(layers.Conv3DTranspose(32, (5,5,5), strides=(2,2,2), padding='valid', use_bias=False, activation='tanh'))

    return model

generator = make_generator_model()

#Optimizer & Loss function
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss

    return total_loss

def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)

optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE, beta_1=BETA_1)

#Training
def train_step(shapes):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_shapes = generator(noise, training=True)

        real_output = discriminator(shapes, training=True)
        fake_output = discriminator(generated_shapes, training=True)
        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gen_gradients = gen_tape.gradient(gen_loss, generator.trainable_variables)
    disc_gradients = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    optimizer.apply_gradients(zip(gen_gradients, generator.trainable_variables))
    optimizer.apply_gradients(zip(disc_gradients, discriminator.trainable_variables))

def train(dataset, epochs):
    for epoch in range(epochs):
        start = time.time()

        for shape_batch in dataset:
            train_step(shape_batch)

        display.clear_output(wait=True)
        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))

    display.clear_output(wait=True)      

train(X_test, EPOCHS)

X_test is just a list, so in your training loop only a single sample (30*30*30 = 27000 values) is fed into the model, but the model itself expects a full batch of 30 (batch size) * 30 * 30 * 30 = 810000 values:

modelnet_path = '/modelnet10.npz'
data = np.load(modelnet_path)
X, Y = data['X_train'], data['y_train']
X_test, Y_test = data['X_test'], data['y_test']
X = X.reshape(X.shape[0], 30, 30, 30, 1).astype('float32')

...
train_dataset = tf.data.Dataset.from_tensor_slices(X).batch(BATCH_SIZE)
...
def train(dataset, epochs):
    for epoch in range(epochs):
        start = time.time()

        for shape_batch in dataset:
            train_step(shape_batch)

        display.clear_output(wait=True)
        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))

    display.clear_output(wait=True)      

train(X_test, EPOCHS)
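To make the mismatch visible, you can compare what each loop actually yields (a quick hypothetical check, assuming X_test has shape (N, 30, 30, 30) as loaded above):

# Iterating a raw NumPy array yields one sample per step:
for shape_batch in X_test:
    print(shape_batch.shape)  # (30, 30, 30) -> 27000 values
    break

# Iterating the batched tf.data.Dataset yields a full batch per step:
for shape_batch in train_dataset:
    print(shape_batch.shape)  # (30, 30, 30, 30, 1) -> 810000 values
    break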

Consider using the train_dataset you created for training, or turn X_test into a tf.data.Dataset:

train(train_dataset, EPOCHS)
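If you do want to train on the test split, a minimal sketch of the second option (the name test_dataset is introduced here for illustration; drop_remainder=True is assumed because train_step draws noise for a fixed BATCH_SIZE):

# Match the reshape applied to X above, then batch the same way.
X_test = X_test.reshape(X_test.shape[0], 30, 30, 30, 1).astype('float32')
test_dataset = tf.data.Dataset.from_tensor_slices(X_test).batch(BATCH_SIZE, drop_remainder=True)

train(test_dataset, EPOCHS)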
