
How to fix a tuple argument when passing it to a Conv2D layer in TensorFlow

I'm trying to reproduce the code from this web page, but implementing the model as a class with some modifications. The code, minus the testing part, is:

%matplotlib inline
from itertools import islice
import itertools
from itertools import count 

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tqdm import trange
from tensorflow.keras import Model
from keras.utils import to_categorical


(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

# Add a channel dimension and rescale pixel values from the range [0, 255] to [0, 1]
x_train = x_train.reshape((60000, 28, 28, 1)).astype('float32') / 255.0
x_test = x_test.reshape((10000, 28, 28, 1)).astype('float32') / 255.0

train_labels = to_categorical(y_train)
test_labels = to_categorical(y_test)

print("Number of original training examples:", len(x_train))
print("Number of original test examples:", len(x_test))

data_mean = 0.1307
data_std = 0.3081

# Normalize the data
x_train = (x_train - data_mean) / data_std
x_test = (x_test - data_mean) / data_std
num_classes = 10
input_shape = (28, 28, 1)

# Decay the learning rate at a base rate of gamma roughly every epoch, which
# is len(x_train) steps
scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
    1,
    decay_steps=len(x_train),
    decay_rate=0.7)

# Define the optimizer to use for gradient descent
optimizer = tf.keras.optimizers.Adadelta(scheduler)
loss_object = tf.keras.losses.CategoricalCrossentropy()

train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')

@tf.function
def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    train_loss(loss)
    train_accuracy(labels, predictions)

@tf.function
def test_step(images, labels):
    predictions = model(images)
    t_loss = loss_object(labels, predictions)

    test_loss(t_loss)
    test_accuracy(labels, predictions)


class MyModel(tf.keras.models.Sequential):
    def __init__(self):
        super(MyModel, self).__init__()
        self.c1 = tf.keras.layers.Conv2D(32, (3,3), strides=(1,1),
                                      padding='valid', 
                                      activation='relu',
                                      input_shape=input_shape),
        self.max2d = tf.keras.layers.MaxPool2D(),
        self.c2 = tf.keras.layers.Conv2D(64, (3,3), strides=(1,1),
                                        padding='valid',
                                        activation='relu'),
        self.dropout1 = tf.keras.layers.Dropout(0.25),
        self.flatten = tf.keras.layers.Flatten(),
        self.dense1 = tf.keras.layers.Dense(64, activation='relu'),
        self.dropout2 = tf.keras.layers.Dropout(0.5),
        self.dense2 = tf.keras.layers.Dense(num_classes, activation='softmax')

    def call(self, x):
        x = self.c1(x)
        x = self.c2 (x)
        x = self.max2d(x)
        x = self.dropout1(x)
        x = self.flatten(x)
        x = self.dense1(x)
        x = self.dropout2(x)
        x = self.dense2(x)
        return x

model = MyModel()

train_ds = tf.data.Dataset.from_tensor_slices((x_train, train_labels)).shuffle(10000).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, test_labels)).batch(32)

EPOCHS = 5

for epoch in range(EPOCHS):
    for images, labels in train_ds:        
        train_step(images, labels)  # <----- Boom!!

    for test_images, test_labels in test_ds:
        test_step(test_images, test_labels)

The complete error I get in Visual Studio Code is:

TypeError                                 Traceback (most recent call last)
d:\Machine_Learning\tensorflow_test.ipynb Cell 5' in <module>
      6 for epoch in range(EPOCHS):
      7     for images, labels in train_ds:        
----> 8         train_step(images, labels)
     10     for test_images, test_labels in test_ds:
     11         test_step(test_images, test_labels)

File c:\Python38\lib\site-packages\tensorflow\python\util\traceback_utils.py:153, in filter_traceback.<locals>.error_handler(*args, **kwargs)
    151 except Exception as e:
    152   filtered_tb = _process_traceback_frames(e.__traceback__)
--> 153   raise e.with_traceback(filtered_tb) from None
    154 finally:
    155   del filtered_tb

File ~\AppData\Local\Temp\__autograph_generated_file14pr1250.py:9, in outer_factory.<locals>.inner_factory.<locals>.tf__train_step(images, labels)
      7 with ag__.FunctionScope('train_step', 'fscope', ag__.ConversionOptions(recursive=True, user_requested=True, optional_features=(), internal_convert_user_code=True)) as fscope:
      8     with ag__.ld(tf).GradientTape() as tape:
----> 9         predictions = ag__.converted_call(ag__.ld(model), (ag__.ld(images),), None, fscope)
     10         loss = ag__.converted_call(ag__.ld(loss_object), (ag__.ld(labels), ag__.ld(predictions)), None, fscope)
     11     gradients = ag__.converted_call(ag__.ld(tape).gradient, (ag__.ld(loss), ag__.ld(model).trainable_variables), None, fscope)

File c:\Python38\lib\site-packages\keras\utils\traceback_utils.py:69, in filter_traceback.<locals>.error_handler(*args, **kwargs)
     66   filtered_tb = _process_traceback_frames(e.__traceback__)
     67   # To get the full stack trace, call:
     68   # `tf.debugging.disable_traceback_filtering()`
---> 69   raise e.with_traceback(filtered_tb) from None
     70 finally:
     71   del filtered_tb

File ~\AppData\Local\Temp\__autograph_generated_filecwsasoc4.py:10, in outer_factory.<locals>.inner_factory.<locals>.tf__call(self, x)
      8 do_return = False
      9 retval_ = ag__.UndefinedReturnValue()
---> 10 x = ag__.converted_call(ag__.ld(self).c1, (ag__.ld(x),), None, fscope)
     11 x = ag__.converted_call(ag__.ld(self).c2, (ag__.ld(x),), None, fscope)
     12 x = ag__.converted_call(ag__.ld(self).max2d, (ag__.ld(x),), None, fscope)

TypeError: in user code:

    File "C:\Users\user\AppData\Local\Temp\ipykernel_17788\3342806354.py", line 21, in train_step  *
        predictions = model(images)
    File "c:\Python38\lib\site-packages\keras\utils\traceback_utils.py", line 69, in error_handler  **
        raise e.with_traceback(filtered_tb) from None
    File "C:\Users\user\AppData\Local\Temp\__autograph_generated_filecwsasoc4.py", line 10, in tf__call
        x = ag__.converted_call(ag__.ld(self).c1, (ag__.ld(x),), None, fscope)

    TypeError: Exception encountered when calling layer "my_model_7" (type MyModel).
    
    in user code:
    
        File "C:\Users\user\AppData\Local\Temp\ipykernel_17788\2012441124.py", line 56, in call  *
            x = self.c1(x)
    
        TypeError: '_TupleWrapper' object is not callable
    
    
    Call arguments received by layer "my_model_7" (type MyModel):
      • x=tf.Tensor(shape=(32, 28, 28, 1), dtype=float32)

So I understand I'm passing a tuple to my c1 layer when another data type is expected. How can I fix this error?

Python version: 3.8.3 x64

TensorFlow version: 2.10.0-dev20220517

Keras version: 2.10.0

OS: Windows 10

Edit:

Stupid trailing commas made me waste two days debugging. The right way to declare the model and fix the error is:

class MyModel(tf.keras.models.Sequential):
    def __init__(self):
        super(MyModel, self).__init__()
        self.c1 = tf.keras.layers.Conv2D(32, (3,3), strides=(1,1),
                                      padding='valid', 
                                      activation='relu',
                                      input_shape=input_shape)
        self.max2d = tf.keras.layers.MaxPool2D()
        self.c2 = tf.keras.layers.Conv2D(64, (3,3), strides=(1,1),
                                        padding='valid',
                                        activation='relu')
        self.dropout1 = tf.keras.layers.Dropout(0.25)
        self.flatten = tf.keras.layers.Flatten()
        self.dense1 = tf.keras.layers.Dense(64, activation='relu')
        self.dropout2 = tf.keras.layers.Dropout(0.5)
        self.dense2 = tf.keras.layers.Dense(num_classes, activation='softmax')

    def call(self, x):
        x = self.c1(x)
        x = self.c2 (x)
        x = self.max2d(x)
        x = self.dropout1(x)
        x = self.flatten(x)
        x = self.dense1(x)
        x = self.dropout2(x)
        x = self.dense2(x)
        return x

Ten Thousand Thanks, Pietro D'Antuono!

Looking at your error message (TypeError: '_TupleWrapper' object is not callable), I guess the trailing commas cause the class attributes to be interpreted as tuples:

class MyModel(tf.keras.models.Sequential):
    def __init__(self):
        super(MyModel, self).__init__()
        self.c1 = tf.keras.layers.Conv2D(32, (3,3), strides=(1,1),
                                      padding='valid', 
                                      activation='relu',
                                      input_shape=input_shape)  # REMOVE TR. COMMAS,
        self.max2d = tf.keras.layers.MaxPool2D()  # ,
        self.c2 = tf.keras.layers.Conv2D(64, (3,3), strides=(1,1),
                                        padding='valid',
                                        activation='relu')  # ,
        self.dropout1 = tf.keras.layers.Dropout(0.25)  # ,
        self.flatten = tf.keras.layers.Flatten()  # ,
        self.dense1 = tf.keras.layers.Dense(64, activation='relu')  # ,
        self.dropout2 = tf.keras.layers.Dropout(0.5)  # ,
        self.dense2 = tf.keras.layers.Dense(num_classes, activation='softmax')

    def call(self, x):
        x = self.c1(x)
        x = self.c2 (x)
        x = self.max2d(x)
        x = self.dropout1(x)
        x = self.flatten(x)
        x = self.dense1(x)
        x = self.dropout2(x)
        x = self.dense2(x)
        return x

model = MyModel()
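
As a side note, the root cause can be reproduced in plain Python without Keras at all: a trailing comma after an assignment creates a one-element tuple, and Keras' attribute tracking then reports that tuple as a _TupleWrapper before complaining that it is not callable. Below is a minimal sketch with hypothetical names (make_layer, Broken, Fixed) purely to illustrate the behavior:

# Minimal sketch (plain Python, hypothetical names): a trailing comma turns an
# assignment into a one-element tuple, which is what happened to self.c1 above.
def make_layer():
    return lambda x: x + 1  # stand-in for a Keras layer such as Conv2D

class Broken:
    def __init__(self):
        self.c1 = make_layer(),   # trailing comma -> self.c1 is a 1-element tuple

class Fixed:
    def __init__(self):
        self.c1 = make_layer()    # no trailing comma -> self.c1 is the layer itself

print(type(Broken().c1))  # <class 'tuple'>
print(Fixed().c1(41))     # 42
# Broken().c1(41)         # would raise: TypeError: 'tuple' object is not callable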
