
Why am I getting the error "ValueError: No gradients provided for any variable:" while using train_step() in keras?

I'm facing trouble with tensorflow.keras while executing the following code:

class Whole_model(tf.keras.Model):
    def __init__(self, EEG_gen_model, emg_feature_extractor, eeg_feature_extractor, seq2seq_model):
        super(Whole_model, self).__init__()
        self.EEG_gen_model = EEG_gen_model
        self.emg_feature_extractor = emg_feature_extractor
        self.eeg_feature_extractor = eeg_feature_extractor
        self.seq2seq_model = seq2seq_model

    def compile(self, EEG_gen_optimizer, emg_feature_optim, eeg_feature_optim,
                seq2seq_optim, EEG_gen_loss, seq2seq_loss_fn, gen_mae, accuracy):
        super(Whole_model, self).compile()
        self.EEG_gen_optimizer = EEG_gen_optimizer
        self.emg_feature_optim = emg_feature_optim
        self.eeg_feature_optim = eeg_feature_optim
        self.seq2seq_optim = seq2seq_optim
        self.EEG_gen_loss = EEG_gen_loss
        self.seq2seq_loss_fn = seq2seq_loss_fn
        self.gen_mae = gen_mae
        self.accuracy = accuracy
        # We can use a different optimizer for each model.

    def train_step(self, data):
        no_Epochs = 3
        x_train, [y_train_eeg, y] = data
        y = tf.reshape(y, [-1, no_Epochs, 5])
        n_samples_per_epoch = x_train.shape[1]
        print(n_samples_per_epoch)
        emg_input = tf.reshape(x_train, [-1, n_samples_per_epoch, 1])
        y_eeg_true = tf.reshape(y_train_eeg, [-1, n_samples_per_epoch, 1])
        print(emg_input.shape, y_eeg_true.shape)

        # tf.argmax(pred_classes, 1)
        # Train the EEG generator
        with tf.GradientTape() as tape:
            EEG_Gen = self.EEG_gen_model(emg_input)
            print(EEG_Gen.shape, y_eeg_true.shape)
            gen_model_loss = self.EEG_gen_loss(y_eeg_true, EEG_Gen)
            gen_MAE = self.gen_mae(y_eeg_true, EEG_Gen)
        grads = tape.gradient(gen_model_loss, self.EEG_gen_model.trainable_weights)
        self.EEG_gen_optimizer.apply_gradients(zip(grads, self.EEG_gen_model.trainable_weights))

        # Train the seq2seq model
        emg_inp = x_train
        eeg_inp = self.EEG_gen_model(emg_inp)
        emg_enc_seq = self.emg_feature_extractor(emg_inp)
        eeg_enc_seq = self.eeg_feature_extractor(eeg_inp)

        len_epoch = input_layer.shape[1]
        inputs = tf.reshape(input_layer, [-1, no_Epochs, len_epoch])

        with tf.GradientTape() as tape:
            outputs = self.seq2seq_model(inputs)
            seq2seq_loss = self.seq2seq_loss_fn(y, outputs)
            print('loss', seq2seq_loss)
            accuracy = self.accuracy(y, outputs)
        grads = tape.gradient(seq2seq_loss, self.seq2seq_model.trainable_weights)
        self.seq2seq_optim.apply_gradients(zip(grads, self.seq2seq_model.trainable_weights))

        # Train the EEG feature extractor
        emg_inp = x_train
        eeg_inp = self.EEG_gen_model(emg_inp)
        eeg_enc_seq = self.emg_feature_extractor(emg_inp)

        with tf.GradientTape() as tape:
            eeg_enc_seq = self.eeg_feature_extractor(eeg_inp)
            len_epoch = input_layer.shape[1]
            inputs = tf.reshape(input_layer, [-1, no_Epochs, len_epoch])
            outputs = self.seq2seq_model(inputs)
            seq2seq_loss = self.seq2seq_loss_fn(y, outputs)
            print('loss', seq2seq_loss)
        grads = tape.gradient(seq2seq_loss, self.eeg_feature_extractor.trainable_weights)
        self.eeg_feature_optim.apply_gradients(zip(grads, self.eeg_feature_extractor.trainable_weights))

        # Train the EMG feature extractor
        emg_inp = x_train
        eeg_inp = self.EEG_gen_model(emg_inp)
        with tf.GradientTape() as tape:
            eeg_enc_seq = self.emg_feature_extractor(emg_inp)
            eeg_enc_seq = self.eeg_feature_extractor(eeg_inp)
            len_epoch = input_layer.shape[1]
            inputs = tf.reshape(input_layer, [-1, no_Epochs, len_epoch])
            outputs = self.seq2seq_model(inputs)
            seq2seq_loss = self.seq2seq_loss_fn(y, outputs)
            print('loss', seq2seq_loss)
            accuracy = self.accuracy(y, outputs)
        grads = tape.gradient(seq2seq_loss, self.emg_feature_extractor.trainable_weights)
        print('check', outputs.shape, y.shape, grads)
        self.emg_feature_optim.apply_gradients(zip(grads, self.emg_feature_extractor.trainable_weights))

        return {"seq2seq_loss": seq2seq_loss, "gen_model_loss": gen_model_loss,
                "gen_MAE": gen_MAE, "accuracy": accuracy}

    def test_step(self, data):
        x_emg, y = data
        no_Epochs = 3
        y = tf.reshape(y, [-1, no_Epochs, 5])
        emg_inp = tf.keras.layers.Input(3000, 1)
        eeg_inp = self.EEG_gen_model(emg_inp)
        emg_enc_seq = self.emg_feature_extractor(emg_inp)
        eeg_enc_seq = self.eeg_feature_extractor(eeg_inp)
        len_epoch = input_layer.shape[1]
        inputs = tf.reshape(input_layer, [-1, no_Epochs, len_epoch])
        outputs = self.seq2seq_model(inputs)
        sleep_classifier_model = tf.keras.Model(inputs=emg_inp, outputs=outputs)

        y_pred = sleep_classifier_model(x_emg, training=False)  # Forward pass
        # Compute our own loss and accuracy
        seq2seq_loss = self.seq2seq_loss_fn(y, y_pred)
        accuracy = self.accuracy(y, y_pred)

        return {"seq2seq_loss": seq2seq_loss, "accuracy": accuracy}

 

model = Whole_model(EEG_gen_model=EEG_gen_model, emg_feature_extractor=emg_feature_extractor,
                    eeg_feature_extractor=eeg_feature_extractor, seq2seq_model=seq2seq_model)
model.compile(
    EEG_gen_optimizer=tf.optimizers.Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False),
    emg_feature_optim=tf.optimizers.Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False),
    eeg_feature_optim=tf.optimizers.Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False),
    seq2seq_optim=tf.optimizers.Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False),
    seq2seq_loss_fn=tf.keras.losses.CategoricalCrossentropy(),
    EEG_gen_loss=tf.keras.losses.MSE,
    gen_mae=tf.keras.losses.MAE,
    accuracy=tf.keras.metrics.Accuracy())
model.fit(x_train_emg, [x_train_eeg, y_train], batch_size=3, epochs=1, validation_split=None,
          validation_data=(x_test_emg, y_test), shuffle=False)

After executing this code, I am getting the following error:

ValueError: No gradients provided for any variable: ['conv1d_8/kernel:0', 'conv1d_8/bias:0', 'conv1d_12/kernel:0', 'conv1d_12/bias:0', 'conv1d_9/kernel:0', 'conv1d_9/bias:0', 'conv1d_13/kernel:0', 'conv1d_13/bias:0', 'conv1d_10/kernel:0', 'conv1d_10/bias:0', 'conv1d_14/kernel:0', 'conv1d_14/bias:0', 'conv1d_11/kernel:0', 'conv1d_11/bias:0', 'conv1d_15/kernel:0', 'conv1d_15/bias:0'].

How can I fix it? Please help me. Thank you in advance.

Thanks, Andrey. But I have already tried defining all submodules, such as self.EEG_gen_model, as tf.keras.layers.Layer subclasses with a call() method, as follows:

class EEG_gen_layer(tf.keras.layers.Layer):
    def __init__(self):
        super(EEG_gen_layer, self).__init__()
        n_samples_per_epoch = 3000
        print(n_samples_per_epoch)
        inputs = tf.keras.layers.Input(
            batch_shape=[None, n_samples_per_epoch, 1], name="input")
        lstm1 = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(units=128, return_sequences=True),
            merge_mode='concat')(inputs)
        lstm2 = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(units=128, return_sequences=True),
            merge_mode='concat')(lstm1)
        dens1 = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(
            32, kernel_initializer=tf.keras.initializers.glorot_normal(),
            activation=tf.nn.relu))(lstm2)
        dens2 = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(
            1, kernel_initializer=tf.keras.initializers.glorot_normal(),
            activation=None))(dens1)
        self.EEG_gen_model = tf.keras.Model(inputs=inputs, outputs=dens2)

    def call(self, inp_emg, training=False):
        x = self.EEG_gen_model(inp_emg)
        return x

In that way, I defined all submodules of the model as tf.keras.layers.Layer subclasses. But I am still getting the same error.

Please help me fix this error.

You are calling your model in train_step:

eeg_inp = self.EEG_gen_model(emg_inp)

But your model has no call() method.
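For reference, here is a minimal sketch of the pattern that avoids this error, assuming a reduced two-submodel setup (gen_model and cls_model are illustrative stand-ins, not your actual architecture): the subclassed model defines call(), and every forward pass that should contribute gradients runs inside the GradientTape context, so the loss actually depends on the trainable weights. Note also that in your posted train_step the seq2seq inputs are built from input_layer, a tensor that is never computed from the feature extractors' outputs under the tape, which likewise leaves the tape with no gradient path to those weights.

import tensorflow as tf

class TwoStageModel(tf.keras.Model):
    # gen_model and cls_model are illustrative stand-ins for the
    # EEG generator and the seq2seq classifier.
    def __init__(self, gen_model, cls_model):
        super(TwoStageModel, self).__init__()
        self.gen_model = gen_model
        self.cls_model = cls_model

    def call(self, x, training=False):
        # Required when the model itself is called, e.g. model(x).
        return self.cls_model(self.gen_model(x, training=training),
                              training=training)

    def compile(self, gen_optimizer, cls_optimizer, gen_loss_fn, cls_loss_fn):
        super(TwoStageModel, self).compile()
        self.gen_optimizer = gen_optimizer
        self.cls_optimizer = cls_optimizer
        self.gen_loss_fn = gen_loss_fn
        self.cls_loss_fn = cls_loss_fn

    def train_step(self, data):
        x, (y_gen, y_cls) = data

        # Everything from input to loss happens INSIDE the tape, so the
        # loss is differentiable w.r.t. gen_model's weights.
        with tf.GradientTape() as tape:
            gen_out = self.gen_model(x, training=True)
            gen_loss = self.gen_loss_fn(y_gen, gen_out)
        grads = tape.gradient(gen_loss, self.gen_model.trainable_weights)
        self.gen_optimizer.apply_gradients(
            zip(grads, self.gen_model.trainable_weights))

        # The classifier input is recomputed inside this tape from x;
        # feeding a tensor created outside the tape (such as a global
        # input_layer) breaks the gradient path and raises
        # "No gradients provided for any variable".
        with tf.GradientTape() as tape:
            cls_out = self.cls_model(self.gen_model(x, training=True),
                                     training=True)
            cls_loss = self.cls_loss_fn(y_cls, cls_out)
        grads = tape.gradient(cls_loss, self.cls_model.trainable_weights)
        self.cls_optimizer.apply_gradients(
            zip(grads, self.cls_model.trainable_weights))

        return {"gen_loss": gen_loss, "cls_loss": cls_loss}

Using one tape per submodel keeps the updates independent at the cost of an extra forward pass; a single persistent tape would also work. The key invariant is that tape.gradient is asked for the gradient of a loss that was actually recorded against the variables in question.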
