
How do I add noise to the weights when calculating the loss with Keras?

I am new to Keras and am trying to customize my training step.

Questions:

  1. How do I create the new variable weights_right using weights_right = weights - (lr+alpha)*gradients in Keras when customizing the training loop?
  2. How do I feed the NN forward with the weights as formal parameters? Can I customize the forward function in Keras like the code below?

Background:

In the stochastic gradient descent algorithm, after feeding forward a mini-batch of data and getting the gradients on that mini-batch, I would like to perturb the weights and create a new variable called weights_right, i.e. weights_right = weights - (lr+alpha)*gradients (alpha is a constant), and then feed the NN forward with weights_right to get the new loss.

Some parts of the code in Python are as follows:

class Network(object):
    def __init__(self, sizes):
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
    def feedforward(self, a, weights=None, biases=None):
        """Return the output of the network if ``a`` is input."""
        if weights is None:
            weights=self.weights
        if biases is None:
            biases=self.biases
        #!!! Note the output layer has no activation for regression.
        for b, w in zip(biases[:-1], weights[:-1]):
            a = sigmoid(np.dot(w, a)+b)
        a=np.dot(weights[-1],a)+biases[-1]
        
        return a
    #-----------------------------------
    # The following is the important one.
    #-----------------------------------
    def customSGD(self, training_data, training_labels, epochs, mini_batch_size, lr, alpha):
        for epoch in range(epochs):
            random.shuffle(training_data)
            mini_batches= [training_data[k:k+mini_batch_size] for k in range(0, len(training_data), mini_batch_size)]
            for mini_batch in mini_batches:
                gradients_on_mini_batch = get_gradients(mini_batch)
                #---------------------------------------
                # The following two steps are what 
                # I would like to achieve in Keras
                #---------------------------------------
                # Create a new variable called weights_right

                weights_right = weights-(lr+alpha)*gradients_on_mini_batch

                # Feed the NN with weights_right; note that the params
                # in the current NN are still weights, not weights_right.

                pred_right = self.feedforward(training_data, weights_right)
                loss_right = loss_func(pred_right, training_labels)
                ......

                # update weights
                weights = weights-lr*gradients_on_mini_batch          

The above code is mainly adapted from Michael Nielsen's online book.

Any help would be appreciated. Thank you so much!

In a custom training loop, you can do whatever you like with the gradients and weights.

@tf.function
def train_step(inputs, labels):
    with tf.GradientTape() as tape:
        logits = model(inputs)
        loss = loss_object(labels, logits)

    weights = model.trainable_variables
    # add manipulation of weights here
    gradients = tape.gradient(loss, weights)
    opt.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_acc(labels, logits)
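
To make the manipulation concrete for question 1: inside train_step, once gradients = tape.gradient(loss, weights) has run, the perturbed weights can be built as plain tensors with a list comprehension. A minimal sketch, assuming lr and alpha are constants you define yourself (they are not defined in the snippet above):

    # add manipulation of weights here, e.g. for question 1:
    # `lr` and `alpha` are assumed constants defined elsewhere by you.
    weights_right = [w - (lr + alpha) * g for w, g in zip(weights, gradients)]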

Here's the full running example:

import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)

dataset = tf.data.Dataset.from_tensor_slices((X, y)).shuffle(150)

train_dataset = dataset.take(120).batch(4)
test_dataset = dataset.skip(120).take(30).batch(4)


class DenseModel(Model):
    def __init__(self):
        super(DenseModel, self).__init__()
        self.dens1 = Dense(8, activation='elu')
        self.dens2 = Dense(16, activation='relu')
        self.dens3 = Dense(3)

    def call(self, inputs, training=None, **kwargs):
        x = self.dens1(inputs)
        x = self.dens2(x)
        x = self.dens3(x)
        return x


model = DenseModel()

loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

train_loss = tf.keras.metrics.Mean()
test_loss = tf.keras.metrics.Mean()

train_acc = tf.keras.metrics.SparseCategoricalAccuracy()
test_acc = tf.keras.metrics.SparseCategoricalAccuracy()


opt = tf.keras.optimizers.Adam(learning_rate=1e-3)


@tf.function
def train_step(inputs, labels):
    with tf.GradientTape() as tape:
        logits = model(inputs)
        loss = loss_object(labels, logits)

    weights = model.trainable_variables
    # add manipulation of weights here
    gradients = tape.gradient(loss, weights)
    opt.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_acc(labels, logits)


@tf.function
def test_step(inputs, labels):
    logits = model(inputs)
    loss = loss_object(labels, logits)
    test_loss(loss)
    test_acc(labels, logits)


for epoch in range(10):
    template = 'Epoch {:>2} Train Loss {:.3f} Test Loss {:.3f} ' \
               'Train Acc {:.2f} Test Acc {:.2f}'

    train_loss.reset_states()
    test_loss.reset_states()
    train_acc.reset_states()
    test_acc.reset_states()

    for X_train, y_train in train_dataset:
        train_step(X_train, y_train)

    for X_test, y_test in test_dataset:
        test_step(X_test, y_test)

    print(template.format(
        epoch + 1,
        train_loss.result(),
        test_loss.result(),
        train_acc.result(),
        test_acc.result()
    ))
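
For question 2, a Keras model's layers read their kernels and biases from their own tf.Variable objects, so you cannot pass weights_right to model(inputs) as formal parameters the way the NumPy feedforward does. One common workaround is to snapshot the current values, assign the perturbed values, run a second forward pass to get loss_right, and then restore the originals. The following is a minimal sketch of that approach; the name train_step_right and the constants lr and alpha are illustrative, not part of the example above:

# Sketch: a train_step that also computes loss_right with perturbed weights.
# `lr` and `alpha` are assumed constants; adjust them to your setting.
lr = 1e-3
alpha = 0.1

@tf.function
def train_step_right(inputs, labels):
    with tf.GradientTape() as tape:
        logits = model(inputs)
        loss = loss_object(labels, logits)

    weights = model.trainable_variables
    gradients = tape.gradient(loss, weights)

    # Question 1: perturbed weights as plain tensors, layer by layer.
    weights_right = [w - (lr + alpha) * g for w, g in zip(weights, gradients)]

    # Question 2: temporarily assign the perturbed values, feed forward,
    # then restore the original values.
    originals = [tf.identity(w) for w in weights]
    for w, w_r in zip(weights, weights_right):
        w.assign(w_r)
    logits_right = model(inputs)
    loss_right = loss_object(labels, logits_right)
    for w, w_orig in zip(weights, originals):
        w.assign(w_orig)

    # Ordinary update with the original gradients.
    opt.apply_gradients(zip(gradients, weights))
    train_loss(loss)
    train_acc(labels, logits)
    return loss_right

An alternative that avoids the assign/restore dance is to write the forward pass functionally (e.g. with tf.nn ops) so the weights really are formal parameters, which is closer to your NumPy feedforward but means bypassing the Keras layer objects.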
