
How to access the weights (parameters) of a Neural Network after every certain number of epochs during the training process

I am training a Neural Network using TensorFlow. I calculate and print the training cost after every 100 epochs, using the following training code for my model.

def model(X_train, Y_train, layers_dims, learning_rate=0.0001,
          num_epochs=100, minibatch_size=32, print_cost=True):

    ops.reset_default_graph()
    tf.set_random_seed(1)
    seed = 3
    (n_x, m) = X_train.shape
    n_y = Y_train.shape[0]
    costs = []
    beta = 0

    # Create placeholders and build the forward pass in the tensorflow graph
    X, Y, keep_prob = create_placeholders(n_x, n_y)
    parameters = initialize_parameters(layers_dims)
    Z = forward_propagation(X, parameters, keep_prob)

    # Cost plus L2 regularization over all weight matrices (beta = 0 here)
    cost = compute_cost(Z, Y)
    L3 = len(layers_dims)
    regularizers = tf.constant(0, dtype=tf.float64)
    for l3 in range(1, L3):
        regularizers = regularizers + tf.nn.l2_loss(parameters['W' + str(l3)])
    cost = tf.reduce_mean(cost + beta * regularizers)

    with tf.device('/gpu:0'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    init = tf.global_variables_initializer()

    with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:

        sess.run(init)

        for epoch in range(num_epochs):

            epoch_cost = 0.                            # cost accumulated over this epoch
            num_minibatches = int(m / minibatch_size)  # number of minibatches of size minibatch_size in the train set
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:

                (minibatch_X, minibatch_Y) = minibatch
                _, minibatch_cost = sess.run([optimizer, cost],
                                             feed_dict={X: minibatch_X, Y: minibatch_Y, keep_prob: 1})
                epoch_cost += minibatch_cost / num_minibatches

            if print_cost == True and epoch % 100 == 0:
                print("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)

        # plot the training cost (one point per 5 epochs)
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('epochs (per 5)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # save the trained parameters as numpy arrays
        parameters = sess.run(parameters)
        print("Parameters have been trained!")

        return parameters

I want to access the parameters after every 100 epochs during training so that I can use them to print the validation set cost after every 100 epochs. I want to plot the validation set cost and the training set cost on a single plot; currently I am plotting and printing only the training set cost. [figure: plot of the training cost]

The simplest way is to run the model on the validation set:

val_minibatch_cost = sess.run(cost, feed_dict={X: val_minibatch_X, Y: val_minibatch_Y, keep_prob: 1})
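
Because cost is already a node in the graph, the validation data can be fed to that same tensor from inside the training loop; no new placeholders or forward passes are needed. A minimal sketch, assuming create_placeholders leaves the batch dimension as None (the usual convention), so that the whole validation set, held in arrays cv_x and cv_y as in the asker's own solution below, can be fed at once:

# Inside the epoch loop, next to the training-cost printout:
if print_cost and epoch % 100 == 0:
    # Reuse the existing X/Y placeholders; disable dropout for evaluation.
    val_cost = sess.run(cost, feed_dict={X: cv_x, Y: cv_y, keep_prob: 1})
    print("Validation cost after epoch %i: %f" % (epoch, val_cost))

    # The current weights can be read out at any point the same way,
    # since parameters is a dict of tf.Variable objects:
    current_params = sess.run(parameters)

sess.run(parameters) returns the weights as plain NumPy arrays, so they can be inspected or saved mid-training without touching the graph.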

I solved it by including the following additional code, though my solution still seems a bit inefficient to me.

# Additional blocks of code for getting the validation set cost.
cv_costs_post_train = []
beta = 0.5
(n_x_cv, m_cv) = cv_x.shape
n_y_cv = cv_y.shape[0]

# ...

if print_cost == True and epoch % 100 == 0:
    print("Train Cost after epoch %i: %f" % (epoch, epoch_cost))
    X_cv, Y_cv, keep_prob_cv = create_placeholders(n_x_cv, n_y_cv)
    Z_cv = forward_propagation(X_cv, parameters, keep_prob_cv)
    cv_cost_post_train = compute_cost(Z_cv, Y_cv)
    cv_cost_post_train = sess.run([cv_cost_post_train],
                                  feed_dict={X_cv: cv_x, Y_cv: cv_y, keep_prob_cv: 1})
    print(cv_cost_post_train)

if print_cost == True and epoch % 5 == 0:
    costs.append(epoch_cost)
    cv_costs_post_train.append(cv_cost_post_train)

# ...
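
The inefficiency comes from calling create_placeholders and forward_propagation inside the loop: every 100th epoch adds a fresh set of ops to the graph, so the graph keeps growing. One way around it, sketched below with the same helper functions (the names cv_cost and cv_cost_val are illustrative), is to build the validation branch once, before the session starts, and only run it inside the loop:

# Build the validation branch ONCE, next to the training graph:
X_cv, Y_cv, keep_prob_cv = create_placeholders(n_x_cv, n_y_cv)
Z_cv = forward_propagation(X_cv, parameters, keep_prob_cv)
cv_cost = compute_cost(Z_cv, Y_cv)

# ... then, inside the training loop, only run the pre-built node:
if print_cost and epoch % 100 == 0:
    cv_cost_val = sess.run(cv_cost, feed_dict={X_cv: cv_x, Y_cv: cv_y, keep_prob_cv: 1})
    print("CV cost after epoch %i: %f" % (epoch, cv_cost_val))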

The full code for the model is as follows.

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
# create_placeholders, initialize_parameters, forward_propagation,
# compute_cost and random_mini_batches are helper functions defined elsewhere.

def model(X_train, Y_train, cv_x, cv_y, layers_dims, learning_rate=0.0001,
          num_epochs=2000, minibatch_size=32, print_cost=True):

    ops.reset_default_graph()
    tf.set_random_seed(1)
    seed = 3
    (n_x, m) = X_train.shape
    n_y = Y_train.shape[0]
    costs = []
    cv_costs_post_train = []
    beta = 0.5
    (n_x_cv, m_cv) = cv_x.shape
    n_y_cv = cv_y.shape[0]

    # Create placeholders of shape (n_x, n_y)
    X, Y, keep_prob = create_placeholders(n_x, n_y)

    # Initialize parameters
    parameters = initialize_parameters(layers_dims)

    # Forward propagation: build the forward pass in the tensorflow graph
    Z = forward_propagation(X, parameters, keep_prob)

    # Cost function: cost plus L2 regularization over all weight matrices
    cost = compute_cost(Z, Y)
    L3 = len(layers_dims)
    regularizers = tf.constant(0, dtype=tf.float64)
    for l3 in range(1, L3):
        regularizers = regularizers + tf.nn.l2_loss(parameters['W' + str(l3)])
    loss = tf.reduce_mean(cost + beta * regularizers)

    # Backpropagation: define the tensorflow optimizer. Use an AdamOptimizer.
    with tf.device('/gpu:0'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

    # Initialize all the variables
    init = tf.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:

        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):

            epoch_cost = 0.                            # cost accumulated over this epoch
            num_minibatches = int(m / minibatch_size)  # number of minibatches of size minibatch_size in the train set
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:

                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch

                # Run the optimizer and the loss on the minibatch (dropout on).
                _, minibatch_cost = sess.run([optimizer, loss],
                                             feed_dict={X: minibatch_X, Y: minibatch_Y, keep_prob: 0.5})

                epoch_cost += minibatch_cost / num_minibatches

            # Every 100 epochs: print the train cost and compute the validation cost.
            if print_cost == True and epoch % 100 == 0:
                print("Train Cost after epoch %i: %f" % (epoch, epoch_cost))

                # Note: this adds fresh placeholder and forward-propagation ops
                # to the graph each time, which is the inefficiency mentioned above.
                X_cv, Y_cv, keep_prob_cv = create_placeholders(n_x_cv, n_y_cv)
                Z_cv = forward_propagation(X_cv, parameters, keep_prob_cv)
                cv_cost_post_train = compute_cost(Z_cv, Y_cv)
                cv_cost_post_train = sess.run([cv_cost_post_train],
                                              feed_dict={X_cv: cv_x, Y_cv: cv_y, keep_prob_cv: 1})
                print(cv_cost_post_train)

            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)
                cv_costs_post_train.append(cv_cost_post_train)

        # plot the train cost (one point per 5 epochs)
        plt.plot(np.squeeze(costs))
        plt.ylabel('train cost')
        plt.xlabel('epochs (per 5)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # plot the validation cost
        plt.plot(np.squeeze(cv_costs_post_train))
        plt.ylabel('CV cost')
        plt.xlabel('epochs (per 5)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # save the parameters in a variable
        parameters = sess.run(parameters)
        print("Parameters have been trained!")

        return parameters
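
To get both curves on the single plot the question asked for, the two series can be drawn on the same axes before calling plt.show(). A sketch: both lists are appended every 5 epochs, so they share an x-axis, and cv_costs_post_train only changes value every 100 epochs, which gives a step-like curve.

epochs_axis = np.arange(len(costs)) * 5   # one sample every 5 epochs
plt.plot(epochs_axis, np.squeeze(costs), label='train cost')
plt.plot(epochs_axis, np.squeeze(cv_costs_post_train), label='CV cost')
plt.xlabel('epoch')
plt.ylabel('cost')
plt.title("Learning rate =" + str(learning_rate))
plt.legend()
plt.show()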
