
Accuracy does not increase in my ResNet on MNIST dataset

I have built a ResNet model with TensorFlow to classify MNIST digits. However, during training my accuracy barely changes and stays around 0.1 even after 3-4 epochs, which corresponds to a random classifier (a 1-in-10 chance of making the right prediction).

I have tried changing the activation function (from ReLU to sigmoid), but it does not improve accuracy. Changing the learning rate has no noticeable effect either. I am wondering whether my get_variable() calls are correct...

Here is the full model:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


def conv_2D(x, w, b, stride=1, padding='SAME', activation=None):
    '''
    2D convolution
    x: tensor of shape (batch, height, width, channel) -> 
    w: tensor of shape (f_width, f_height, channels_in, channels_out) -> weights
    b: tensor of shape (channels_out) -> biases
    '''
    # convolution
    x = tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding=padding)
    # add biases
    x = tf.nn.bias_add(x, b)

    if activation is not None:
        x = activation(x)

    return x

def print_tensor_shape(x, msg=''):
    print(msg, x.get_shape().as_list())


class RepBlock(object):
    def __init__(self, num_repeats, num_filters, bottleneck_size, name_scope):
        self.num_repeats = num_repeats
        self.num_filters = num_filters
        self.bottleneck_size = bottleneck_size
        self.name_scope = name_scope

    def apply_block(self, net):

        print_tensor_shape(net, 'entering apply_block')

        # loop over repeats
        for i_repeat in range(self.num_repeats):

            print_tensor_shape(net, 'layer %i' % i_repeat)

            # subsampling is performed by a convolution with stride=2, only
            # for the first convolution of the first repetition
            if i_repeat == 0:
                stride = 2
            else:
                stride = 1

            name = self.name_scope+'/%i/conv_in' % i_repeat
            with tf.variable_scope(name):
                w = tf.get_variable('w', initializer=tf.random_normal([1, 1, net.get_shape().as_list()[-1], self.bottleneck_size]))
                b = tf.get_variable('b', initializer=tf.random_normal([self.bottleneck_size]))
                conv = conv_2D(net, w, b, stride=stride, padding='VALID', activation=tf.nn.relu)

            print_tensor_shape(conv, name)

            name = self.name_scope+'/%i/conv_bottleneck' % i_repeat    
            with tf.variable_scope(name):
                w = tf.get_variable('w', initializer=tf.random_normal([3, 3, conv.get_shape().as_list()[-1], self.bottleneck_size]))
                b = tf.get_variable('b', initializer=tf.random_normal([self.bottleneck_size]))
                conv = conv_2D(conv, w, b, stride=1, padding='SAME', activation=tf.nn.relu)

                print_tensor_shape(conv, name)

            name = self.name_scope+'/%i/conv_out' % i_repeat
            with tf.variable_scope(name):
                w = tf.get_variable('w', initializer=tf.random_normal([1, 1, conv.get_shape().as_list()[-1], self.num_filters]))
                b = tf.get_variable('b', initializer=tf.random_normal([self.num_filters]))
                conv = conv_2D(conv, w, b, stride=1, padding='VALID', activation=None)
                print_tensor_shape(conv, name)

            if i_repeat == 0:
                net = conv + tf.nn.max_pool(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
            else:
                net = conv + net 

            net = tf.nn.relu(net)


        return net


def resnet(x):
    # reshape input
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    # init block for each layer
    layer_1 = RepBlock(num_repeats=3, num_filters=128, bottleneck_size=32, name_scope='layer_1')
    layer_2 = RepBlock(num_repeats=3, num_filters=256, bottleneck_size=64, name_scope='layer_2')
#    layer_3 = RepBlock(num_repeats=3, num_filters=512, bottleneck_size=128, name_scope='layer_3')
#    layer_4 = RepBlock(num_repeats=3, num_filters=1024, bottleneck_size=256, name_scope='layer_4') 

    layers = [layer_1, layer_2]

    # first layer
    name = 'conv_1'
    with tf.variable_scope(name):
        w = tf.get_variable('w', initializer=tf.random_normal([7, 7, x.get_shape().as_list()[-1], 64]))
        b = tf.get_variable('b', initializer=tf.random_normal([64]))
        net = conv_2D(x, w, b, stride=1, padding='SAME', activation=tf.nn.relu)  

    print_tensor_shape(net)

    net = tf.nn.max_pool(
        net, [1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

    print_tensor_shape(net)

    with tf.variable_scope('conv_2'):
        w = tf.get_variable('w', initializer=tf.random_normal([1, 1, net.get_shape().as_list()[-1], layers[0].num_filters]))
        b = tf.get_variable('b', initializer=tf.random_normal([layers[0].num_filters]))
        net = conv_2D(net, w, b, stride=1, padding='SAME', activation=tf.nn.relu)


    print_tensor_shape(net)


    for i_layer, layer in enumerate(layers):

        # pass the net through all blocks of the layer
        net = layer.apply_block(net)

        print_tensor_shape(net, 'After block')

        try:
            # upscale (depth) to the next block size
            next_block = layers[i_layer+1]
            with tf.variable_scope('upscale_%i' % i_layer):
                w = tf.get_variable('w', initializer=tf.random_normal([1, 1, net.get_shape().as_list()[-1], next_block.num_filters]))
                b = tf.get_variable('b', initializer=tf.random_normal([next_block.num_filters]))
                net = conv_2D(net, w, b, stride=1, padding='SAME', activation=tf.nn.relu)

            print_tensor_shape(net)

        except IndexError:
            pass

    # apply average pooling
    net = tf.nn.avg_pool(net, ksize=[1, net.get_shape().as_list()[1], net.get_shape().as_list()[2], 1], 
                                     strides=[1, 1, 1, 1], padding='VALID')

    print_tensor_shape(net, msg='after average pooling')

    # fully connected layer
    with tf.variable_scope('fc'):
        w = tf.get_variable('w', initializer=tf.random_normal([256, 10]))
        b = tf.get_variable('b', initializer=tf.random_normal([10]))

    net = tf.reshape(net, shape=[-1, 256])
    net = tf.add(tf.matmul(net, w), b)

    print_tensor_shape(net, 'after fc')

    return net    



if __name__ == '__main__':

    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    X = tf.placeholder(tf.float32, [None, 784])
    Y = tf.placeholder(tf.float32, [None, 10])
    Y_pred = resnet(X)

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Y_pred, labels=Y))
    optim = tf.train.AdamOptimizer(learning_rate=0.01).minimize(cost)

    correct_pred = tf.equal(tf.argmax(Y_pred, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    session = tf.InteractiveSession()
    init_op = tf.initialize_all_variables()
    session.run(init_op)

    nb_epochs = 10
    batch_size = 128
    training_size = mnist.train.num_examples

    nb_mini_batches = training_size // batch_size

    # loop over epochs    
    for i_epoch in range(nb_epochs):

        # loop over mini-batches
        for i_batch in range(nb_mini_batches):

            # get mini-batch
            batch_x, batch_y = mnist.train.next_batch(batch_size)

            [_, cost_val, acc] = session.run([optim, cost, accuracy], feed_dict={X: batch_x, Y:batch_y})

            print('epoch %i - batch %i - cost=%f - accuracy=%f' % (i_epoch, i_batch, cost_val, acc))

You can try this:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


def conv_2D(x, w, b=None, stride=1, padding='SAME', activation=None):
    '''
    2D convolution
    x: tensor of shape (batch, height, width, channel) ->
    w: tensor of shape (f_width, f_height, channels_in, channels_out) -> weights
    b: tensor of shape (channels_out) -> biases
    '''
    # convolution
    x = tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding=padding)
    # add biases
    if b is not None:
        x = tf.nn.bias_add(x, b)

    if activation is not None:
        x = activation(x)

    return x


def print_tensor_shape(x, msg=''):
    print(msg, x.get_shape().as_list())


class RepBlock(object):
    def __init__(self, num_repeats, num_filters, bottleneck_size, name_scope):
        self.num_repeats = num_repeats
        self.num_filters = num_filters
        self.bottleneck_size = bottleneck_size
        self.name_scope = name_scope

    def apply_block(self, net):

        print_tensor_shape(net, 'entering apply_block')

        # loop over repeats
        for i_repeat in range(self.num_repeats):

            print_tensor_shape(net, 'layer %i' % i_repeat)

            # subsampling is performed by a convolution with stride=2, only
            # for the first convolution of the first repetition
            if i_repeat == 0:
                stride = 2
            else:
                stride = 1

            name = self.name_scope+'/%i/conv_in' % i_repeat
            with tf.variable_scope(name):
                w = tf.get_variable('w', shape=[1, 1, net.get_shape().as_list()[-1], self.bottleneck_size],
                                    initializer=tf.contrib.layers.xavier_initializer_conv2d())
                b = tf.get_variable('b', initializer=tf.constant(0.1, shape=[self.bottleneck_size]))
                # w = tf.get_variable('w', initializer=tf.random_normal([1, 1, net.get_shape().as_list()[-1], self.bottleneck_size]))
                # b = tf.get_variable('b', initializer=tf.random_normal([self.bottleneck_size]))
                conv = conv_2D(net, w, b, stride=stride, padding='VALID', activation=tf.nn.relu)

            print_tensor_shape(conv, name)

            name = self.name_scope+'/%i/conv_bottleneck' % i_repeat
            with tf.variable_scope(name):
                w = tf.get_variable('w', shape=[3, 3, conv.get_shape().as_list()[-1], self.bottleneck_size],
                                    initializer=tf.contrib.layers.xavier_initializer_conv2d())
                b = tf.get_variable('b', initializer=tf.constant(0.1, shape=[self.bottleneck_size]))
                # w = tf.get_variable('w', initializer=tf.random_normal([3, 3, conv.get_shape().as_list()[-1], self.bottleneck_size]))
                # b = tf.get_variable('b', initializer=tf.random_normal([self.bottleneck_size]))
                conv = conv_2D(conv, w, b, stride=1, padding='SAME', activation=tf.nn.relu)

                print_tensor_shape(conv, name)

            name = self.name_scope+'/%i/conv_out' % i_repeat
            with tf.variable_scope(name):
                w = tf.get_variable('w', shape=[1, 1, conv.get_shape().as_list()[-1], self.num_filters],
                                    initializer=tf.contrib.layers.xavier_initializer_conv2d())
                b = tf.get_variable('b', initializer=tf.constant(0.1, shape=[self.num_filters]))
                # w = tf.get_variable('w', initializer=tf.random_normal([1, 1, conv.get_shape().as_list()[-1], self.num_filters]))
                # b = tf.get_variable('b', initializer=tf.random_normal([self.num_filters]))
                conv = conv_2D(conv, w, b, stride=1, padding='VALID', activation=None)
                print_tensor_shape(conv, name)

            if i_repeat == 0:
                net = conv + tf.nn.max_pool(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
            else:
                net = conv + net

            net = tf.nn.relu(net)

        return net


def resnet(x):
    # reshape input
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    # init block for each layer
    layer_1 = RepBlock(num_repeats=3, num_filters=128, bottleneck_size=32, name_scope='layer_1')
    layer_2 = RepBlock(num_repeats=3, num_filters=256, bottleneck_size=64, name_scope='layer_2')
#    layer_3 = RepBlock(num_repeats=3, num_filters=512, bottleneck_size=128, name_scope='layer_3')
#    layer_4 = RepBlock(num_repeats=3, num_filters=1024, bottleneck_size=256, name_scope='layer_4')

    layers = [layer_1, layer_2]

    # first layer
    name = 'conv_1'
    with tf.variable_scope(name):
        w = tf.get_variable('w', shape=[7, 7, x.get_shape().as_list()[-1], 64],
                            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        # w = tf.get_variable('w', initializer=tf.random_normal([7, 7, x.get_shape().as_list()[-1], 64]))
        b = tf.get_variable('b', initializer=tf.constant(0.1, shape=[64]))
        net = conv_2D(x, w, b, stride=1, padding='SAME', activation=tf.nn.relu)

    print_tensor_shape(net, name)

    net = tf.nn.max_pool(
        net, [1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

    print_tensor_shape(net, 'After max pooling')

    with tf.variable_scope('conv_2'):
        w = tf.get_variable('w', shape=[1, 1, net.get_shape().as_list()[-1], layers[0].num_filters],
                            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        # w = tf.get_variable('w', initializer=tf.random_normal([1, 1, net.get_shape().as_list()[-1], layers[0].num_filters]))
        b = tf.get_variable('b', initializer=tf.constant(0.1, shape=[layers[0].num_filters]))
        net = conv_2D(net, w, b, stride=1, padding='SAME', activation=tf.nn.relu)

    print_tensor_shape(net, 'conv_2')

    for i_layer, layer in enumerate(layers):
        print(i_layer, layer)

        # pass the net through all blocks of the layer
        net = layer.apply_block(net)

        print_tensor_shape(net, 'After block')

        try:
            # upscale (depth) to the next block size
            next_block = layers[i_layer+1]
            with tf.variable_scope('upscale_%i' % i_layer):
                w = tf.get_variable('w', shape=[1, 1, net.get_shape().as_list()[-1], next_block.num_filters],
                                    initializer=tf.contrib.layers.xavier_initializer_conv2d())
                # w = tf.get_variable('w', initializer=tf.random_normal([1, 1, net.get_shape().as_list()[-1], next_block.num_filters]))
                b = tf.get_variable('b', initializer=tf.constant(0.1, shape=[next_block.num_filters]))
                net = conv_2D(net, w, b, stride=1, padding='SAME', activation=tf.nn.relu)

            print_tensor_shape(net)

        except IndexError:
            pass

    # apply average pooling
    net = tf.nn.avg_pool(net, ksize=[1, net.get_shape().as_list()[1], net.get_shape().as_list()[2], 1],
                                     strides=[1, 1, 1, 1], padding='VALID')

    print_tensor_shape(net, msg='after average pooling')

    # fully connected layer
    with tf.variable_scope('fc'):
        w = tf.get_variable('w', shape=[256, 10],
                            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        # w = tf.get_variable('w', initializer=tf.random_normal([256, 10]))
        b = tf.get_variable('b', initializer=tf.constant(0.1, shape=[10]))

    net = tf.reshape(net, shape=[-1, 256])
    net = tf.add(tf.matmul(net, w), b)

    print_tensor_shape(net, 'after fc')

    return net

if __name__ == '__main__':
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    X = tf.placeholder(tf.float32, [None, 784])
    Y = tf.placeholder(tf.float32, [None, 10])
    Y_pred = resnet(X)

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Y_pred, labels=Y))
    optim = tf.train.AdamOptimizer(learning_rate=0.01).minimize(cost)

    correct_pred = tf.equal(tf.argmax(Y_pred, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    session = tf.InteractiveSession()
    init_op = tf.initialize_all_variables()
    session.run(init_op)

    nb_epochs = 10
    batch_size = 128
    training_size = mnist.train.num_examples

    nb_mini_batches = training_size // batch_size

    # loop over epochs
    for i_epoch in range(nb_epochs):

        # loop over mini-batches
        for i_batch in range(nb_mini_batches):

            # get mini-batch
            batch_x, batch_y = mnist.train.next_batch(batch_size)

            [_, cost_val, acc] = session.run([optim, cost, accuracy], feed_dict={X: batch_x, Y:batch_y})

            print('epoch %i - batch %i - cost=%f - accuracy=%f' % (i_epoch, i_batch, cost_val, acc))

The only problem is the initialization of the weights and biases. Note that there are other weight-initialization methods as well, for example:

n = filter_size * filter_size * out_filters
kernel = tf.get_variable(
                'w', [filter_size, filter_size, in_filters, out_filters], tf.float32,
                # He initialization: normal with stddev sqrt(2/n)
                initializer=tf.random_normal_initializer(stddev=np.sqrt(2.0/n))
                # initializer=tf.contrib.layers.xavier_initializer()
            )

Also, biases can be initialized with a constant 0.1 or 0.01, but in ResNet no biases are used after the conv2d layers inside the blocks; a bias is only used in the fully connected layer.
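
For illustration, here is a minimal sketch of that convention (my own illustrative snippet, not part of the model above; it reuses the conv_2D helper defined earlier, which already accepts b=None, and the shapes are arbitrary placeholders):

import tensorflow as tf

# hypothetical input: a batch of 28x28 feature maps with 64 channels
net = tf.placeholder(tf.float32, [None, 28, 28, 64])

# convolution inside a residual block: no bias term
with tf.variable_scope('block_conv'):
    w = tf.get_variable('w', shape=[3, 3, 64, 64],
                        initializer=tf.contrib.layers.xavier_initializer_conv2d())
    net = conv_2D(net, w, b=None, stride=1, padding='SAME', activation=tf.nn.relu)

# only the final fully connected layer carries a bias
with tf.variable_scope('fc_example'):
    flat = tf.reduce_mean(net, axis=[1, 2])   # global average pooling -> [batch, 64]
    w_fc = tf.get_variable('w', shape=[64, 10],
                           initializer=tf.contrib.layers.xavier_initializer())
    b_fc = tf.get_variable('b', initializer=tf.constant(0.1, shape=[10]))
    logits = tf.add(tf.matmul(flat, w_fc), b_fc)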

Hope this helps.

Actually, the problem was that `from __future__ import division` was missing. I had not added it to my other scripts either, and they still work. Not sure why it is needed in this script.
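
For reference, a minimal illustration of what that import changes (assuming Python 2; under Python 3 true division is already the default, so the import is a no-op):

# Without the import (plain Python 2), '/' between two ints truncates:
#   7 / 2  -> 3
# With the import at the top of the file, '/' is always true division:
from __future__ import division

print(7 / 2)    # -> 3.5
print(7 // 2)   # -> 3  (floor division is unchanged)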
