
“tf.layers.conv2d” and “tf.nn.softmax” don't change the dimension

I am using an RNN model to do something, but I ran into some errors that confuse me. I used tf.layers.conv2d. As I understand it, it should change the dimensions of the inputs.

The output size of conv:

Width = (W - F + 2P)/S + 1, Height = (H - F + 2P)/S + 1

The output size of pool:

W = (W - F)/S + 1, H = (H - F)/S + 1

Say my input shape is (128, 1293) and the conv2d kernel is (29, 294) with 32 filters. The resulting shape should be (100, 1000, 32), but it became (128, 1293, 32).
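For reference, a minimal sketch (plain Python, with my own helper names conv_out/pool_out) of the output-size arithmetic above, using the shapes from the question and assuming stride 1 and no padding (P = 0):

def conv_out(size, kernel, pad=0, stride=1):
    # (W - F + 2P)/S + 1
    return (size - kernel + 2 * pad) // stride + 1

def pool_out(size, kernel, stride):
    # (W - F)/S + 1
    return (size - kernel) // stride + 1

h, w = 128, 1293                                # input MFCC "image"
h, w = conv_out(h, 29), conv_out(w, 294)        # 29x294 conv  -> (100, 1000)
h, w = pool_out(h, 4, 4), pool_out(w, 4, 4)     # 4x4 pool, stride 4 -> (25, 250)
print(h, w)                                     # 25 250

These are the sizes written in the comments of the code below.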

And at the end of the model I used softmax. The input to the softmax is (5, 2), but the result is still (5, 2). Shouldn't it be a vector of shape (5,)?

My code:

def inference(input_mfcc, train):
    with tf.variable_scope('conv1'):
        # 128*1293  conv1 29*294*32 ===> 100*1000*32
        # 100*1000*32  pool1 4*4  s4====>25*250*32
        conv1 = tf.layers.conv2d(inputs=input_mfcc,
                                filters=32,
                                kernel_size=[29,294],
                                padding='SAME',
                                activation=tf.nn.relu)

        pool1 = tf.layers.max_pooling2d(inputs=conv1,pool_size=[4,4],strides=4)
        print("conv1:",conv1.get_shape().as_list())
        print("pool1:",pool1.get_shape().as_list())
    with tf.variable_scope('conv2'):
        # 25*250  conv2 6*51*64 ===> 20*200*64
        # 20*200*64  pool1 4*4  s4====> 5*50*64
        conv2 = tf.layers.conv2d(inputs=pool1,
                                filters=64,
                                kernel_size=[6,51],
                                padding='SAME',
                                activation=tf.nn.relu)
        pool2 = tf.layers.max_pooling2d(inputs=conv2,pool_size=[4,4],strides=4)
        print("conv2:",conv2.get_shape().as_list())
        print("pool2:",pool2.get_shape().as_list())

    with tf.variable_scope('conv3'):
        #5*5*64
        conv3 = tf.layers.conv2d(inputs=pool2,
                                filters=64,
                                kernel_size=[1,46],
                                padding='SAME',
                                activation=tf.nn.relu)
        print("conv3",conv3.get_shape().as_list())

    with tf.variable_scope('fc1'):
        pool2_flat = tf.reshape(pool2,[5,-1])
        print("pool2_flat",pool2_flat.get_shape().as_list())
        fc1 = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
        dropout1 = tf.layers.dropout(inputs=fc1, rate=0.4, training=train)
        print("dropout1",dropout1.get_shape().as_list())
    with tf.variable_scope('logits'):
        logits = tf.layers.dense(inputs=dropout1, units=2)
        predit = tf.nn.softmax(logits=logits)
        print("logits",logits.get_shape().as_list())
        print("predit",predit.get_shape().as_list())
    return predit


def losses(logits,labels):
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,logits=logits,name='cross_entropy')
    cross_entropy_loss = tf.reduce_mean(cross_entropy)
    return cross_entropy

def training(loss,learning_rate):
    with tf.name_scope("optimizer"):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name="global_step", trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op

def evaluation(logits,labels):
    with tf.variable_scope("accuracy"):
        correct = tf.nn.in_top_k(logits,labels,1)
        correct = tf.cast(correct,tf.float32)
        accuracy = tf.reduce_mean(correct)
    return accuracy



ckpt="./model/music/model.ckpt"
N_CLASSES = 2
MFCC_ROW = 128
MFCC_COL = 1293
INPUT_NODE = MFCC_ROW * MFCC_COL
BATCH_SIZE = 5
CAPACITY = 20
MAX_STEP = 500
learning_rate = 0.0001

def run_train():
    mfcc, label= read_TFRecord()
    train_batch,train_labels_batch = tf.train.batch([mfcc,label],batch_size=BATCH_SIZE,num_threads=1,capacity=CAPACITY)
    print("train_batch",train_batch.get_shape().as_list())
    print("labels_batch",train_labels_batch.get_shape().as_list())
    train_logits = inference(train_batch,True)
    print(train_logits.get_shape().as_list())
    train_loss = losses(train_logits, train_labels_batch)
    train_op = training(train_loss,learning_rate)
    train_acc = evaluation(train_logits,train_labels_batch)

    with tf.Session() as sess:
        saver = tf.train.Saver()
        init_op = tf.group(tf.local_variables_initializer(),
                            tf.global_variables_initializer())
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        try:
            for step in range(MAX_STEP):
                if coord.should_stop():
                    break;
                _,tra_loss,tra_acc = sess.run([train_op,train_loss,train_acc])
        # print some 
                if step%50==0:
                    print('Step %d,train loss = %.2f,train occuracy = %.2f%%'%(step,tra_loss,tra_acc))
        # 100 save
                if step % 100 ==0 or (step +1) == MAX_STEP:
                    saver.save(sess,ckpt,global_step = step)

        except tf.errors.OutOfRangeError:
            print('Done training epoch limit reached')
        finally:
            coord.request_stop()
            coord.join(threads)




run_train()   

ValueError                                Traceback (most recent call last)
<ipython-input-6-c2bffa4d5f17> in <module>()
----> 1 run_train()

<ipython-input-5-1743ee19f55f> in run_train()
     18     train_logits = inference(train_batch,True)
     19     print(train_logits.get_shape().as_list())
---> 20     train_loss = losses(train_logits, train_labels_batch)
     21     train_op = training(train_loss,learning_rate)
     22     train_acc = evaluation(train_logits,train_labels_batch)

<ipython-input-4-a0a7b4ee345d> in losses(logits, labels)
      1 def losses(logits,labels):
----> 2     cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,logits=logits,name='cross_entropy')
      3     cross_entropy_loss = tf.reduce_mean(cross_entropy)
      4     return cross_entropy
      5 

D:\Anaconda3\envs\tfenv\lib\site-packages\tensorflow\python\ops\nn_ops.py in sparse_softmax_cross_entropy_with_logits(_sentinel, labels, logits, name)
   2037       raise ValueError("Rank mismatch: Rank of labels (received %s) should "
   2038                        "equal rank of logits minus 1 (received %s)." %
-> 2039                        (labels_static_shape.ndims, logits.get_shape().ndims))
   2040     if (static_shapes_fully_defined and
   2041         labels_static_shape != logits.get_shape()[:-1]):

ValueError: Rank mismatch: Rank of labels (received 2) should equal rank of logits minus 1 (received 2).

Expect output:

train_batch [5, 128, 1293, 1]
labels_batch [5, 2]
conv1: [5, 100, 1000, 32]
pool1: [5, 25, 250, 32]
conv2: [5, 20, 200, 64]
pool2: [5, 5, 50, 64]
conv3 [5, 5, 5, 64]
pool2_flat [5, 5*5*64]
dropout1 [5, 1024]
logits [5, 2]
predit [5, ]
train_logits [5, ]

Actual output:

train_batch [5, 128, 1293, 1]
labels_batch [5, 2]
conv1: [5, 128, 1293, 32]
pool1: [5, 32, 323, 32]
conv2: [5, 32, 323, 64]
pool2: [5, 8, 80, 64]
conv3 [5, 8, 80, 64]
pool2_flat [5, 40960]
dropout1 [5, 1024]
logits [5, 2]
predit [5, 2]
train_logits [5, 2]

I used tf.layers.conv2d. As I understand it, it should change the dimensions of the inputs.

No. You used padding='SAME', which pads the input so that (with stride 1) the output keeps the same spatial shape as the input.
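A small sketch of the difference (TF 1.x layers API; shapes taken from the question, not your exact model):

import tensorflow as tf

x = tf.placeholder(tf.float32, [5, 128, 1293, 1])

# 'SAME' pads the input, so the spatial size is preserved (stride 1).
same = tf.layers.conv2d(x, filters=32, kernel_size=[29, 294], padding='SAME')
# 'VALID' applies no padding, so the (W - F)/S + 1 arithmetic applies.
valid = tf.layers.conv2d(x, filters=32, kernel_size=[29, 294], padding='VALID')

print(same.get_shape().as_list())    # [5, 128, 1293, 32]
print(valid.get_shape().as_list())   # [5, 100, 1000, 32]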

Shouldn't it be shape (5,)?

No, softmax normalizes the values along the last axis (so each row of the (5, 2) output sums to 1); it does not change the shape.
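A short sketch (TF 1.x) showing this; if you actually want class indices of shape (5,), take tf.argmax over the last axis instead:

import tensorflow as tf

logits = tf.constant([[2.0, 0.0],
                      [0.5, 1.5]])
probs = tf.nn.softmax(logits)             # same shape as logits
classes = tf.argmax(logits, axis=1)       # shape (2,) -- one index per row

with tf.Session() as sess:
    print(sess.run(probs))                # each row sums to 1
    print(probs.get_shape().as_list())    # [2, 2]
    print(classes.get_shape().as_list())  # [2]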
