
Dimensions must be equal, but are 1 and 64 for 'Conv2D_1' (op: 'Conv2D') with input shapes: [?,24,24,1], [5,5,64,64]

I have the following code:

import numpy as np
import matplotlib.pyplot as plt
import cifar_tools
import tensorflow as tf

data, labels = cifar_tools.read_data('C:\\Users\\abc\\Desktop\\temp')

x = tf.placeholder(tf.float32, [None, 24 * 24])
y = tf.placeholder(tf.float32, [None, 2])

w1 = tf.Variable(tf.random_normal([5, 5, 1, 64]))
b1 = tf.Variable(tf.random_normal([64]))

w2 = tf.Variable(tf.random_normal([5, 5, 64, 64]))
b2 = tf.Variable(tf.random_normal([64]))

w3 = tf.Variable(tf.random_normal([6*6*64, 1024]))
b3 = tf.Variable(tf.random_normal([1024]))

w_out = tf.Variable(tf.random_normal([1024, 2]))
b_out = tf.Variable(tf.random_normal([2]))

def conv_layer(x,w,b):
    conv = tf.nn.conv2d(x,w,strides=[1,1,1,1], padding = 'SAME')
    conv_with_b = tf.nn.bias_add(conv,b)
    conv_out = tf.nn.relu(conv_with_b)
    return conv_out

def maxpool_layer(conv,k=2):
    return tf.nn.max_pool(conv, ksize=[1,k,k,1], strides=[1,k,k,1], padding='SAME')

def model():
    x_reshaped = tf.reshape(x, shape=[-1,24,24,1])

    conv_out1 = conv_layer(x_reshaped, w1, b1)
    maxpool_out1 = maxpool_layer(conv_out1)
    norm1 = tf.nn.lrn(maxpool_out1, 4, bias=1.0, alpha=0.001/9.0, beta=0.75)

    conv_out2 = conv_layer(x_reshaped, w2, b2)
    norm2 = tf.nn.lrn(maxpool_out2, 4, bias=1.0, alpha=0.001/9.0, beta=0.75)
    maxpool_out2 = maxpool_layer(conv_out2)

    maxpool_reshaped = tf.reshape(maxpool_out2, [-1,w3.get_shape().as_list()[0]])
    local = tf.add(tf.matmul(maxpool_reshaped, w3), b3)
    local_out = tf.nn.relu(local)

    out = tf.add(tf.matmul(local_out, w_out), b_out)
    return out

model_op = model()

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model_op, labels=y))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

correct_pred = tf.equal(tf.argmax(model_op, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    onehot_labels = tf.one_hot(labels, 2, on_value=1.,off_value=0.,axis=-1)
    onehot_vals = sess.run(onehot_labels)
    batch_size = len(data) // 200
    print('batch size', batch_size)
    for j in range(0, 1000):
        print('EPOCH', j)
        for i in range(0, len(data), batch_size):
            batch_data = data[i:i+batch_size, :]
            batch_onehot_vals = onehot_vals[i:i+batch_size, :]
            _, accuracy_val = sess.run([train_op, accuracy], feed_dict={x: batch_data, y: batch_onehot_vals})
            if i % 1000 == 0:
                print(i, accuracy_val)
        print('DONE WITH EPOCH')

When I run the code, I get the following error which I'm not sure how to fix:

Traceback (most recent call last):
  File "C:\Python35\lib\site-packages\tensorflow\python\framework\common_shapes.py", line 670, in _call_cpp_shape_fn_impl
    status)
  File "C:\Python35\lib\contextlib.py", line 66, in __exit__
    next(self.gen)
  File "C:\Python35\lib\site-packages\tensorflow\python\framework\errors_impl.py", line 469, in raise_exception_on_not_ok_status
    pywrap_tensorflow.TF_GetCode(status))
tensorflow.python.framework.errors_impl.InvalidArgumentError: Dimensions must be equal, but are 1 and 64 for 'Conv2D_1' (op: 'Conv2D') with input shapes: [?,24,24,1], [5,5,64,64].

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "cnn.py", line 50, in <module>
    model_op = model()
  File "cnn.py", line 39, in model
    conv_out2 = conv_layer(x_reshaped, w2, b2)
  File "cnn.py", line 24, in conv_layer
    conv = tf.nn.conv2d(x,w,strides=[1,1,1,1], padding = 'SAME')
  File "C:\Python35\lib\site-packages\tensorflow\python\ops\gen_nn_ops.py", line 396, in conv2d
    data_format=data_format, name=name)
  File "C:\Python35\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 759, in apply_op
    op_def=op_def)
  File "C:\Python35\lib\site-packages\tensorflow\python\framework\ops.py", line 2242, in create_op
    set_shapes_for_outputs(ret)
  File "C:\Python35\lib\site-packages\tensorflow\python\framework\ops.py", line 1617, in set_shapes_for_outputs
    shapes = shape_func(op)
  File "C:\Python35\lib\site-packages\tensorflow\python\framework\ops.py", line 1568, in call_with_requiring
    return call_cpp_shape_fn(op, require_shape_fn=True)
  File "C:\Python35\lib\site-packages\tensorflow\python\framework\common_shapes.py", line 610, in call_cpp_shape_fn
    debug_python_shape_fn, require_shape_fn)
  File "C:\Python35\lib\site-packages\tensorflow\python\framework\common_shapes.py", line 675, in _call_cpp_shape_fn_impl
    raise ValueError(err.message)
ValueError: Dimensions must be equal, but are 1 and 64 for 'Conv2D_1' (op: 'Conv2D') with input shapes: [?,24,24,1], [5,5,64,64].

Do you have an idea on why I'm getting this error, and how I can fix it?

Thanks.

In your code, you apply the second convolution to the original data as well:

conv_out2 = conv_layer(x_reshaped, w2, b2)

So the number of channels does not match (1 channel in your original data vs. 64 input channels expected by w2).

If you want to apply it to the output of the first layer, replace x_reshaped with maxpool_out1, that is:

conv_out2 = conv_layer(maxpool_out1, w2, b2)

Then the channel counts match, since w1 has 64 output channels.
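
As a quick sanity check, here is a minimal, self-contained sketch (using the same TF 1.x API as the question; the variable names just mirror yours for illustration) that traces the shapes and shows why maxpool_out1 is a valid input for w2:

import tensorflow as tf  # assuming TF 1.x, as in the question

x_reshaped = tf.placeholder(tf.float32, [None, 24, 24, 1])
w1 = tf.Variable(tf.random_normal([5, 5, 1, 64]))
w2 = tf.Variable(tf.random_normal([5, 5, 64, 64]))

conv_out1 = tf.nn.conv2d(x_reshaped, w1, strides=[1, 1, 1, 1], padding='SAME')
maxpool_out1 = tf.nn.max_pool(conv_out1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

print(x_reshaped.get_shape())    # (?, 24, 24, 1)  -- 1 channel, does not match w2's 64 in_channels
print(maxpool_out1.get_shape())  # (?, 12, 12, 64) -- 64 channels, matches w2

# Valid: the input's channel dimension (64) now equals w2's in_channels (64).
conv_out2 = tf.nn.conv2d(maxpool_out1, w2, strides=[1, 1, 1, 1], padding='SAME')
print(conv_out2.get_shape())     # (?, 12, 12, 64)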

In addition, in the following part of your code

conv_out2 = conv_layer(x_reshaped, w2, b2)
norm2 = tf.nn.lrn(maxpool_out2, 4, bias=1.0, alpha=0.001/9.0, beta=0.75)
maxpool_out2 = maxpool_layer(conv_out2)

I think you should reverse the order of the last two lines, because you use maxpool_out2 before it is defined.
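
Putting both fixes together, model() could look like the sketch below. It assumes the placeholders, weights, and helper functions from your script, and follows the suggestion above of feeding maxpool_out1 (not norm1) into the second convolution; the shape comments assume 'SAME' padding and 2x2 pooling:

def model():
    x_reshaped = tf.reshape(x, shape=[-1, 24, 24, 1])           # [?, 24, 24, 1]

    conv_out1 = conv_layer(x_reshaped, w1, b1)                   # [?, 24, 24, 64]
    maxpool_out1 = maxpool_layer(conv_out1)                      # [?, 12, 12, 64]
    norm1 = tf.nn.lrn(maxpool_out1, 4, bias=1.0, alpha=0.001/9.0, beta=0.75)

    conv_out2 = conv_layer(maxpool_out1, w2, b2)                 # [?, 12, 12, 64]
    maxpool_out2 = maxpool_layer(conv_out2)                      # [?, 6, 6, 64]
    norm2 = tf.nn.lrn(maxpool_out2, 4, bias=1.0, alpha=0.001/9.0, beta=0.75)

    # 6 * 6 * 64 matches the first dimension of w3, so the flatten below lines up.
    maxpool_reshaped = tf.reshape(maxpool_out2, [-1, w3.get_shape().as_list()[0]])
    local = tf.add(tf.matmul(maxpool_reshaped, w3), b3)
    local_out = tf.nn.relu(local)

    out = tf.add(tf.matmul(local_out, w_out), b_out)
    return out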
