简体   繁体   中英

Tensorflow throws “Dimensions must be equal, but are 100 and 0 for 'MatMul' (op: 'MatMul') with input shapes: [0,100], [0,100].”

I am trying to learn tensorflow after a tutorial, but I don't want to use mnist database, so I can learn database management in python( I am new to it, and it's a hard learning curve going from c++/java to it)

So, here is my code. I've tried printing shapes, values, and all sort of stuff, but none seemed to work. Note: if i make x of shape [0, 100] and the weights [100, 0], the error from matmul goes away, but the resul is of shape [0,0] and cannot be added to the biases. I am 100% sure it's a newbie error, but i will appreciate any help from you. Thanks in advance.

import tensorflow as tf
import pandas as pd

data = pd.read_csv('trainingData.txt', sep = "\t", header = None )
data.columns = ["in", "out"]

data_x = data.loc[: , "in"]
data_y = data.loc[: , "out"]

n_noduri_hl1 = 100
n_noduri_hl2 = 250
n_noduri_hl3 = 100

batch_size = 100
x = tf.placeholder("float", [0, 100])
y = tf.placeholder('float')


def Neural_Network(data):
    # input * wheight + bias

    hidden_1 = {'weight': tf.Variable(tf.random_normal([0, n_noduri_hl1])),
                'biases': tf.Variable(tf.random_normal([n_noduri_hl1]))}

    hidden_2 = {'weight': tf.Variable(tf.random_normal([n_noduri_hl1,      n_noduri_hl2])),
                'biases': tf.Variable(tf.random_normal([n_noduri_hl2]))}

    hidden_3 = {'weight': tf.Variable(tf.random_normal([n_noduri_hl2, n_noduri_hl3])),
                'biases': tf.Variable(tf.random_normal([n_noduri_hl3]))}

    output_layer = {'weight': tf.Variable(tf.random_normal([n_noduri_hl3, 1])),
                    'biases': tf.Variable(tf.random_normal([1]))}
    #calcul
    print("data: ", data, "matmul: ", tf.matmul(data, hidden_1['weight']))

    l1 = tf.add(tf.matmul(data, hidden_1['weight']), hidden_1['biases'])
    l1 = tf.nn.relu(l1)

    l2 = tf.add(tf.matmul(l1, hidden_2['weight']), hidden_2['biases'])
    l2 = tf.nn.relu(l2)

    l3 = tf.add(tf.matmul(l2, hidden_3['weight']), hidden_3['biases'])
    l3 = tf.nn.relu(l3)

    output = tf.matmul(l3, output_layer['weight']) + output_layer['biases']

    return output

def get_next_batch(dataptr, batch_size, index):
    batch = dataptr.loc[index: index+batch_size]
    print(batch)
    return batch

def train(x):
    predictie = Neural_Network(x)
    cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits_v2(logits = predictie, labels = y))

    optimizer = tf.train.AdamOptimizer().minimize(cost)

    epoci = 10
    index = 0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for epoca in range(epoci):
            loss = 0
            for _ in range(int(len(data)/batch_size)):
                ep_x = get_next_batchin(data_x, batch_size, index)
                ep_y = get_next_batchout(data_ybatch_size, index)
                index += batch_size
                _, c = sess.run([optimizer, cost], feed_dict = {x: ep_x, y: ep_y})
               loss += c
            print('Epoca: ', epoca, " din ", epoci, " loss: ", loss)
        corect = tf.equal(tf.argmax(predictie, 1), tf.argmax(y,1))

        accuracy = tf.reduce_mean(tf.cast(corect, 'float'))

        print('Acuratete: ', accuracy.eval({x: data.loc[: , "in"], y: data.loc[: , "out"]}))

train(x)

Instead of 0 , your placeholder should have None for the first dimension (the batch dimension) and the following dimensions should be the size of the description vector / matrix.

For example, x = tf.placeholder("float", [None, 64, 64, 3]) would be the place holder for a batch of 64 x 64 pixel RGB colour images.

When performing 2D matrix multiplication, the number of columns of the first operand must match the number of rows of the second operand. This is just how matrix multiplication is defined.

The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address.Any question please contact:yoyou2525@163.com.

 
粤ICP备18138465号  © 2020-2024 STACKOOM.COM