I'm using the simple TensorFlow neural-network example:
# Width of each hidden layer (number of nodes).
nodesLayer1 = 500
nodesLayer2 = 500
nodesLayer3 = 500

# Number of target classes (binary classification here).
classes = 2

# Examples consumed per training step.
batchSize = 500

# Width of a single input example, taken from the first row of the dataset.
sizeOfRow = len(data[0])

# Graph inputs: x receives a batch of examples, y their labels.
# Both are fed at run time via feed_dict.
x = tensorFlow.placeholder(dtype="float", shape=[None, sizeOfRow])
y = tensorFlow.placeholder(dtype="float")
def neuralNetworkModel(data):
    """Build a three-hidden-layer feed-forward network and return its logits.

    Each hidden layer computes relu(input @ weights + biases); the output
    layer is left linear so a softmax cross-entropy loss can be applied.
    """
    # Consecutive layer widths; each adjacent pair defines one weight matrix
    # of shape nodes(i) x nodes(i+1), mirroring y = ax + b per layer.
    layerSizes = [sizeOfRow, nodesLayer1, nodesLayer2, nodesLayer3, classes]

    # One {"weights", "biases"} pair per connection between consecutive layers,
    # all initialized from a normal distribution.
    params = []
    for fanIn, fanOut in zip(layerSizes[:-1], layerSizes[1:]):
        params.append({
            "weights": tensorFlow.Variable(tensorFlow.random_normal([fanIn, fanOut])),
            "biases": tensorFlow.Variable(tensorFlow.random_normal([fanOut])),
        })

    # Hidden layers: affine transform followed by the ReLU activation.
    activation = data
    for layer in params[:-1]:
        linear = tensorFlow.add(tensorFlow.matmul(activation, layer["weights"]),
                                layer["biases"])
        activation = tensorFlow.nn.relu(linear)

    # Output layer stays linear (raw logits).
    return tensorFlow.matmul(activation, params[-1]["weights"]) + params[-1]["biases"]
def neuralNetworkTrain(x):
    """Train the network and print per-epoch loss plus a final accuracy.

    x: the input placeholder (a batch of examples is fed into it each step).
    Also relies on module-level globals: y (label placeholder), data,
    batchSize, and the nextBatch helper.
    """
    prediction = neuralNetworkModel(x)
    # Softmax cross-entropy normalizes the logits to (0, 1) internally.
    # NOTE(review): newer TF releases require keyword arguments here
    # (labels=y, logits=prediction) — confirm against the installed version.
    cost = tensorFlow.reduce_mean(tensorFlow.nn.softmax_cross_entropy_with_logits(prediction, y))
    # Minimize the cost; Gradient Descent would work the same way.
    # AdamOptimizer's default learning rate is 0.001; a smaller one is used here.
    optimizer = tensorFlow.train.AdamOptimizer(0.0001).minimize(cost)
    epochs = 15

    # Build a session and train the model.
    with tensorFlow.Session() as sess:
        sess.run(tensorFlow.initialize_all_variables())
        for epoch in range(epochs):
            epochLoss = 0
            i = 0
            for _ in range(int(len(data) / batchSize)):
                ex, ey = nextBatch(batchSize, i)  # takes 500 examples
                i += 1
                # BUG FIX: both placeholders must be fed — `cost` depends on
                # x (through `prediction`) AND on y, so running it without a
                # feed_dict raises "You must feed a value for placeholder tensor".
                _, batchCost = sess.run((optimizer, cost), feed_dict={x: ex, y: ey})
                epochLoss += batchCost
            print("Epoch", epoch, "completed out of", epochs, "loss:", epochLoss)

        correct = tensorFlow.equal(tensorFlow.argmax(prediction, 1), tensorFlow.argmax(y, 1))
        accuracy = tensorFlow.reduce_mean(tensorFlow.cast(correct, "float"))
        # `accuracy` also depends on both placeholders, so feed them here too.
        # (This evaluates on the last training batch only — presumably a
        # held-out test set is intended; TODO confirm.)
        print("Accuracy:", accuracy.eval(feed_dict={x: ex, y: ey}))
and I got this error message:
Caused by op u'Placeholder_1', defined at:
File "/home/or/PycharmProjects/untitled/NeuralNetwork.py", line 39, in <module>
y = tensorFlow.placeholder(dtype= "float")
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_ops.py", line 1332, in placeholder
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 1748, in _placeholder
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 749, in apply_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2380, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1298, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder_1' with dtype float
[[Node: Placeholder_1 = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Has anyone seen this before and can explain how to solve it? I have tried many things and read a lot on the TensorFlow website, but I can't find the exact answer.
When specifying a placeholder's data type, you should be explicit about the bit width — try x = tf.placeholder(dtype=tf.float32)
(people often use float32
for speed and memory reasons). Here is the link to the documentation for placeholder.
When running the session, you need to feed real data with a matching data type for every placeholder. Change the neuralNetworkTrain
function to the following:
def neuralNetworkTrain(x):
    """Train the network, feeding BOTH placeholders on every run call."""
    prediction = neuralNetworkModel(x)
    # ... omitted code...
    # build sessions and train the model
    with tensorFlow.Session() as sess:
        sess.run(tensorFlow.initialize_all_variables())
        for epoch in range(epochs):
            epochLoss = 0
            i = 0
            for temp in range(int(len(data) / batchSize)):
                ex, ey = nextBatch(batchSize, i)  # takes 500 examples
                i += 1
                # Feed BOTH placeholders: `cost` reads y as well as x (via
                # `prediction`), so feeding only x would raise the very same
                # "You must feed a value for placeholder" error for y.
                feed_dict = {x: ex, y: ey}
                temp, cos = sess.run((optimizer, cost), feed_dict=feed_dict)
                epochLoss += cos
            print("Epoch", epoch, "completed out of", epochs, "loss:", epochLoss)

        correct = tensorFlow.equal(tensorFlow.argmax(prediction, 1), tensorFlow.argmax(y, 1))
        accuracy = tensorFlow.reduce_mean(tensorFlow.cast(correct, "float"))
        # `accuracy` depends on x (through `prediction`) and on y,
        # so both placeholders must be fed here as well.
        print("Accuracy:", accuracy.eval(feed_dict={x: ex, y: ey}))
The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address.Any question please contact:yoyou2525@163.com.