Why do I get "nan" for Loss when I adapt the MNIST TensorFlow code to my dataset?
My project goal is to inspect the weight map of a neural network trained on my dataset. I followed the MNIST example code and it worked fine. The MNIST dataset has 784 (28*28) pixel_data inputs and 10 class_data outputs. My dataset has 72 (8*9) pixel_data inputs and 4 class_data outputs. I wrote the code to handle my dataset in the same format as the MNIST dataset, but when I train, the Loss keeps giving "NaN" values. You can check my code and dataset in my github.
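For reference, a minimal NumPy sketch (with made-up values, not the actual data) of how the hand-rolled cross-entropy used below can produce NaN once a softmax output saturates to exactly 0.0 in float32:

import numpy as np
y_pred = np.array([1.0, 0.0, 0.0, 0.0], dtype=np.float32)  # hypothetical saturated softmax output
y_true = np.array([0.0, 1.0, 0.0, 0.0], dtype=np.float32)  # one-hot label
# log(0) is -inf, and 0 * -inf evaluates to nan, so the sum (and the mean loss) is nan
print(-np.sum(y_true * np.log(y_pred)))  # nan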
import os
import glob
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import random as ran
# Display a sample as an 8x9 image
def display_digit(num):
    print(y_data[num])
    label = y_data[num].argmax(axis=0)
    image = x_data[num].reshape([8,9])
    plt.title('Example: %d Label: %d' % (num, label))
    plt.imshow(image, cmap=plt.get_cmap('gray_r'))
    plt.show()
# Display a range of samples in flattened (vector) form
def display_mult_flat(start, stop):
    images = x_data[start].reshape([1,72])
    for i in range(start+1,stop):
        images = np.concatenate((images, x_data[i].reshape([1,72])))
    plt.imshow(images, cmap=plt.get_cmap('gray_r'))
    plt.show()
# Encode a label as a one-hot vector
def oneshot(n):
    if n=="1":
        return [1,0,0,0]
    elif n=="2":
        return [0,1,0,0]
    elif n=="3":
        return [0,0,1,0]
    elif n=="4":
        return [0,0,0,1]
# Return input and output data in the same format the MNIST example uses
def Get_data():
    Glass_dir='./glass_data/'
    csv_filenames = [i for i in glob.glob('./glass_data/*.{}'.format('csv'))]
    y_data=[]
    x_data=[]
    for filename in csv_filenames:
        y=oneshot(filename[13])
        csv_file = pd.read_csv(filename)
        df = pd.DataFrame(csv_file).T
        df.columns = df.iloc[0]
        df = df[1:]
        df = df.ffill()
        for i in range(len(df.index)):
            y_data.append(y)
        for row in df.iterrows():
            index, data = row
            x_data.append(data.tolist())
    combined = list(zip(x_data, y_data))
    ran.shuffle(combined)
    x_data[:], y_data[:] = zip(*combined)
    y_data=np.array(y_data)
    x_data=np.array(x_data, dtype=np.float32)
    return x_data, y_data
# Use 5 samples from each class as test data
def Get_testdata():
    Glass_dir='./glass_data/'
    csv_filenames = [i for i in glob.glob('./glass_data/*.{}'.format('csv'))]
    y_test=[]
    x_test=[]
    for filename in csv_filenames:
        y=oneshot(filename[13])
        csv_file = pd.read_csv(filename)
        df = pd.DataFrame(csv_file).T
        df.columns = df.iloc[0]
        df = df[1:]
        df = df.ffill()
        for i in range(5):
            y_test.append(y)
        df2=df.head()
        for row in df2.iterrows():
            index, data = row
            x_test.append(data.tolist())
    y_test=np.array(y_test)
    x_test=np.array(x_test, dtype=np.float32)
    return x_test, y_test
# In[3]:
x_data, y_data = Get_data()
# In[4]:
x_data
# In[5]:
y_data
# In[6]:
display_digit(ran.randint(0, x_data.shape[0] - 1))  # randint is inclusive on both ends
# In[7]:
display_mult_flat(0,200)
# In[8]:
sess = tf.Session()
x = tf.placeholder(tf.float32, shape=[None, 72])
y_ = tf.placeholder(tf.float32, shape=[None, 4])
W = tf.Variable(tf.zeros([72,4]))
b = tf.Variable(tf.zeros([4]))
y = tf.nn.softmax(tf.matmul(x,W) + b)
print(y)
# In[9]:
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
# In[10]:
x_train, y_train = Get_data()
x_test, y_test= Get_testdata()
LEARNING_RATE = 0.01
TRAIN_STEPS = 2500
# In[11]:
init = tf.global_variables_initializer()
sess.run(init)
# In[12]:
training = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# In[13]:
for i in range(TRAIN_STEPS+1):
    sess.run(training, feed_dict={x: x_train, y_: y_train})
    if i%100 == 0:
        print('Training Step:' + str(i) + ' Accuracy = ' + str(sess.run(accuracy, feed_dict={x: x_train, y_: y_train})) + ' Loss = ' + str(sess.run(cross_entropy, {x: x_train, y_: y_train})))
# In[14]:
for i in range(4):
    plt.subplot(2, 5, i+1)
    weight = sess.run(W)[:,i]
    plt.title(i)
    plt.imshow(weight.reshape([8,9]), cmap=plt.get_cmap('seismic'))
    frame1 = plt.gca()
    frame1.axes.get_xaxis().set_visible(False)
    frame1.axes.get_yaxis().set_visible(False)
# In[15]:
plt.show()
Two suggestions: first, initialize your weights to something other than zeros. For instance:
W = tf.Variable(tf.truncated_normal(shape=[72, 4]))
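Note that tf.truncated_normal defaults to stddev=1.0; a smaller spread, for example tf.truncated_normal(shape=[72, 4], stddev=0.1), is a common choice so the initial logits stay small.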
Second, it is recommended that you use tf.nn.softmax_cross_entropy_with_logits rather than calculating the cross-entropy yourself. It is more efficient and numerically stable. To do so, get rid of your softmax call and change your cost:
y = tf.matmul(x,W) + b # your network output before softmax (aka "logits")
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
cross_entropy = tf.reduce_mean(cross_entropy)
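Putting both suggestions together, the relevant part of the graph would look roughly like this (a sketch; the rest of the script stays unchanged, and argmax over the raw logits gives the same predictions as argmax over softmax(logits), since softmax is monotonic):

W = tf.Variable(tf.truncated_normal(shape=[72, 4], stddev=0.1))
b = tf.Variable(tf.zeros([4]))
logits = tf.matmul(x, W) + b  # raw scores; the softmax happens inside the loss op
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
training = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))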