
Tensorflow Error: Attempting to use uninitialized value beta1_power_18

Here is my code for a simple neural network in TensorFlow:

import tensorflow as tf
import numpy as np

class Model:
    def __init__(self,input_neuron=2,hidden_neuron=10,output_neuron=2):
        self.input_neuron = input_neuron
        self.hidden_neuron = hidden_neuron
        self.output_neuron = output_neuron
        self.x = tf.placeholder(tf.float32,[None,self.input_neuron])
        self.y = tf.placeholder(tf.float32,[None,self.output_neuron])
        self.model = self.graph()
        self.sess = tf.InteractiveSession()
        self.sess.run(tf.global_variables_initializer())



    @staticmethod
    def one_hot_encode(y):
        y_ = np.zeros((len(y),2))
        for i in range(len(y)):
            y_[i,y[i][0]]=1

        return y_

    def graph(self):
        w1=tf.Variable(tf.random_normal([self.input_neuron,self.hidden_neuron]))
        l1=tf.nn.relu(tf.matmul(self.x,w1))
        w2=tf.Variable(tf.random_normal([self.hidden_neuron,self.output_neuron]))
        l2=tf.matmul(l1,w2)

        return l2

    def train(self,xTrain,yTrain):
        yTrain = self.one_hot_encode(yTrain)
        loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(
                             logits=self.model,labels=self.y))
        train = tf.train.AdamOptimizer(0.1).minimize(loss)
        for epoch in range(100):
            self.sess.run(train,feed_dict={self.x:xTrain,self.y:yTrain})

    def predict(self,xTest):
        prediction = tf.argmax(self.model)
        return self.sess.run(prediction,feed_dict={x:xTest}) 

When I run this with:

model = Model()
xTrain = np.array([[0,0],[0,1],[1,0],[1,1]])
yTrain = np.array([[0],[1],[1],[0]])
model.train(xTrain,yTrain)

I get this error:

FailedPreconditionError (see above for traceback): Attempting to use uninitialized value beta1_power_18

What am I doing wrong?

You do run self.sess.run(tf.global_variables_initializer()) in the Model class's __init__, but tf.train.AdamOptimizer() is only set up in the train() method, and it creates some additional variables that also need to be initialized. Move

self.sess.run(tf.global_variables_initializer())

after

train = tf.train.AdamOptimizer(0.1).minimize(loss)

and it will work.
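To see why, note that minimize() itself adds variables to the graph: in TF 1.x, Adam keeps m/v slot variables for every trainable variable plus the beta1_power/beta2_power accumulators (the very variable named in your error). A minimal sketch that makes this visible:

import tensorflow as tf

# One trainable variable and a toy loss
w = tf.Variable(tf.zeros([2, 2]))
loss = tf.reduce_sum(tf.square(w))

print(len(tf.global_variables()))  # 1: just w
train = tf.train.AdamOptimizer(0.1).minimize(loss)
print(len(tf.global_variables()))  # 5: w, its m/v slots, beta1_power, beta2_power

An initializer created and run before the minimize() call therefore never touches those extra variables, which is exactly what the FailedPreconditionError is complaining about.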

Full code (tested):

import tensorflow as tf
import numpy as np

class Model:
    def __init__(self,input_neuron=2,hidden_neuron=10,output_neuron=2):
        self.input_neuron = input_neuron
        self.hidden_neuron = hidden_neuron
        self.output_neuron = output_neuron
        self.x = tf.placeholder(tf.float32,[None,self.input_neuron])
        self.y = tf.placeholder(tf.float32,[None,self.output_neuron])
        self.model = self.graph()
        self.sess = tf.InteractiveSession()
        # note: no initializer here; it now runs in train(), after the optimizer exists

    @staticmethod
    def one_hot_encode(y):
        y_ = np.zeros((len(y),2))
        for i in range(len(y)):
            y_[i,y[i][0]]=1

        return y_

    def graph(self):
        w1=tf.Variable(tf.random_normal([self.input_neuron,self.hidden_neuron]))
        l1=tf.nn.relu(tf.matmul(self.x,w1))
        w2=tf.Variable(tf.random_normal([self.hidden_neuron,self.output_neuron]))
        l2=tf.matmul(l1,w2)

        return l2

    def train(self,xTrain,yTrain):
        yTrain = self.one_hot_encode(yTrain)
        loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(
                             logits=self.model,labels=self.y))
        train = tf.train.AdamOptimizer(0.1).minimize(loss)
        # Run the initializer only after minimize(), so that Adam's
        # freshly created variables are included.
        self.sess.run(tf.global_variables_initializer())

        for epoch in range(100):
            self.sess.run(train,feed_dict={self.x:xTrain,self.y:yTrain})
        print("Training done!")

    def predict(self,xTest):
        # argmax over the class axis; feed through the model's own placeholder self.x
        prediction = tf.argmax(self.model,1)
        return self.sess.run(prediction,feed_dict={self.x:xTest})

model = Model()
xTrain = np.array([[0,0],[0,1],[1,0],[1,1]])
yTrain = np.array([[0],[1],[1],[0]])
model.train(xTrain,yTrain)

Following up on your comment: if you don't want to reinitialize the whole network every time you call train(), you need to initialize the network in the __init__() method and use tf.report_uninitialized_variables() to find the variables that are still uninitialized, initializing only those in train(). Based on Salvador Dali's answer to this question, I wrote an initialize_uninitialized() method that does exactly that.
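One caveat worth flagging (this assumes Python 3): tf.report_uninitialized_variables() returns the variable names as byte strings there, so they have to be decoded before comparing them with v.name, which is a str. A minimal sketch of the matching step:

# names come back as byte strings under Python 3, e.g. b'beta1_power'
names = sess.run(tf.report_uninitialized_variables())
uninitialized_names = set(n.decode() for n in names)  # decode before comparing with v.name

The initialize_uninitialized() method in the full code below applies this decoding.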

Full code (tested):

import tensorflow as tf
import numpy as np

class Model:
    def __init__(self,input_neuron=2,hidden_neuron=10,output_neuron=2):
        self.input_neuron = input_neuron
        self.hidden_neuron = hidden_neuron
        self.output_neuron = output_neuron
        self.x = tf.placeholder(tf.float32,[None,self.input_neuron])
        self.y = tf.placeholder(tf.float32,[None,self.output_neuron])
        self.model = self.graph()
        self.sess = tf.InteractiveSession()
        # The network weights are initialized once, here; optimizer variables
        # created later in train() are handled by initialize_uninitialized().
        self.sess.run(tf.global_variables_initializer())

    @staticmethod
    def one_hot_encode(y):
        y_ = np.zeros((len(y),2))
        for i in range(len(y)):
            y_[i,y[i][0]]=1

        return y_

    def graph(self):
        w1=tf.Variable(tf.random_normal([self.input_neuron,self.hidden_neuron]))
        l1=tf.nn.relu(tf.matmul(self.x,w1))
        w2=tf.Variable(tf.random_normal([self.hidden_neuron,self.output_neuron]))
        l2=tf.matmul(l1,w2)

        return l2

    def initialize_uninitialized( self ):
        # report_uninitialized_variables() returns names as byte strings
        # under Python 3, so decode them before comparing with v.name
        uninitialized_names = set(
            n.decode() for n in self.sess.run(tf.report_uninitialized_variables()))
        uninitialized_variables = [v for v in tf.global_variables()
            if v.name.split(':')[0] in uninitialized_names]
        self.sess.run( tf.variables_initializer( uninitialized_variables ) )

    def train(self,xTrain,yTrain):
        yTrain = self.one_hot_encode(yTrain)
        loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(
                             logits=self.model,labels=self.y))
        train = tf.train.AdamOptimizer(0.1).minimize(loss)
        # at this point only the new Adam variables are uninitialized
        self.initialize_uninitialized()

        for epoch in range(100):
            self.sess.run(train,feed_dict={self.x:xTrain,self.y:yTrain})
        print("Training done!")

    def predict(self,xTest):
        # argmax over the class axis; feed through the model's own placeholder self.x
        prediction = tf.argmax(self.model,1)
        return self.sess.run(prediction,feed_dict={self.x:xTest})

model = Model()
xTrain = np.array([[0,0],[0,1],[1,0],[1,1]])
yTrain = np.array([[0],[1],[1],[0]])
model.train(xTrain,yTrain)
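With this version, a later call to train() continues from the current weights instead of restarting, since only the freshly created optimizer variables get initialized. For example (a hypothetical second call):

model.train(xTrain,yTrain)  # w1/w2 keep their trained values; only the new Adam variables are initialized

Note that each train() call still builds a fresh loss and optimizer in the graph, so if you train many times it would be cleaner to construct them once in __init__().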
