
Return Inverse Hessian Matrix at the end of DNN Training and Partial Derivatives wrt the Inputs

I have constructed a DNN using Keras with a TensorFlow backend that takes stellar spectra as input (7213 data points) and outputs three stellar parameters (temperature, gravity, and metallicity). The network trains well and predicts well on my test set, but in order for the results to be scientifically useful I need to be able to estimate my errors. The first step in doing this is to obtain the inverse Hessian matrix, which doesn't seem to be possible using Keras alone. So I am trying to create a workaround with scipy, using scipy.optimize.minimize with BFGS, L-BFGS-B, or Newton-CG as the method. Any of these will return the inverse Hessian matrix.

The idea is to train the model with the Adam optimizer for 100 epochs (or until it converges), and then run a single iteration of BFGS (or one of the other methods) to return the Hessian matrix of my model.

Here is my code:

from scipy.optimize import minimize

import numpy as np

from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import Adam


# Define vars
activation = 'relu'
init = 'he_normal'
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-08

n = 7213  # number of data points per input spectrum
input_shape = (None,n)
n_hidden = [2048,1024,512,256,128,32]
output_dim = 3

epochs = 100
lr = 0.0008
batch_size = 64
decay = 0.00

# Design DNN Layers

model = Sequential([

    Dense(n_hidden[0], batch_input_shape=input_shape, init=init, activation=activation),

    Dense(n_hidden[1], init=init, activation=activation), 

    Dense(n_hidden[2], init=init, activation=activation),

    Dense(n_hidden[3], init=init, activation=activation),

    Dense(n_hidden[4], init=init, activation=activation),

    Dense(n_hidden[5], init=init, activation=activation),

    Dense(output_dim, init=init, activation='linear'),
])


# Optimization function
optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, decay=decay)


# Compile and train network
model.compile(optimizer=optimizer, loss='mean_squared_error')

#train_X.shape = (50000,7213)
#train_Y.shape = (50000,3)
#cv_X.shape = (10000,7213)
#cv_Y.shape = (10000,3)

history = model.fit(train_X, train_Y, validation_data=(cv_X, cv_Y),
             nb_epoch=epochs, batch_size=batch_size, verbose=2)


weights = []
for layer in model.layers:
    weights.append(layer.get_weights())

def loss(W):
    # Load the candidate weights W into the Keras model, then evaluate
    # the mean squared error over the training set.
    weightsList = np.array(W)
    new_weights = []
    for i, layer in enumerate(weightsList):
        new_weights.append(np.array(weightsList[i]))
    model.set_weights(np.array(new_weights))
    preds = model.predict(train_X)
    mse = np.sum(np.square(np.subtract(preds,train_Y)))/len(train_X[:,0])
    print(mse)
    return mse


x0 = weights
res = minimize(loss, x0, args=(), method='BFGS', options={'maxiter':1,'eps':1e-6,'disp':True})
#res = minimize(loss, x0, method='L-BFGS-B', options={'disp': True, 'maxls': 1, 'gtol': 1e-05, 'eps': 1e-08, 'maxiter': 1, 'ftol': 0.5, 'maxcor': 1, 'maxfun': 1})
#res = minimize(loss, x0, args=(), method='Newton-CG', jac=None, hess=None, hessp=None, tol=None, callback=None, options={'disp': False, 'xtol': 1e-05, 'eps': 1.4901161193847656e-08, 'return_all': False, 'maxiter': 1})
inv_hess = res['hess_inv']

1) The model trains very well, but I run into problems when trying to run the scipy minimizer for a single iteration using the previously trained weights.

Output when trying method=BFGS:

0.458706819754
0.457811632697
0.458706716791
...
0.350124572422
0.350186770445
0.350125320636

ValueErrorTraceback (most recent call last)
---> 19 res = minimize(loss, x0, args=(), method = 'BFGS', tol=1, options={'maxiter':1,'eps':1e-6,'disp':True})#,'gtol':0.1}, tol=5)

/opt/anaconda3/lib/python2.7/site-packages/scipy/optimize/_minimize.pyc in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
    442         return _minimize_cg(fun, x0, args, jac, callback, **options)
    443     elif meth == 'bfgs':
--> 444         return _minimize_bfgs(fun, x0, args, jac, callback, **options)

/opt/anaconda3/lib/python2.7/site-packages/scipy/optimize/optimize.pyc in _minimize_bfgs(fun, x0, args, jac, callback, gtol, norm, eps, maxiter, disp, return_all, **unknown_options)
    963         try:  # this was handled in numeric, let it remaines for more safety
--> 964             rhok = 1.0 / (numpy.dot(yk, sk))
    965         except ZeroDivisionError:
    966             rhok = 1000.0

ValueError: operands could not be broadcast together with shapes (7213,2048) (2048,1024) 

Output when trying method=L-BFGS-B:

ValueErrorTraceback (most recent call last)

---> 20 res = minimize(loss, x0, method='L-BFGS-B', options={'disp': True, 'maxls': 1, 'gtol': 1e-05, 'eps': 1e-08, 'maxiter': 1, 'ftol': 0.5, 'maxcor': 1, 'maxfun': 1})


/opt/anaconda3/lib/python2.7/site-packages/scipy/optimize/_minimize.pyc in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
    448     elif meth == 'l-bfgs-b':
    449         return _minimize_lbfgsb(fun, x0, args, jac, bounds,
--> 450                                 callback=callback, **options)


/opt/anaconda3/lib/python2.7/site-packages/scipy/optimize/lbfgsb.pyc in _minimize_lbfgsb(fun, x0, args, jac, bounds, disp, maxcor, ftol, gtol, eps, maxfun, maxiter, iprint, callback, maxls, **unknown_options)
    300         raise ValueError('maxls must be positive.')
    301 
--> 302     x = array(x0, float64)
    303     f = array(0.0, float64)
    304     g = zeros((n,), float64)

ValueError: setting an array element with a sequence.

Output when trying method=Newton-CG:

ValueErrorTraceback (most recent call last)

---> 21 res = minimize(loss, x0, args=(), method='Newton-CG', jac=None, hess=None, hessp=None, tol=None, callback=None, options={'disp': False, 'xtol': 1e-05, 'eps': 1.4901161193847656e-08, 'return_all': False, 'maxiter': 1})


/opt/anaconda3/lib/python2.7/site-packages/scipy/optimize/_minimize.pyc in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
    445     elif meth == 'newton-cg':
    446         return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
--> 447                                   **options)
    448     elif meth == 'l-bfgs-b':
    449         return _minimize_lbfgsb(fun, x0, args, jac, bounds,

/opt/anaconda3/lib/python2.7/site-packages/scipy/optimize/optimize.pyc in _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback, xtol, eps, maxiter, disp, return_all, **unknown_options)
   1438     _check_unknown_options(unknown_options)
   1439     if jac is None:
-> 1440         raise ValueError('Jacobian is required for Newton-CG method')

ValueError: Jacobian is required for Newton-CG method
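Looking at these tracebacks, the BFGS and L-BFGS-B failures both seem to come from the shape of x0: scipy.optimize.minimize expects a flat 1-D array, while the weights here are a list of differently shaped arrays (the Newton-CG failure is separate: that method simply requires an explicit jac callable). A minimal sketch of the kind of flatten/unflatten helpers that would work around this (the helper names are just for illustration, not part of Keras or scipy):

import numpy as np

def flatten_weights(weight_list):
    # Concatenate every layer's weight array into one flat 1-D vector.
    return np.concatenate([w.ravel() for w in weight_list])

def unflatten_weights(flat_vector, template):
    # Split a flat vector back into arrays shaped like those in `template`.
    restored, offset = [], 0
    for w in template:
        restored.append(flat_vector[offset:offset + w.size].reshape(w.shape))
        offset += w.size
    return restored

# Hypothetical usage with the loss() defined above:
#   template = model.get_weights()
#   x0 = flatten_weights(template)
#   # and inside loss(W): model.set_weights(unflatten_weights(W, template))

Note, though, that this network has roughly 17 million weights, so the dense n-by-n inverse Hessian that BFGS builds would be far too large to store; L-BFGS-B keeps only a low-rank approximation of it for exactly that reason.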

2) The next task is to obtain the derivatives of the model outputs with respect to the model inputs. For example, for one stellar parameter (one of the outputs), say temperature, I need to find the partial derivatives with respect to each of the 7213 inputs, and then do the same for each of the 3 outputs.

So basically, my first task (1) is to find a way to return the inverse Hessian matrix of my model, and the next (2) is to find a way to return the first-order partial derivatives of the outputs with respect to the inputs.

Does anyone have any insight into either of these tasks? Thanks.
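For task (2), one route that stays inside Keras is to differentiate the output tensor directly with the backend. This is only a sketch, assuming the standard keras.backend functions K.gradients and K.function (none of this appears in my code above):

from keras import backend as K

# One gradient tensor per output column. K.gradients needs a scalar cost
# on the Theano backend, and summing over the batch is harmless here
# because each example's outputs depend only on that example's inputs.
grad_tensors = [K.gradients(K.sum(model.output[:, i]), [model.input])[0]
                for i in range(3)]
compute_grads = K.function([model.input], grad_tensors)

# Each returned array has shape (n_examples, 7213): row j holds the
# partial derivatives of output i for example j w.r.t. its 7213 inputs.
dTeff_dx, dlogg_dx, dmetal_dx = compute_grads([train_X])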

EDIT

I have tried using theano.gradient.jacobian() to return the Jacobian matrix of my outputs with respect to my inputs. I converted my model into a function of the model weights and used that function as the first argument to theano.gradient.jacobian(). My problem arises when I try to run the gradient with multidimensional arrays, which is the form my model weights and input data are in.

import theano
import theano.tensor as T

weights_in_model = T.dvector('model_weights')
x = T.dvector('x')

def pred(x,weights_in_model):
    weights = T.stack((weights_in_model[0],weights_in_model[1]), axis=0)
    x = T.shape_padright(x, n_ones=1)

    prediction=T.dot(x, weights)
    prediction = T.clip(prediction, 0, 9999.)

    weights = T.stack((weights_in_model[2],weights_in_model[3]), axis=0)
    prediction = T.shape_padright(prediction, n_ones=1)
    prediction = T.dot(prediction, weights)
    prediction = T.clip(prediction, 0, 9999.)

    weights = T.stack((weights_in_model[4],weights_in_model[5]), axis=0)
    prediction = T.shape_padright(prediction, n_ones=1)
    prediction = T.dot(prediction, weights)
    prediction = T.clip(prediction, 0, 9999.)

    weights = T.stack((weights_in_model[6],weights_in_model[7]), axis=0)
    prediction = T.shape_padright(prediction, n_ones=1)
    prediction = T.dot(prediction, weights)
    prediction = T.clip(prediction, 0, 9999.)

    weights = T.stack((weights_in_model[8],weights_in_model[9]), axis=0)
    prediction = T.shape_padright(prediction, n_ones=1)
    prediction = T.dot(prediction, weights)
    prediction = T.clip(prediction, 0, 9999.)

    weights = T.stack((weights_in_model[10],weights_in_model[11]), axis=0)
    prediction = T.shape_padright(prediction, n_ones=1)
    prediction = T.dot(prediction, weights)
    prediction = T.clip(prediction, 0, 9999.)


    weights = T.stack((weights_in_model[12],weights_in_model[13]), axis=0)
    prediction = T.shape_padright(prediction, n_ones=1)
    prediction = T.dot(prediction, weights)
    T.flatten(prediction)  # note: T.flatten returns a new variable; as written this line has no effect

    return prediction


f=theano.gradient.jacobian(pred(x,weights_in_model),wrt=x)
h=theano.function([x,weights_in_model],f,allow_input_downcast=True)


x = train_X
weights_in_model = model.get_weights()
h(x,weights_in_model)

The last line gives the error:

TypeError: ('Bad input argument to theano function with name "<ipython-input-365-a1ab256aa220>:1"  at index 0(0-based)', 'Wrong number of dimensions: expected 1, got 2 with shape (2000, 7213).')

But when I change the inputs to:

weights_in_model = T.matrix('model_weights')
x = T.matrix('x')

I get an error from the line:

f=theano.gradient.jacobian(pred(x,weights_in_model),wrt=x)

which reads:

AssertionError: tensor.jacobian expects a 1 dimensional variable as `expression`. If not use flatten to make it a vector

Any ideas on how to fix this?

ANSWER: This code works for a model predicting one output value. I am currently modifying it to compute 3 Jacobian matrices, one for each output.

import theano
import theano.tensor as T
import theano.typed_list
theano.config.optimizer='fast_compile'
theano.config.exception_verbosity='high'

# Declare function input placeholders
weights_in_model = theano.typed_list.TypedListType(theano.tensor.dmatrix)()
x = T.matrix('x')

# Define model function
def pred(x,weights_in_model):
    # weights_in_model holds 7 (kernel, bias) pairs; each bias row is
    # appended beneath its kernel, and a column of ones is appended to
    # the activations, so each layer is a single matrix product.
    prediction = x
    n_layers = 7
    for i in range(n_layers):
        weights = T.concatenate((weights_in_model[2*i], weights_in_model[2*i+1]), axis=0)
        prediction = T.concatenate((prediction, T.ones((T.shape(prediction)[0], 1))), axis=1)
        prediction = T.dot(prediction, weights)
        if i < n_layers - 1:  # ReLU-style clip on the hidden layers only
            prediction = T.clip(prediction, 0, 9999.)
    prediction = T.flatten(prediction)
    return prediction

# Create gradient function
f=theano.gradient.jacobian(pred(x,weights_in_model),wrt=x)

# Compile function
h=theano.function([x,weights_in_model],f,allow_input_downcast=True)


# Get function inputs
weights_in_model_ = model.get_weights()
x_=train_data

# Reshape the bias vectors (entries 1, 3, ..., 13) into row matrices so
# they can be concatenated beneath the kernels inside pred()
for i in range(1, 14, 2):
    weights_in_model_[i] = np.reshape(weights_in_model_[i], (1, -1))

# Compute the Jacobian (returned with many all-zero rows, since example i's
# outputs have zero derivatives w.r.t. example j's inputs for i != j)
jacs = h(x_, weights_in_model_)

# Put the Jacobian matrix in its proper format (i.e. shape = (number_of_input_examples, number_of_input_features))

jacobian_matrix = np.zeros((jacs.shape[0],jacs.shape[2]))
for i, jac in enumerate(jacs): 
    jacobian_matrix[i] = jac[i]

The next task is to find the Hessian matrix of the outputs with respect to the model weights!
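Presumably this will go through theano.gradient.hessian, which only accepts a scalar expression and a vector wrt, so the weights will likely need to be handled as one flat vector rather than the list of matrices used above. A toy sketch of the call itself, with a linear stand-in model rather than the pred() above:

import numpy as np
import theano
import theano.tensor as T

w = T.dvector('w')   # flat weight vector (toy stand-in for the model weights)
x = T.dvector('x')   # one input spectrum
y = T.dscalar('y')   # one target value

pred_toy = T.dot(x, w)                     # linear stand-in model
cost = (pred_toy - y) ** 2                 # scalar cost, as hessian() requires
H = theano.gradient.hessian(cost, wrt=w)   # shape (len(w), len(w))

hess_fn = theano.function([w, x, y], H, allow_input_downcast=True)
print(hess_fn(np.ones(4), np.arange(4.0), 1.0))  # equals 2 * np.outer(x, x)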
