[英]Back propagation for neural network ( Error in shapes )
下面我附上了4張錯誤截圖作為參考。
一般來說,我在訓練一個具有 2-3-1 體系結構的神經網絡(即輸入層有2個輸入神經元,隱藏層有3個神經元,輸出層有1個輸出神經元)。
因此,我使用反向傳播訓練了我的網絡,但遇到了一個小錯誤(已在圖片中指明)。
有人可以幫我嗎?
Error: shapes (200,200) and (1,3) not aligned: 200 (dim 1) != 1 (dim 0)
import numpy as np
import random
# --- Training set: 200 random samples of the target y = x1^2 + x2^2 ---
input1_train = np.random.uniform(low=-1, high=1, size=(200,))
input2_train = np.random.uniform(low=-1, high=1, size=(200,))
input1_sq_train = np.square(input1_train)
input2_sq_train = np.square(input2_train)
# Merge the two input columns into a single (200, 2) design matrix.
input_merge = np.column_stack((input1_train, input2_train))
# Normalise each input column by its per-column maximum.
input_merge = input_merge / np.amax(input_merge, axis=0)
# Target values, scaled down by 100 ("normalised" outputs).
y_output_train = (input1_sq_train + input2_sq_train) / 100

# --- Test set: 100 random samples of the same function ---
input1_test = np.random.uniform(low=-1, high=1, size=(100,))
input2_test = np.random.uniform(low=-1, high=1, size=(100,))
input1_sq_test = np.square(input1_test)
input2_sq_test = np.square(input2_test)
# Merge the two test inputs into one (100, 2) matrix.
input_merge1 = np.column_stack((input1_test, input2_test))
# Normalise the test inputs the same way as the training inputs.
input_merge1 = input_merge1 / np.amax(input_merge1, axis=0)
# Normalised test targets.
y_output_test = (input1_sq_test + input2_sq_test) / 100

# --- Validation set: 50 random samples of the same function ---
input1_validation = np.random.uniform(low=-1, high=1, size=(50,))
input2_validation = np.random.uniform(low=-1, high=1, size=(50,))
input1_sq_validation = np.square(input1_validation)
input2_sq_validation = np.square(input2_validation)
# Merge the two validation inputs into one (50, 2) matrix.
input_merge2 = np.column_stack((input1_validation, input2_validation))
# Normalise the validation inputs per column.
input_merge2 = input_merge2 / np.amax(input_merge2, axis=0)
# Normalised validation targets.
y_output_validation = (input1_sq_validation + input2_sq_validation) / 100
class Neural_Network(object):
    """A fully connected 2-3-1 feed-forward network trained by backprop.

    Architecture: 2 input neurons -> 3 hidden neurons (sigmoid) -> 1
    output neuron (sigmoid).

    Fixes relative to the original:
    - ``self.yHat`` and ``self.a2`` were referenced but never defined;
      the forward pass stores the prediction as ``self.o`` and the
      hidden activation as ``self.z2``, so those are used instead.
    - ``sigmoidPrime`` is written in terms of an *activation*
      (``s * (1 - s)``), but was being fed the pre-activation ``z3``;
      it now receives the activated output ``self.o``.
    - Targets are reshaped to a column vector before any subtraction:
      an (N,) target minus an (N, 1) prediction broadcasts to (N, N),
      which is exactly the reported "shapes (200,200) and (1,3) not
      aligned" failure.
    """

    def __init__(self):
        # Layer sizes.
        self.inputSize = 2
        self.outputSize = 1
        self.hiddenSize = 3
        # Gradient-descent step size; 1.0 matches the original update
        # magnitude, lower it if training diverges.
        self.learningRate = 1.0
        # Weight matrix from input to hidden layer, shape (2, 3).
        self.W1 = np.random.randn(self.inputSize, self.hiddenSize)
        # Weight matrix from hidden to output layer, shape (3, 1).
        self.W2 = np.random.randn(self.hiddenSize, self.outputSize)

    def _as_column(self, y):
        """Return targets reshaped to (N, outputSize) so arithmetic with
        the (N, outputSize) prediction stays element-wise."""
        return np.asarray(y).reshape(-1, self.outputSize)

    def forward(self, input_merge):
        """Propagate ``input_merge`` (N, 2) through the network.

        Returns the (N, 1) sigmoid-activated prediction and caches the
        intermediate values needed by the backward pass.
        """
        self.z = np.dot(input_merge, self.W1)   # (N, 3) pre-activation
        self.z2 = self.sigmoid(self.z)          # (N, 3) hidden activation
        self.z3 = np.dot(self.z2, self.W2)      # (N, 1) pre-activation
        o = self.sigmoid(self.z3)               # (N, 1) final activation
        return o

    def costFunction(self, input_merge, y_output_train):
        """Return the scalar squared-error cost for the given batch."""
        self.o = self.forward(input_merge)
        y = self._as_column(y_output_train)
        # np.sum (not builtin sum) so J is a scalar, not a per-row array.
        J = 0.5 * np.sum((y - self.o) ** 2)
        return J

    def costFunctionPrime(self, input_merge, y_output_train):
        """Return (dJ/dW1, dJ/dW2) for the current weights."""
        self.o = self.forward(input_merge)
        y = self._as_column(y_output_train)
        # sigmoidPrime expects the activation, i.e. self.o, not self.z3.
        delta3 = np.multiply(-(y - self.o), self.sigmoidPrime(self.o))
        # Hidden activation is self.z2 (the original's self.a2 never existed).
        dJdW2 = np.dot(self.z2.T, delta3)
        delta2 = np.dot(delta3, self.W2.T) * self.sigmoidPrime(self.z2)
        dJdW1 = np.dot(input_merge.T, delta2)
        return dJdW1, dJdW2

    def sigmoid(self, s):
        """Logistic activation function."""
        return 1 / (1 + np.exp(-s))

    def sigmoidPrime(self, s):
        """Sigmoid derivative expressed in terms of the activation ``s``."""
        return s * (1 - s)

    def backward(self, input_merge, y_output_train, o):
        """One gradient-ascent-on-error step on W1 and W2."""
        y = self._as_column(y_output_train)
        self.o_error = y - o                              # output error
        self.o_delta = self.o_error * self.sigmoidPrime(o)
        # How much the hidden layer contributed to the output error.
        self.z2_error = self.o_delta.dot(self.W2.T)
        self.z2_delta = self.z2_error * self.sigmoidPrime(self.z2)
        self.W1 += self.learningRate * input_merge.T.dot(self.z2_delta)
        self.W2 += self.learningRate * self.z2.T.dot(self.o_delta)

    def train(self, input_merge, y_output_train):
        """One full forward + backward pass over the batch."""
        o = self.forward(input_merge)
        self.backward(input_merge, y_output_train, o)
# Train the 2-3-1 network for 1,000 epochs, printing the mean squared
# error on the training set before each weight update.
NN = Neural_Network()
for epoch in range(1000):
    # ravel() flattens the (N, 1) prediction so the subtraction stays
    # element-wise: (N,) - (N, 1) would broadcast to an (N, N) matrix
    # and silently report a meaningless loss.
    predictions = NN.forward(input_merge).ravel()
    loss = np.mean(np.square(y_output_train - predictions))
    print("Loss for training: \n" + str(loss))
    NN.train(input_merge, y_output_train)
# TODO: evaluate on the held-out sets once such methods exist, e.g.:
# NN.test(input_merge1, y_output_test)
# NN.validation(input_merge2, y_output_validation)
因此,首先,發布基於MCVE的StackOverflow提出的問題是一種公平的做法。
在這里,這意味着還應復制完整的Error-Traceback,包括在其中引發了Traceback的行號。 好的,您下次再正確。
您的問題並不是一個小錯誤——您的代碼從根本上就是錯誤的:它試圖(在某個未知位置)對一對形狀互不匹配的數組執行某種運算(看起來 .multiply() 是最可疑的對象,但不確定它在哪里被調用,因為代碼中沒有任何地方顯式調用 .costFunctionPrime() 方法)。
儘管如此,代碼仍在某處試圖對一對矩陣/向量數組進行運算:其中一個形狀為 [200,200],另一個形狀為 [1,3],這樣的形狀根本無法完成該運算。
因此,錯誤出在您的代碼/語法中。 對其進行檢查,可能使用預先打印的形狀檢查:
def aFormatSHAPE(anArray):
    """Render a 2-D array's shape as a fixed-width "[   r,   c]" string."""
    rows = anArray.shape[0]
    cols = anArray.shape[1]
    return "[{0: >4d},{1: >4d}]".format(rows, cols)
def aHelperPrintSHAPE(anArray1, anArray2):
    """Best-effort debug print of the shapes of two operands.

    Prints "CHK:[r1,c1]-(op)-[r2,c2]" and deliberately never raises, so
    it can be sprinkled before any array operation without changing the
    program's behaviour.
    """
    try:
        print("CHK:{0:}-(op)-{1:}".format(aFormatSHAPE(anArray1),
                                          aFormatSHAPE(anArray2)))
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; the diagnostic stays best-effort.
        pass
    return
修復代碼使其符合所有常見的矩陣向量代數規則(關於如何在數組/向量上處理加,減,乘,點積)之后,您的小錯誤就可以解決。
您永遠不會看到類似以下內容的信息:
CHK:[200,200]-(op)-[1,3]
在我看來,您的矩陣尺寸不匹配。您不能把形狀為 (200,200) 的矩陣與形狀為 (1,3) 的矩陣相乘:第一個矩陣的列數必須等於第二個矩陣的行數。希望這可以幫助到您。
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.