Spiking Neural Network: ValueError: too many values to unpack (expected 2)
I am writing a spiking neural network to monitor the structural health of a building. The dataset I have is a (129, 5651) matrix containing the firing times of the network, split into a (129, 3957) training set and a (129, 1694) test set.
This is the firing-time matrix (129, 5651):
[[ 2.6 2.5 2.6 ... 2.8 2.7 2.5]
[ 2.7 2.6 2.6 ... 2.8 2.8 2.6]
[ 5. 4.6 5. ... 6.4 6.1 5. ]
...
[ 0. 0. 0. ... 18.9 18.6 0. ]
[ 0. 0. 0. ... 0. 18. 0. ]
[ 0. 0. 0. ... 0. 0. 0. ]]
This is the code I used as a reference, but I want to use my own dataset instead of the MNIST dataset: https://snntorch.readthedocs.io/en/latest/tutorials/tutorial_5.html
Here is all the code with my replacements:
import numpy as np
import torch
import torch.nn as nn
import snntorch as snn

batch_size = 128
# Matrix is the (129, 5651) firing-time matrix described above,
# split into healthy ("ok") and faulty columns
ok_train = np.array(Matrix[:, 0:3446])
faulty_train = np.array(Matrix[:, 4923:5434])
train_loader = np.concatenate((ok_train, faulty_train), axis=1)
ok_test = np.array(Matrix[:, 3446:4923])
faulty_test = np.array(Matrix[:, 5434:5651])
test_loader = np.concatenate((ok_test, faulty_test), axis=1)
# Leaky neuron model, overriding the backward pass with a custom function
class LeakySurrogate(nn.Module):
    def __init__(self, beta, threshold=1.0):
        super(LeakySurrogate, self).__init__()
        # initialize decay rate beta and threshold
        self.beta = beta
        self.threshold = threshold
        self.spike_op = self.SpikeOperator.apply

    # the forward function is called each time we call Leaky
    def forward(self, input_, mem):
        spk = self.spike_op((mem - self.threshold))  # call the Heaviside function
        reset = (spk * self.threshold).detach()  # removes spike_op gradient from reset
        mem = self.beta * mem + input_ - reset  # Eq (1)
        return spk, mem

    # Forward pass: Heaviside function
    # Backward pass: Override Dirac Delta with the Spike itself
    @staticmethod
    class SpikeOperator(torch.autograd.Function):
        @staticmethod
        def forward(ctx, mem):
            spk = (mem > 0).float()  # Heaviside on the forward pass: Eq(2)
            ctx.save_for_backward(spk)  # store the spike for use in the backward pass
            return spk

        @staticmethod
        def backward(ctx, grad_output):
            (spk,) = ctx.saved_tensors  # retrieve the spike
            grad = grad_output * spk  # scale the gradient by the spike: 1/0
            return grad
lif1 = LeakySurrogate(beta=0.9)
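For reference, a single call to this neuron returns a (spike, membrane) pair, which is the two-value unpacking the rest of the code relies on. A minimal sketch with made-up shapes:

spk_cur = torch.rand(1, 10)    # hypothetical input current for 10 neurons
spk_mem = torch.zeros(1, 10)   # membrane potential starting at rest
spk, spk_mem = lif1(spk_cur, spk_mem)  # spk: 0/1 spikes, spk_mem: updated potential per Eq (1)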
batch_size = 128
dtype = torch.float
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# Network Architecture
num_inputs = 129
num_hidden = 1000
num_outputs = 2
# Temporal Dynamics
num_steps = 25
beta = 0.95
# Define Network
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        # Initialize layers
        self.fc1 = nn.Linear(num_inputs, num_hidden)
        self.lif1 = snn.Leaky(beta=beta)
        self.fc2 = nn.Linear(num_hidden, num_outputs)
        self.lif2 = snn.Leaky(beta=beta)

    def forward(self, x):
        # Initialize hidden states at t=0
        mem1 = self.lif1.init_leaky()
        mem2 = self.lif2.init_leaky()
        # Record the final layer
        spk2_rec = []
        mem2_rec = []
        for step in range(num_steps):
            cur1 = self.fc1(x)
            spk1, mem1 = self.lif1(cur1, mem1)
            cur2 = self.fc2(spk1)
            spk2, mem2 = self.lif2(cur2, mem2)
            spk2_rec.append(spk2)
            mem2_rec.append(mem2)
        return torch.stack(spk2_rec, dim=0), torch.stack(mem2_rec, dim=0)
# Load the network onto CUDA if available
net = Net().to(device)
# pass data into the network, sum the spikes over time
# and compare the neuron with the highest number of spikes
# with the target
def print_batch_accuracy(data, targets, train=False):
    output, _ = net(data.view(batch_size, -1))
    _, idx = output.sum(dim=0).max(1)
    acc = np.mean((targets == idx).detach().cpu().numpy())
    if train:
        print(f"Train set accuracy for a single minibatch: {acc*100:.2f}%")
    else:
        print(f"Test set accuracy for a single minibatch: {acc*100:.2f}%")

def train_printer():
    print(f"Epoch {epoch}, Iteration {iter_counter}")
    print(f"Train Set Loss: {loss_hist[counter]:.2f}")
    print(f"Test Set Loss: {test_loss_hist[counter]:.2f}")
    print_batch_accuracy(data, targets, train=True)
    print_batch_accuracy(test_data, test_targets, train=False)
    print("\n")
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=5e-4, betas=(0.9, 0.999))
Training:
num_epochs = 1
loss_hist = []
test_loss_hist = []
counter = 0

# Outer training loop
for epoch in range(num_epochs):
    iter_counter = 0
    train_batch = iter(train_loader)

    # Minibatch training loop
    for data, targets in train_batch:
        data = data.to(device)
        targets = targets.to(device)

        # forward pass
        net.train()
        spk_rec, mem_rec = net(data.view(batch_size, -1))

        # initialize the loss & sum over time
        loss_val = torch.zeros((1), dtype=dtype, device=device)
        for step in range(num_steps):
            loss_val += loss(mem_rec[step], targets)

        # Gradient calculation + weight update
        optimizer.zero_grad()
        loss_val.backward()
        optimizer.step()

        # Store loss history for future plotting
        loss_hist.append(loss_val.item())

        # Test set
        with torch.no_grad():
            net.eval()
            test_data, test_targets = next(iter(test_loader))
            test_data = test_data.to(device)
            test_targets = test_targets.to(device)

            # Test set forward pass
            test_spk, test_mem = net(test_data.view(batch_size, -1))

            # Test set loss
            test_loss = torch.zeros((1), dtype=dtype, device=device)
            for step in range(num_steps):
                test_loss += loss(test_mem[step], test_targets)
            test_loss_hist.append(test_loss.item())

            # Print train/test loss/accuracy
            if counter % 50 == 0:
                train_printer()
            counter += 1
            iter_counter += 1
But when I replace the tutorial's training/test sets with mine, I keep getting the error below. Does anyone know how I can adapt a dataset like this to this network?
ValueError Traceback (most recent call last)
<ipython-input-46-9d1979c2b76f> in <module>
10
11 # Minibatch training loop
---> 12 for data, targets in train_batch:
13 data = data.to(device)
14 targets = targets.to(device)
ValueError: too many values to unpack (expected 2)
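The same error can be reproduced directly from the array (a minimal sketch of what the loop sees: train_loader here is a plain 2-D NumPy array, and iterating it yields one full row per step):

arr = np.zeros((129, 3957))   # same shape as train_loader above
row = next(iter(arr))         # iterating a 2-D array yields whole rows
print(row.shape)              # (3957,)
data, targets = row           # ValueError: too many values to unpack (expected 2)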
This may solve your problem. The loop unpacks two variables per iteration, and enumerate makes the iterator yield exactly two things each step: a running count and the value itself (Python iterator unpacking: enumerate):
for data, targets in enumerate(train_batch):
    print(targets)
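Note that with enumerate, data receives the row index and targets receives the whole row, so the unpacking succeeds but the pair is not a (sample, label) pair. The MNIST tutorial instead iterates a torch.utils.data.DataLoader that yields (data, targets) batches. Below is a minimal sketch of wrapping the splits from the question the same way; the 0 = ok / 1 = faulty labels and the transpose to one-sample-per-row are assumptions on my part, not from the question:

import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader

# Assumed labels: 0 = ok, 1 = faulty, matching the column split in the question
train_x = np.concatenate((ok_train, faulty_train), axis=1).T   # (3957, 129): one sample per row
train_y = np.concatenate((np.zeros(ok_train.shape[1]), np.ones(faulty_train.shape[1])))

train_ds = TensorDataset(torch.tensor(train_x, dtype=torch.float),
                         torch.tensor(train_y, dtype=torch.long))
train_loader = DataLoader(train_ds, batch_size=128, shuffle=True, drop_last=True)

for data, targets in train_loader:    # now each step yields a (data, targets) pair
    print(data.shape, targets.shape)  # torch.Size([128, 129]) torch.Size([128])
    break

With drop_last=True every batch has exactly batch_size samples, which matches the data.view(batch_size, -1) call in the training loop; the test split can be wrapped the same way.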