Pytorch Geometric: RuntimeError: expected scalar type Long but found Float
I have gone through all the similar threads and even sought help via GitHub.
import torch
from scipy.sparse import coo_matrix
from torch_geometric.data import Data, Dataset, download_url
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
def graph_data(A, X, labels):
    tg_graphs = []
    sc = MinMaxScaler()
    y_train = np.array(labels)
    y_train = sc.fit_transform(y_train.reshape((-1, 1)))
    y_train = torch.from_numpy(y_train).view(-1, 1)
    for i in range(len(A)):
        coo = coo_matrix(A[i])
        indices = np.vstack((coo.row, coo.col))
        x = [ord(c) for c in X[i]]
        index = torch.LongTensor(indices)
        feature = torch.tensor(x, dtype=torch.long)
        graph = Data(x=feature, edge_index=index, y=y_train[i])
        tg_graphs.append(graph)
    return tg_graphs, y_train
The code above is how I create the dataset.
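(Roughly, the graphs returned by graph_data are then saved to disk so that the torch.load('data/dataset.pt') call below works; A, X and labels stand for the adjacency matrices, node strings and regression targets, and the exact names are approximate.)

graphs, y = graph_data(A, X, labels)   # A, X, labels are assumed inputs, not shown above
torch.save(graphs, 'data/dataset.pt')  # matches the torch.load() in the next cell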
import torch
dataset = torch.load('data/dataset.pt')
#%%
data = dataset[0] # Get the first graph object.
print()
print(data)
print('=============================================================')
# Gather some statistics about the first graph.
print(f'Number of nodes: {data.num_nodes}')
print(f'Number of edges: {data.num_edges}')
print(f'Average node degree: {data.num_edges / data.num_nodes:.2f}')
print(f'Has isolated nodes: {data.has_isolated_nodes()}')
print(f'Has self-loops: {data.has_self_loops()}')
print(f'Is undirected: {data.is_undirected()}')
#%%
train_dataset = dataset[:33000]
test_dataset = dataset[33000:]
print(f'Number of training graphs: {len(train_dataset)}')
print(f'Number of test graphs: {len(test_dataset)}')
#%%
from torch_geometric.loader import DataLoader
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
for step, data in enumerate(train_loader):
    print(f'Step {step + 1}:')
    print('=======')
    print(f'Number of graphs in the current batch: {data.num_graphs}')
    print(data)
    print()
#%%
from torch.nn import Linear
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
from torch_geometric.nn import global_mean_pool
class GCN(torch.nn.Module):
    def __init__(self, hidden_channels):
        super(GCN, self).__init__()
        torch.manual_seed(12345)
        self.conv1 = GCNConv(1, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, hidden_channels)
        self.conv3 = GCNConv(hidden_channels, hidden_channels)
        self.lin = Linear(hidden_channels, 1)

    def forward(self, x, edge_index, batch):
        # 1. Obtain node embeddings
        x = self.conv1(x, edge_index)
        x = x.relu()
        x = self.conv2(x, edge_index)
        x = x.relu()
        x = self.conv3(x, edge_index)
        # 2. Readout layer
        x = global_mean_pool(x, batch)  # [batch_size, hidden_channels]
        # 3. Apply a final classifier
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin(x)
        return x
model = GCN(hidden_channels=3)
print(model)
#%%
model = GCN(hidden_channels=64)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
criterion = torch.nn.functional.mse_loss
def train():
    model.train()
    for data in train_loader.dataset:  # Iterate in batches over the training dataset.
        out = model(data.x.reshape(-1, 1), data.edge_index, data.batch)  # Perform a single forward pass.
        loss = criterion(out, data.y)  # Compute the loss.
        loss.backward()  # Derive gradients.
        optimizer.step()  # Update parameters based on gradients.
        optimizer.zero_grad()  # Clear gradients.

def test(loader):
    model.eval()
    correct = 0
    for data in loader:  # Iterate in batches over the training/test dataset.
        out = model(data.x.reshape(-1, 1), data.edge_index, data.batch)
        pred = out.argmax(dim=1)  # Use the class with highest probability.
        correct += int((pred == data.y).sum())  # Check against ground-truth labels.
    return correct / len(loader.dataset)  # Derive ratio of correct predictions.
#%%
for epoch in range(1, 171):
    train()
    train_acc = test(train_loader)
    test_acc = test(test_loader)
    print(f'Epoch: {epoch:03d}, Train Acc: {train_acc:.4f}, Test Acc: {test_acc:.4f}')
And this is my model code.
RuntimeError Traceback (most recent call last)
Input In [7], in <cell line: 1>()
1 for epoch in range(1, 171):
----> 2 train()
3 train_acc = test(train_loader)
4 test_acc = test(test_loader)
Input In [6], in train()
6 model.train()
8 for data in train_loader.dataset: # Iterate in batches over the training dataset.
----> 9 out = model(data.x.reshape(-1,1), data.edge_index, data.batch) # Perform a single forward pass.
10 loss = criterion(out, data.y) # Compute the loss.
11 loss.backward() # Derive gradients.
File ~\anaconda3\lib\site-packages\torch\nn\modules\module.py:1110, in Module._call_impl(self, *input, **kwargs)
1106 # If we don't have any hooks, we want to skip the rest of the logic in
1107 # this function, and just call forward.
1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1109 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110 return forward_call(*input, **kwargs)
1111 # Do not call functions when jit is used
1112 full_backward_hooks, non_full_backward_hooks = [], []
Input In [5], in GCN.forward(self, x, edge_index, batch)
15 def forward(self, x, edge_index, batch):
16 # 1. Obtain node embeddings
---> 17 x = self.conv1(x, edge_index)
18 x = x.relu()
19 x = self.conv2(x, edge_index)
File ~\anaconda3\lib\site-packages\torch\nn\modules\module.py:1110, in Module._call_impl(self, *input, **kwargs)
1106 # If we don't have any hooks, we want to skip the rest of the logic in
1107 # this function, and just call forward.
1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1109 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110 return forward_call(*input, **kwargs)
1111 # Do not call functions when jit is used
1112 full_backward_hooks, non_full_backward_hooks = [], []
File ~\anaconda3\lib\site-packages\torch_geometric\nn\conv\gcn_conv.py:191, in GCNConv.forward(self, x, edge_index, edge_weight)
188 else:
189 edge_index = cache
--> 191 x = self.lin(x)
193 # propagate_type: (x: Tensor, edge_weight: OptTensor)
194 out = self.propagate(edge_index, x=x, edge_weight=edge_weight,
195 size=None)
File ~\anaconda3\lib\site-packages\torch\nn\modules\module.py:1110, in Module._call_impl(self, *input, **kwargs)
1106 # If we don't have any hooks, we want to skip the rest of the logic in
1107 # this function, and just call forward.
1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1109 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110 return forward_call(*input, **kwargs)
1111 # Do not call functions when jit is used
1112 full_backward_hooks, non_full_backward_hooks = [], []
File ~\anaconda3\lib\site-packages\torch_geometric\nn\dense\linear.py:118, in Linear.forward(self, x)
113 def forward(self, x: Tensor) -> Tensor:
114 r"""
115 Args:
116 x (Tensor): The features.
117 """
--> 118 return F.linear(x, self.weight, self.bias)
RuntimeError: expected scalar type Long but found Float
And that is the error. I have tried so many different ways to convert the tensor to Long.
The targets look like this:
0 0.091205
1 0.091156
2 0.093943
3 0.091148
4 0.091168
...
43244 20.438217
43245 20.438217
43246 20.438217
43247 20.438217
43248 20.438217
My goal is linear regression, where the above is y and x are the corresponding graphs.
The reason lies in:
The input x's dtype is torch.int64, but GCNConv works in torch.float32: the Linear layer inside it has float32 weights, so F.linear hits a dtype mismatch and raises the error.
The fix:
x = x.type(torch.float)
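A minimal, self-contained sketch that reproduces the mismatch and the cast (the toy graph and shapes here are made up for illustration, not taken from the post):

import torch
from torch_geometric.nn import GCNConv

conv = GCNConv(1, 8)
edge_index = torch.tensor([[0, 1], [1, 0]])             # toy 2-node graph
x_long = torch.tensor([[65], [66]], dtype=torch.long)   # features built with dtype=torch.long, as in graph_data()

# conv(x_long, edge_index) raises: RuntimeError: expected scalar type Long but found Float
x = x_long.type(torch.float)   # the fix: cast features to float32 to match the conv's weights
out = conv(x, edge_index)
print(out.dtype)               # torch.float32

Equivalently, the cast can happen once when the dataset is built, by creating the node features with torch.tensor(x, dtype=torch.float) in graph_data().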