
How to make a custom dataset in PyTorch for images and their masks?

I have two folders of tif images: one named BMMCdata, and another named BMMCmasks containing the masks for the BMMCdata images (the file names correspond). I am trying to build a custom dataset and also split the data randomly into training and test sets. At the moment I am getting the error

self.filenames.append(fn)
AttributeError: 'CustomDataset' object has no attribute 'filenames'

Any comments would be greatly appreciated.

import torch
from torch.utils.data.dataset import Dataset  # For custom data-sets
from torchvision import transforms
from PIL import Image
import os.path as osp
import glob

folder_data = "/Users/parto/PycharmProjects/U-net/BMMCdata/data"

class CustomDataset(Dataset):
    def __init__(self, root):

        self.filename = folder_data
        self.root = root
        self.to_tensor = transforms.ToTensor()
        filenames = glob.glob(osp.join(folder_data, '*.tif'))
        for fn in filenames:
            self.filenames.append(fn)
        self.len = len(self.filenames)
        print(fn)

    def __getitem__(self, index):
        image = Image.open(self.filenames[index])
        return self.transform(image)

    def __len__(self):

        return self.len

custom_img = CustomDataset(folder_data)
# total images in set
print(custom_img.len)

train_len = int(0.6*custom_img.len)
test_len = custom_img.len - train_len
train_set, test_set = CustomDataset.random_split(custom_img, lengths=[train_len, test_len])
# check lens of subset
len(train_set), len(test_set)

train_set = CustomDataset(folder_data)
train_set = torch.utils.data.TensorDataset(train_set, train=True, batch_size=4)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=4, shuffle=True, num_workers=1)
print(train_set)
print(train_loader)

test_set = torch.utils.data.DataLoader(Dataset, batch_size=4, sampler= train_sampler)
test_loader = torch.utils.data.DataLoader(Dataset, batch_size=4)
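A note on the traceback: __init__ assigns self.filename (a single string) but then appends to self.filenames, which is never created, hence the AttributeError. A minimal sketch of an __init__ that avoids that particular error, keeping the rest of the class unchanged:

def __init__(self, root):
    self.root = root
    self.to_tensor = transforms.ToTensor()
    # create the list before using it; glob already returns a list of paths
    self.filenames = glob.glob(osp.join(root, '*.tif'))
    self.len = len(self.filenames)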

Answer given by @ptrblck in the PyTorch community. Thank you.

# get all the image and mask path and number of images
folder_data = glob.glob("D:\\Neda\\Pytorch\\U-net\\BMMCdata\\data\\*.tif")
folder_mask = glob.glob("D:\\Neda\\Pytorch\\U-net\\BMMCmasks\\masks\\*.tif")

# split these path using a certain percentage
len_data = len(folder_data)
print(len_data)
train_size = 0.6

train_image_paths = folder_data[:int(len_data*train_size)]
test_image_paths = folder_data[int(len_data*train_size):]

train_mask_paths = folder_mask[:int(len_data*train_size)]
test_mask_paths = folder_mask[int(len_data*train_size):]


class CustomDataset(Dataset):
    def __init__(self, image_paths, target_paths, train=True):   # initial logic happens like transform

        self.image_paths = image_paths
        self.target_paths = target_paths
        self.transforms = transforms.ToTensor()

    def __getitem__(self, index):

        image = Image.open(self.image_paths[index])
        mask = Image.open(self.target_paths[index])
        t_image = self.transforms(image)
        return t_image, mask

    def __len__(self):  # return count of sample we have

        return len(self.image_paths)

train_dataset = CustomDataset(train_image_paths, train_mask_paths, train=True)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=1)

test_dataset = CustomDataset(test_image_paths, test_mask_paths, train=False)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=4, shuffle=False, num_workers=1)
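As a quick sanity check on the dataset above (a sketch, assuming the folders on your machine contain matching .tif files), indexing it directly returns the transformed image tensor together with the corresponding mask as a PIL image:

# index the dataset directly, without going through a DataLoader
img, mask = train_dataset[0]
print(img.shape)   # channels-first tensor, e.g. torch.Size([C, H, W])
print(mask.size)   # PIL size of the matching mask, (W, H)

Note that __getitem__ returns the mask as a PIL image; depending on the PyTorch version, the default collate function may not know how to batch PIL objects, so converting the mask to a tensor inside __getitem__ is a common follow-up.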
