繁体   English   中英

无法解析arguments(使用pytorch的深度学习教程)

[英]Can't parse arguments (deep learning tutorial using pytorch)

我正在关注本教程。

第一个脚本运行良好,我的脚本文件夹中有一个“数据”文件夹,其中包含从 MRnet 下载的 MRI 数据。

然而,在运行 “train” 脚本时,我遇到了一个错误。这是完整的脚本和错误(使用 jupyter notebook):

import shutil
import os
import time
from datetime import datetime
import argparse
import numpy as np
from tqdm import tqdm

import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchsample.transforms import RandomRotate, RandomTranslate, RandomFlip, ToTensor, Compose, RandomAffine
from torchvision import transforms
import torch.nn.functional as F
from tensorboardX import SummaryWriter
import nbimporter

from dataloader import MRDataset
import model

from sklearn import metrics


def train_model(model, train_loader, epoch, num_epochs, optimizer, writer, current_lr, log_every=100):
    """Run one training epoch.

    Trains `model` over `train_loader`, logging per-batch loss/AUC to
    TensorBoard via `writer` and printing a progress line every
    `log_every` batches.

    Returns:
        (train_loss_epoch, train_auc_epoch): mean loss and final running
        AUC for the epoch, both rounded to 4 decimals (numpy scalars).
    """
    _ = model.train()

    if torch.cuda.is_available():
        model.cuda()

    y_preds = []
    y_trues = []
    losses = []
    # Fallback AUC while it is undefined (e.g. only one class seen so far).
    auc = 0.5

    for i, (image, label, weight) in enumerate(train_loader):
        optimizer.zero_grad()

        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()
            weight = weight.cuda()

        # batch_size is 1 everywhere in this script, so strip the leading
        # batch dimension from label/weight.
        label = label[0]
        weight = weight[0]

        # Call the module directly instead of model.forward() so that any
        # registered hooks still fire.
        prediction = model(image.float())

        loss = torch.nn.BCEWithLogitsLoss(weight=weight)(prediction, label)
        loss.backward()
        optimizer.step()

        loss_value = loss.item()
        losses.append(loss_value)

        probas = torch.sigmoid(prediction)

        # assumes label/prediction rows are (1, 2) with column 1 holding the
        # positive class — TODO confirm against MRDataset.
        y_trues.append(int(label[0][1]))
        y_preds.append(probas[0][1].item())

        try:
            auc = metrics.roc_auc_score(y_trues, y_preds)
        except ValueError:
            # roc_auc_score raises ValueError when only one class is present.
            auc = 0.5

        step = epoch * len(train_loader) + i
        writer.add_scalar('Train/Loss', loss_value, step)
        writer.add_scalar('Train/AUC', auc, step)

        if i % log_every == 0 and i > 0:
            print('''[Epoch: {0} / {1} |Single batch number : {2} / {3} ]| avg train loss {4} | train auc : {5} | lr : {6}'''.
                  format(
                      epoch + 1,
                      num_epochs,
                      i,
                      len(train_loader),
                      np.round(np.mean(losses), 4),
                      np.round(auc, 4),
                      current_lr
                  )
                  )

    # NOTE(review): `epoch + i` mixes an epoch index with the last batch
    # index; `epoch` alone looks intended — kept as-is to preserve the
    # original logging behavior.
    writer.add_scalar('Train/AUC_epoch', auc, epoch + i)

    train_loss_epoch = np.round(np.mean(losses), 4)
    train_auc_epoch = np.round(auc, 4)
    return train_loss_epoch, train_auc_epoch


def evaluate_model(model, val_loader, epoch, num_epochs, writer, current_lr, log_every=20):
    """Run one validation epoch (no weight updates).

    Evaluates `model` over `val_loader`, logging per-batch loss/AUC to
    TensorBoard via `writer` and printing a progress line every
    `log_every` batches.

    Returns:
        (val_loss_epoch, val_auc_epoch): mean loss and final running AUC
        for the epoch, both rounded to 4 decimals (numpy scalars).
    """
    _ = model.eval()

    if torch.cuda.is_available():
        model.cuda()

    y_trues = []
    y_preds = []
    losses = []
    # Fallback AUC while it is undefined (e.g. only one class seen so far).
    auc = 0.5

    for i, (image, label, weight) in enumerate(val_loader):

        if torch.cuda.is_available():
            image = image.cuda()
            label = label.cuda()
            weight = weight.cuda()

        # batch_size is 1 everywhere in this script, so strip the leading
        # batch dimension from label/weight.
        label = label[0]
        weight = weight[0]

        # Call the module directly instead of model.forward() so that any
        # registered hooks still fire.
        prediction = model(image.float())

        loss = torch.nn.BCEWithLogitsLoss(weight=weight)(prediction, label)

        loss_value = loss.item()
        losses.append(loss_value)

        probas = torch.sigmoid(prediction)

        # assumes label/prediction rows are (1, 2) with column 1 holding the
        # positive class — TODO confirm against MRDataset.
        y_trues.append(int(label[0][1]))
        y_preds.append(probas[0][1].item())

        try:
            auc = metrics.roc_auc_score(y_trues, y_preds)
        except ValueError:
            # roc_auc_score raises ValueError when only one class is present.
            auc = 0.5

        step = epoch * len(val_loader) + i
        writer.add_scalar('Val/Loss', loss_value, step)
        writer.add_scalar('Val/AUC', auc, step)

        if i % log_every == 0 and i > 0:
            print('''[Epoch: {0} / {1} |Single batch number : {2} / {3} ] | avg val loss {4} | val auc : {5} | lr : {6}'''.
                  format(
                      epoch + 1,
                      num_epochs,
                      i,
                      len(val_loader),
                      np.round(np.mean(losses), 4),
                      np.round(auc, 4),
                      current_lr
                  )
                  )

    # NOTE(review): `epoch + i` mixes an epoch index with the last batch
    # index; `epoch` alone looks intended — kept as-is to preserve the
    # original logging behavior.
    writer.add_scalar('Val/AUC_epoch', auc, epoch + i)

    val_loss_epoch = np.round(np.mean(losses), 4)
    val_auc_epoch = np.round(auc, 4)
    return val_loss_epoch, val_auc_epoch

def get_lr(optimizer):
    """Return the learning rate of the optimizer's first param group.

    Returns None when the optimizer has no param groups (matches the
    implicit-None behavior of falling off a loop).
    """
    groups = optimizer.param_groups
    return groups[0]['lr'] if groups else None


def run(args):
    """Train MRNet for args.task/args.plane.

    Builds the data loaders, optimizer and LR scheduler from `args`,
    trains for args.epochs epochs with early stopping after
    args.patience epochs without validation-loss improvement, logs to
    TensorBoard under ./logs/<task>/<plane>/<timestamp>/, and saves the
    best-validation-AUC model under ./models/.
    """
    log_root_folder = "./logs/{0}/{1}/".format(args.task, args.plane)
    # Ensure the root exists so the flush branch (and logdir creation
    # below) never hits a missing directory on a fresh checkout.
    os.makedirs(log_root_folder, exist_ok=True)
    if args.flush_history == 1:
        for f in os.listdir(log_root_folder):
            if os.path.isdir(log_root_folder + f):
                shutil.rmtree(log_root_folder + f)

    now = datetime.now()
    logdir = log_root_folder + now.strftime("%Y%m%d-%H%M%S") + "/"
    os.makedirs(logdir)

    writer = SummaryWriter(logdir)

    # NOTE(review): args.augment is parsed but never consulted — this
    # augmentation pipeline is always applied to the training set.
    augmentor = Compose([
        transforms.Lambda(lambda x: torch.Tensor(x)),
        RandomRotate(25),
        RandomTranslate([0.11, 0.11]),
        RandomFlip(),
        transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1).permute(1, 0, 2, 3)),
    ])

    train_dataset = MRDataset('./data/', args.task,
                              args.plane, transform=augmentor, train=True)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=1, shuffle=True, num_workers=11, drop_last=False)

    validation_dataset = MRDataset(
        './data/', args.task, args.plane, train=False)
    # BUG FIX: was `shuffle=-True` (== -1, which is truthy). Validation
    # data should not be shuffled.
    validation_loader = torch.utils.data.DataLoader(
        validation_dataset, batch_size=1, shuffle=False, num_workers=11, drop_last=False)

    mrnet = model.MRNet()

    if torch.cuda.is_available():
        mrnet = mrnet.cuda()

    optimizer = optim.Adam(mrnet.parameters(), lr=args.lr, weight_decay=0.1)

    # argparse restricts --lr_scheduler to {plateau, step}, so exactly one
    # branch always runs and `scheduler` is always bound.
    if args.lr_scheduler == "plateau":
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, patience=3, factor=.3, threshold=1e-4, verbose=True)
    elif args.lr_scheduler == "step":
        scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, step_size=3, gamma=args.gamma)

    best_val_loss = float('inf')
    best_val_auc = 0.0

    num_epochs = args.epochs
    iteration_change_loss = 0
    patience = args.patience
    log_every = args.log_every

    # Checkpoint directory must exist before we list/save into it.
    os.makedirs('./models/', exist_ok=True)

    t_start_training = time.time()

    for epoch in range(num_epochs):
        current_lr = get_lr(optimizer)

        t_start = time.time()

        train_loss, train_auc = train_model(
            mrnet, train_loader, epoch, num_epochs, optimizer, writer, current_lr, log_every)
        val_loss, val_auc = evaluate_model(
            mrnet, validation_loader, epoch, num_epochs, writer, current_lr)

        if args.lr_scheduler == 'plateau':
            scheduler.step(val_loss)
        elif args.lr_scheduler == 'step':
            scheduler.step()

        t_end = time.time()
        delta = t_end - t_start

        print("train loss : {0} | train auc {1} | val loss {2} | val auc {3} | elapsed time {4} s".format(
            train_loss, train_auc, val_loss, val_auc, delta))

        iteration_change_loss += 1
        print('-' * 30)

        if val_auc > best_val_auc:
            best_val_auc = val_auc
            if bool(args.save_model):
                file_name = f'model_{args.prefix_name}_{args.task}_{args.plane}_val_auc_{val_auc:0.4f}_train_auc_{train_auc:0.4f}_epoch_{epoch+1}.pth'
                # Keep only the newest checkpoint for this run's
                # prefix/task/plane combination.
                for f in os.listdir('./models/'):
                    if (args.task in f) and (args.plane in f) and (args.prefix_name in f):
                        os.remove(f'./models/{f}')
                torch.save(mrnet, f'./models/{file_name}')

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            iteration_change_loss = 0

        if iteration_change_loss == patience:
            print('Early stopping after {0} iterations without the decrease of the val loss'.
                  format(iteration_change_loss))
            break

    t_end_training = time.time()
    print(f'training took {t_end_training - t_start_training} s')


def parse_arguments(arguments=None):
    """Build the CLI parser and parse `arguments`.

    Args:
        arguments: optional list of argument strings. When None (the
            default), argparse falls back to sys.argv[1:] — which is
            exactly what fails inside Jupyter, where sys.argv contains
            the kernel's own flags instead of this script's. In a
            notebook, pass the list explicitly, e.g.::

                parse_arguments(["-t", "abnormal", "-p", "axial",
                                 "--prefix_name", "abc"])

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--task', type=str, required=True,
                        choices=['abnormal', 'acl', 'meniscus'])
    parser.add_argument('-p', '--plane', type=str, required=True,
                        choices=['sagittal', 'coronal', 'axial'])
    parser.add_argument('--prefix_name', type=str, required=True)
    parser.add_argument('--augment', type=int, choices=[0, 1], default=1)
    parser.add_argument('--lr_scheduler', type=str,
                        default='plateau', choices=['plateau', 'step'])
    parser.add_argument('--gamma', type=float, default=0.5)
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--lr', type=float, default=1e-5)
    parser.add_argument('--flush_history', type=int, choices=[0, 1], default=0)
    parser.add_argument('--save_model', type=int, choices=[0, 1], default=1)
    parser.add_argument('--patience', type=int, default=5)
    parser.add_argument('--log_every', type=int, default=100)
    args = parser.parse_args(arguments)
    return args


if __name__ == "__main__":
    # Script entry point: parse CLI flags, then launch training.
    run(parse_arguments())

错误:

usage: ipykernel_launcher.py [-h] -t {abnormal,acl,meniscus} -p
                             {sagittal,coronal,axial} --prefix_name
                             PREFIX_NAME [--augment {0,1}]
                             [--lr_scheduler {plateau,step}] [--gamma GAMMA]
                             [--epochs EPOCHS] [--lr LR]
                             [--flush_history {0,1}] [--save_model {0,1}]
                             [--patience PATIENCE] [--log_every LOG_EVERY]
ipykernel_launcher.py: error: the following arguments are required: -t/--task, -p/--plane, --prefix_name

%tb:

    ---------------------------------------------------------------------------
SystemExit                                Traceback (most recent call last)
<ipython-input-3-e6a34ab63dc0> in <module>
    275 
    276 if __name__ == "__main__":
--> 277     args = parse_arguments()
    278     run(args)

<ipython-input-3-e6a34ab63dc0> in parse_arguments()
    270     parser.add_argument('--patience', type=int, default=5)
    271     parser.add_argument('--log_every', type=int, default=100)
--> 272     args = parser.parse_args()
    273     return args
    274 

~\anaconda3\envs\Pytorch\lib\argparse.py in parse_args(self, args, namespace)
   1753     # =====================================
   1754     def parse_args(self, args=None, namespace=None):
-> 1755         args, argv = self.parse_known_args(args, namespace)
   1756         if argv:
   1757             msg = _('unrecognized arguments: %s')

~\anaconda3\envs\Pytorch\lib\argparse.py in parse_known_args(self, args, namespace)
   1785         # parse the arguments and exit if there are any errors
   1786         try:
-> 1787             namespace, args = self._parse_known_args(args, namespace)
   1788             if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
   1789                 args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))

~\anaconda3\envs\Pytorch\lib\argparse.py in _parse_known_args(self, arg_strings, namespace)
   2020         if required_actions:
   2021             self.error(_('the following arguments are required: %s') %
-> 2022                        ', '.join(required_actions))
   2023 
   2024         # make sure all required groups had one option present

~\anaconda3\envs\Pytorch\lib\argparse.py in error(self, message)
   2506         self.print_usage(_sys.stderr)
   2507         args = {'prog': self.prog, 'message': message}
-> 2508         self.exit(2, _('%(prog)s: error: %(message)s\n') % args)

~\anaconda3\envs\Pytorch\lib\argparse.py in exit(self, status, message)
   2493         if message:
   2494             self._print_message(message, _sys.stderr)
-> 2495         _sys.exit(status)
   2496 
   2497     def error(self, message):

SystemExit: 2

我不知道如何继续,我被困在这里。有人知道接下来该怎么做吗?

我会猜的。

ArgumentParser 的设计初衷是让脚本在 console/terminal 中运行,而不是在 Jupyter 中运行,例如

python script.py -t abnormal -p axial --prefix_name abc

Python 会把这些 arguments 放进 sys.argv,而 ArgumentParser 在调用 parser.parse_args() 时会自动读取 sys.argv 中的值。

如果你想在 Jupyter 中运行它,那么你必须手动把 arguments 作为列表传入

args = parser.parse_args( ["-t", "abnormal", "-p", "axial", "--prefix_name", "abc"] )

或者您必须将 append 参数传递给sys.argv

sys.argv.append("-t")
sys.argv.append("abnormal")
sys.argv.append("-p")
sys.argv.append("axial")
sys.argv.append("--prefix_name")
sys.argv.append("abc")

或使用.extend( list )

sys.argv.extend( ["-t", "abnormal", "-p", "axial", "--prefix_name", "abc"] )

或使用带有split(" ")的字符串

sys.argv.extend( "-t abnormal -p axial --prefix_name abc".split(' ') )

但是如果你在 Jupyter 中多次使用不同的 arguments 运行它,那么它会记住所有 arguments 并且你需要删除以前的 arguments

sys.argv.clear()
sys.argv.extend( ["-t", "abnormal", "-p", "axial", "--prefix_name", "abc"] )

或替换所有元素(有时可能有用的第一个除外)

sys.argv[1:] = ["-t", "abnormal", "-p", "axial", "--prefix_name", "abc"]

if __name__ == "__main__":

    sys.argv[1:] = ["-t", "abnormal", "-p", "axial", "--prefix_name", "abc"]

    args = parse_arguments()
    run(args)

最终,您可以更改 function parse_arguments()以使其更通用。

你可以设置

def parse_arguments(arguments=None):

    # ... code ...

    args = parser.parse_args(arguments)

然后您可以在控制台/终端中运行的脚本中使用它

args = parse_arguments()

或在Jupyter

args = parse_arguments( ["-t", "abnormal", "-p", "axial", "--prefix_name", "abc"] )

顺便提一句:

当您在 console/terminal 中运行时,sys.argv.append() 和 sys.argv.extend() 对于给每次执行统一添加一些选项也很有用

有模块shlex用于拆分 arguments,其中-msg "Hello World"有空格

正常text.split(" ")将创建不正确的列表['-msg', '"Hello', 'World"']

shlex.split(text)将创建正确的列表['-msg', 'Hello World']

暂无
暂无

声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.

 
粤ICP备18138465号  © 2020-2024 STACKOOM.COM