Note: this script depends on the pretrainedmodels package, so run pip install pretrainedmodels first.
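Before running the full training script, it can help to sanity-check the dependency on its own. The following is a minimal sketch, assuming the ImageNet-pretrained resnext101_64x4d exposed by pretrainedmodels (the input shape and printed output shape follow from its 1000-way ImageNet head):

# Minimal sanity check that pretrainedmodels is installed and exposes ResNeXt.
import torch
import pretrainedmodels.models.resnext as resnext

model = resnext.resnext101_64x4d(num_classes=1000, pretrained='imagenet')
print(model.last_linear)           # the classifier head the script below replaces
x = torch.randn(1, 3, 224, 224)    # ResNeXt expects 3x224x224 inputs
with torch.no_grad():
    print(model(x).shape)          # expected: torch.Size([1, 1000])

If this runs and prints a Linear(2048, 1000) layer, the full script below should be able to build and reshape the model the same way.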
""" Finetuning Torchvision Models """ from __future__ import print_function from __future__ import division import torch import torch.nn as nn import torch.optim as optim import numpy as np import torchvision from torchvision import datasets, models, transforms import matplotlib.pyplot as plt import time import os import copy import argparse import pretrainedmodels.models.resnext as resnext print("PyTorch Version: ",torch.__version__) print("Torchvision Version: ",torchvision.__version__) # Top level data directory. Here we assume the format of the directory conforms # to the ImageFolder structure #data_dir = "./data/hymenoptera_data" data_dir = "/media/dell/dell/data/13/" # Models to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception] model_name = "resnext" # Number of classes in the dataset num_classes = 171 # Batch size for training (change depending on how much memory you have) batch_size = 16 # Number of epochs to train for num_epochs = 1000 # Flag for feature extracting. When False, we finetune the whole model, # when True we only update the reshaped layer params feature_extract = False # 参数设置,使得我们能够手动输入命令行参数,就是让风格变得和Linux命令行差不多 parser = argparse.ArgumentParser(description='PyTorch seresnet') parser.add_argument('--outf', default='/home/dell/Desktop/zhou/train7', help='folder to output images and model checkpoints') #输出结果保存路径 parser.add_argument('--net', default='/home/dell/Desktop/zhou/train7/resnext.pth', help="path to net (to continue training)") #恢复训练时的模型路径 args = parser.parse_args() def train_model(model, dataloaders, criterion, optimizer, num_epochs=25,is_inception=False): #def train_model(model, dataloaders, criterion, optimizer, num_epochs=25,scheduler, is_inception=False): since = time.time() val_acc_history = [] best_model_wts = copy.deepcopy(model.state_dict()) best_acc = 0.0 print("Start Training, resnext!") # 定义遍历数据集的次数 with open("/home/dell/Desktop/zhou/train7/acc.txt", "w") as f1: with open("/home/dell/Desktop/zhou/train7/log.txt", "w")as f2: for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch+1, num_epochs)) print('*' * 10) # Each epoch has a training and validation phase for phase in ['train', 'val']: if phase == 'train': #scheduler.step() model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Iterate over data. for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): # Get model outputs and calculate loss # Special case for inception because in training it has an auxiliary output. In train # mode we calculate the loss by summing the final output and the auxiliary output # but in testing we only consider the final output. 
if is_inception and phase == 'train': # From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958 outputs, aux_outputs = model(inputs) loss1 = criterion(outputs, labels) loss2 = criterion(aux_outputs, labels) loss = loss1 + 0.4*loss2 else: outputs = model(inputs) loss = criterion(outputs, labels) _, preds = torch.max(outputs, 1) # backward + optimize only if in training phase if phase == 'train': loss.backward() optimizer.step() # statistics running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) epoch_loss = running_loss / len(dataloaders[phase].dataset) epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset) print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc)) f2.write('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc)) f2.write('\n') f2.flush() # deep copy the model if phase == 'val': if (epoch+1)%5==0: #print('Saving model......') torch.save(model.state_dict(), '%s/inception_%03d.pth' % (args.outf, epoch + 1)) f1.write("EPOCH=%03d,Accuracy= %.3f%%" % (epoch + 1, 100*epoch_acc)) f1.write('\n') f1.flush() if phase == 'val' and epoch_acc > best_acc: f3 = open("/home/dell/Desktop/zhou/train7/best_acc.txt", "w") f3.write("EPOCH=%d,best_acc= %.3f%%" % (epoch + 1,100*epoch_acc)) f3.close() best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) if phase == 'val': val_acc_history.append(epoch_acc) time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print('Best val Acc: {:4f}'.format(best_acc)) # load best model weights model.load_state_dict(best_model_wts) return model, val_acc_history def set_parameter_requires_grad(model, feature_extracting): if feature_extracting: for param in model.parameters(): param.requires_grad = False def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True): # Initialize these variables which will be set in this if statement. Each of these # variables is model specific. 
model_ft = None input_size = 0 if model_name == "resnet": """ Resnet18 """ model_ft = models.resnet18(pretrained=use_pretrained) set_parameter_requires_grad(model_ft, feature_extract) num_ftrs = model_ft.fc.in_features model_ft.fc = nn.Linear(num_ftrs, num_classes) input_size = 224 elif model_name == "alexnet": """ Alexnet """ model_ft = models.alexnet(pretrained=use_pretrained) set_parameter_requires_grad(model_ft, feature_extract) num_ftrs = model_ft.classifier[6].in_features model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes) input_size = 224 elif model_name == "vgg": """ VGG11_bn """ model_ft = models.vgg11_bn(pretrained=use_pretrained) set_parameter_requires_grad(model_ft, feature_extract) num_ftrs = model_ft.classifier[6].in_features model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes) input_size = 224 elif model_name == "squeezenet": """ Squeezenet """ model_ft = models.squeezenet1_0(pretrained=use_pretrained) set_parameter_requires_grad(model_ft, feature_extract) model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1)) model_ft.num_classes = num_classes input_size = 224 elif model_name == "densenet": """ Densenet """ model_ft = models.densenet121(pretrained=use_pretrained) set_parameter_requires_grad(model_ft, feature_extract) num_ftrs = model_ft.classifier.in_features model_ft.classifier = nn.Linear(num_ftrs, num_classes) input_size = 224 elif model_name == "resnext": """ resnext Be careful, expects (3,224,224) sized images """ model_ft = resnext.resnext101_64x4d(num_classes=1000, pretrained='imagenet') set_parameter_requires_grad(model_ft, feature_extract) model_ft.last_linear = nn.Linear(2048, num_classes) #pre='/home/dell/Desktop/zhou/train6/inception_009.pth' #model_ft.load_state_dict(torch.load(pre)) input_size = 224 else: print("Invalid model name, exiting...") exit() return model_ft, input_size # Initialize the model for this run model_ft, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True) # Print the model we just instantiated #print(model_ft) data_transforms = { 'train': transforms.Compose([ transforms.RandomResizedCrop(input_size), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'val': transforms.Compose([ transforms.Resize(input_size), transforms.CenterCrop(input_size), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), } print("Initializing Datasets and Dataloaders...") # Create training and validation datasets image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']} # Create training and validation dataloaders dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']} # Detect if we have a GPU available device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu") #we='/home/dell/Desktop/dj/inception_050.pth' #model_ft.load_state_dict(torch.load(we))#diaoyong # Send the model to GPU model_ft = model_ft.to(device) params_to_update = model_ft.parameters() print("Params to learn:") if feature_extract: params_to_update = [] for name,param in model_ft.named_parameters(): if param.requires_grad == True: params_to_update.append(param) print("\t",name) else: for name,param in model_ft.named_parameters(): if param.requires_grad == True: print("\t",name) # Observe that all parameters are being optimized optimizer_ft = 
optim.SGD(params_to_update, lr=0.01, momentum=0.9) # Decay LR by a factor of 0.1 every 7 epochs #exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=30, gamma=0.95) # Setup the loss fxn criterion = nn.CrossEntropyLoss() print(model_ft) # Train and evaluate model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=num_epochs, is_inception=False)
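Once training has written a checkpoint, the natural follow-up is reloading it for inference. This is a minimal sketch under stated assumptions: the checkpoint name resnext_005.pth and the image path test.jpg are placeholders, and it reuses initialize_model, data_transforms, image_datasets, and device from the script above.

# Hedged inference sketch: reload a saved checkpoint and classify one image.
from PIL import Image

model_ft, input_size = initialize_model("resnext", num_classes, feature_extract)
# 'resnext_005.pth' is a placeholder; use any checkpoint written by train_model.
model_ft.load_state_dict(torch.load('/home/dell/Desktop/zhou/train7/resnext_005.pth',
                                    map_location=device))
model_ft = model_ft.to(device).eval()

img = Image.open('test.jpg').convert('RGB')    # 'test.jpg' is a placeholder path
tensor = data_transforms['val'](img).unsqueeze(0).to(device)
with torch.no_grad():
    pred = torch.argmax(model_ft(tensor), dim=1).item()
print(image_datasets['train'].classes[pred])   # map the index back to a class folder name

The map_location argument keeps the load working even when the checkpoint was saved from a different GPU than the one currently available.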
That's all for this PyTorch implementation of the ResNeXt network. I hope it gives you a useful reference, and I hope you'll continue to support 小牛知识库.