As shown below:
# coding: utf-8
from __future__ import print_function

import cv2
import numpy as np
import torch
import matplotlib.pyplot as plt
from torchvision import transforms

# import load_caffemodel  # local helper module, not needed for this check
# if the model has an LSTM:
# torch.backends.cudnn.enabled = False

imgpath = 'D:/ck/files_detected_face224/'
imgname = 'S055_002_00000025.png'  # anger
image_path = imgpath + imgname

mean_file = [0.485, 0.456, 0.406]
std_file = [0.229, 0.224, 0.225]

raw_image = cv2.imread(image_path)[..., ::-1]   # OpenCV reads BGR, flip to RGB
print(raw_image.shape)
raw_image = cv2.resize(raw_image, (224,) * 2)   # contiguous 224 x 224 x 3 uint8

# standard preprocessing: ToTensor scales to [0, 1], Normalize applies (x - mean) / std
image = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=mean_file, std=std_file),
])(raw_image).unsqueeze(0)   # 1 x 3 x 224 x 224
print(image.shape)

# invert the normalization and compare against the raw image
convert_image1 = image.numpy()
convert_image1 = np.squeeze(convert_image1)   # 3 x 224 x 224, C x H x W
convert_image1 = convert_image1 * np.reshape(std_file, (3, 1, 1)) + np.reshape(mean_file, (3, 1, 1))
convert_image1 = np.transpose(convert_image1, (1, 2, 0))   # back to H x W x C
print(convert_image1.shape)
convert_image1 = convert_image1 * 255   # undo the / 255 done by ToTensor

diff = raw_image - convert_image1
err = np.max(diff)
print(err)   # close to zero: the round trip recovers the raw image

plt.imshow(np.uint8(convert_image1))
plt.show()
Conclusion:
input_image = (raw_image / 255 - mean) / std   (element-wise, per channel)
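To double-check this relationship without relying on the image file above, here is a minimal sketch (the random input array is a stand-in for a real image, not part of the original experiment) that compares the pipeline output with the formula directly:

import numpy as np
from torchvision import transforms

mean_file = [0.485, 0.456, 0.406]
std_file = [0.229, 0.224, 0.225]

# a random H x W x C uint8 array stands in for a real image
fake_image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)

# the same pipeline as above
pipeline = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=mean_file, std=std_file),
])
out = pipeline(fake_image).numpy()   # C x H x W

# manual computation of (raw / 255 - mean) / std, broadcast per channel
manual = (fake_image / 255.0 - np.array(mean_file)) / np.array(std_file)
manual = np.transpose(manual, (2, 0, 1))   # H x W x C -> C x H x W

print(np.max(np.abs(out - manual)))   # on the order of 1e-7, i.e. only float rounding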
Next, let's investigate how the mean file and std file are generated:
mean_file = [0.485, 0.456, 0.406]
std_file = [0.229, 0.224, 0.225]
# coding: utf-8
import argparse
import os

import numpy as np
import matplotlib.pyplot as plt
import torchvision
import torchvision.transforms as transforms

dataset_names = ('cifar10', 'cifar100', 'mnist')

parser = argparse.ArgumentParser(description='PyTorchLab')
parser.add_argument('-d', '--dataset', metavar='DATA', default='cifar10',
                    choices=dataset_names,
                    help='dataset to be used: ' + ' | '.join(dataset_names) + ' (default: cifar10)')
args = parser.parse_args()

data_dir = os.path.join('.', args.dataset)
print(args.dataset)

if args.dataset == "cifar10":
    train_transform = transforms.Compose([transforms.ToTensor()])
    train_set = torchvision.datasets.CIFAR10(root=data_dir, train=True,
                                             download=True, transform=train_transform)
    # train_data is an (N, 32, 32, 3) uint8 array in RGB order
    # (in newer torchvision versions this attribute is called .data)
    print(train_set.train_data.shape)
    print(train_set.train_data.mean(axis=(0, 1, 2)) / 255)   # per-channel mean
    print(train_set.train_data.std(axis=(0, 1, 2)) / 255)    # per-channel std

    # show one image to confirm the channel order is RGB (not BGR as in caffe/OpenCV)
    train_data = train_set.train_data
    ind = 100
    img0 = train_data[ind, ...]
    print(img0.shape)
    print(type(img0))
    plt.imshow(img0)   # a ship at sea
    plt.show()

elif args.dataset == "cifar100":
    train_transform = transforms.Compose([transforms.ToTensor()])
    train_set = torchvision.datasets.CIFAR100(root=data_dir, train=True,
                                              download=True, transform=train_transform)
    print(train_set.train_data.shape)
    print(np.mean(train_set.train_data, axis=(0, 1, 2)) / 255)
    print(np.std(train_set.train_data, axis=(0, 1, 2)) / 255)

elif args.dataset == "mnist":
    train_transform = transforms.Compose([transforms.ToTensor()])
    train_set = torchvision.datasets.MNIST(root=data_dir, train=True,
                                           download=True, transform=train_transform)
    print(list(train_set.train_data.size()))
    print(train_set.train_data.float().mean() / 255)
    print(train_set.train_data.float().std() / 255)
Result:
cifar10
Files already downloaded and verified
(50000, 32, 32, 3)
[ 0.49139968  0.48215841  0.44653091]
[ 0.24703223  0.24348513  0.26158784]
(32, 32, 3)
<class 'numpy.ndarray'>
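Once computed, these per-channel statistics are typically plugged back into the training transform. A minimal sketch, assuming the CIFAR-10 values printed above are the ones to use (the root path and variable names here are illustrative):

import torchvision
import torchvision.transforms as transforms

# per-channel statistics of the CIFAR-10 training set, as printed above
cifar10_mean = [0.49139968, 0.48215841, 0.44653091]
cifar10_std = [0.24703223, 0.24348513, 0.26158784]

train_transform = transforms.Compose([
    transforms.ToTensor(),                                     # uint8 HWC -> float CHW in [0, 1]
    transforms.Normalize(mean=cifar10_mean, std=cifar10_std),  # (x - mean) / std per channel
])

train_set = torchvision.datasets.CIFAR10(root='./cifar10', train=True,
                                         download=True, transform=train_transform)

img, label = train_set[0]
print(img.shape, img.min().item(), img.max().item())   # values now span roughly [-2, 2] instead of [0, 1]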
Use MATLAB to check how mean_file and std_file are computed:
% load cifar10 dataset
data = load('cifar10_train_data.mat');
train_data = data.train_data;
disp(size(train_data));
temp = mean(train_data, 1);
disp(size(temp));

train_data = double(train_data);

% compute mean_file: average over the sample, height and width dimensions
mean_val = mean(mean(mean(train_data, 1), 2), 3) / 255;

% compute std_file: flatten each channel and take the std over all elements
temp1 = train_data(:, :, :, 1);
std_val1 = std(temp1(:)) / 255;
temp2 = train_data(:, :, :, 2);
std_val2 = std(temp2(:)) / 255;
temp3 = train_data(:, :, :, 3);
std_val3 = std(temp3(:)) / 255;

mean_val = squeeze(mean_val);
std_val = [std_val1, std_val2, std_val3];
disp(mean_val);
disp(std_val);

% result: mean_val: [0.4914, 0.4822, 0.4465]
%         std_val:  [0.2470, 0.2435, 0.2616]
The mean could also be computed the same way as the standard deviation (flattening each channel first). For simplicity, though, note that the mean of all elements of an array equals the result of averaging along one dimension after another, so the following form is used directly:
mean_val = mean(mean(mean(train_data,1),2),3)/255;
The standard deviation, in contrast, is computed per channel over all flattened elements (all pixels of all samples) and then divided by 255, because unlike the mean it cannot be obtained by reducing one dimension at a time.
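This asymmetry is easy to check numerically: averaging is linear, so reducing one axis at a time reproduces the overall mean, while doing the same with the standard deviation does not. A small sketch with random data (the shape is arbitrary, chosen only for illustration):

import numpy as np

data = np.random.rand(1000, 32, 32)   # arbitrary N x H x W data for one channel

# mean: reducing axis by axis matches the mean over all elements
print(data.mean(axis=0).mean(axis=0).mean(axis=0))   # sequential means
print(data.mean())                                   # overall mean, identical up to rounding

# std: reducing axis by axis does NOT reproduce the overall std,
# which is why the MATLAB code flattens each channel before calling std
print(data.std(axis=0).std(axis=0).std(axis=0))      # a different, much smaller number
print(data.std())                                    # overall std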
That is all for this investigation of mean and std in PyTorch; hopefully it serves as a useful reference, and thank you for supporting 小牛知识库.