
An example of extracting model feature vectors with PyTorch and saving them to CSV

樊浩初
2023-03-14
This article walks through an example of extracting feature vectors from a PyTorch model and saving them to a CSV file, including usage tips and caveats for readers who need this workflow.

Extracting model feature vectors with PyTorch

# -*- coding: utf-8 -*-
"""
Extract feature vectors with a pretrained backbone and append them to a CSV file.
"""
import os

import pandas as pd
import pretrainedmodels
import torch
import torch.nn as nn
from PIL import Image
from torchvision import transforms


class FCViewer(nn.Module):
    """Flatten a (N, C, 1, 1) feature map into (N, C)."""
    def forward(self, x):
        return x.view(x.size(0), -1)


class M(nn.Module):
    def __init__(self, backbone1, drop, pretrained=True):
        super(M, self).__init__()
        if pretrained:
            img_model = pretrainedmodels.__dict__[backbone1](num_classes=1000, pretrained='imagenet')
        else:
            img_model = pretrainedmodels.__dict__[backbone1](num_classes=1000, pretrained=None)
        # Drop the backbone's last two children (pooling and classifier), keep the convolutional encoder.
        self.img_encoder = list(img_model.children())[:-2]
        self.img_encoder.append(nn.AdaptiveAvgPool2d(1))
        self.img_encoder = nn.Sequential(*self.img_encoder)
        # A dropout layer could be inserted when drop > 0; both branches currently only flatten.
        if drop > 0:
            self.img_fc = nn.Sequential(FCViewer())
        else:
            self.img_fc = nn.Sequential(FCViewer())

    def forward(self, x_img):
        x_img = self.img_encoder(x_img)
        x_img = self.img_fc(x_img)
        return x_img


model1 = M('resnet18', 0, pretrained=True)
model1.eval()  # inference mode: freeze BatchNorm statistics

features_dir = '/home/cc/Desktop/features'

transform1 = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor()])

file_path = '/home/cc/Desktop/picture'
names = os.listdir(file_path)
print(names)

for name in names:
    pic = file_path + '/' + name
    img = Image.open(pic).convert('RGB')  # ensure 3 channels
    img1 = transform1(img)
    x = img1.unsqueeze(0)  # add a batch dimension
    with torch.no_grad():
        y = model1(x)
    y = y.numpy().tolist()
    # Append one row of features per image.
    test = pd.DataFrame(data=y)
    test.to_csv("/home/cc/Desktop/features/3.csv", mode='a+', index=False, header=False)
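To sanity-check the output, the saved file can be read back with pandas. This is a minimal sketch, assuming the same output path as above; with a resnet18 backbone the pooled feature vector has 512 dimensions, so each CSV row should have 512 columns:

import pandas as pd

feats = pd.read_csv("/home/cc/Desktop/features/3.csv", header=None)
print(feats.shape)  # expected: (number_of_images, 512) for a resnet18 backbone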

Loading a trained model

import torch
import torch.nn.functional as F
import torch.nn as nn


class ResidualBlock(nn.Module):
    def __init__(self, inchannel, outchannel, stride=1):
        super(ResidualBlock, self).__init__()
        self.left = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(inplace=True),
            nn.Conv2d(outchannel, outchannel, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(outchannel)
        )
        # Projection shortcut when the spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or inchannel != outchannel:
            self.shortcut = nn.Sequential(
                nn.Conv2d(inchannel, outchannel, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(outchannel)
            )

    def forward(self, x):
        out = self.left(x)
        out += self.shortcut(x)
        out = F.relu(out)
        return out


class ResNet(nn.Module):
    def __init__(self, ResidualBlock, num_classes=10):
        super(ResNet, self).__init__()
        self.inchannel = 64
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )
        self.layer1 = self.make_layer(ResidualBlock, 64, 2, stride=1)
        self.layer2 = self.make_layer(ResidualBlock, 128, 2, stride=2)
        self.layer3 = self.make_layer(ResidualBlock, 256, 2, stride=2)
        self.layer4 = self.make_layer(ResidualBlock, 512, 2, stride=2)
        self.fc = nn.Linear(512, num_classes)

    def make_layer(self, block, channels, num_blocks, stride):
        strides = [stride] + [1] * (num_blocks - 1)  # e.g. [1, 1] or [2, 1]
        layers = []
        for stride in strides:
            layers.append(block(self.inchannel, channels, stride))
            self.inchannel = channels
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out


def ResNet18():
    return ResNet(ResidualBlock)


import os

import pandas as pd
import pretrainedmodels
from PIL import Image
from torchvision import transforms


class FCViewer(nn.Module):
    """Flatten a (N, C, 1, 1) feature map into (N, C)."""
    def forward(self, x):
        return x.view(x.size(0), -1)


class M(nn.Module):
    def __init__(self, backbone1, drop, pretrained=True):
        super(M, self).__init__()
        if pretrained:
            img_model = pretrainedmodels.__dict__[backbone1](num_classes=1000, pretrained='imagenet')
        else:
            # Model definition: the ResNet-18 defined above, restored from a trained checkpoint.
            img_model = ResNet18()
            we = '/home/cc/Desktop/dj/model1/incption--7'
            img_model.load_state_dict(torch.load(we))  # load the trained weights
        # Drop the backbone's last two children, then pool to a fixed-size vector.
        self.img_encoder = list(img_model.children())[:-2]
        self.img_encoder.append(nn.AdaptiveAvgPool2d(1))
        self.img_encoder = nn.Sequential(*self.img_encoder)
        # A dropout layer could be inserted when drop > 0; both branches currently only flatten.
        if drop > 0:
            self.img_fc = nn.Sequential(FCViewer())
        else:
            self.img_fc = nn.Sequential(FCViewer())

    def forward(self, x_img):
        x_img = self.img_encoder(x_img)
        x_img = self.img_fc(x_img)
        return x_img


model1 = M('resnet18', 0, pretrained=False)  # False: use the locally trained ResNet-18
model1.eval()  # inference mode: freeze BatchNorm statistics

features_dir = '/home/cc/Desktop/features'

# Crop to 32x32 to match the input size the custom ResNet-18 expects.
transform1 = transforms.Compose([
    transforms.Resize(56),
    transforms.CenterCrop(32),
    transforms.ToTensor()])

file_path = '/home/cc/Desktop/picture'
names = os.listdir(file_path)
print(names)

for name in names:
    pic = file_path + '/' + name
    img = Image.open(pic).convert('RGB')  # ensure 3 channels
    img1 = transform1(img)
    x = img1.unsqueeze(0)  # add a batch dimension
    with torch.no_grad():
        y = model1(x)
    y = y.numpy().tolist()
    # Append one row of features per image.
    test = pd.DataFrame(data=y)
    test.to_csv("/home/cc/Desktop/features/3.csv", mode='a+', index=False, header=False)
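For load_state_dict to succeed, the checkpoint at the path above must hold a state dict for the same ResNet18 architecture. A minimal sketch of how such a file could be written at the end of training (the training loop is omitted and the path simply mirrors the one used above; neither is prescribed by the original article):

net = ResNet18()
# ... train net on 32x32 images (e.g. CIFAR-10) ...
torch.save(net.state_dict(), '/home/cc/Desktop/dj/model1/incption--7')  # saves weights only, not the full module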

That is the whole example of extracting model feature vectors with PyTorch and saving them to CSV. I hope it serves as a useful reference.
