FDDB人脸数据集dataset的dataset数据集的制作

沈宏朗
2023-12-01

FDDB为图片多人脸目标检测数据集,本文根据vocdataset 进行改编,将FDDB数据集进行分割,并进行图像预处理,翻转,随机裁剪等数据集增强相关的预处理。
如有转载请 附本文链接 :https://blog.csdn.net/canmang1/article/details/108487673

# 每个标注的椭圆形人脸由六个元素组成。
# (ra, rb, Θ, cx, cy, s)
# ra,rb:半长轴、半短轴
# cx, cy:椭圆中心点坐标
# Θ:长轴与水平轴夹角(头往左偏Θ为正,头往右偏Θ为负)
# s:置信度得分
from os import listdir  # 返回指定的文件夹包含的文件或文件夹的名字的列表 path=os.getcwd()
from os.path import join #/后添加路径使用
from random import random
from PIL import Image, ImageDraw
import re
from multiprocessing import Pipe, Process
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import cv2
from tqdm import tqdm
import numpy as np
from sampling import sampleEzDetect
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
from tensorboardX import SummaryWriter

__all__ = ["vocClassName", "vocClassID", "vocDataset"]

#img = Image.open(“图片路径”).convert(‘RGB’)  此时使HWC,需要WHC 利用torch.contigous.view进行维度变换
class text_info:
    def __init__(self,path_filename,istraining=True,predict=False):
        self.path=path_filename
        self.pathlist =None
        self.istraining=istraining
        self.predict=predict
    def creat_dict_of_bbox(self,filepath):
        target_gt={}
        f_test_bbox=open(filepath, "r")
        test_bbox=f_test_bbox.readlines()
        pathlist=[]
        for lines in test_bbox:
            lines=lines.rstrip()
            if lines.startswith('2002') or lines.startswith('2003'):
                path=join('./train/',lines)
                path=path+'.jpg'
                target_gt[path]=[]
                pathlist.append(path)
            else:
                if lines==None:
                    continue
                lines=lines.split()[0:5]
                # print('slipt后的结果:'+str(lines))
                target_gt[path].append(lines)
        f_test_bbox.close()
        for path_1 in pathlist:
            if target_gt[path]==None:
                target_gt.pop(path)
        return pathlist,target_gt
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#解析,重新获得对应的ground_truth
    def special_picture_bboxes(self,file_path_train_or_test):
        loc_bboxes=[]
        pathlist, target_gt = text_info.creat_dict_of_bbox(self, self.path)
        num, gt_bbox = text_info.batch_image_data(self, target_gt)
        # 这里的bboxes为{ 0 : [ [] , [] , [] , [] ] }
        batch_gt_boxes = text_info.every_batch_bboxes(self, num, gt_bbox)
        #这里的index=[2,4,5],pathlist不出错的情况下,为一个数【n】
        index = [i for i, v in enumerate(pathlist) if v == file_path_train_or_test]
        for i in index:
            for j in range(len(batch_gt_boxes[i])):
                # (ra, rb, Θ, cx, cy, s)
                batch_gt_boxes[i][j][0] = float(batch_gt_boxes[i][j][3]) - float(batch_gt_boxes[i][j][0])
                batch_gt_boxes[i][j][1] = float(batch_gt_boxes[i][j][4]) - float(batch_gt_boxes[i][j][1])
                batch_gt_boxes[i][j][2] = float(batch_gt_boxes[i][j][3]) + float(batch_gt_boxes[i][j][0])
                batch_gt_boxes[i][j][3] = float(batch_gt_boxes[i][j][4]) + float(batch_gt_boxes[i][j][1])
                batch_gt_boxes[i][j][4]=float(batch_gt_boxes[i][j][2])
            loc_bboxes.append(batch_gt_boxes[i])
        return loc_bboxes[0]
    #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    def batch_image_data(self,dict_gt):
        filename = list(dict_gt.keys())
        number_bboxes=[]
        all_gt_bboxes=[]
        for i in range(len(filename)):
            index_number=dict_gt[filename[i]][0]
            number_bboxes.append(index_number)
            for index in range(int(index_number[0])):
                loc_bboxes=dict_gt[filename[i]][index+1]
                all_gt_bboxes.append(loc_bboxes)
        return number_bboxes,all_gt_bboxes
    #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    #根据成批次的number和gt_bbox进行相应的
    def every_batch_bboxes(self,number_list,bboxes_list):
        batch=0
        box_number=0
        dict1={}
        for index in number_list:
            dict1[batch]=[]
            for i in range(int(index[0])):
                dict1[batch].append(bboxes_list[box_number])
                box_number+=1
            batch+=1
        return dict1
    def forward(self):
        pathlist,target_gt = text_info.creat_dict_of_bbox(self,self.path)
        self.pathlist=pathlist
        # file_name='E:/数据集/wider/WIDER_test/images/0--Parade/0_Parade_Parade_0_904.jpg'
        num, gt_bbox =text_info.batch_image_data(self,target_gt)
        batch_gt_boxes = text_info.every_batch_bboxes(self,num, gt_bbox)
        bboxes=text_info.gettxtInfo(self,pathlist,num,batch_gt_boxes)
        # return pathlist,num,batch_gt_boxes
        return bboxes
    def gettxtInfo(self,pathlist,num,batch_gt_boxes):
        bboxes=[]
        for key,value in batch_gt_boxes.items():
            # print('value'+str(value[0]))
            for i in range(int(num[key][0])):
                newAnn = {}
                newAnn['category_id']=pathlist[key]
                newAnn['xmin'] = float(value[i][3])-float(value[i][0])
                newAnn['ymin'] = float(value[i][4])-float(value[i][1])
                newAnn['xmax'] = float(value[i][3])+float(value[i][0])
                newAnn['ymax'] = float(value[i][4])+float(value[i][1])
                newAnn['angle']=float(value[i][2])
                bboxes.append(newAnn)
        return bboxes
# mean_std=main(allTrainingData,allTestingData)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
class vocDataset(data.Dataset):
    def __init__(self, config, isTraining=True):
        super(vocDataset, self).__init__()
        self.isTraining = isTraining
        self.config = config
        if isTraining:
            normalize = transforms.Normalize(mean=[train_data_mean_std['mean_rgb'][0], train_data_mean_std['mean_rgb'][1],train_data_mean_std['mean_rgb'][2]],
                                         std=[train_data_mean_std['std_rgb'][0],train_data_mean_std['std_rgb'][1],train_data_mean_std['std_rgb'][2]])  # 用均值和方差对图片的RGB值分别进行归一化(也有其他方法,这种相对比较简单)
        else:
            normalize = transforms.Normalize(mean=[test_data_mean_std['mean_rgb'][0], test_data_mean_std['mean_rgb'][1],test_data_mean_std['mean_rgb'][2]],
                                         std=[test_data_mean_std['std_rgb'][0],test_data_mean_std['std_rgb'][1],test_data_mean_std['std_rgb'][2]])
        self.transformer = transforms.Compose([transforms.ToTensor(), normalize])
    def __getitem__(self, index):
        item = None
        if self.isTraining:
            item = allTrainingData[index % len(allTrainingData)]
            #得到某个特定的图片的所有人脸bboxes
            allBboxes = train_info.special_picture_bboxes(item)
        else:
            item = allTestingData[index % len(allTestingData)]
            allBboxes = train_info.special_picture_bboxes(item)
        img = Image.open(item)  # item[0]为图像数据
# item[1]为通过getVOCInfo函数解析出真实label的数据
        imgWidth, imgHeight = img.size
        targetWidth = int((random() * 0.25 + 0.75) * imgWidth)
        targetHeight = int((random() * 0.25 + 0.75) * imgHeight)
        # 对图片进行随机crop,并保证bbox大小
        xmin = int(random() * (imgWidth - targetWidth))
        ymin = int(random() * (imgHeight - targetHeight))
        img = img.crop((xmin, ymin, xmin + targetWidth, ymin + targetHeight))
        img = img.resize((self.config.targetWidth, self.config.targetHeight), Image.BILINEAR)
        imgT = self.transformer(img)
        imgT = imgT * 256
        # 调整bbox
        bboxes = []
        for i in allBboxes:
            xl = int(i[0]) - xmin
            yt = int(i[1]) - ymin
            xr = int(i[2]) - xmin
            yb = int(i[3]) - ymin
            if xl < 0:
                xl = 0
            if xr >= targetWidth:
                xr = targetWidth - 1
            if yt < 0:
                yt = 0
            if yb >= targetHeight:
                yb = targetHeight - 1
            xl = xl / targetWidth  # 经过随机裁剪后得到targetwidth与height,求得是左右与上下占总体长宽的比例
            xr = xr / targetWidth
            yt = yt / targetHeight
            yb = yb / targetHeight
            if (xr - xl >= 0.05 and yb - yt >= 0.05):
                bbox = [1, xl, yt, xr, yb]
                bboxes.append(bbox)  # 生成的bbox是对整体位置比例的换算
        if len(bboxes) == 0:
            return self[index + 1]
        target = sampleEzDetect(self.config, bboxes)
        '''
        ### 对预测图片进行测试 ##########
        draw = ImageDraw.Draw(img)
        num = int(target[0])
        for j in range(0,num):
            offset = j * 6
            if ( target[offset + 1] < 0):
                break
            k = int(target[offset + 6])
            trueBox = [ target[offset + 2],
                        target[offset + 3],
                        target[offset + 4],
                        target[offset + 5] ]
            predBox = self.config.predBoxes[k]
            draw.rectangle([trueBox[0]*self.config.targetWidth,
                                        trueBox[1]*self.config.targetHeight,
                                        trueBox[2]*self.config.targetWidth,
                                        trueBox[3]*self.config.targetHeight])
            draw.rectangle([predBox[0]*self.config.targetWidth,
                                        predBox[1]*self.config.targetHeight,
                                        predBox[2]*self.config.targetWidth,
                                        predBox[3]*self.config.targetHeight], None, "red")
        del draw
        img.save("/tmp/{}.jpg".format(index) )
        '''
        return imgT, target
    def __len__(self):
        if self.isTraining:
            num = len(allTrainingData) - (len(allTrainingData) % self.config.batchSize)
            return num
        else:
            num = len(allTestingData) - (len(allTestingData) % self.config.batchSize)
            return num
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#调用就会加载
allData=[]
allTrainingData=[]
allTestingData=[]
for i in range(10):
    i+=1
    train_path_filename=('E:/数据集2/FDDB/FDDB-folds/FDDB-fold-0{}-ellipseList.txt'.format(i))
    if i==10:
        train_path_filename = ('E:/数据集2/FDDB/FDDB-folds/FDDB-fold-10-ellipseList.txt')
        train_info=text_info(train_path_filename,istraining=True,predict=False)

        allPREData,_=train_info.creat_dict_of_bbox(train_info.path)
        allData+=allPREData
for path_11 in allData:
    if Image.open(path_11).mode == 'L':
        if allData.index(path_11):
            p = allData.index(path_11)
            allData.pop(p)
for index in range(len(allData)):
    if index%10==0:
        allTestingData.append(allData[index])
    else:
        allTrainingData.append(allData[index])

train_data_mean_std={'mean_rgb': [0.3925,0.35197,0.39247], 'std_rgb': [0.0748, 0.0646, 0.0618]}
test_data_mean_std={'mean_rgb': [0.4890,0.4392,0.4890], 'std_rgb': [0.1013, 0.0899, 0.088]}

vocClassName = ['face']
vocClassID={}
for i in range(len(vocClassName)):
    vocClassID[vocClassName[i]]=i+1
 类似资料: