Face_Recognition: Face Recognition Functions Explained

薛寒
2023-12-01

Loading a face image file

load_image_file(file, mode='RGB'):  

Loads an image file via PIL.Image.open.

mode has two options: 'RGB' (3-channel) and 'L' (single-channel grayscale).

Returns a numpy.array.
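
A minimal usage sketch (the file name 'obama.jpg' is only a placeholder):

import face_recognition

# Load the image as a 3-channel RGB array; pass mode='L' for grayscale
image = face_recognition.load_image_file('obama.jpg')
print(image.shape, image.dtype)   # e.g. (height, width, 3) uint8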

Finding face locations (face detection / segmentation)

face_locations(img, number_of_times_to_upsample=1, model="hog"): 

Parameters:

img: a numpy.array holding the image in which to look for faces.

number_of_times_to_upsample: how many times to upsample the image while searching; higher values find smaller faces but take longer.

model: which detector to use. 'hog' is less accurate but fast on a CPU; 'cnn' is a more accurate deep-learning detector, but it is slow and needs GPU/CUDA acceleration. Note that the library expects the lowercase string 'cnn'.

Returns a list of face locations, each a (top, right, bottom, left) tuple.
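
A minimal detection sketch, reusing the image loaded above; the 'cnn' model only pays off with a CUDA-enabled dlib build:

# HOG is fast enough on a CPU; switch to model='cnn' for higher accuracy on a GPU
locations = face_recognition.face_locations(image, number_of_times_to_upsample=1, model='hog')
for (top, right, bottom, left) in locations:
    print('face at', top, right, bottom, left)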

Encoding faces

face_encodings(face_image, known_face_locations=None, num_jitters=1, model="small"): 

Parameters:

face_image: a numpy.array holding the image that contains the face(s) to encode.

known_face_locations: optional list of face locations. If None, the function calls _raw_face_locations internally, which finds the faces with the 'hog' model by default.

num_jitters: how many times to re-sample the face when computing the encoding; defaults to 1.

model: which landmark model to use; 'large' predicts 68 facial landmarks, 'small' predicts 5.

Returns a list of 128-dimensional feature vectors, one per face.
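
A minimal encoding sketch, passing the locations found above so the faces do not have to be detected a second time:

encodings = face_recognition.face_encodings(image, known_face_locations=locations,
                                            num_jitters=1, model='small')
for enc in encodings:
    print(enc.shape)   # each encoding is a (128,) numpy vector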

Comparing faces

compare_faces(known_face_encodings, face_encoding_to_check, tolerance=0.6):

Parameters:

known_face_encodings: a list of already-computed face encodings.

face_encoding_to_check: the single face encoding to compare against the list.

tolerance: the maximum face distance that still counts as a match (default 0.6; lower is stricter). Distances are computed as np.linalg.norm(face_encodings - face_to_compare, axis=1).

Returns a list of True/False comparison results, one per known encoding.
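
A minimal comparison sketch; the file names are placeholders, and face_distance is the library helper that computes the norm shown above:

import face_recognition

known_image = face_recognition.load_image_file('known.jpg')
unknown_image = face_recognition.load_image_file('unknown.jpg')

known_encodings = face_recognition.face_encodings(known_image)           # list of enrolled faces
unknown_encoding = face_recognition.face_encodings(unknown_image)[0]     # the face to check

results = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.6)
distances = face_recognition.face_distance(known_encodings, unknown_encoding)
print(results)     # True wherever the corresponding distance is <= tolerance
print(distances)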

 

Appendix: a test class I wrote

import face_recognition
import cv2
import numpy as np
import time



class Wqs_Face_Recognition:

    def __init__(self, tolerance=0.6, num_of_times_to_upSample=1, model='cnn', imageType=cv2.IMREAD_COLOR, isDebug=False):
        # Keep the known-face lists on the instance; class-level lists would be shared by every instance
        self.know_Encodings = []
        self.know_names = []
        self.tolerance = tolerance
        # face_recognition expects the lowercase string 'cnn'; passing 'CNN' silently falls back to HOG
        self.model = model
        self.num_of_times_to_upSample = num_of_times_to_upSample
        self.imageType = imageType
        self.isDebug = isDebug

    # Set the match tolerance; smaller values mean stricter matching
    def SetTolerance(self, tolerance):
        self.tolerance = tolerance

    # Get the match tolerance
    def GetTolerance(self):
        return self.tolerance

    def Set_Num_of_times_to_upSample(self, num_of_times_to_upSample):
        self.num_of_times_to_upSample = num_of_times_to_upSample

    def Get_Num_of_times_to_upSample(self):
        return self.num_of_times_to_upSample

    def SetModel(self, model):
        self.model = model

    def GetModel(self):
        return self.model

    # Segment the faces in an image and save each crop to its own file
    def FaceSegmentation(self, faceFile: str = None, faceMat: np.ndarray = None, faceName: str = None):
        if faceFile is not None:
            faceMat = cv2.imread(faceFile)
        try:
            shp = faceMat.shape
            if self.isDebug:
                print('FaceSegmentation faceMat shape', shp)
        except AttributeError:
            print('FaceSegmentation faceMat is None')
            return False
        locations = face_recognition.face_locations(faceMat, self.num_of_times_to_upSample, model=self.model)
        i = 0
        for location in locations:
            top, right, bottom, left = location
            if faceName is None and faceFile is not None:
                index = faceFile.find('.')
                faceName = faceFile[:index]

            outName = faceName + '_name_' + str(i) + '.jpg'
            i = i + 1
            # Crop the detected face region (rows are top:bottom, columns are left:right)
            faceCrop = faceMat[top:bottom, left:right]

            cv2.imwrite(outName, faceCrop)

    # Compute the encoding of a known face and register it under a name
    def Know_Encoding(self, faceFile: str = None, faceMat: np.ndarray = None, faceName: str = None, index=-1):
        if faceFile is not None:
            faceMat = cv2.imread(faceFile)
        try:
            shp = faceMat.shape
            if self.isDebug:
                print('Know_Encoding faceMat shape', shp)
        except AttributeError:
            print('Know_Encoding faceMat is None')
            return False
        if faceName is None and faceFile is not None:
            loc = faceFile.find('.')
            faceName = faceFile[:loc]
        if faceName is None:
            faceName = 'NoName'
        tm = time.time()
        encodings = face_recognition.face_encodings(faceMat, model='small')
        if len(encodings) == 0:
            print('Know_Encoding found no face in the image')
            return False
        face_encoding = encodings[0]

        tm = time.time() - tm
        if self.isDebug:
            print('encoding time', tm)
        if index == -1:
            self.know_Encodings.append(face_encoding)
            self.know_names.append(faceName)
        else:
            self.know_Encodings.insert(index, face_encoding)
            self.know_names.insert(index, faceName)

    # Register an already-computed face encoding under a name
    def LoadKnow_Encoding(self, face_encoding: np.ndarray, name: str, index=-1):
        if index == -1:
            self.know_Encodings.append(face_encoding)
            self.know_names.append(name)
        else:
            self.know_Encodings.insert(index, face_encoding)
            self.know_names.insert(index, name)

    # Detect every face in the image and report all known people found
    def AllRecognition(self, faceFile: str = None, faceMat: np.ndarray = None):
        if faceFile is not None:
            faceMat = cv2.imread(faceFile)
        try:
            shp = faceMat.shape
            if self.isDebug:
                print('AllRecognition faceMat shape', shp)
        except AttributeError:
            print('AllRecognition faceMat is None')
            return False
        face_locations = face_recognition.face_locations(faceMat, self.num_of_times_to_upSample, model=self.model)
        faceEncodings = face_recognition.face_encodings(faceMat, face_locations)

        nameArray = []
        indexArray = []
        locationArray = []

        for face_location, face_encode in zip(face_locations, faceEncodings):
            (top, right, bottom, left) = face_location
            face = faceMat[top:bottom, left:right]
            if self.isDebug:
                cv2.imwrite('testFace.jpg', face)

            matches = face_recognition.compare_faces(self.know_Encodings, face_encode, self.tolerance)

            # Record every known person this face matches
            for index, match in enumerate(matches):
                if match:
                    nameArray.append(self.know_names[index])
                    indexArray.append(index)
                    locationArray.append(face_location)
                    if self.isDebug:
                        print('Name', self.know_names[index])
                        print('index', index)
                        print('face_location', face_location)
            if self.isDebug and not any(matches):
                print('Unknown person at', face_location)
        return nameArray, indexArray, locationArray

    # Check the image against a single known encoding
    def Recognition(self, know_Encoding: np.ndarray, know_name: str, faceFile: str = None, faceMat: np.ndarray = None):
        if faceFile is not None:
            faceMat = cv2.imread(faceFile)
        try:
            shp = faceMat.shape
            if self.isDebug:
                print('Recognition faceMat shape', shp)
        except AttributeError:
            print('Recognition faceMat is None')
            return False
        face_locations = face_recognition.face_locations(faceMat)
        faceEncodings = face_recognition.face_encodings(faceMat, face_locations)

        for face_location, face_encode in zip(face_locations, faceEncodings):

            (top, right, bottom, left) = face_location
            face = faceMat[top:bottom, left:right]
            if self.isDebug:
                cv2.imwrite('testFace.jpg', face)

            # compare_faces returns one bool per known encoding; here there is exactly one
            matches = face_recognition.compare_faces([know_Encoding], face_encode, self.tolerance)
            if matches[0]:
                if self.isDebug:
                    print('Known =', know_name)
                return True, know_name

        # No detected face matched the known encoding
        if self.isDebug:
            print('Unknown person')
        return False, None

# Draw a rectangle around each (top, right, bottom, left) location and display the image
def drawRect(file: str, locations: list):
    img = cv2.imread(file)
    print('drawRect locations', locations)
    for loc in locations:
        cv2.rectangle(img,(loc[3], loc[0]), (loc[1], loc[2]), color= (255, 0, 0))
    cv2.imshow(file,img)
    cv2.waitKey(0)

def main():
    wqsRecognition = Wqs_Face_Recognition(0.4)

    wqsRecognition.FaceSegmentation('sunli1.jpg')
    wqsRecognition.FaceSegmentation('yangmi.jpg')


    wqsRecognition.Know_Encoding('sunli1_name_0.jpg', faceName='sunli1')
    mat = cv2.imread('yangmi_name_0.jpg')
    wqsRecognition.Know_Encoding(faceMat=mat, faceName= 'yangmi')

    mat = cv2.imread('ymTest.jpg')
    nameArray, indexArray, locations = wqsRecognition.AllRecognition(faceMat=mat)

    drawRect('ymTest.jpg', locations)

    wqsRecognition.tolerance = 0.6
    wqsRecognition.Recognition(wqsRecognition.know_Encodings[1], wqsRecognition.know_names[1], 'ymTest.jpg')

if __name__ == '__main__':
    main()

 
