
Feature extraction with MXNet InsightFace

薛云瀚
2023-12-01

Code repository: https://github.com/deepinsight/insightface

 

Key points:

mx.model.load_checkpoint('../recognition/mxnet/new_model', 0)

'new_model' is the checkpoint prefix, i.e. the name of the symbol JSON file (new_model-symbol.json); 0 is the epoch whose trained parameters to load.

fc1_output: every layer in the symbol graph exposes an output named '<layer>_output'. fc1 is the name of the last layer, which in this model is a BN layer, so fc1_output is the output of that final BN layer, i.e. the face embedding.
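For reference, a minimal sketch of which files load_checkpoint reads and how the internal layer outputs can be listed by name (assuming the new_model checkpoint above exists):

import mxnet as mx

# load_checkpoint('../recognition/mxnet/new_model', 0) reads two files:
#   ../recognition/mxnet/new_model-symbol.json  (the network structure)
#   ../recognition/mxnet/new_model-0000.params  (the weights saved at epoch 0)
sym, arg_params, aux_params = mx.model.load_checkpoint('../recognition/mxnet/new_model', 0)

# Every layer exposes a '<name>_output' symbol; list the last few to find fc1_output
print(sym.get_internals().list_outputs()[-5:])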

 

3. Upload the model to insightface/models.

GPU version, replacement code:

gpu_id = 0
ctx = mx.gpu(gpu_id)
model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
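If the code must run on machines both with and without a GPU, the context can be chosen automatically. A small sketch, assuming MXNet >= 1.3 (where mx.context.num_gpus() is available):

import mxnet as mx

# Use the first GPU if one is present, otherwise fall back to CPU
ctx = mx.gpu(0) if mx.context.num_gpus() > 0 else mx.cpu()
model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)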

CPU version, feature extraction for a single image:

#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import time
import math
import mxnet as mx
import cv2
import numpy as np
from collections import namedtuple



def single_input(path):
    img = cv2.imread(path)
    # MXNet expects strict RGB channel order, while cv2.imread returns BGR by default, so convert
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (112, 112))
    img = img.transpose(2, 0, 1)

    # Add a fourth (batch) dimension and build an NDArray
    img = img[np.newaxis, :]
    array = mx.nd.array(img)
    return array
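# Note: as in the InsightFace deploy code, the raw 0-255 RGB image is fed to the
# model directly; no mean subtraction or scaling is applied here.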


if __name__ == "__main__":
    time_start = time.time()


    time0 = time.time()
    sym, arg_params, aux_params = mx.model.load_checkpoint('../recognition/mxnet/new_model', 0)
    # print(sym)


    # Take an intermediate layer's output (the fc1 feature layer) as the model output
    all_layers = sym.get_internals()
    # print(all_layers)
    sym = all_layers['fc1_output']

    # Rebuild the model
    model = mx.mod.Module(symbol=sym, label_names=None)
    model.bind(for_training=False, data_shapes=[('data', (1, 3, 112, 112))])
    model.set_params(arg_params, aux_params)

    time1 = time.time()

    time_load = time1 - time0
    # print("模型加载和重建时间:{0}".format(time1 - time0))

    Batch = namedtuple("Batch", ['data'])

    img1_path=r'E:\MNN\project\android-mnn_new\app\src\main\assets\0.jpg'

    array1 = single_input(img1_path)

    model.forward(Batch([array1]))
    vector1 = model.get_outputs()[0].asnumpy()
    vector1 = np.squeeze(vector1)

    print(vector1)
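    # Optional sketch: L2-normalize the embedding so that the cosine similarity
    # of two faces reduces to a plain dot product
    embedding = vector1 / np.linalg.norm(vector1)
    print(embedding.shape)  # typically (512,) for InsightFace recognition models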



 

vim test.py

import face_model
import argparse
import cv2
import sys
import numpy as np
parser = argparse.ArgumentParser(description='face model test')
# general
parser.add_argument('--image-size', default='112,112', help='')
# Important: the face recognition model; in my experience, use an absolute path
parser.add_argument('--model', default='/disk1t/insightface/insightface/models/model-r100-ii/model,0', help='path to load model.')
# Important: the gender/age model; in my experience, use an absolute path
parser.add_argument('--ga-model', default='/disk1t/insightface/insightface/models/gamodel-r50/model,0', help='path to load model.')
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
parser.add_argument('--det', default=0, type=int, help='mtcnn option, 1 means using R+O, 0 means detect from beginning')
parser.add_argument('--flip', default=0, type=int, help='whether do lr flip aug')
parser.add_argument('--threshold', default=1.24, type=float, help='ver dist threshold')
args = parser.parse_args()
print(args)
# Load the model
model = face_model.FaceModel(args)
# Read the image
img = cv2.imread('Tom_Hanks_54745.png')
# Preprocess the image for the model (face detection and alignment)
img = model.get_input(img)
# Extract the feature vector
f1 = model.get_feature(img)
# Print the feature
print(f1)
#print(f1[0:10])
#gender, age = model.get_ga(img)
#print(gender)
#print(age)
#sys.exit(0)
#img = cv2.imread('/raid5data/dplearn/megaface/facescrubr/112x112/Tom_Hanks/Tom_Hanks_54733.png')
#f2 = model.get_feature(img)
#dist = np.sum(np.square(f1-f2))
#print(dist)
#sim = np.dot(f1, f2.T)
#print(sim)
#diff = np.subtract(source_feature, target_feature)
#dist = np.sum(np.square(diff),1)
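To actually verify a pair, the commented-out lines above can be revived. A minimal sketch appended to test.py (the second image path is hypothetical; in the InsightFace deploy code, get_feature returns an L2-normalized embedding, so the dot product can be read as cosine similarity):

img2 = cv2.imread('second_face.png')
img2 = model.get_input(img2)
f2 = model.get_feature(img2)
dist = np.sum(np.square(f1 - f2))  # squared Euclidean distance, compared against args.threshold
sim = np.dot(f1, f2.T)             # cosine similarity of the normalized embeddings
print(dist, sim)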

 

Getting the output of a specified layer
Sometimes we don't need the network's final output, only the output of some intermediate layer, in order to use the network as a feature extractor for images. In that case we must specify the layer by name; here we take extracting the network's last fully connected layer as an example:

def get_specify_mod(model_str, ctx, data_shape, layer_name):
    _vec = model_str.split(",")
    prefix = _vec[0]
    epoch = int(_vec[1])
    sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
    # Get all layers of the network
    all_layers = sym.get_internals()
    # Select the desired layer's output
    sym = all_layers[layer_name + "_output"]
    mod = mx.mod.Module(symbol=sym, context=ctx)
    mod.bind(data_shapes=[("data", data_shape)])
    mod.set_params(arg_params, aux_params)
    return mod


def predict_specify(model_str, ctx, data_shape, img_path, label_path):
    label_names = get_label_names(label_path)
    # Select the layer by name; the fully connected output layer is named fc1
    mod = get_specify_mod(model_str, ctx, data_shape, layer_name="fc1")
    nd_img = preprocess_img(img_path, data_shape, ctx)
    # Wrap the image to be predicted in a DataBatch
    data_batch = mx.io.DataBatch(data=(nd_img,))
    # Run the forward pass
    mod.forward(data_batch, is_train=False)
    # Get the network's output
    output = mod.get_outputs()[0]
    # Apply softmax to the output
    proba = mx.nd.softmax(output)
    # Get the top-5 predictions
    top_proba = proba.topk(k=5)[0].asnumpy()
    for index in top_proba:
        probability = proba[0][int(index)].asscalar() * 100
        pred_label_name = label_names[int(index)]
        print("label name=%s, probability=%f" % (pred_label_name, probability))


 

6. At this point, face feature extraction is complete.
 

Accuracy verification on a face recognition dataset. Note that this script uses a single fixed threshold, which is not really suitable for measuring accuracy; a threshold-sweep sketch is given after the script.

#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import time
import math
import mxnet as mx
import cv2
import numpy as np
from collections import namedtuple



def str_expansion(sstring):
    """
    Zero-pad a numeric string to three digits.
    """
    ssize = len(sstring)
    if ssize == 1:
        sstring = "00" + sstring
    elif ssize == 2:
        sstring = "0" + sstring
    return sstring


def str_expansion_lfw(sstring):
    """
    :param sstring: numeric string to zero-pad to four digits (for the LFW dataset)
    """
    ssize = len(sstring)
    if ssize == 1:
        sstring = "000" + sstring
    elif ssize == 2:
        sstring = "00" + sstring
    elif ssize == 3:
        sstring = "0" + sstring
    return sstring
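# Note: for these numeric strings, both padding helpers are equivalent to the
# built-in zero fill, i.e. sstring.zfill(3) and sstring.zfill(4), respectively.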


def cos_similarity(x, y):
    length = len(x)

    x_square = 0
    y_square = 0
    xy_inner_product = 0

    for i in range(length):
        x_square += x[i] * x[i]
        y_square += y[i] * y[i]
        xy_inner_product += x[i] * y[i]
    return xy_inner_product / (math.sqrt(x_square) * math.sqrt(y_square))
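# Note: the loop above is equivalent to the vectorized NumPy form:
#   float(np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y)))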


def single_input(path):
    img = cv2.imread(path)
    # MXNet expects strict RGB channel order, while cv2.imread returns BGR by default, so convert
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (112, 112))
    img = img.transpose(2, 0, 1)

    # Add a fourth (batch) dimension and build an NDArray
    img = img[np.newaxis, :]
    array = mx.nd.array(img)
    return array


if __name__ == "__main__":
    time_start = time.time()

    # verification_folder = "E:/face_detection/verification/IRimg_verification"
    verification_folder = r"E:\project\faceid\MobileFaceNet_Tutorial_Pytorch\data_set\LFW\lfw_align_112"

    file = r"E:\project\faceid\MobileFaceNet_Tutorial_Pytorch\data_set\LFW\pairs.txt"

    pair_list = []

    with open(file, 'r') as f:
        pair_list = f.readlines()

    time0 = time.time()
    sym, arg_params, aux_params = mx.model.load_checkpoint("mxnet/zwnwet_model", 0)
    print(sym)
    # print(arg_params)
    # print(aux_params)

    # Take an intermediate layer's output (the fc1 feature layer) as the model output
    all_layers = sym.get_internals()
    print(all_layers)
    sym = all_layers['fc1_output']

    # Rebuild the model
    model = mx.mod.Module(symbol=sym, label_names=None)
    model.bind(for_training=False, data_shapes=[('data', (1, 3, 112, 112))])
    model.set_params(arg_params, aux_params)

    time1 = time.time()

    time_load = time1 - time0
    # print("模型加载和重建时间:{0}".format(time1 - time0))

    Batch = namedtuple("Batch", ['data'])

    threshold = 0.6
    TP = 0
    TN = 0
    NUM_IR = 3280 / 2
    NUM_lfw = 6000 / 2
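    # LFW pairs.txt lists 3000 same-person and 3000 different-person pairs,
    # hence NUM_lfw = 6000 / 2 = 3000 pairs per class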

    time_frame = 0

    for item in pair_list:
        line = item.rstrip().split("\t")
        print(line)

        # Verify image pairs belonging to the same person
        if len(line) == 3:
            time2 = time.time()
            folder = line[0]
            # img1 = str_expansion(str(int(line[1])-1)) + ".jpg"
            # img2 = str_expansion(str(int(line[2])-1)) + ".jpg"

            img1 = folder + '_' + str_expansion_lfw(line[1]) + ".jpg"
            img2 = folder + '_' + str_expansion_lfw(line[2]) + ".jpg"

            img1_path = os.path.join(verification_folder, folder, img1)
            img2_path = os.path.join(verification_folder, folder, img2)

            array1 = single_input(img1_path)
            array2 = single_input(img2_path)

            model.forward(Batch([array1]))
            vector1 = model.get_outputs()[0].asnumpy()
            vector1 = np.squeeze(vector1)

            model.forward(Batch([array2]))
            vector2 = model.get_outputs()[0].asnumpy()
            vector2 = np.squeeze(vector2)

            similarity = cos_similarity(vector1, vector2)
            time3 = time.time()
            time_frame = time3 - time2 + time_frame
            print(similarity, "\n")
            if similarity >= threshold:
                TP += 1

        # Verify image pairs belonging to different people
        if len(line) == 4:
            time4 = time.time()
            folder1 = line[0]
            # img1 = str_expansion(str(int(line[1])-1)) + ".jpg"
            img1 = folder1 + "_" + str_expansion_lfw(line[1]) + ".jpg"
            folder2 = line[2]
            # img2 = str_expansion(str(int(line[3])-1)) + ".jpg"
            img2 = folder2 + "_" + str_expansion_lfw(line[3]) + ".jpg"

            img1_path = os.path.join(verification_folder, folder1, img1)
            img2_path = os.path.join(verification_folder, folder2, img2)

            array1 = single_input(img1_path)
            array2 = single_input(img2_path)

            model.forward(Batch([array1]))
            vector1 = model.get_outputs()[0].asnumpy()
            vector1 = np.squeeze(vector1)

            model.forward(Batch([array2]))
            vector2 = model.get_outputs()[0].asnumpy()
            vector2 = np.squeeze(vector2)

            similarity = cos_similarity(vector1, vector2)
            time5 = time.time()
            time_frame = time5 - time4 + time_frame
            print(similarity, "\n")
            if similarity < threshold:
                TN += 1

    print("检真正确率:{0:.4f}".format(TP / NUM_lfw))
    print("拒假正确率:{0:.4f}".format(TN / NUM_lfw))
    print("模型加载时间: {0:.3f}s".format(time_load))
    print("检测一帧平均时间: {0:.3f}s".format(time_frame / (NUM_lfw * 2)))

    time_end = time.time()

    print("程序运行时间: {0:.2f}min".format((time_end - time_start) / 60))

 
