I read code the same way most people do: line by line, and whatever I do not understand I step through in the debugger, also line by line. That is really all there is to reading code.
The main training code lives in src/train.py. The training script itself is not very complicated, and the author has split it into well-organized blocks, which is worth learning from.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import json
import torch
import torch.utils.data
from torchvision.transforms import transforms as T
from opts import opts
from models.model import create_model, load_model, save_model
from models.data_parallel import DataParallel
from logger import Logger
from datasets.dataset_factory import get_dataset
from trains.train_factory import train_factory
def main(opt):
torch.manual_seed(opt.seed)
torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
print('Setting up data...')
# Get the dataset class for the configured dataset and task ('mot' is currently the only task)
# (you will often see me asking and answering my own questions in these comments)
Dataset = get_dataset(opt.dataset, opt.task)
# Open the dataset config JSON file, located at ../src/lib/cfg/data.json
f = open(opt.data_cfg)
data_config = json.load(f)
trainset_paths = data_config['train']
dataset_root = data_config['root']
f.close()
# Convert an H x W x C uint8 image in [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
transforms = T.Compose([T.ToTensor()])
dataset = Dataset(opt, dataset_root, trainset_paths, (1088, 608), augment=True, transforms=transforms)
opt = opts().update_dataset_info_and_set_heads(opt, dataset)
print(opt)
logger = Logger(opt)
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
print('Creating model...')
model = create_model(opt.arch, opt.heads, opt.head_conv)
optimizer = torch.optim.Adam(model.parameters(), opt.lr)
start_epoch = 0
# Get dataloader
# Note that pin_memory=True pins the host memory, which speeds up host-to-GPU data transfers
train_loader = torch.utils.data.DataLoader(
dataset,
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.num_workers,
pin_memory=True,
drop_last=True
)
print('Starting training...')
Trainer = train_factory[opt.task]
trainer = Trainer(opt, model, optimizer)
trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
if opt.load_model != '':
model, optimizer, start_epoch = load_model(
model, opt.load_model, trainer.optimizer, opt.resume, opt.lr, opt.lr_step)
best = 1e10
for epoch in range(start_epoch + 1, opt.num_epochs + 1):
mark = epoch if opt.save_all else 'last'
log_dict_train, _ = trainer.train(epoch, train_loader)
logger.write('epoch: {} |'.format(epoch))
for k, v in log_dict_train.items():
logger.scalar_summary('train_{}'.format(k), v, epoch)
logger.write('{} {:8f} | '.format(k, v))
if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
epoch, model, optimizer)
else:
save_model(os.path.join(opt.save_dir, 'model_last.pth'),
epoch, model, optimizer)
logger.write('\n')
if epoch in opt.lr_step:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
epoch, model, optimizer)
lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
print('Drop LR to', lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if epoch % 5 == 0:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
epoch, model, optimizer)
logger.close()
if __name__ == '__main__':
# os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1'
# os.environ['CUDA_VISIBLE_DEVICES'] = '2'
opt = opts().parse()
main(opt)
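One detail worth spelling out is the learning-rate schedule above: at every epoch listed in opt.lr_step the learning rate is divided by another factor of 10. A tiny standalone sketch of what the defaults (lr=1e-4, lr_step=[20, 27]) produce:

# Sketch of the step schedule used in main() with the default settings.
base_lr = 1e-4
lr_step = [20, 27]
for epoch in range(1, 31):
    if epoch in lr_step:
        lr = base_lr * (0.1 ** (lr_step.index(epoch) + 1))
        print('epoch {:2d}: drop LR to {:g}'.format(epoch, lr))
# epoch 20: drop LR to 1e-05
# epoch 27: drop LR to 1e-06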
The configuration options are defined in src/lib/opts.py.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
class opts(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
# basic experiment setting
self.parser.add_argument('task', default='mot', help='mot') # currently 'mot' is the only task
self.parser.add_argument('--dataset', default='jde', help='jde') # dataset format to use
self.parser.add_argument('--exp_id', default='default') # experiment id; determines where models are saved
self.parser.add_argument('--test', action='store_true') # defaults to False
# self.parser.add_argument('--load_model', default='../models/ctdet_coco_dla_2x.pth',
# help='path to pretrained model')
self.parser.add_argument('--load_model', default='',
help='path to pretrained model') # path of the pretrained model to load
self.parser.add_argument('--resume', action='store_true',
help='resume an experiment. '
'Reloaded the optimizer parameter and '
'set load_model to model_last.pth '
'in the exp dir if load_model is empty.')
# system
self.parser.add_argument('--gpus', default='0, 1',
help='-1 for CPU, use comma for multiple gpus')
self.parser.add_argument('--num_workers', type=int, default=8,
help='dataloader threads. 0 for single-thread.') # defaults to 8 data-loading workers
self.parser.add_argument('--not_cuda_benchmark', action='store_true',
help='disable when the input size is not fixed.')
self.parser.add_argument('--seed', type=int, default=317,
help='random seed') # from CornerNet
# log
self.parser.add_argument('--print_iter', type=int, default=0,
help='disable progress bar and print to screen.')
self.parser.add_argument('--hide_data_time', action='store_true',
help='not display time during training.')
self.parser.add_argument('--save_all', action='store_true',
help='save model to disk every 5 epochs.')
self.parser.add_argument('--metric', default='loss',
help='main metric to save best model')
self.parser.add_argument('--vis_thresh', type=float, default=0.5,
help='visualization threshold.')
# model
self.parser.add_argument('--arch', default='dla_34',
help='model architecture. Currently tested'
'resdcn_34 | resdcn_50 | resfpndcn_34 |'
'dla_34 | hrnet_32') # choose the backbone architecture
self.parser.add_argument('--head_conv', type=int, default=-1,
help='conv layer channels for output head'
'0 for no conv layer'
'-1 for default setting: '
'256 for resnets and 256 for dla.')
self.parser.add_argument('--down_ratio', type=int, default=4,
help='output stride. Currently only supports 4.') # downsampling ratio
# input
self.parser.add_argument('--input_res', type=int, default=-1,
help='input height and width. -1 for default from '
'dataset. Will be overriden by input_h | input_w')
self.parser.add_argument('--input_h', type=int, default=-1,
help='input height. -1 for default from dataset.')
self.parser.add_argument('--input_w', type=int, default=-1,
help='input width. -1 for default from dataset.')
# train
self.parser.add_argument('--lr', type=float, default=1e-4,
help='learning rate for batch size 32.')
self.parser.add_argument('--lr_step', type=str, default='20,27',
help='drop learning rate by 10.')
self.parser.add_argument('--num_epochs', type=int, default=30,
help='total training epochs.')
self.parser.add_argument('--batch_size', type=int, default=12,
help='batch size')
self.parser.add_argument('--master_batch_size', type=int, default=-1,
help='batch size on the master gpu.')
self.parser.add_argument('--num_iters', type=int, default=-1,
help='default: #samples / batch_size.')
self.parser.add_argument('--val_intervals', type=int, default=5,
help='number of epochs to run validation.')
self.parser.add_argument('--trainval', action='store_true',
help='include validation in training and '
'test on test set')
# test
self.parser.add_argument('--K', type=int, default=128,
help='max number of output objects.')
self.parser.add_argument('--not_prefetch_test', action='store_true',
help='not use parallal data pre-processing.')
self.parser.add_argument('--fix_res', action='store_true',
help='fix testing resolution or keep '
'the original resolution')
self.parser.add_argument('--keep_res', action='store_true',
help='keep the original resolution'
' during validation.')
# tracking
self.parser.add_argument('--test_mot16', default=False, help='test mot16')
self.parser.add_argument('--val_mot15', default=False, help='val mot15')
self.parser.add_argument('--test_mot15', default=False, help='test mot15')
self.parser.add_argument('--val_mot16', default=False, help='val mot16 or mot15')
self.parser.add_argument('--test_mot17', default=False, help='test mot17')
self.parser.add_argument('--val_mot17', default=False, help='val mot17')
self.parser.add_argument('--val_mot20', default=False, help='val mot20')
self.parser.add_argument('--test_mot20', default=False, help='test mot20')
self.parser.add_argument('--conf_thres', type=float, default=0.6, help='confidence thresh for tracking')
self.parser.add_argument('--det_thres', type=float, default=0.3, help='confidence thresh for detection')
self.parser.add_argument('--nms_thres', type=float, default=0.4, help='iou thresh for nms')
self.parser.add_argument('--track_buffer', type=int, default=30, help='tracking buffer')
self.parser.add_argument('--min-box-area', type=float, default=200, help='filter out tiny boxes')
self.parser.add_argument('--input-video', type=str, default='../videos/MOT16-03.mp4',
help='path to the input video')
self.parser.add_argument('--output-format', type=str, default='video', help='video or text')
self.parser.add_argument('--output-root', type=str, default='../results', help='expected output root path')
# mot
self.parser.add_argument('--data_cfg', type=str,
default='../src/lib/cfg/data.json',
help='load data from cfg')
self.parser.add_argument('--data_dir', type=str, default='./data')
# loss
self.parser.add_argument('--mse_loss', action='store_true',
help='use mse loss or focal loss to train '
'keypoint heatmaps.')
self.parser.add_argument('--reg_loss', default='l1',
help='regression loss: sl1 | l1 | l2')
self.parser.add_argument('--hm_weight', type=float, default=1,
help='loss weight for keypoint heatmaps.')
self.parser.add_argument('--off_weight', type=float, default=1,
help='loss weight for keypoint local offsets.')
self.parser.add_argument('--wh_weight', type=float, default=0.1,
help='loss weight for bounding box size.')
self.parser.add_argument('--id_loss', default='ce',
help='reid loss: ce | triplet')
self.parser.add_argument('--id_weight', type=float, default=1,
help='loss weight for id')
self.parser.add_argument('--reid_dim', type=int, default=512,
help='feature dim for reid')
self.parser.add_argument('--norm_wh', action='store_true',
help='L1(\hat(y) / y, 1) or L1(\hat(y), y)')
self.parser.add_argument('--dense_wh', action='store_true',
help='apply weighted regression near center or '
'just apply regression on center point.')
self.parser.add_argument('--cat_spec_wh', action='store_true',
help='category specific bounding box size.')
self.parser.add_argument('--not_reg_offset', action='store_true',
help='not regress local offset.')
def parse(self, args=''):
if args == '':
opt = self.parser.parse_args()
else:
opt = self.parser.parse_args(args)
opt.gpus_str = opt.gpus
opt.gpus = [int(gpu) for gpu in opt.gpus.split(',')]
opt.gpus = [i for i in range(len(opt.gpus))] if opt.gpus[0] >= 0 else [-1]
opt.lr_step = [int(i) for i in opt.lr_step.split(',')]
# What does keep_res do? If it is not set, fix_res is True and testing uses a fixed input size; keep_res keeps the original resolution
opt.fix_res = not opt.keep_res
print('Fix size testing.' if opt.fix_res else 'Keep resolution testing.')
opt.reg_offset = not opt.not_reg_offset
if opt.head_conv == -1: # init default head_conv
opt.head_conv = 256 if 'dla' in opt.arch else 256 # i.e. always 256
# Why pad = 31? The input is padded up so the strides divide evenly while the aspect ratio of people is preserved
opt.pad = 31
# num_stacks is the number of intermediate outputs (2 for hourglass backbones; always 1 here)
opt.num_stacks = 1
if opt.trainval:
opt.val_intervals = 100000000
# With multiple GPUs, the batch has to be split into per-GPU chunks
if opt.master_batch_size == -1:
opt.master_batch_size = opt.batch_size // len(opt.gpus)
rest_batch_size = (opt.batch_size - opt.master_batch_size)
opt.chunk_sizes = [opt.master_batch_size]
for i in range(len(opt.gpus) - 1):
slave_chunk_size = rest_batch_size // (len(opt.gpus) - 1)
if i < rest_batch_size % (len(opt.gpus) - 1):
slave_chunk_size += 1
opt.chunk_sizes.append(slave_chunk_size)
print('training chunk_sizes:', opt.chunk_sizes)
# Output directories: where models, logs and debug images are saved
opt.root_dir = os.path.join(os.path.dirname(__file__), '..', '..')
opt.exp_dir = os.path.join(opt.root_dir, 'exp', opt.task)
opt.save_dir = os.path.join(opt.exp_dir, opt.exp_id)
opt.debug_dir = os.path.join(opt.save_dir, 'debug')
print('The output will be saved to ', opt.save_dir)
# When resuming, default load_model to model_last.pth in the experiment directory
if opt.resume and opt.load_model == '':
model_path = opt.save_dir[:-4] if opt.save_dir.endswith('TEST') \
else opt.save_dir
opt.load_model = os.path.join(model_path, 'model_last.pth')
return opt
# Set up the output heads attached to the backbone, e.g. heads = {'hm': 1, 'wh': 2, 'reg': 2, 'id': 512}
def update_dataset_info_and_set_heads(self, opt, dataset):
input_h, input_w = dataset.default_resolution
opt.mean, opt.std = dataset.mean, dataset.std
opt.num_classes = dataset.num_classes
# input_h(w): opt.input_h overrides opt.input_res overrides dataset default
input_h = opt.input_res if opt.input_res > 0 else input_h
input_w = opt.input_res if opt.input_res > 0 else input_w
opt.input_h = opt.input_h if opt.input_h > 0 else input_h
opt.input_w = opt.input_w if opt.input_w > 0 else input_w
opt.output_h = opt.input_h // opt.down_ratio
opt.output_w = opt.input_w // opt.down_ratio
opt.input_res = max(opt.input_h, opt.input_w)
opt.output_res = max(opt.output_h, opt.output_w)
if opt.task == 'mot':
opt.heads = {'hm': opt.num_classes,
'wh': 2 if not opt.cat_spec_wh else 2 * opt.num_classes,
'id': opt.reid_dim}
if opt.reg_offset:
opt.heads.update({'reg': 2})
opt.nID = dataset.nID
opt.img_size = (1088, 608)
else:
assert 0, 'task not defined!'
print('heads', opt.heads)
return opt
def init(self, args=''):
default_dataset_info = {
'mot': {'default_resolution': [608, 1088], 'num_classes': 1,
'mean': [0.408, 0.447, 0.470], 'std': [0.289, 0.274, 0.278],
'dataset': 'jde', 'nID': 14455},
}
class Struct:
def __init__(self, entries):
for k, v in entries.items():
self.__setattr__(k, v)
opt = self.parse(args)
dataset = Struct(default_dataset_info[opt.task])
opt.dataset = dataset.dataset
opt = self.update_dataset_info_and_set_heads(opt, dataset)
return opt
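To make the chunk_sizes logic in parse() concrete, here is a small standalone re-run of the same arithmetic for the default batch_size=12 on two GPUs:

# Standalone sketch of the chunk_sizes computation in opts.parse().
batch_size = 12
gpus = [0, 1]
master_batch_size = batch_size // len(gpus)        # 6
rest_batch_size = batch_size - master_batch_size   # 6
chunk_sizes = [master_batch_size]
for i in range(len(gpus) - 1):
    slave_chunk_size = rest_batch_size // (len(gpus) - 1)
    if i < rest_batch_size % (len(gpus) - 1):
        slave_chunk_size += 1
    chunk_sizes.append(slave_chunk_size)
print(chunk_sizes)   # [6, 6], i.e. 6 samples per GPU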
The dataset is implemented mainly in the JointDataset class in src/lib/datasets/dataset/jde.py.
class JointDataset(LoadImagesAndLabels): # for training
default_resolution = [1088, 608]
mean = None
std = None
num_classes = 1
def __init__(self, opt, root, paths, img_size=(1088, 608), augment=False, transforms=None):
self.opt = opt
dataset_names = paths.keys()
self.img_files = OrderedDict()
self.label_files = OrderedDict()
self.tid_num = OrderedDict()
self.tid_start_index = OrderedDict()
self.num_classes = 1
for ds, path in paths.items():
with open(path, 'r') as file:
self.img_files[ds] = file.readlines()
self.img_files[ds] = [osp.join(root, x.strip()) for x in self.img_files[ds]]
self.img_files[ds] = list(filter(lambda x: len(x) > 0, self.img_files[ds]))
self.label_files[ds] = [
x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
for x in self.img_files[ds]]
for ds, label_paths in self.label_files.items():
max_index = -1
for lp in label_paths:
lb = np.loadtxt(lp)
if len(lb) < 1:
continue
if len(lb.shape) < 2:
img_max = lb[1]
else:
img_max = np.max(lb[:, 1])
if img_max > max_index:
max_index = img_max
self.tid_num[ds] = max_index + 1
last_index = 0
for i, (k, v) in enumerate(self.tid_num.items()):
self.tid_start_index[k] = last_index
last_index += v
self.nID = int(last_index + 1)
self.nds = [len(x) for x in self.img_files.values()]
self.cds = [sum(self.nds[:i]) for i in range(len(self.nds))]
self.nF = sum(self.nds)
self.width = img_size[0]
self.height = img_size[1]
# max_objs = opt.K = 128: at most 128 targets are handled per image; it is not a feature dimension
self.max_objs = opt.K
self.augment = augment
self.transforms = transforms
print('=' * 80)
print('dataset summary')
print(self.tid_num)
print('total # identities:', self.nID)
print('start index')
print(self.tid_start_index)
print('=' * 80)
def __getitem__(self, files_index):
for i, c in enumerate(self.cds):
if files_index >= c:
ds = list(self.label_files.keys())[i]
start_index = c
img_path = self.img_files[ds][files_index - start_index]
label_path = self.label_files[ds][files_index - start_index]
imgs, labels, img_path, (input_h, input_w) = self.get_data(img_path, label_path)
for i, _ in enumerate(labels):
if labels[i, 1] > -1:
labels[i, 1] += self.tid_start_index[ds]
output_h = imgs.shape[1] // self.opt.down_ratio
output_w = imgs.shape[2] // self.opt.down_ratio
num_classes = self.num_classes
num_objs = labels.shape[0]
hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
ind = np.zeros((self.max_objs,), dtype=np.int64)
reg_mask = np.zeros((self.max_objs,), dtype=np.uint8)
ids = np.zeros((self.max_objs,), dtype=np.int64)
# draw_msra_gaussian vs draw_umich_gaussian: two slightly different ways of drawing the Gaussian peak, used with MSE loss and focal loss respectively
draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else draw_umich_gaussian
for k in range(num_objs):
label = labels[k]
bbox = label[2:]
cls_id = int(label[0])
bbox[[0, 2]] = bbox[[0, 2]] * output_w
bbox[[1, 3]] = bbox[[1, 3]] * output_h
bbox[0] = np.clip(bbox[0], 0, output_w - 1)
bbox[1] = np.clip(bbox[1], 0, output_h - 1)
h = bbox[3]
w = bbox[2]
if h > 0 and w > 0:
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
radius = self.opt.hm_gauss if self.opt.mse_loss else radius
ct = np.array(
[bbox[0], bbox[1]], dtype=np.float32)
ct_int = ct.astype(np.int32)
draw_gaussian(hm[cls_id], ct_int, radius)
wh[k] = 1. * w, 1. * h
ind[k] = ct_int[1] * output_w + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1
ids[k] = label[1]
ret = {'input': imgs, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh, 'reg': reg, 'ids': ids}
return ret
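The heatmap target built in __getitem__ places a Gaussian peak at every object center. Below is a simplified sketch of that idea; it is not the exact draw_umich_gaussian implementation (which also clips the kernel at the image border), just the core of it:

import numpy as np

def draw_gaussian_simple(heatmap, center, radius):
    # Place a (2*radius+1) x (2*radius+1) Gaussian peak at `center` (x, y),
    # keeping the element-wise maximum with whatever is already on the heatmap.
    diameter = 2 * radius + 1
    sigma = diameter / 6.0
    y, x = np.ogrid[-radius:radius + 1, -radius:radius + 1]
    gaussian = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
    cx, cy = center
    region = heatmap[cy - radius:cy + radius + 1, cx - radius:cx + radius + 1]
    np.maximum(region, gaussian, out=region)   # simplified: assumes the kernel fits inside
    return heatmap

hm = np.zeros((152, 272), dtype=np.float32)    # output_h x output_w for a 608 x 1088 input
draw_gaussian_simple(hm, center=(100, 80), radius=4)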
Initialization mainly merges the six training datasets into one. Because re-ID is later trained as a classification problem, it also has to count how many identities there are in total:
================================================================================
dataset summary
OrderedDict([('caltech', 1043.0), ('prw', 933.0), ('mot17', 547.0), ('eth', 0), ('citypersons', 0), ('cuhksysu', 11931.0)])
total # identities: 14455
start index
OrderedDict([('caltech', 0), ('prw', 1043.0), ('mot17', 1976.0), ('eth', 2523.0), ('citypersons', 2523.0), ('cuhksysu', 2523.0)])
================================================================================
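This is also why __getitem__ adds tid_start_index[ds] to every label's track id: each dataset numbers its identities from 0, and the offsets above shift them into one global id space. A quick illustration using the start indices printed above:

# Local per-dataset track ids become global ids by adding the dataset's offset.
tid_start_index = {'caltech': 0, 'prw': 1043, 'mot17': 1976, 'cuhksysu': 2523}
local_id = 10                                   # the 11th identity inside 'prw'
global_id = local_id + tid_start_index['prw']
print(global_id)                                # 1053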
Model construction lives in src/lib/models/model.py. Any architecture with 'dcn' in its name uses deformable convolution. If you want to deploy the model (for example by converting it to a caffemodel), the DCN layers have to be replaced with ordinary convolutions, which of course costs some accuracy.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from .networks.dlav0 import get_pose_net as get_dlav0
from .networks.pose_dla_dcn import get_pose_net as get_dla_dcn
from .networks.pose_hrnet import get_pose_net as get_pose_net_hrnet
from .networks.resnet_dcn import get_pose_net as get_pose_net_dcn
from .networks.resnet_fpn_dcn import get_pose_net as get_pose_net_fpn_dcn
_model_factory = {
'dlav0': get_dlav0, # default DLAup
'dla': get_dla_dcn,
'resdcn': get_pose_net_dcn,
'resfpndcn': get_pose_net_fpn_dcn,
'hrnet': get_pose_net_hrnet
}
def create_model(arch, heads, head_conv):
num_layers = int(arch[arch.find('_') + 1:]) if '_' in arch else 0
arch = arch[:arch.find('_')] if '_' in arch else arch
get_model = _model_factory[arch]
model = get_model(num_layers=num_layers, heads=heads, head_conv=head_conv)
return model
def load_model(model, model_path, optimizer=None, resume=False,
lr=None, lr_step=None):
start_epoch = 0
checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
print('loaded {}, epoch {}'.format(model_path, checkpoint['epoch']))
state_dict_ = checkpoint['state_dict']
state_dict = {}
# convert DataParallel ('module.'-prefixed) keys to plain model keys
for k in state_dict_:
if k.startswith('module') and not k.startswith('module_list'):
state_dict[k[7:]] = state_dict_[k]
else:
state_dict[k] = state_dict_[k]
model_state_dict = model.state_dict()
# check loaded parameters and created model parameters
msg = 'If you see this, your model does not fully load the ' + \
'pre-trained weight. Please make sure ' + \
'you have correctly specified --arch xxx ' + \
'or set the correct --num_classes for your own dataset.'
for k in state_dict:
if k in model_state_dict:
if state_dict[k].shape != model_state_dict[k].shape:
print('Skip loading parameter {}, required shape{}, ' \
'loaded shape{}. {}'.format(
k, model_state_dict[k].shape, state_dict[k].shape, msg))
state_dict[k] = model_state_dict[k]
else:
print('Drop parameter {}.'.format(k) + msg)
for k in model_state_dict:
if not (k in state_dict):
print('No param {}.'.format(k) + msg)
state_dict[k] = model_state_dict[k]
model.load_state_dict(state_dict, strict=False)
# resume optimizer parameters
if optimizer is not None and resume:
if 'optimizer' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch']
start_lr = lr
for step in lr_step:
if start_epoch >= step:
start_lr *= 0.1
for param_group in optimizer.param_groups:
param_group['lr'] = start_lr
print('Resumed optimizer with start lr', start_lr)
else:
print('No optimizer parameters in checkpoint.')
if optimizer is not None:
return model, optimizer, start_epoch
else:
return model
def save_model(path, epoch, model, optimizer=None):
if isinstance(model, torch.nn.DataParallel):
state_dict = model.module.state_dict()
else:
state_dict = model.state_dict()
data = {'epoch': epoch,
'state_dict': state_dict}
if not (optimizer is None):
data['optimizer'] = optimizer.state_dict()
torch.save(data, path)
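As a quick illustration of how these pieces fit together, here is roughly what building the default DLA-34 model looks like; a sketch, assuming the repo's src/lib is on the Python path, and using the heads dict produced by update_dataset_info_and_set_heads:

from models.model import create_model, load_model

heads = {'hm': 1, 'wh': 2, 'reg': 2, 'id': 512}   # from update_dataset_info_and_set_heads
model = create_model('dla_34', heads, head_conv=256)
# optionally warm-start from a pretrained checkpoint, e.g. the CenterNet weights:
# model = load_model(model, '../models/ctdet_coco_dla_2x.pth')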
The optimizer, created back in train.py, is Adam: optimizer = torch.optim.Adam(model.parameters(), opt.lr).
The trainer lives in src/lib/trains/mot.py. The MotTrainer class inherits from BaseTrainer, which is defined in src/lib/trains/base_trainer.py.
base_trainer.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import torch
from progress.bar import Bar
from models.data_parallel import DataParallel
from utils.utils import AverageMeter
class ModleWithLoss(torch.nn.Module):
def __init__(self, model, loss):
super(ModleWithLoss, self).__init__()
self.model = model
self.loss = loss
def forward(self, batch):
outputs = self.model(batch['input'])
loss, loss_stats = self.loss(outputs, batch)
return outputs[-1], loss, loss_stats
class BaseTrainer(object):
def __init__(
self, opt, model, optimizer=None):
self.opt = opt
self.optimizer = optimizer
# Get the loss names to log and the loss module for this task
self.loss_stats, self.loss = self._get_losses(opt)
self.model_with_loss = ModleWithLoss(model, self.loss)
self.optimizer.add_param_group({'params': self.loss.parameters()})
def set_device(self, gpus, chunk_sizes, device):
if len(gpus) > 1:
self.model_with_loss = DataParallel(
self.model_with_loss, device_ids=gpus,
chunk_sizes=chunk_sizes).to(device)
else:
self.model_with_loss = self.model_with_loss.to(device)
for state in self.optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.to(device=device, non_blocking=True)
def run_epoch(self, phase, epoch, data_loader):
model_with_loss = self.model_with_loss
if phase == 'train':
model_with_loss.train()
else:
if len(self.opt.gpus) > 1:
model_with_loss = self.model_with_loss.module
model_with_loss.eval()
torch.cuda.empty_cache()
opt = self.opt
results = {}
data_time, batch_time = AverageMeter(), AverageMeter()
avg_loss_stats = {l: AverageMeter() for l in self.loss_stats}
num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
bar = Bar('{}/{}'.format(opt.task, opt.exp_id), max=num_iters)
end = time.time()
for iter_id, batch in enumerate(data_loader):
if iter_id >= num_iters:
break
data_time.update(time.time() - end)
for k in batch:
if k != 'meta':
batch[k] = batch[k].to(device=opt.device, non_blocking=True)
output, loss, loss_stats = model_with_loss(batch)
loss = loss.mean()
if phase == 'train':
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
Bar.suffix = '{phase}: [{0}][{1}/{2}]|Tot: {total:} |ETA: {eta:} '.format(
epoch, iter_id, num_iters, phase=phase,
total=bar.elapsed_td, eta=bar.eta_td)
for l in avg_loss_stats:
avg_loss_stats[l].update(
loss_stats[l].mean().item(), batch['input'].size(0))
Bar.suffix = Bar.suffix + '|{} {:.4f} '.format(l, avg_loss_stats[l].avg)
if not opt.hide_data_time:
Bar.suffix = Bar.suffix + '|Data {dt.val:.3f}s({dt.avg:.3f}s) ' \
'|Net {bt.avg:.3f}s'.format(dt=data_time, bt=batch_time)
if opt.print_iter > 0:
if iter_id % opt.print_iter == 0:
print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
else:
bar.next()
if opt.test:
self.save_result(output, batch, results)
del output, loss, loss_stats, batch
bar.finish()
ret = {k: v.avg for k, v in avg_loss_stats.items()}
ret['time'] = bar.elapsed_td.total_seconds() / 60.
return ret, results
def debug(self, batch, output, iter_id):
raise NotImplementedError
def save_result(self, output, batch, results):
raise NotImplementedError
def _get_losses(self, opt):
raise NotImplementedError
def val(self, epoch, data_loader):
return self.run_epoch('val', epoch, data_loader)
def train(self, epoch, data_loader):
return self.run_epoch('train', epoch, data_loader)
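One design point worth noting: the model and the loss are wrapped together in ModleWithLoss, so under DataParallel the loss is computed on each GPU and only small scalars need to be gathered back, rather than full-resolution output feature maps. A minimal self-contained sketch of the same pattern (with a dummy model and loss, not the FairMOT ones):

import torch
import torch.nn as nn

class ModelWithLoss(nn.Module):
    # Same pattern as ModleWithLoss above: forward() returns the output and the loss,
    # so nn.DataParallel would compute the loss on each replica and only gather scalars.
    def __init__(self, model, loss_fn):
        super().__init__()
        self.model = model
        self.loss_fn = loss_fn

    def forward(self, batch):
        out = self.model(batch['input'])
        loss = self.loss_fn(out, batch['target'])
        return out, loss

model_with_loss = ModelWithLoss(nn.Linear(8, 2), nn.MSELoss())
batch = {'input': torch.randn(4, 8), 'target': torch.randn(4, 2)}
out, loss = model_with_loss(batch)
loss.mean().backward()   # .mean() also covers the per-GPU losses, as in run_epoch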
In mot.py:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.decode import mot_decode
from models.losses import FocalLoss
from models.losses import RegL1Loss, RegLoss, NormRegL1Loss, RegWeightedL1Loss
from models.utils import _sigmoid, _tranpose_and_gather_feat
from utils.post_process import ctdet_post_process
from .base_trainer import BaseTrainer
class MotTrainer(BaseTrainer):
def __init__(self, opt, model, optimizer=None):
super(MotTrainer, self).__init__(opt, model, optimizer=optimizer)
def _get_losses(self, opt):
loss_states = ['loss', 'hm_loss', 'wh_loss', 'off_loss', 'id_loss']
loss = MotLoss(opt)
return loss_states, loss
def save_result(self, output, batch, results):
reg = output['reg'] if self.opt.reg_offset else None
dets = mot_decode(
output['hm'], output['wh'], reg=reg,
cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
dets_out = ctdet_post_process(
dets.copy(), batch['meta']['c'].cpu().numpy(),
batch['meta']['s'].cpu().numpy(),
output['hm'].shape[2], output['hm'].shape[3], output['hm'].shape[1])
results[batch['meta']['img_id'].cpu().numpy()[0]] = dets_out[0]
class MotLoss(torch.nn.Module):
def __init__(self, opt):
super(MotLoss, self).__init__()
self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss() # crit: FocalLoss by default
self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
RegLoss() if opt.reg_loss == 'sl1' else None # crit_reg: RegL1Loss
self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
NormRegL1Loss() if opt.norm_wh else \
RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg # crit_wh: RegL1Loss by default
self.opt = opt
self.emb_dim = opt.reid_dim # 512
self.nID = opt.nID
self.classifier = nn.Linear(self.emb_dim, self.nID)
self.IDLoss = nn.CrossEntropyLoss(ignore_index=-1)
# self.TriLoss = TripletLoss()
# emb_scale, s_det, s_id: s_det and s_id are learnable parameters that weight the detection and re-ID losses (uncertainty weighting); emb_scale rescales the normalized embeddings before classification
self.emb_scale = math.sqrt(2) * math.log(self.nID - 1)
self.s_det = nn.Parameter(-1.85 * torch.ones(1))
self.s_id = nn.Parameter(-1.05 * torch.ones(1))
def forward(self, outputs, batch):
opt = self.opt
hm_loss, wh_loss, off_loss, id_loss = 0, 0, 0, 0
for s in range(opt.num_stacks):
output = outputs[s]
if not opt.mse_loss:
output['hm'] = _sigmoid(output['hm'])
hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
if opt.wh_weight > 0:
if opt.dense_wh:
mask_weight = batch['dense_wh_mask'].sum() + 1e-4
wh_loss += (
self.crit_wh(output['wh'] * batch['dense_wh_mask'],
batch['dense_wh'] * batch['dense_wh_mask']) /
mask_weight) / opt.num_stacks
else:
wh_loss += self.crit_reg(
output['wh'], batch['reg_mask'],
batch['ind'], batch['wh']) / opt.num_stacks
if opt.reg_offset and opt.off_weight > 0:
off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
batch['ind'], batch['reg']) / opt.num_stacks
if opt.id_weight > 0:
id_head = _tranpose_and_gather_feat(output['id'], batch['ind'])
id_head = id_head[batch['reg_mask'] > 0].contiguous()
id_head = self.emb_scale * F.normalize(id_head)
id_target = batch['ids'][batch['reg_mask'] > 0]
id_output = self.classifier(id_head).contiguous()
id_loss += self.IDLoss(id_output, id_target)
# id_loss += self.IDLoss(id_output, id_target) + self.TriLoss(id_head, id_target)
# loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + opt.off_weight * off_loss + opt.id_weight * id_loss
det_loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + opt.off_weight * off_loss
loss = torch.exp(-self.s_det) * det_loss + torch.exp(-self.s_id) * id_loss + (self.s_det + self.s_id)
loss *= 0.5
# print(loss, hm_loss, wh_loss, off_loss, id_loss)
loss_stats = {'loss': loss, 'hm_loss': hm_loss,
'wh_loss': wh_loss, 'off_loss': off_loss, 'id_loss': id_loss}
return loss, loss_stats
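To spell out what s_det and s_id are doing: the total loss computed above is the uncertainty-weighted combination

L_{total} = \frac{1}{2}\left(e^{-s_{det}} L_{det} + e^{-s_{id}} L_{id} + s_{det} + s_{id}\right)

where s_det and s_id are learnable parameters (initialized to -1.85 and -1.05). The e^{-s} factors act as learned task weights between detection and re-ID, and the trailing s_det + s_id term is the regularizer that keeps the network from silencing a task by simply driving its s to a large value. This is the homoscedastic-uncertainty loss balancing popularized by Kendall et al. for multi-task learning.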
Back in my first year of graduate school, a friend's advisor wanted to assign him multi-object tracking as a research topic. At that time deep learning had not yet made much of a dent in tracking; the field was still dominated by traditional methods such as KCF, which demand a fairly solid mathematical background, so in the end he stuck with single-object tracking. Looking back now, the deep-learning route is quite a bit easier to get into.
With detection both fast and accurate enough, tracking naturally benefits as well.
The tracking entry point is src/track.py, part of which I have modified.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import os.path as osp
import cv2
import logging
import argparse
import motmetrics as mm
import numpy as np
import torch
import glob
from tracker.multitracker import JDETracker
from tracking_utils import visualization as vis
from tracking_utils.log import logger
from tracking_utils.timer import Timer
from tracking_utils.evaluation import Evaluator
import datasets.dataset.jde as datasets
from tracking_utils.utils import mkdir_if_missing
from opts import opts
def write_results(filename, results, data_type):
if data_type == 'mot':
save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
else:
raise ValueError(data_type)
with open(filename, 'w') as f:
for frame_id, tlwhs, track_ids in results:
if data_type == 'kitti':
frame_id -= 1
for tlwh, track_id in zip(tlwhs, track_ids):
if track_id < 0:
continue
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)
f.write(line)
logger.info('save results to {}'.format(filename))
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):
if save_dir:
mkdir_if_missing(save_dir)
# Initialize the tracker
tracker = JDETracker(opt, frame_rate=frame_rate)
timer = Timer()
results = []
frame_id = 0
for path, img, img0 in dataloader:
if frame_id % 20 == 0:
logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
# run tracking
timer.tic()
blob = torch.from_numpy(img).cuda().unsqueeze(0)
# The most important call is tracker.update(), which contains the actual tracking logic
online_targets = tracker.update(blob, img0)
online_tlwhs = []
online_ids = []
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
vertical = tlwh[2] / tlwh[3] > 1.6
if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
online_tlwhs.append(tlwh)
online_ids.append(tid)
timer.toc()
# save results
results.append((frame_id + 1, online_tlwhs, online_ids))
if show_image or save_dir is not None:
online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
fps=1. / timer.average_time)
if show_image:
cv2.imshow('online_im', online_im)
if save_dir is not None:
cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
frame_id += 1
# save results
write_results(result_filename, results, data_type)
return frame_id, timer.average_time, timer.calls
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
save_images=False, save_videos=False, show_image=True):
logger.setLevel(logging.INFO)
result_root = os.path.join(data_root, '..', 'results', exp_name)
mkdir_if_missing(result_root)
data_type = 'mot'
# run tracking
accs = []
n_frame = 0
timer_avgs, timer_calls = [], []
for seq in seqs:
output_dir = os.path.join(data_root, '..', 'outputs', exp_name, seq) if save_images or save_videos else None
logger.info('start seq: {}'.format(seq))
dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
result_filename = os.path.join(result_root, '{}.txt'.format(seq))
meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
frame_rate = int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\nseqLength')])
nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
n_frame += nf
timer_avgs.append(ta)
timer_calls.append(tc)
# eval
logger.info('Evaluate seq: {}'.format(seq))
evaluator = Evaluator(data_root, seq, data_type)
accs.append(evaluator.eval_file(result_filename))
# if save_videos:
# output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
# cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
# os.system(cmd_str)
if save_videos:
imgs = sorted(glob.glob(osp.join(output_dir, '*.jpg')))
print(imgs[0])
test_img = cv2.imread(imgs[0])
print(test_img.shape)
h, w, c = test_img.shape
output_video_path = osp.join(output_dir, '{}.avi'.format(seq))
fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
video = cv2.VideoWriter(output_video_path, fourcc, frame_rate, (w, h))
for img in imgs:
buff = cv2.imread(img)
video.write(buff)
video.release()
print("generate video done!")
timer_avgs = np.asarray(timer_avgs)
timer_calls = np.asarray(timer_calls)
all_time = np.dot(timer_avgs, timer_calls)
avg_time = all_time / np.sum(timer_calls)
logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))
# get summary
metrics = mm.metrics.motchallenge_metrics
mh = mm.metrics.create()
summary = Evaluator.get_summary(accs, seqs, metrics)
strsummary = mm.io.render_summary(
summary,
formatters=mh.formatters,
namemap=mm.io.motchallenge_metric_names
)
print(strsummary)
Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
opt = opts().init()
if not opt.val_mot16:
seqs_str = '''KITTI-13
KITTI-17
ADL-Rundle-6
PETS09-S2L1
TUD-Campus
TUD-Stadtmitte'''
data_root = os.path.join(opt.data_dir, 'MOT15/images/train')
else:
seqs_str = '''MOT16-02
MOT16-04
MOT16-05
MOT16-09
MOT16-10
MOT16-11
MOT16-13'''
data_root = os.path.join(opt.data_dir, 'MOT16/train')
if opt.test_mot16:
seqs_str = '''MOT16-01
MOT16-03
MOT16-06
MOT16-07
MOT16-08
MOT16-12
MOT16-14'''
data_root = os.path.join(opt.data_dir, 'MOT16/test')
if opt.test_mot15:
seqs_str = '''ADL-Rundle-1
ADL-Rundle-3
AVG-TownCentre
ETH-Crossing
ETH-Jelmoli
ETH-Linthescher
KITTI-16
KITTI-19
PETS09-S2L2
TUD-Crossing
Venice-1'''
data_root = os.path.join(opt.data_dir, 'MOT15/images/test')
if opt.test_mot17:
seqs_str = '''MOT17-01-SDP
MOT17-03-SDP
MOT17-06-SDP
MOT17-07-SDP
MOT17-08-SDP
MOT17-12-SDP
MOT17-14-SDP'''
data_root = os.path.join(opt.data_dir, 'MOT17/images/test')
if opt.val_mot17:
seqs_str = '''MOT17-02-SDP
MOT17-04-SDP
MOT17-05-SDP
MOT17-09-SDP
MOT17-10-SDP
MOT17-11-SDP
MOT17-13-SDP'''
data_root = os.path.join(opt.data_dir, 'MOT17/images/train')
if opt.val_mot15:
seqs_str = '''KITTI-13
KITTI-17
ETH-Bahnhof
ETH-Sunnyday
PETS09-S2L1
TUD-Campus
TUD-Stadtmitte
ADL-Rundle-6
ADL-Rundle-8
ETH-Pedcross2
TUD-Stadtmitte'''
data_root = os.path.join(opt.data_dir, 'MOT15/images/train')
if opt.val_mot20:
seqs_str = '''MOT20-01
MOT20-02
MOT20-03
MOT20-05
'''
data_root = os.path.join(opt.data_dir, 'MOT20/images/train')
if opt.test_mot20:
seqs_str = '''MOT20-04
MOT20-06
MOT20-07
MOT20-08
'''
data_root = os.path.join(opt.data_dir, 'MOT20/images/test')
seqs = [seq.strip() for seq in seqs_str.split()]
main(opt,
data_root=data_root,
seqs=seqs,
# exp_name='MOT15_val_all_dla34',
exp_name='MOT15_val_all_dla34_0908',
show_image=False,
save_images=False,
save_videos=True)
Everything really happens inside update(). The code splits it into five steps, so I will walk through it in the same five steps. The first part is the network forward pass: how do we actually use the trained network?
''' Step 1: Network forward, get detections & embeddings'''
with torch.no_grad():
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid_() # we want probabilities, so the sigmoid squashes the heatmap values into (0, 1)
wh = output['wh']
id_feature = output['id'] # (1, 512, 152, 272)
# L2-normalize the id features along the channel dimension
id_feature = F.normalize(id_feature, dim=1) # still (1, 512, 152, 272): normalization changes the values, not the shape
# The offset head matters: to balance speed and accuracy the input is downsampled by 4x, so without a learned local offset the recovered center could be off by up to 4 pixels
reg = output['reg'] if self.opt.reg_offset else None
# dets columns are ordered as [bboxes, scores, clses]
dets, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh,
K=self.opt.K) # this decode step is similar to the post-processing in DBFace
id_feature = _tranpose_and_gather_feat(id_feature, inds) # gather the id features at the detected center locations
id_feature = id_feature.squeeze(0)
id_feature = id_feature.cpu().numpy()
dets = self.post_process(dets, meta) # map detections back to the original image scale
dets = self.merge_outputs([dets])[1] # at most 128 detections per image are kept; if there are more, keep the top 128 by score
remain_inds = dets[:, 4] > self.opt.conf_thres # filter again by the confidence threshold
dets = dets[remain_inds]
id_feature = id_feature[remain_inds]
# vis
'''
for i in range(0, dets.shape[0]):
bbox = dets[i][0:4]
cv2.rectangle(img0, (bbox[0], bbox[1]),
(bbox[2], bbox[3]),
(0, 255, 0), 2)
cv2.imshow('dets', img0)
cv2.waitKey(0)
id0 = id0-1
'''
# Important step: each detection is wrapped in an STrack, which manages the state of that target.
# The constructor records the current box and id feature; STrack also smooths its feature over time
# with an exponential moving average (alpha = 0.9), so appearance carries some temporal memory.
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
(tlbrs, f) in zip(dets[:, :5], id_feature)]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks:
if not track.is_activated: # seen once but not yet activated: a target has to appear at least twice before it can be activated
unconfirmed.append(track)
else:
tracked_stracks.append(track)
For reference, F.normalize is defined as v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}, with p = 2 by default.
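A quick sanity check of what this does to the id feature map (the shape is unchanged; every per-pixel embedding simply ends up with unit L2 norm, so distances between embeddings become comparable):

import torch
import torch.nn.functional as F

id_feature = torch.randn(1, 512, 152, 272)
normed = F.normalize(id_feature, dim=1)   # p=2 by default
print(normed.shape)                       # torch.Size([1, 512, 152, 272])
print(normed[0, :, 0, 0].norm())          # ~1.0: each spatial location is now a unit vector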
Step 2 is data association. When a target has only just been detected for the first or second time, this step is essentially skipped, because strack_pool is still empty ([]).
# First association: compute the appearance (embedding) distance
''' Step 2: First association, with embedding'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks) # pool the currently tracked and the recently lost tracks; this way a target that has been lost for up to roughly a second can still be recovered (provided its appearance has not changed too much)
# Predict the current location with KF
# for strack in strack_pool:
# strack.predict()
STrack.multi_predict(strack_pool) # run the Kalman filter prediction step first, updating each track's mean and covariance
dists = matching.embedding_distance(strack_pool, detections)
# dists = matching.gate_cost_matrix(self.kalman_filter, dists, strack_pool, detections)
#fuse_motion does two things: (a) it computes the Mahalanobis (motion) distance and sets cost_matrix entries to infinity when that distance is too large,
# (b) it blends the Mahalanobis distance into the final cost_matrix
dists = matching.fuse_motion(self.kalman_filter, dists, strack_pool, detections)
#Solve the linear assignment over the cost_matrix with LAPJV (Jonker-Volgenant);
# it is interchangeable with the Hungarian algorithm and is reportedly somewhat faster
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
#Update each matched track: frame_id, box location, Kalman filter state (mean and covariance),
# score, and the id feature (smoothed over time, so some history is retained)
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id)
activated_starcks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
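For reference, the appearance cost produced by matching.embedding_distance above is essentially a cosine-distance matrix between the (smoothed) track features and the new detection embeddings. A rough sketch of that idea, not the exact implementation:

import numpy as np
from scipy.spatial.distance import cdist

track_feats = np.random.randn(5, 512).astype(np.float32)   # smoothed features of the pooled tracks
det_feats = np.random.randn(8, 512).astype(np.float32)     # embeddings of the current detections
cost_matrix = np.maximum(0.0, cdist(track_feats, det_feats, metric='cosine'))
print(cost_matrix.shape)   # (5, 8): rows are tracks, columns are detections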
Step 3 is a second round of association, this time based on IoU: the detections and existing tracks that were left unmatched in step 2 are matched again, using IoU to build the cost matrix, and LAPJV is used once more to solve the assignment.
''' Step 3: Second association, with IOU'''
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
if track.state == TrackState.Tracked:
track.update(det, self.frame_id)
activated_starcks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
# Any existing track that is still unmatched after the IoU round is considered lost:
# mark it as Lost and move it into lost_stracks
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections) # IoU-based distance matrix between unconfirmed tracks and the remaining detections
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
unconfirmed[itracked].update(detections[idet], self.frame_id) # update (and activate) the matched unconfirmed track
activated_starcks.append(unconfirmed[itracked])
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
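The IoU distance used for this second association (and for the unconfirmed tracks just above) is simply 1 - IoU between a track's box and a detection's box, so 0 means perfect overlap. A small sketch for a single pair of boxes in tlbr format:

def iou_distance_single(box_a, box_b):
    # boxes as [x1, y1, x2, y2]; returns 1 - IoU
    xa1, ya1, xa2, ya2 = box_a
    xb1, yb1, xb2, yb2 = box_b
    inter_w = max(0.0, min(xa2, xb2) - max(xa1, xb1))
    inter_h = max(0.0, min(ya2, yb2) - max(ya1, yb1))
    inter = inter_w * inter_h
    union = (xa2 - xa1) * (ya2 - ya1) + (xb2 - xb1) * (yb2 - yb1) - inter
    return 1.0 - inter / union if union > 0 else 1.0

print(iou_distance_single([0, 0, 10, 10], [5, 5, 15, 15]))   # ~0.857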
After all of the association rounds above, any detection that still cannot be matched to an existing track is treated as a brand-new target that has just appeared in the video, so a new id is created for it.
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate(self.kalman_filter, self.frame_id) # initialize a new track (assign an id, record the starting frame, initialize the Kalman mean and covariance)
activated_starcks.append(track)
""" Step 5: Update state"""
for track in self.lost_stracks:
if self.frame_id - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
# print('Ramained match {} s'.format(t4-t3))
self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks) # add newly activated tracks to the tracked set
self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks) # add re-found tracks to the tracked set
self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks) # drop tracks that are being tracked again from the lost set
self.lost_stracks.extend(lost_stracks)
self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks) # drop tracks already marked as removed (lost for longer than max_time_lost, roughly one second by default)
self.removed_stracks.extend(removed_stracks) # accumulate removed tracks
self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
# get scores of lost tracks
output_stracks = [track for track in self.tracked_stracks if track.is_activated]
logger.debug('===========Frame {}=========='.format(self.frame_id))
logger.debug('Activated: {}'.format([track.track_id for track in activated_starcks]))
logger.debug('Refind: {}'.format([track.track_id for track in refind_stracks]))
logger.debug('Lost: {}'.format([track.track_id for track in lost_stracks]))
logger.debug('Removed: {}'.format([track.track_id for track in removed_stracks]))
You may notice that I often ask and answer my own questions in the code comments. Code rarely makes sense on the first pass, and sometimes it takes several rounds of debugging before it does, so please bear with me!