I am trying to train an instance segmentation model. I am using the following code to generate the TFRecord files.
import hashlib
import io
import logging
import os
import random
import re

from lxml import etree
import numpy as np
import PIL.Image
import tensorflow as tf

from object_detection.utils import dataset_util
from object_detection.utils import label_map_util

flags = tf.app.flags
flags.DEFINE_string('data_dir', '', 'Root directory to raw pet dataset.')
flags.DEFINE_string('output_dir', '', 'Path to directory to output TFRecords.')
flags.DEFINE_string('label_map_path', 'data/pet_label_map.pbtxt',
                    'Path to label map proto')
flags.DEFINE_boolean('faces_only', True, 'If True, generates bounding boxes '
                     'for pet faces. Otherwise generates bounding boxes (as '
                     'well as segmentations for full pet bodies). Note that '
                     'in the latter case, the resulting files are much larger.')
flags.DEFINE_string('mask_type', 'png', 'How to represent instance '
                    'segmentation masks. Options are "png" or "numerical".')
FLAGS = flags.FLAGS


def get_class_name_from_filename(file_name):
  """Extracts the class name from a filename such as 'Abyssinian_100.jpg'."""
  match = re.match(r'([A-Za-z_]+)(_[0-9]+\.jpg)', file_name, re.I)
  return match.groups()[0]


def dict_to_tf_example(data,
                       mask_path,
                       label_map_dict,
                       image_subdirectory,
                       ignore_difficult_instances=False,
                       faces_only=True,
                       mask_type='png'):
  """Converts parsed XML annotations and a trimap mask into a tf.Example."""
  img_path = os.path.join(image_subdirectory, data['filename'])
  with tf.gfile.GFile(img_path, 'rb') as fid:
    encoded_jpg = fid.read()
  encoded_jpg_io = io.BytesIO(encoded_jpg)
  image = PIL.Image.open(encoded_jpg_io)
  if image.format != 'JPEG':
    raise ValueError('Image format not JPEG')
  key = hashlib.sha256(encoded_jpg).hexdigest()

  with tf.gfile.GFile(mask_path, 'rb') as fid:
    encoded_mask_png = fid.read()
  encoded_png_io = io.BytesIO(encoded_mask_png)
  mask = PIL.Image.open(encoded_png_io)
  if mask.format != 'PNG':
    raise ValueError('Mask format not PNG')

  mask_np = np.asarray(mask)
  # In the Oxford-IIIT trimaps, pixel value 2 marks the background.
  nonbackground_indices_x = np.any(mask_np != 2, axis=0)
  nonbackground_indices_y = np.any(mask_np != 2, axis=1)
  nonzero_x_indices = np.where(nonbackground_indices_x)
  nonzero_y_indices = np.where(nonbackground_indices_y)

  width = int(data['size']['width'])
  height = int(data['size']['height'])

  xmins = []
  ymins = []
  xmaxs = []
  ymaxs = []
  classes = []
  classes_text = []
  truncated = []
  poses = []
  difficult_obj = []
  masks = []
  if 'object' in data:
    for obj in data['object']:
      difficult = bool(int(obj['difficult']))
      if ignore_difficult_instances and difficult:
        continue
      difficult_obj.append(int(difficult))

      if faces_only:
        xmin = float(obj['bndbox']['xmin'])
        xmax = float(obj['bndbox']['xmax'])
        ymin = float(obj['bndbox']['ymin'])
        ymax = float(obj['bndbox']['ymax'])
      else:
        # Derive the box from the non-background extent of the trimap.
        xmin = float(np.min(nonzero_x_indices))
        xmax = float(np.max(nonzero_x_indices))
        ymin = float(np.min(nonzero_y_indices))
        ymax = float(np.max(nonzero_y_indices))

      xmins.append(xmin / width)
      ymins.append(ymin / height)
      xmaxs.append(xmax / width)
      ymaxs.append(ymax / height)

      class_name = get_class_name_from_filename(data['filename'])
      classes_text.append(class_name.encode('utf8'))
      classes.append(label_map_dict[class_name])
      truncated.append(int(obj['truncated']))
      poses.append(obj['pose'].encode('utf8'))
      if not faces_only:
        # Binarize the trimap: 1 for pet pixels, 0 for background.
        mask_remapped = (mask_np != 2).astype(np.uint8)
        masks.append(mask_remapped)

  feature_dict = {
      'image/height': dataset_util.int64_feature(height),
      'image/width': dataset_util.int64_feature(width),
      'image/filename': dataset_util.bytes_feature(
          data['filename'].encode('utf8')),
      'image/source_id': dataset_util.bytes_feature(
          data['filename'].encode('utf8')),
      'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
      'image/encoded': dataset_util.bytes_feature(encoded_jpg),
      'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
      'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
      'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
      'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
      'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
      'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
      'image/object/class/label': dataset_util.int64_list_feature(classes),
      'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
      'image/object/truncated': dataset_util.int64_list_feature(truncated),
      'image/object/view': dataset_util.bytes_list_feature(poses),
  }
  # Masks are only written when full-body annotations are requested.
  if not faces_only:
    if mask_type == 'numerical':
      mask_stack = np.stack(masks).astype(np.float32)
      masks_flattened = np.reshape(mask_stack, [-1])
      feature_dict['image/object/mask'] = (
          dataset_util.float_list_feature(masks_flattened.tolist()))
    elif mask_type == 'png':
      encoded_mask_png_list = []
      for mask in masks:
        img = PIL.Image.fromarray(mask)
        output = io.BytesIO()
        img.save(output, format='PNG')
        encoded_mask_png_list.append(output.getvalue())
      feature_dict['image/object/mask'] = (
          dataset_util.bytes_list_feature(encoded_mask_png_list))

  example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
  return example


def create_tf_record(output_filename,
                     label_map_dict,
                     annotations_dir,
                     image_dir,
                     examples,
                     faces_only=True,
                     mask_type='png'):
  """Writes one TFRecord file from a list of example stems."""
  writer = tf.python_io.TFRecordWriter(output_filename)
  for idx, example in enumerate(examples):
    if idx % 100 == 0:
      logging.info('On image %d of %d', idx, len(examples))
    xml_path = os.path.join(annotations_dir, 'xmls', example + '.xml')
    mask_path = os.path.join(annotations_dir, 'trimaps', example + '.png')

    if not os.path.exists(xml_path):
      logging.warning('Could not find %s, ignoring example.', xml_path)
      continue
    with tf.gfile.GFile(xml_path, 'r') as fid:
      xml_str = fid.read()
    xml = etree.fromstring(xml_str)
    data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

    try:
      tf_example = dict_to_tf_example(
          data,
          mask_path,
          label_map_dict,
          image_dir,
          faces_only=faces_only,
          mask_type=mask_type)
      writer.write(tf_example.SerializeToString())
    except ValueError:
      logging.warning('Invalid example: %s, ignoring.', xml_path)

  writer.close()


# TODO(derekjchow): Add test for pet/PASCAL main files.
def main(_):
  data_dir = FLAGS.data_dir
  label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)

  logging.info('Reading from Pet dataset.')
  image_dir = os.path.join(data_dir, 'images')
  annotations_dir = os.path.join(data_dir, 'annotations')
  examples_path = os.path.join(annotations_dir, 'trainval.txt')
  examples_list = dataset_util.read_examples_list(examples_path)

  # Test images are not included in the downloaded data set, so we shall
  # perform our own split.
  random.seed(42)
  random.shuffle(examples_list)
  num_examples = len(examples_list)
  num_train = int(0.7 * num_examples)
  train_examples = examples_list[:num_train]
  val_examples = examples_list[num_train:]
  logging.info('%d training and %d validation examples.',
               len(train_examples), len(val_examples))

  train_output_path = os.path.join(FLAGS.output_dir, 'pet_train.record')
  val_output_path = os.path.join(FLAGS.output_dir, 'pet_val.record')
  # The "_with_masks" files are only meaningful when masks are generated,
  # i.e. when faces_only is False.
  if not FLAGS.faces_only:
    train_output_path = os.path.join(FLAGS.output_dir,
                                     'pet_train_with_masks.record')
    val_output_path = os.path.join(FLAGS.output_dir,
                                   'pet_val_with_masks.record')
  create_tf_record(
      train_output_path,
      label_map_dict,
      annotations_dir,
      image_dir,
      train_examples,
      faces_only=FLAGS.faces_only,
      mask_type=FLAGS.mask_type)
  create_tf_record(
      val_output_path,
      label_map_dict,
      annotations_dir,
      image_dir,
      val_examples,
      faces_only=FLAGS.faces_only,
      mask_type=FLAGS.mask_type)


if __name__ == '__main__':
  tf.app.run()
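For reference, a run of the script above might look like the following (the script name and all paths here are placeholders; note that masks are only written when --faces_only is False):

python create_pet_tf_record.py \
    --data_dir=/path/to/pet_dataset \
    --output_dir=/path/to/output \
    --faces_only=False \
    --mask_type=png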
The dataset I am using for training has 37 classes, with images and masks. The dataset is here.
However, when I try to train, I get the following error.
Traceback (most recent call last):
  File "train.py", line 167, in <module>
    tf.app.run()
  File "/anaconda3/envs/conda/lib/python3.6/site-packages/tensorflow/python/platform/app.py", line 126, in run
    _sys.exit(main(argv))
  File "train.py", line 163, in main
    worker_job_name, is_chief, FLAGS.train_dir)
  File "/Users/Documents/research/models/research/object_detection/trainer.py", line 275, in train
    clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue])
  File "/Users/Documents/research/models/research/slim/deployment/model_deploy.py", line 193, in create_clones
    loss_dict = detection_model.loss(prediction_dict, true_image_shapes)
  File "/Users/Documents/research/models/research/object_detection/meta_architectures/faster_rcnn_meta_arch.py", line 1608, in loss
    groundtruth_masks_list,
  File "/Users/Documents/research/models/research/object_detection/meta_architectures/faster_rcnn_meta_arch.py", line 1837, in _loss_box_classifier
    raise ValueError('Groundtruth instance masks not provided. '
ValueError: Groundtruth instance masks not provided. Please configure input reader.
How can I sort this out?
In my case, creating the TFRecord was fine, similar to what the OP posted. The solution to this problem was that I needed to add two lines to the pipeline.config file:
train_input_reader {
  label_map_path: "path/to/pbtxt"
  tf_record_input_reader {
    input_path: "path/to/traindata.tfrecord"
  }
  load_instance_masks: true
  mask_type: PNG_MASKS
}
The two lines load_instance_masks: true and mask_type: PNG_MASKS must be added to train_input_reader. They are not present by default in the pipeline config files from the model zoo, and the eval_input_reader section typically needs the same two lines.
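To double-check that a generated file actually carries the mask feature that load_instance_masks expects, you can inspect the feature keys of the first serialized example; here is a minimal sketch (the record path is a placeholder):

import tensorflow as tf

record_path = 'pet_train_with_masks.record'  # placeholder path
for serialized in tf.python_io.tf_record_iterator(record_path):
  example = tf.train.Example.FromString(serialized)
  # 'image/object/mask' must be present for load_instance_masks to work.
  print('image/object/mask' in example.features.feature)
  break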
Hope this helps.