该数据库是目前规模较大的行人数据库,采用车载摄像头拍摄,时长约10个小时,视频的分辨率为640x480,30帧/秒。标注了约249,884帧(约137分钟),350,000个矩形框,2,300个行人,另外还对矩形框之间的时间对应关系及其遮挡情况进行了标注。
下载好的Caltech行人数据集,解压放在Caltech文件夹内,目录情况如下图。
1 图片转换
将.seq
文件转换为.jpg
文件并保存在某一目录下。
seq2jpg.py
# -*- coding:utf-8 -*-
import os
import fnmatch
def open_save(file, savepath):
    """Split a Caltech .seq video file into its embedded JPEG frames.

    A .seq file is essentially many JPEG images concatenated together:
    every frame begins with the standard JFIF header, so splitting the
    raw bytes on that header recovers the individual frames.

    Args:
        file: path of the .seq file to read.
        savepath: filename prefix for the output images; frame N is
            written to ``savepath + "N.jpg"`` (N starts at 1).
    """
    # JFIF marker that starts every JPEG frame inside the .seq container.
    header = b"\xFF\xD8\xFF\xE0\x00\x10\x4A\x46\x49\x46"
    # Work directly on bytes: the original decoded to a latin-1 str and
    # re-encoded each chunk, which is a lossless but pointless round trip.
    with open(file, 'rb') as f:
        data = f.read()
    chunks = data.split(header)
    # chunks[0] is the .seq metadata that precedes the first frame, not
    # image data, so it is skipped (and, unlike the original, no longer
    # announced with a misleading "0.jpg" message).
    for count, img in enumerate(chunks):
        if count == 0:
            continue
        filename = savepath + str(count) + '.jpg'
        with open(filename, 'wb') as out:
            out.write(header)  # re-attach the header stripped by split()
            out.write(img)
        print("Generating JPEGImages jpg file of picture:{}".format(filename))
def main(seq_inputdir="E:\\dataset\\Caltech",
         jpg_outputdir="E:\\dataset\\Caltech_VOC\\JPEGImages"):
    """Convert every .seq file under the Caltech set folders to jpgs.

    Args:
        seq_inputdir: root folder containing set00, set01, ... directories
            (default keeps the original hard-coded path).
        jpg_outputdir: destination folder for the extracted jpg frames.
    """
    # Create the output folder up front; the original crashed on the
    # first open() if it did not already exist.
    os.makedirs(jpg_outputdir, exist_ok=True)
    for dir_set in os.listdir(seq_inputdir):
        # Only directories named like set00, set01, ... (no file extension).
        if dir_set.startswith("set") and os.path.splitext(dir_set)[1] == '':
            dir_set_seq = os.path.join(seq_inputdir, dir_set)
            for dir_seq in os.listdir(dir_set_seq):
                if fnmatch.fnmatch(dir_seq, '*.seq'):
                    file_dir = os.path.join(dir_set_seq, dir_seq)
                    # Prefix "<out>/<set>_<video>_"; open_save appends "N.jpg".
                    save_dir = os.path.join(jpg_outputdir, dir_set) + '_' + dir_seq.split('.')[0] + '_'
                    open_save(file_dir, save_dir)

if __name__ == '__main__':
    main()
    print("Success!")
2 文本转换
将.vbb
文件转换为.xml
文件并保存在某一目录下。
vbb2voc.py
# -*- coding:utf-8 -*-
# -*- coding:utf-8 -*-
import os, glob
from scipy.io import loadmat
from collections import defaultdict
import numpy as np
from lxml import etree, objectify
def vbb_anno2dict(vbb_file, cam_id):
    """Parse one Caltech .vbb annotation file into a per-frame dict.

    Args:
        vbb_file: path to a MATLAB-format .vbb annotation file.
        cam_id: set name (e.g. "set00"); combined with the video name and
            frame index to build frame names matching the jpg converter.

    Returns:
        dict mapping "<cam>_<video>_<frameN>.jpg" to an annotation dict
        with keys "id", "label", "occlusion", "bbox"; frames that end up
        with no person box are dropped.
    """
    filename = os.path.splitext(os.path.basename(vbb_file))[0]
    annos = defaultdict(dict)
    vbb = loadmat(vbb_file)
    # A{1} field 1: per-frame object lists (id, pos, occlusion, lock, posv).
    objLists = vbb['A'][0][0][1][0]
    # A{1} field 4: label string for every object id (all categories).
    objLbl = [str(v[0]) for v in vbb['A'][0][0][4][0]]
    # Indices of objects labelled "person" -- the only class exported.
    person_index_list = np.where(np.array(objLbl) == "person")[0]
    for frame_id, obj in enumerate(objLists):
        if len(obj) > 0:
            # Frame numbering starts at 1 to match the jpg file names.
            frame_name = str(cam_id) + "_" + str(filename) + "_" + str(frame_id+1) + ".jpg"
            annos[frame_name] = defaultdict(list)
            annos[frame_name]["id"] = frame_name
            annos[frame_name]["label"] = "person"
            for obj_id, pos, occl in zip(obj['id'][0], obj['pos'][0], obj['occl'][0]):
                # MATLAB ids start at 1; shift to 0-based before the lookup.
                obj_id = int(obj_id[0][0]) - 1
                if obj_id not in person_index_list:  # keep only person boxes
                    continue
                # pos is presumably (x, y, w, h) in Caltech's native format;
                # the downstream xml writer converts it to corner coords.
                pos = pos[0].tolist()
                occl = int(occl[0][0])
                annos[frame_name]["occlusion"].append(occl)
                annos[frame_name]["bbox"].append(pos)
            # Drop the frame again if no person box survived the filter.
            if not annos[frame_name]["bbox"]:
                del annos[frame_name]
    # NOTE: the original ended with a debug print(annos) that dumped the
    # entire annotation dict to stdout for every file; removed.
    return annos
def instance2xml_base(anno, bbox_type='xyxy'):
    """Build a VOC-style <annotation> element tree for one frame.

    NOTE(review): the branch labels are swapped relative to the usual
    convention.  With bbox_type='xyxy' (the default, and the path
    parse_anno_file actually uses) the input boxes are treated as
    (xmin, ymin, width, height) -- Caltech's native xywh -- and
    converted to corner coordinates; with 'xywh' the input is used as
    corners directly.  Behavior is deliberately kept as-is because the
    caller depends on the default path; renaming the two branch values
    would be the real fix.

    Args:
        anno: dict with keys 'id', 'label', 'bbox', 'occlusion'
            (as produced by vbb_anno2dict).
        bbox_type: see the note above.

    Returns:
        An lxml objectify element tree for the annotation.
    """
    assert bbox_type in ['xyxy', 'xywh']
    E = objectify.ElementMaker(annotate=False)
    anno_tree = E.annotation(
        E.folder('VOC2014_instance/person'),
        E.filename(anno['id']),
        E.source(
            E.database('Caltech pedestrian'),
            E.annotation('Caltech pedestrian'),
            E.image('Caltech pedestrian'),
            E.url('None')
        ),
        # Caltech frames are 640x480 RGB (see the dataset description).
        E.size(
            E.width(640),
            E.height(480),
            E.depth(3)
        ),
        E.segmented(0),
    )
    # Reuse the ElementMaker created above; the original rebuilt an
    # identical one on every loop iteration for no benefit.
    for index, bbox in enumerate(anno['bbox']):
        bbox = [float(x) for x in bbox]
        if bbox_type == 'xyxy':
            # Input is actually (x, y, w, h): convert to corner coords.
            xmin, ymin, w, h = bbox
            xmax = xmin + w
            ymax = ymin + h
        else:
            xmin, ymin, xmax, ymax = bbox
        anno_tree.append(
            E.object(
                E.name(anno['label']),
                E.bndbox(
                    E.xmin(int(xmin)),
                    E.ymin(int(ymin)),
                    E.xmax(int(xmax)),
                    E.ymax(int(ymax))
                ),
                E.difficult(0),
                E.occlusion(anno["occlusion"][index])
            )
        )
    return anno_tree
def parse_anno_file(vbb_inputdir, vbb_outputdir):
    """Convert every .vbb under vbb_inputdir into per-frame VOC xml files.

    Args:
        vbb_inputdir: root annotation folder containing set00, set01, ...
        vbb_outputdir: flat folder that receives one xml per annotated frame.
    """
    assert os.path.exists(vbb_inputdir)
    # Create the single output folder once, race-free; the original
    # re-checked exists()/makedirs() inside the per-file loop.
    os.makedirs(vbb_outputdir, exist_ok=True)
    for sub_dir in os.listdir(vbb_inputdir):  # set00, set01, ...
        print("Parsing annotations of camera: ", sub_dir)
        cam_id = sub_dir
        # All vbb files belonging to this set.
        vbb_files = glob.glob(os.path.join(vbb_inputdir, sub_dir, "*.vbb"))
        for vbb_file in vbb_files:
            # All per-frame annotations of one vbb file.
            annos = vbb_anno2dict(vbb_file, cam_id)
            # Dict keys are unique, so plain sorted() on the items is
            # equivalent to sorting by key.
            for filename, anno in sorted(annos.items()):
                if "bbox" in anno:
                    anno_tree = instance2xml_base(anno)
                    outfile = os.path.join(vbb_outputdir, os.path.splitext(filename)[0] + ".xml")
                    print("Generating annotation xml file of picture: ", filename)
                    # One xml file per annotated frame.
                    etree.ElementTree(anno_tree).write(outfile, pretty_print=True)
def visualize_bbox(xml_file, img_file):
    """Draw every bounding box from xml_file onto img_file and display both."""
    import cv2
    doc = etree.parse(xml_file)
    # Load the image twice: one copy gets the rectangles, one stays clean.
    annotated = cv2.imread(img_file)
    untouched = cv2.imread(img_file)
    # Each <bndbox> carries xmin/ymin/xmax/ymax as its child elements.
    for box in doc.xpath('//bndbox'):
        coords = [int(float(corner.text)) for corner in box.getchildren()]
        print(coords)
        top_left = (coords[0], coords[1])
        bottom_right = (coords[2], coords[3])
        cv2.rectangle(annotated, top_left, bottom_right, (0, 0, 255), 2)
    # Show the boxed image next to the original until a key is pressed.
    cv2.imshow("test", annotated)
    cv2.imshow('origin', untouched)
    cv2.waitKey(0)
def main(vbb_inputdir="F:\\dataset\\Caltech\\annotations",
         vbb_outputdir="F:\\dataset\\Caltech_VOC\\Annotations"):
    """Entry point: convert Caltech .vbb annotations to VOC xml files.

    Args:
        vbb_inputdir: root .vbb annotation folder (default keeps the
            original hard-coded path, so ``main()`` behaves as before).
        vbb_outputdir: destination folder for the generated xml files.
    """
    parse_anno_file(vbb_inputdir, vbb_outputdir)

if __name__ == "__main__":
    main()
    print("Success!")
得到如下文件:
Annotations内包含122,187个xml文件。(每一个xml文件内都包含“person”标签)
JPEGImages内包含249,884个jpg文件。(很多jpg文件内不含有目标,所以要比xml文件多)
3 数据筛选
由于Annotations内包含122,187个xml文件,JPEGImages内包含249,884个jpg文件,不能实现一一对应,所以要去除多余的jpg文件。
但其实很多xml文件内标注的person由于目标太小,导致极其不清晰,应该将其去除,再进一步处理。
这是数据集中的一张图片,猜猜人在哪?
欸,在这里!!
由于这些目标让我难以描述,所以将此类目标(尺寸较小)去除掉。
3.1 根据目标面积进行筛选
将符合条件的目标保存到另一个目录下。
# -*- coding:utf-8 -*-
import xml.etree.ElementTree as ET
import os
space = 40*70
xml_path = "F:\\dataset\\Caltech_VOC\\Annotations"
save_path = "F:\\dataset\\Caltech_VOC\\Annotations_"
def filter_xml(xml_dir=None, save_dir=None, min_area=None):
    """Copy annotation xmls that contain at least one big-enough person box.

    An xml is accepted (and written UNCHANGED, i.e. still including any
    smaller boxes it also contains) as soon as one <object> bndbox covers
    at least ``min_area`` pixels.

    Args:
        xml_dir: folder of input xmls; defaults to module-level ``xml_path``.
        save_dir: folder for accepted xmls; defaults to module-level ``save_path``.
        min_area: area threshold in pixels; defaults to module-level ``space``.
    """
    xml_dir = xml_path if xml_dir is None else xml_dir
    save_dir = save_path if save_dir is None else save_dir
    min_area = space if min_area is None else min_area
    for file_name in os.listdir(xml_dir):
        file_path = os.path.join(xml_dir, file_name)
        print(file_path)
        # Parse directly from the path; the original opened a file object
        # and never closed it (handle leak over 100k+ files).
        tree = ET.parse(file_path)
        root = tree.getroot()
        keep = False
        for obj in root.findall('object'):
            bbox = obj.find('bndbox')
            x1 = int(bbox.find('xmin').text)
            y1 = int(bbox.find('ymin').text)
            x2 = int(bbox.find('xmax').text)
            y2 = int(bbox.find('ymax').text)
            if (x2 - x1) * (y2 - y1) >= min_area:
                keep = True
                break  # one qualifying box is enough; stop scanning
        if keep:
            out_path = os.path.join(save_dir, file_name)
            tree.write(out_path)
            print("save file: {}".format(out_path))

if __name__ == '__main__':
    filter_xml()
3.2 根据xml保存jpg
将含有xml文件的jpg保存在另一目录下。
import os
import shutil
from tqdm import tqdm
xml_path = "F:\\dataset\\Caltech_VOC\\Annotations_"
jpg_path = "F:\\dataset\\Caltech_VOC\\JPEGImages"
save_path = "F:\\dataset\\Caltech_VOC\\JPEGImages_"
if __name__ == '__main__':
filter = []
print("filter loading...")
for file in tqdm(os.listdir(xml_path)):
filter.append(os.path.splitext(file)[0])
for file in os.listdir(jpg_path):
if os.path.splitext(file)[0] in filter:
old_path = os.path.join(jpg_path, file)
new_path = os.path.join(save_path, file)
shutil.copyfile(old_path, new_path)
print("Success save jpg file: {}".format(new_path))
print("Success!")
处理完成后:
版权声明:本文为See_Star原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接和本声明。