tracker.py

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import cv2
import glob
import paddle
import numpy as np

from paddlex.ppdet.core.workspace import create
from paddlex.ppdet.utils.checkpoint import load_weight, load_pretrain_weight
from paddlex.ppdet.modeling.mot.utils import Timer, load_det_results
from paddlex.ppdet.modeling.mot import visualization as mot_vis
from paddlex.ppdet.metrics import Metric, MOTMetric
from paddlex.ppdet.utils import stats

from .callbacks import Callback, ComposeCallback
from .export_utils import _dump_infer_config

from paddlex.ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)

__all__ = ['Tracker']


class Tracker(object):
    def __init__(self, cfg, mode='eval'):
        self.cfg = cfg
        assert mode.lower() in ['test', 'eval'], \
            "mode should be 'test' or 'eval'"
        self.mode = mode.lower()
        self.optimizer = None

        # build MOT data loader
        self.dataset = cfg['{}MOTDataset'.format(self.mode.capitalize())]

        # build model
        self.model = create(cfg.architecture)

        self.status = {}
        self.start_epoch = 0

        # initialize default callbacks
        self._init_callbacks()

        # initialize default metrics
        self._init_metrics()
        self._reset_metrics()
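
    # Note: the dataset is looked up from the config by key, i.e.
    # 'EvalMOTDataset' when mode='eval' and 'TestMOTDataset' when mode='test'.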

    def _init_callbacks(self):
        self._callbacks = []
        self._compose_callback = None

    def _init_metrics(self):
        if self.mode in ['test']:
            self._metrics = []
            return

        if self.cfg.metric == 'MOT':
            self._metrics = [MOTMetric(), ]
        else:
            logger.warning("Metric not supported for metric type {}".format(
                self.cfg.metric))
            self._metrics = []

    def _reset_metrics(self):
        for metric in self._metrics:
            metric.reset()

    def register_callbacks(self, callbacks):
        callbacks = [c for c in list(callbacks) if c is not None]
        for c in callbacks:
            assert isinstance(c, Callback), \
                "callbacks should be instances of a subclass of Callback"
        self._callbacks.extend(callbacks)
        self._compose_callback = ComposeCallback(self._callbacks)

    def register_metrics(self, metrics):
        metrics = [m for m in list(metrics) if m is not None]
        for m in metrics:
            assert isinstance(m, Metric), \
                "metrics should be instances of a subclass of Metric"
        self._metrics.extend(metrics)

    def load_weights_jde(self, weights):
        load_weight(self.model, weights, self.optimizer)

    def load_weights_sde(self, det_weights, reid_weights):
        if self.model.detector:
            load_weight(self.model.detector, det_weights, self.optimizer)
        load_weight(self.model.reid, reid_weights, self.optimizer)
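
    # JDE-style models (JDE, FairMOT) carry detection and re-ID in one network,
    # so a single weights file suffices; SDE-style models (DeepSORT) use an
    # optional detector plus a separate re-ID network, hence two weight files.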

    def _eval_seq_jde(self,
                      dataloader,
                      save_dir=None,
                      show_image=False,
                      frame_rate=30):
        if save_dir:
            if not os.path.exists(save_dir): os.makedirs(save_dir)
        tracker = self.model.tracker
        tracker.max_time_lost = int(frame_rate / 30.0 * tracker.track_buffer)

        timer = Timer()
        results = []
        frame_id = 0
        self.status['mode'] = 'track'
        self.model.eval()
        for step_id, data in enumerate(dataloader):
            self.status['step_id'] = step_id
            if frame_id % 40 == 0:
                logger.info('Processing frame {} ({:.2f} fps)'.format(
                    frame_id, 1. / max(1e-5, timer.average_time)))

            # forward
            timer.tic()
            online_targets = self.model(data)

            online_tlwhs, online_ids = [], []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > tracker.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
            timer.toc()

            # save results
            results.append((frame_id + 1, online_tlwhs, online_ids))
            self.save_results(data, frame_id, online_ids, online_tlwhs,
                              timer.average_time, show_image, save_dir)
            frame_id += 1

        return results, frame_id, timer.average_time, timer.calls
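
    # The JDE loop above keeps only boxes larger than tracker.min_box_area and
    # with width/height ratio <= 1.6, a heuristic filter for implausible
    # pedestrian detections before the tracks are reported.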

    def _eval_seq_sde(self,
                      dataloader,
                      save_dir=None,
                      show_image=False,
                      frame_rate=30,
                      det_file=''):
        if save_dir:
            if not os.path.exists(save_dir): os.makedirs(save_dir)
        tracker = self.model.tracker
        use_detector = False if not self.model.detector else True

        timer = Timer()
        results = []
        frame_id = 0
        self.status['mode'] = 'track'
        self.model.eval()
        self.model.reid.eval()
        if not use_detector:
            dets_list = load_det_results(det_file, len(dataloader))
            logger.info('Finish loading detection results file {}.'.format(
                det_file))

        for step_id, data in enumerate(dataloader):
            self.status['step_id'] = step_id
            if frame_id % 40 == 0:
                logger.info('Processing frame {} ({:.2f} fps)'.format(
                    frame_id, 1. / max(1e-5, timer.average_time)))

            if not use_detector:
                dets = dets_list[frame_id]
                bbox_tlwh = paddle.to_tensor(dets['bbox'], dtype='float32')
                pred_scores = paddle.to_tensor(dets['score'], dtype='float32')
                if bbox_tlwh.shape[0] > 0:
                    # convert tlwh boxes to tlbr (x1, y1, x2, y2)
                    pred_bboxes = paddle.concat(
                        (bbox_tlwh[:, 0:2],
                         bbox_tlwh[:, 2:4] + bbox_tlwh[:, 0:2]),
                        axis=1)
                else:
                    pred_bboxes = []
                    pred_scores = []
                data.update({
                    'pred_bboxes': pred_bboxes,
                    'pred_scores': pred_scores
                })

            # forward
            timer.tic()
            online_targets = self.model(data)

            online_tlwhs = []
            online_ids = []
            for track in online_targets:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                tlwh = track.to_tlwh()
                track_id = track.track_id
                online_tlwhs.append(tlwh)
                online_ids.append(track_id)
            timer.toc()

            # save results
            results.append((frame_id + 1, online_tlwhs, online_ids))
            self.save_results(data, frame_id, online_ids, online_tlwhs,
                              timer.average_time, show_image, save_dir)
            frame_id += 1

        return results, frame_id, timer.average_time, timer.calls
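
    # When the SDE model has no attached detector, per-frame detections must be
    # supplied through det_file (loaded via load_det_results); boxes arrive in
    # tlwh form and are converted to (x1, y1, x2, y2) before re-ID and matching.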

    def mot_evaluate(self,
                     data_root,
                     seqs,
                     output_dir,
                     data_type='mot',
                     model_type='JDE',
                     save_images=False,
                     save_videos=False,
                     show_image=False,
                     det_results_dir=''):
        if not os.path.exists(output_dir): os.makedirs(output_dir)
        result_root = os.path.join(output_dir, 'mot_results')
        if not os.path.exists(result_root): os.makedirs(result_root)
        assert data_type in ['mot', 'kitti'], \
            "data_type should be 'mot' or 'kitti'"
        assert model_type in ['JDE', 'DeepSORT', 'FairMOT'], \
            "model_type should be 'JDE', 'DeepSORT' or 'FairMOT'"

        # run tracking
        n_frame = 0
        timer_avgs, timer_calls = [], []
        for seq in seqs:
            save_dir = os.path.join(
                output_dir, 'mot_outputs',
                seq) if save_images or save_videos else None
            logger.info('start seq: {}'.format(seq))

            infer_dir = os.path.join(data_root, seq, 'img1')
            images = self.get_infer_images(infer_dir)
            self.dataset.set_images(images)

            dataloader = create('EvalMOTReader')(self.dataset, 0)
            result_filename = os.path.join(result_root, '{}.txt'.format(seq))

            # read the sequence frame rate from seqinfo.ini
            meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
            frame_rate = int(meta_info[meta_info.find('frameRate') + 10:
                                       meta_info.find('\nseqLength')])

            if model_type in ['JDE', 'FairMOT']:
                results, nf, ta, tc = self._eval_seq_jde(
                    dataloader,
                    save_dir=save_dir,
                    show_image=show_image,
                    frame_rate=frame_rate)
            elif model_type in ['DeepSORT']:
                results, nf, ta, tc = self._eval_seq_sde(
                    dataloader,
                    save_dir=save_dir,
                    show_image=show_image,
                    frame_rate=frame_rate,
                    det_file=os.path.join(det_results_dir,
                                          '{}.txt'.format(seq)))
            else:
                raise ValueError(model_type)

            self.write_mot_results(result_filename, results, data_type)
            n_frame += nf
            timer_avgs.append(ta)
            timer_calls.append(tc)

            if save_videos:
                output_video_path = os.path.join(save_dir, '..',
                                                 '{}_vis.mp4'.format(seq))
                cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(
                    save_dir, output_video_path)
                os.system(cmd_str)
                logger.info('Save video in {}.'.format(output_video_path))

            logger.info('Evaluate seq: {}'.format(seq))
            # update metrics
            for metric in self._metrics:
                metric.update(data_root, seq, data_type, result_root,
                              result_filename)

        timer_avgs = np.asarray(timer_avgs)
        timer_calls = np.asarray(timer_calls)
        all_time = np.dot(timer_avgs, timer_calls)
        avg_time = all_time / np.sum(timer_calls)
        logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
            all_time, 1.0 / avg_time))

        # accumulate metrics and log them
        for metric in self._metrics:
            metric.accumulate()
            metric.log()
        # reset metric states since evaluation may be run multiple times
        self._reset_metrics()
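
    # Usage sketch (paths, sequence names and weights are illustrative
    # assumptions, not values defined in this file):
    #   tracker = Tracker(cfg, mode='eval')
    #   tracker.load_weights_jde('output/fairmot/model_final.pdparams')
    #   tracker.mot_evaluate(
    #       data_root='dataset/mot/MOT16/images/train',
    #       seqs=['MOT16-02', 'MOT16-04'],
    #       output_dir='output',
    #       model_type='FairMOT')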

    def get_infer_images(self, infer_dir):
        assert infer_dir is not None and os.path.isdir(infer_dir), \
            "infer_dir {} is not a directory".format(infer_dir)
        images = set()
        exts = ['jpg', 'jpeg', 'png', 'bmp']
        exts += [ext.upper() for ext in exts]
        for ext in exts:
            images.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
        images = list(images)
        images.sort()
        assert len(images) > 0, "no image found in {}".format(infer_dir)
        logger.info("Found {} inference images in total.".format(len(images)))
        return images

    def mot_predict(self,
                    video_file,
                    output_dir,
                    data_type='mot',
                    model_type='JDE',
                    save_images=False,
                    save_videos=True,
                    show_image=False,
                    det_results_dir=''):
        if not os.path.exists(output_dir): os.makedirs(output_dir)
        result_root = os.path.join(output_dir, 'mot_results')
        if not os.path.exists(result_root): os.makedirs(result_root)
        assert data_type in ['mot', 'kitti'], \
            "data_type should be 'mot' or 'kitti'"
        assert model_type in ['JDE', 'DeepSORT', 'FairMOT'], \
            "model_type should be 'JDE', 'DeepSORT' or 'FairMOT'"

        # run tracking
        seq = video_file.split('/')[-1].split('.')[0]
        save_dir = os.path.join(output_dir, 'mot_outputs',
                                seq) if save_images or save_videos else None
        logger.info('Starting tracking {}'.format(video_file))

        self.dataset.set_video(video_file)
        dataloader = create('TestMOTReader')(self.dataset, 0)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        frame_rate = self.dataset.frame_rate

        if model_type in ['JDE', 'FairMOT']:
            results, nf, ta, tc = self._eval_seq_jde(
                dataloader,
                save_dir=save_dir,
                show_image=show_image,
                frame_rate=frame_rate)
        elif model_type in ['DeepSORT']:
            results, nf, ta, tc = self._eval_seq_sde(
                dataloader,
                save_dir=save_dir,
                show_image=show_image,
                frame_rate=frame_rate,
                det_file=os.path.join(det_results_dir, '{}.txt'.format(seq)))
        else:
            raise ValueError(model_type)

        if save_videos:
            output_video_path = os.path.join(save_dir, '..',
                                             '{}_vis.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(
                save_dir, output_video_path)
            os.system(cmd_str)
            logger.info('Save video in {}'.format(output_video_path))
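
    # Note: saving videos shells out to an ffmpeg binary that must be on PATH;
    # the frames written by save_results ({:05d}.jpg) are stitched into
    # {seq}_vis.mp4 next to the per-frame output directory.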

    def write_mot_results(self, filename, results, data_type='mot'):
        if data_type in ['mot', 'mcmot', 'lab']:
            save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
        elif data_type == 'kitti':
            save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
        else:
            raise ValueError(data_type)

        with open(filename, 'w') as f:
            for frame_id, tlwhs, track_ids in results:
                if data_type == 'kitti':
                    frame_id -= 1
                for tlwh, track_id in zip(tlwhs, track_ids):
                    if track_id < 0:
                        continue
                    x1, y1, w, h = tlwh
                    x2, y2 = x1 + w, y1 + h
                    line = save_format.format(
                        frame=frame_id,
                        id=track_id,
                        x1=x1,
                        y1=y1,
                        x2=x2,
                        y2=y2,
                        w=w,
                        h=h)
                    f.write(line)
        logger.info('MOT results saved in {}'.format(filename))
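
    # For reference, a 'mot' data_type line written above has the layout
    # frame,id,x1,y1,w,h,1,-1,-1,-1 (values below are illustrative only):
    #   1,3,1359.1,413.3,120.3,362.7,1,-1,-1,-1
    # The trailing -1 fields are unused MOT Challenge columns.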

    def save_results(self, data, frame_id, online_ids, online_tlwhs,
                     average_time, show_image, save_dir):
        if show_image or save_dir is not None:
            assert 'ori_image' in data
            img0 = data['ori_image'].numpy()[0]
            online_im = mot_vis.plot_tracking(
                img0,
                online_tlwhs,
                online_ids,
                frame_id=frame_id,
                fps=1. / average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(
                os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                online_im)