# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

import numpy as np
import paddle
import paddle.nn.functional as F
from scipy import interpolate

from .map_utils import ap_per_class
from .metrics import Metric
from .mot_eval_utils import MOTEvaluator
from paddlex.ppdet.modeling.bbox_utils import bbox_iou_np_expand
from paddlex.ppdet.utils.logger import setup_logger

logger = setup_logger(__name__)

__all__ = ['JDEDetMetric', 'JDEReIDMetric', 'MOTMetric']


class JDEDetMetric(Metric):
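    """Detection mAP metric for JDE/FairMOT-style single-class (pedestrian)
    models.

    Greedily matches each predicted box to the ground-truth box of largest
    IoU, counts a true positive when the IoU exceeds `overlap_thresh` and the
    ground truth is not already claimed, and averages the per-update AP.

    A minimal usage sketch (the batch iterable below is hypothetical; in
    practice `inputs`/`outputs` come from the eval dataloader and the model):

        metric = JDEDetMetric(overlap_thresh=0.5)
        for inputs, outputs in eval_batches:  # hypothetical iterable
            metric.update(inputs, outputs)
        metric.accumulate()
        metric.log()
    """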
    def __init__(self, overlap_thresh=0.5):
        self.overlap_thresh = overlap_thresh
        self.reset()

    def reset(self):
        self.AP_accum = np.zeros(1)
        self.AP_accum_count = np.zeros(1)

    def update(self, inputs, outputs):
        bboxes = outputs['bbox'][:, 2:].numpy()
        scores = outputs['bbox'][:, 1].numpy()
        labels = outputs['bbox'][:, 0].numpy()
        bbox_lengths = outputs['bbox_num'].numpy()
        if bboxes.shape[0] == 1 and bboxes.sum() == 0.0:
            return
        gt_boxes = inputs['gt_bbox'].numpy()[0]
        gt_labels = inputs['gt_class'].numpy()[0]
        if gt_labels.shape[0] == 0:
            return

        correct = []
        detected = []
        for i in range(bboxes.shape[0]):
            obj_pred = 0
            pred_bbox = bboxes[i].reshape(1, 4)
            # Compute IoU with target boxes
            iou = bbox_iou_np_expand(pred_bbox, gt_boxes, x1y1x2y2=True)[0]
            # Extract index of largest overlap
            best_i = np.argmax(iou)
            # If the overlap exceeds the threshold, the class matches and the
            # ground-truth box has not been claimed yet, count a true positive
            if iou[best_i] > self.overlap_thresh and obj_pred == gt_labels[
                    best_i] and best_i not in detected:
                correct.append(1)
                detected.append(best_i)
            else:
                correct.append(0)

        # Compute Average Precision (AP) per class
        target_cls = list(gt_labels.T[0])
        AP, AP_class, R, P = ap_per_class(
            tp=correct,
            conf=scores,
            pred_cls=np.zeros_like(scores),
            target_cls=target_cls)
        self.AP_accum_count += np.bincount(AP_class, minlength=1)
        self.AP_accum += np.bincount(AP_class, minlength=1, weights=AP)
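
    # Note: with a single class (`pred_cls` is all zeros) the two bincount
    # accumulators reduce to a running sum of per-update AP and a running
    # update count for class 0, so `accumulate` below is simply their ratio.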
    def accumulate(self):
        logger.info("Accumulating evaluation results...")
        self.map_stat = self.AP_accum[0] / (self.AP_accum_count[0] + 1E-16)

    def log(self):
        map_stat = 100. * self.map_stat
        logger.info("mAP({:.2f}) = {:.2f}%".format(self.overlap_thresh,
                                                   map_stat))

    def get_results(self):
        return self.map_stat


class JDEReIDMetric(Metric):
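    """Re-identification (embedding) metric for JDE/FairMOT models.

    Collects L2-normalized identity embeddings during evaluation, then scores
    their pairwise cosine similarities as a verification task: for each
    requested false-accept rate (FAR) it reports the true-accept rate (TPR)
    read off the ROC curve.

    A minimal usage sketch (`outputs` is assumed to be an iterable of
    per-detection tensors whose last element is the identity label):

        metric = JDEReIDMetric()
        for inputs, outputs in eval_batches:  # hypothetical iterable
            metric.update(inputs, outputs)
        metric.accumulate()
        metric.log()  # logs one 'TPR@FAR=...' line per FAR level
    """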
    def __init__(self, far_levels=[1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]):
        self.far_levels = far_levels
        self.reset()

    def reset(self):
        self.embedding = []
        self.id_labels = []
        self.eval_results = {}

    def update(self, inputs, outputs):
        for out in outputs:
            feat, label = out[:-1].clone().detach(), int(out[-1])
            if label != -1:
                self.embedding.append(feat)
                self.id_labels.append(label)

    def accumulate(self):
        logger.info("Computing pairwise similarity...")
        assert len(self.embedding) == len(self.id_labels)
        if len(self.embedding) < 1:
            return None
        embedding = paddle.stack(self.embedding, axis=0)
        emb = F.normalize(embedding, axis=1).numpy()
        pdist = np.matmul(emb, emb.T)

        id_labels = np.array(self.id_labels, dtype='int32').reshape(-1, 1)
        n = len(id_labels)
        id_lbl = np.tile(id_labels, n).T
        gt = id_lbl == id_lbl.T

        # Keep only the strict upper triangle: each pair once, no self-pairs
        up_triangle = np.where(np.triu(pdist) - np.eye(n) * pdist != 0)
        pdist = pdist[up_triangle]
        gt = gt[up_triangle]

        # Lazy import so sklearn is only required when this metric is used
        from sklearn import metrics
        far, tar, threshold = metrics.roc_curve(gt, pdist)
        interp = interpolate.interp1d(far, tar)
        tar_at_far = [interp(x) for x in self.far_levels]
        for f, fa in enumerate(self.far_levels):
            self.eval_results['TPR@FAR={:.7f}'.format(fa)] = ' {:.4f}'.format(
                tar_at_far[f])
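
    # Note: `roc_curve` treats same-identity pairs as positives and the cosine
    # similarity as the score; TPR@FAR is then read off the curve by linear
    # interpolation. E.g. TPR@FAR=0.001 is the fraction of same-identity pairs
    # accepted at a threshold where only 0.1% of cross-identity pairs pass.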
    def log(self):
        for k, v in self.eval_results.items():
            logger.info('{}: {}'.format(k, v))

    def get_results(self):
        return self.eval_results


class MOTMetric(Metric):
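    """MOT challenge metrics (MOTA, IDF1, etc.) computed with `motmetrics`.

    Each `update` call evaluates one sequence's tracking result file against
    its ground truth via `MOTEvaluator`; `accumulate` renders the
    cross-sequence summary table.

    A minimal usage sketch (paths and sequence names are hypothetical
    placeholders):

        metric = MOTMetric(save_summary=True)
        for seq in ['MOT16-02', 'MOT16-04']:
            metric.update(data_root, seq, 'mot', result_root,
                          os.path.join(result_root, seq + '.txt'))
        metric.accumulate()
        metric.log()
    """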
    def __init__(self, save_summary=False):
        self.save_summary = save_summary
        self.MOTEvaluator = MOTEvaluator
        self.result_root = None
        self.reset()

    def reset(self):
        self.accs = []
        self.seqs = []

    def update(self, data_root, seq, data_type, result_root, result_filename):
        evaluator = self.MOTEvaluator(data_root, seq, data_type)
        self.accs.append(evaluator.eval_file(result_filename))
        self.seqs.append(seq)
        self.result_root = result_root

    def accumulate(self):
        import motmetrics as mm
        import openpyxl
        metrics = mm.metrics.motchallenge_metrics
        mh = mm.metrics.create()
        summary = self.MOTEvaluator.get_summary(self.accs, self.seqs, metrics)
        self.strsummary = mm.io.render_summary(
            summary,
            formatters=mh.formatters,
            namemap=mm.io.motchallenge_metric_names)
        if self.save_summary:
            self.MOTEvaluator.save_summary(
                summary, os.path.join(self.result_root, 'summary.xlsx'))

    def log(self):
        print(self.strsummary)

    def get_results(self):
        return self.strsummary
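

# Note: `MOTMetric.accumulate` needs the optional `motmetrics` and `openpyxl`
# packages (e.g. `pip install motmetrics openpyxl`); they are imported lazily
# above so detection-only or ReID-only evaluation does not require them.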