processors.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import re
from typing import List

import cv2
import numpy as np

from ...utils.benchmark import benchmark


@benchmark.timeit
class OCRReisizeNormImg:
    """Resize and normalize images for OCR text recognition."""

    def __init__(self, rec_image_shape=[3, 48, 320], input_shape=None):
        super().__init__()
        self.rec_image_shape = rec_image_shape
        self.input_shape = input_shape
        self.max_imgW = 3200

    def resize_norm_img(self, img, max_wh_ratio):
        """Resize an image to the target height, normalize it to [-1, 1],
        and right-pad it with zeros up to the target width."""
        imgC, imgH, imgW = self.rec_image_shape
        assert imgC == img.shape[2]
        # The target width grows with the widest aspect ratio seen so far,
        # capped at self.max_imgW to bound memory use.
        imgW = int(imgH * max_wh_ratio)
        if imgW > self.max_imgW:
            resized_image = cv2.resize(img, (self.max_imgW, imgH))
            resized_w = self.max_imgW
            imgW = self.max_imgW
        else:
            h, w = img.shape[:2]
            ratio = w / float(h)
            # Preserve the aspect ratio unless it would exceed the target width.
            if math.ceil(imgH * ratio) > imgW:
                resized_w = imgW
            else:
                resized_w = int(math.ceil(imgH * ratio))
            resized_image = cv2.resize(img, (resized_w, imgH))
        # HWC -> CHW, scale to [0, 1], then shift/scale to [-1, 1].
        resized_image = resized_image.astype("float32")
        resized_image = resized_image.transpose((2, 0, 1)) / 255
        resized_image -= 0.5
        resized_image /= 0.5
        padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
        padding_im[:, :, 0:resized_w] = resized_image
        return padding_im

    def __call__(self, imgs):
        """Apply resizing to a list of images."""
        if self.input_shape is None:
            return [self.resize(img) for img in imgs]
        else:
            return [self.staticResize(img) for img in imgs]

    def resize(self, img):
        """Resize dynamically, widening the target to fit the image's aspect ratio."""
        imgC, imgH, imgW = self.rec_image_shape
        max_wh_ratio = imgW / imgH
        h, w = img.shape[:2]
        wh_ratio = w * 1.0 / h
        max_wh_ratio = max(max_wh_ratio, wh_ratio)
        img = self.resize_norm_img(img, max_wh_ratio)
        return img

    def staticResize(self, img):
        """Resize to the fixed input_shape and normalize to [-1, 1]."""
        imgC, imgH, imgW = self.input_shape
        resized_image = cv2.resize(img, (int(imgW), int(imgH)))
        resized_image = resized_image.transpose((2, 0, 1)) / 255
        resized_image -= 0.5
        resized_image /= 0.5
        return resized_image
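

# Illustrative usage sketch (not part of the original module): exercise
# OCRReisizeNormImg on a synthetic crop to show the aspect-preserving resize
# and zero padding. The `_demo_` name and the dummy data are hypothetical.
def _demo_resize_norm():
    """Resize a random 48x160 BGR crop and check the padded CHW output."""
    rng = np.random.default_rng(0)
    crop = rng.integers(0, 256, size=(48, 160, 3), dtype=np.uint8)
    resize_norm = OCRReisizeNormImg(rec_image_shape=[3, 48, 320])
    out = resize_norm([crop])[0]  # list in, list out
    # The crop keeps its width of 160 pixels and is zero-padded to 320.
    assert out.shape == (3, 48, 320)
    assert out.dtype == np.float32
    return out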


@benchmark.timeit
class BaseRecLabelDecode:
    """Convert between text-label and text-index."""

    def __init__(self, character_str=None, use_space_char=True):
        super().__init__()
        self.reverse = False
        character_list = (
            list(character_str)
            if character_str is not None
            else list("0123456789abcdefghijklmnopqrstuvwxyz")
        )
        if use_space_char:
            character_list.append(" ")
        character_list = self.add_special_char(character_list)
        self.dict = {}
        for i, char in enumerate(character_list):
            self.dict[char] = i
        self.character = character_list

    def pred_reverse(self, pred):
        """Reverse the prediction for right-to-left scripts while keeping runs
        of Latin characters, digits, and common symbols in reading order."""
        pred_re = []
        c_current = ""
        for c in pred:
            if not bool(re.search("[a-zA-Z0-9 :*./%+-]", c)):
                if c_current != "":
                    pred_re.append(c_current)
                pred_re.append(c)
                c_current = ""
            else:
                c_current += c
        if c_current != "":
            pred_re.append(c_current)
        return "".join(pred_re[::-1])

    def add_special_char(self, character_list):
        """add_special_char"""
        return character_list

    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
        """Convert text-index into text-label."""
        result_list = []
        ignored_tokens = self.get_ignored_tokens()
        batch_size = len(text_index)
        for batch_idx in range(batch_size):
            selection = np.ones(len(text_index[batch_idx]), dtype=bool)
            if is_remove_duplicate:
                # Collapse consecutive duplicate indices (CTC-style decoding).
                selection[1:] = text_index[batch_idx][1:] != text_index[batch_idx][:-1]
            # Drop ignored tokens (e.g. the CTC blank at index 0).
            for ignored_token in ignored_tokens:
                selection &= text_index[batch_idx] != ignored_token
            char_list = [
                self.character[text_id] for text_id in text_index[batch_idx][selection]
            ]
            if text_prob is not None:
                conf_list = text_prob[batch_idx][selection]
            else:
                conf_list = [1] * len(selection)
            if len(conf_list) == 0:
                conf_list = [0]
            text = "".join(char_list)
            if self.reverse:  # for arabic rec
                text = self.pred_reverse(text)
            result_list.append((text, np.mean(conf_list).tolist()))
        return result_list

    def get_ignored_tokens(self):
        """get_ignored_tokens"""
        return [0]  # for ctc blank

    def __call__(self, pred):
        """apply"""
        # If the model returns multiple outputs, decode the last one.
        if isinstance(pred, tuple) or isinstance(pred, list):
            pred = pred[-1]
        preds = np.array(pred)
        preds_idx = preds.argmax(axis=-1)
        preds_prob = preds.max(axis=-1)
        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True)
        texts = []
        scores = []
        for t in text:
            texts.append(t[0])
            scores.append(t[1])
        return texts, scores


@benchmark.timeit
class CTCLabelDecode(BaseRecLabelDecode):
    """Convert between text-label and text-index for CTC-based recognizers."""

    def __init__(self, character_list=None, use_space_char=True):
        super().__init__(character_list, use_space_char=use_space_char)

    def __call__(self, pred):
        """apply"""
        preds = np.array(pred[0])
        preds_idx = preds.argmax(axis=-1)
        preds_prob = preds.max(axis=-1)
        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True)
        texts = []
        scores = []
        for t in text:
            texts.append(t[0])
            scores.append(t[1])
        return texts, scores

    def add_special_char(self, character_list):
        """Prepend the CTC blank token so that it occupies index 0,
        matching get_ignored_tokens()."""
        character_list = ["blank"] + character_list
        return character_list
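

# Illustrative sketch (not part of the original module): run CTCLabelDecode on
# hand-written pseudo-probabilities to show how repeated indices collapse and
# the blank (index 0) is dropped. The `_demo_` name and the toy distribution
# are hypothetical.
def _demo_ctc_decode():
    """Decode one sequence over the alphabet ["blank", "a", "b"]."""
    decoder = CTCLabelDecode(character_list="ab", use_space_char=False)
    # One batch item, five time steps, three classes (blank, "a", "b").
    # Argmax path: [1, 1, 0, 2, 2] -> collapse duplicates -> [1, 0, 2]
    # -> drop blank -> "ab".
    probs = np.array(
        [[
            [0.1, 0.8, 0.1],
            [0.1, 0.7, 0.2],
            [0.9, 0.05, 0.05],
            [0.1, 0.2, 0.7],
            [0.1, 0.1, 0.8],
        ]],
        dtype=np.float32,
    )
    texts, scores = decoder([probs])  # the decoder reads pred[0]
    assert texts == ["ab"]
    return texts, scores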


@benchmark.timeit
class ToBatch:
    """A class for batching and padding images to a uniform width."""

    def __pad_imgs(self, imgs: List[np.ndarray]) -> List[np.ndarray]:
        """Pad images to the maximum width in the batch.

        Args:
            imgs (list of np.ndarrays): List of images to pad.

        Returns:
            list of np.ndarrays: List of padded images.
        """
        max_width = max(img.shape[2] for img in imgs)
        padded_imgs = []
        for img in imgs:
            _, height, width = img.shape
            pad_width = max_width - width
            padded_img = np.pad(
                img,
                ((0, 0), (0, 0), (0, pad_width)),
                mode="constant",
                constant_values=0,
            )
            padded_imgs.append(padded_img)
        return padded_imgs

    def __call__(self, imgs: List[np.ndarray]) -> List[np.ndarray]:
        """Call method to pad images and stack them into a batch.

        Args:
            imgs (list of np.ndarrays): List of images to process.

        Returns:
            list of np.ndarrays: List containing a stacked tensor of the padded images.
        """
        imgs = self.__pad_imgs(imgs)
        return [np.stack(imgs, axis=0).astype(dtype=np.float32, copy=False)]
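

# Illustrative end-to-end sketch (not part of the original module): how these
# processors might be chained around a recognition model. `_demo_pipeline` and
# the nested `run_model` are hypothetical; `run_model` only fabricates scores
# of the expected (N, time_steps, num_classes) shape in place of real inference.
def _demo_pipeline(crops: List[np.ndarray]):
    """Preprocess a list of BGR crops, fake a model call, and decode the output."""
    resize_norm = OCRReisizeNormImg(rec_image_shape=[3, 48, 320])
    to_batch = ToBatch()
    decoder = CTCLabelDecode(character_list="0123456789abcdefghijklmnopqrstuvwxyz")

    # Per-crop CHW tensors may have different widths; ToBatch pads and stacks them.
    batch = to_batch(resize_norm(crops))[0]  # (N, 3, 48, W), float32

    def run_model(x):
        rng = np.random.default_rng(0)
        return rng.random((x.shape[0], x.shape[3] // 8, len(decoder.character)))

    logits = run_model(batch)
    texts, scores = decoder([logits])
    return texts, scores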