__init__.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import os.path as osp
from collections import defaultdict, Counter
from PIL import Image
import json

from ...base import BaseDatasetChecker
from .dataset_src import check, split_dataset, deep_analyse, convert
from ..model_list import MODELS


class TextRecDatasetChecker(BaseDatasetChecker):
    """Dataset Checker for Text Recognition Model"""

    entities = MODELS
    sample_num = 10

    def convert_dataset(self, src_dataset_dir: str) -> str:
        """convert the dataset from other type to specified type

        Args:
            src_dataset_dir (str): the root directory of dataset.

        Returns:
            str: the root directory of converted dataset.
        """
        return convert(
            self.check_dataset_config.convert.src_dataset_type, src_dataset_dir
        )

    def split_dataset(self, src_dataset_dir: str) -> str:
        """repartition the train and validation dataset

        Args:
            src_dataset_dir (str): the root directory of dataset.

        Returns:
            str: the root directory of the split dataset.
        """
        return split_dataset(
            src_dataset_dir,
            self.check_dataset_config.split.train_percent,
            self.check_dataset_config.split.val_percent,
        )

    def check_dataset(self, dataset_dir: str, sample_num: int = sample_num) -> dict:
        """check if the dataset meets the specifications and get dataset summary

        Args:
            dataset_dir (str): the root directory of dataset.
            sample_num (int): the number of samples to be checked.

        Returns:
            dict: dataset summary.
        """
        return check(
            dataset_dir,
            self.global_config.output,
            sample_num=sample_num,
            dataset_type=self.get_dataset_type(),
        )

    def analyse(self, dataset_dir: str) -> dict:
        """deep analyse dataset

        Args:
            dataset_dir (str): the root directory of dataset.

        Returns:
            dict: the deep analysis results.
        """
        if self.global_config["model"] in ["LaTeX_OCR_rec"]:
            datatype = "LaTeXOCRDataset"
        else:
            datatype = "MSTextRecDataset"
        return deep_analyse(dataset_dir, self.output, datatype=datatype)

    def get_show_type(self) -> str:
        """get the show type of dataset

        Returns:
            str: show type
        """
        return "image"

    def get_dataset_type(self) -> str:
        """return the dataset type

        Returns:
            str: dataset type
        """
        if self.global_config["model"] in ["LaTeX_OCR_rec"]:
            return "LaTeXOCRDataset"
        else:
            return "MSTextRecDataset"
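
For context, a minimal usage sketch of this checker (hypothetical: the constructor signature, config layout, and dataset path below are assumptions, since they are defined by BaseDatasetChecker and the surrounding framework, which are not shown in this file):

# Hypothetical driver code; assumes the base class accepts a parsed config that
# exposes `global_config` and `check_dataset_config` as attribute-style objects.
checker = TextRecDatasetChecker(config)

# Validate the dataset layout and collect a summary (sampled images, label stats).
summary = checker.check_dataset("path/to/text_rec_dataset")

# Run the deeper analysis; its results are written to the configured output directory.
analysis = checker.analyse("path/to/text_rec_dataset")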