check_dataset.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import json
import os.path as osp
from collections import defaultdict

from .....utils.errors import DatasetFileNotFoundError, CheckFailedError
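
# Expected PubTabNet-style layout under `dataset_dir` (inferred from the
# parsing code in `check` below):
#     dataset_dir/
#         train.txt    # required; one JSON annotation per line
#         val.txt      # required; one JSON annotation per line
#         <images>     # referenced by each annotation's `filename` field
# Each annotation line carries `filename`, `html.cells`, and
# `html.structure.tokens`, with one cell entry per cell token in the
# structure sequence.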


def check(dataset_dir,
          output_dir,
          dataset_type="PubTabTableRecDataset",
          sample_num=10):
    """
    Check whether the dataset is valid.

    Returns a metadata dict with the train/val sample counts and up to
    `sample_num` sample image paths (relative to `output_dir`) per split.
    """
    if dataset_type == 'PubTabTableRecDataset':
        # Custom dataset
        if not osp.exists(dataset_dir) or not osp.isdir(dataset_dir):
            raise DatasetFileNotFoundError(file_path=dataset_dir)

        tags = ['train', 'val']
        max_recorded_sample_cnts = 50
        sample_cnts = dict()
        sample_paths = defaultdict(list)
        for tag in tags:
            file_list = osp.join(dataset_dir, f'{tag}.txt')
            if not osp.exists(file_list):
                if tag in ('train', 'val'):
                    # train and val file lists must exist
                    raise DatasetFileNotFoundError(
                        file_path=file_list,
                        solution=f"Ensure that both `train.txt` and `val.txt` exist in {dataset_dir}"
                    )
                else:
                    # tag == 'test'
                    continue
            else:
                with open(file_list, 'r', encoding='utf-8') as f:
                    all_lines = f.readlines()
                sample_cnts[tag] = len(all_lines)
                for line in all_lines:
                    info = json.loads(line.strip("\n"))
                    file_name = info['filename']
                    cells = info['html']['cells'].copy()
                    structure = info['html']['structure']['tokens'].copy()
                    img_path = osp.join(dataset_dir, file_name)
                    if len(sample_paths[tag]) < max_recorded_sample_cnts:
                        sample_paths[tag].append(
                            os.path.relpath(img_path, output_dir))
                    if not os.path.exists(img_path):
                        raise DatasetFileNotFoundError(file_path=img_path)
                    # Every annotated cell must correspond to exactly one
                    # cell token in the structure sequence.
                    boxes_num = len(cells)
                    tokens_num = sum([
                        structure.count(x)
                        for x in ['<td>', '<td', '<eb></eb>', '<td></td>']
                    ])
                    if boxes_num != tokens_num:
                        raise CheckFailedError(
                            f"The number of cells must be consistent with the "
                            f"number of tokens, but the number of cells is "
                            f"{boxes_num} and the number of tokens is {tokens_num}."
                        )

        meta = {}
        meta['train_samples'] = sample_cnts['train']
        meta['train_sample_paths'] = sample_paths['train'][:sample_num]
        meta['val_samples'] = sample_cnts['val']
        meta['val_sample_paths'] = sample_paths['val'][:sample_num]
        return meta
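

if __name__ == "__main__":
    # Usage sketch (an illustrative addition, not part of the original
    # module): build a minimal one-sample dataset in a temporary directory
    # and run `check` on it. Because of the relative imports above, this
    # only runs when the module is executed inside its package
    # (e.g. `python -m <package>.check_dataset`). All paths and record
    # contents below are hypothetical.
    import tempfile

    demo_dir = tempfile.mkdtemp()
    record = {
        "filename": "table_0001.png",
        "html": {
            # Two cells, matching the two `<td></td>` tokens below.
            "cells": [{"tokens": ["1"]}, {"tokens": ["2"]}],
            "structure": {
                "tokens": ["<table>", "<tr>", "<td></td>", "<td></td>",
                           "</tr>", "</table>"]
            },
        },
    }
    for tag in ("train", "val"):
        with open(osp.join(demo_dir, f"{tag}.txt"), "w", encoding="utf-8") as f:
            f.write(json.dumps(record) + "\n")
    # Empty stub file standing in for the referenced image.
    open(osp.join(demo_dir, "table_0001.png"), "wb").close()
    print(check(demo_dir, output_dir=demo_dir))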