convert_dataset.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json

from .....utils.errors import ConvertFailedError
from .....utils.file_interface import custom_open


def check_src_dataset(root_dir, dataset_type):
    """Check the source dataset format validity."""
    if dataset_type in ("LabelMe",):
        anno_suffix = ".json"
    else:
        raise ConvertFailedError(
            message=f"Dataset format conversion failed! Datasets in {dataset_type} format are not supported. "
            "Only the LabelMe format is currently supported."
        )

    err_msg_prefix = (
        f"Dataset format conversion failed! Please check the dataset to be converted "
        f"against the `{dataset_type} format dataset example` above."
    )
    for anno in ["label.txt", "annotations", "images"]:
        src_anno_path = os.path.join(root_dir, anno)
        if not os.path.exists(src_anno_path):
            raise ConvertFailedError(
                message=f"{err_msg_prefix} Make sure {src_anno_path} exists."
            )
    return None


def convert(dataset_type, input_dir):
    """Convert the dataset to the multilabel format."""
    # Validate the source layout before converting.
    check_src_dataset(input_dir, dataset_type)
    if dataset_type in ("LabelMe",):
        convert_labelme_dataset(input_dir)
    else:
        raise ConvertFailedError(
            message=f"Dataset format conversion failed! Datasets in {dataset_type} format are not supported. "
            "Only the LabelMe format is currently supported."
        )


def convert_labelme_dataset(root_dir):
    """Convert a LabelMe dataset into train/gallery/query list files."""
    image_dir = os.path.join(root_dir, "images")
    anno_path = os.path.join(root_dir, "annotations")
    label_path = os.path.join(root_dir, "label.txt")

    # Split ratios (in percent) for the three subsets.
    train_rate = 50
    gallery_rate = 30
    query_rate = 20
    tags = ["train", "gallery", "query"]

    # Map each label name to its (string) index, in label.txt order.
    label_dict = {}
    image_files = []
    with custom_open(label_path, "r") as f:
        lines = f.readlines()
        for idx, line in enumerate(lines):
            line = line.strip()
            label_dict[line] = str(idx)

    # Collect "images/<file name> <label id>" lines from the LabelMe JSON files.
    for json_file in os.listdir(anno_path):
        with custom_open(os.path.join(anno_path, json_file), "r") as f:
            data = json.load(f)
        # imagePath is expected to look like "../images/<file name>".
        filename = data["imagePath"].strip().split("/")[2]
        image_path = os.path.join("images", filename)
        for label, value in data["flags"].items():
            if value:
                image_files.append(f"{image_path} {label_dict[label]}\n")

    # Write the subset list files according to the split ratios.
    start = 0
    image_num = len(image_files)
    rate_list = [train_rate, gallery_rate, query_rate]
    for i, tag in enumerate(tags):
        rate = rate_list[i]
        if rate == 0:
            continue
        end = start + round(image_num * rate / 100)
        # The last non-zero split takes all remaining samples.
        if sum(rate_list[i + 1 :]) == 0:
            end = image_num
        txt_file = os.path.abspath(os.path.join(root_dir, tag + ".txt"))
        with custom_open(txt_file, "w") as f:
            for idx in range(start, end):
                f.write(image_files[idx])
        start = end
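
A minimal usage sketch, assuming the LabelMe layout that check_src_dataset enforces: an images/ directory, an annotations/ directory of per-image LabelMe JSON files whose "flags" mark the labels that apply, and a label.txt listing one label name per line. The import path and dataset directory below are placeholders, not part of this module:

# usage_sketch.py -- hypothetical driver script; adjust the import to the real package path.
from convert_dataset import convert  # placeholder import of the module above

if __name__ == "__main__":
    # The dataset root must contain images/, annotations/ and label.txt;
    # check_src_dataset() raises ConvertFailedError otherwise.
    convert("LabelMe", "./my_labelme_dataset")  # hypothetical dataset path

    # Each annotation JSON is expected to look roughly like:
    #   {"imagePath": "../images/0001.jpg", "flags": {"cat": true, "dog": false}}
    # After conversion the dataset root also contains train.txt, gallery.txt and
    # query.txt, where each line is "images/<file name> <label id>" and the
    # samples are split roughly 50/30/20 in listing order.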