download.py

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import os.path as osp
import sys
import yaml
import shutil
import requests
import tqdm
import hashlib
import base64
import binascii
import tarfile
import zipfile

from .voc_utils import create_list
from paddlex.ppdet.core.workspace import BASE_KEY
from .logger import setup_logger
logger = setup_logger(__name__)

__all__ = [
    'get_weights_path', 'get_dataset_path', 'get_config_path',
    'download_dataset', 'create_voc_list'
]

WEIGHTS_HOME = osp.expanduser("~/.cache/paddle/weights")
DATASET_HOME = osp.expanduser("~/.cache/paddle/dataset")
CONFIGS_HOME = osp.expanduser("~/.cache/paddle/configs")

# dict of {dataset_name: (download_info, sub_dirs)}
# download info: [(url, md5sum)]
DATASETS = {
    'coco': ([
        (
            'http://images.cocodataset.org/zips/train2017.zip',
            'cced6f7f71b7629ddf16f17bbcfab6b2', ),
        (
            'http://images.cocodataset.org/zips/val2017.zip',
            '442b8da7639aecaf257c1dceb8ba8c80', ),
        (
            'http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
            'f4bbac642086de4f52a3fdda2de5fa2c', ),
    ], ["annotations", "train2017", "val2017"]),
    'voc': ([
        (
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
            '6cd6e144f989b92b3379bac3b3de84fd', ),
        (
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
            'c52e279531787c972589f7e41ab4ae64', ),
        (
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',
            'b6e924de25625d8de591ea690078ad9f', ),
    ], ["VOCdevkit/VOC2012", "VOCdevkit/VOC2007"]),
    'wider_face': ([
        (
            'https://dataset.bj.bcebos.com/wider_face/WIDER_train.zip',
            '3fedf70df600953d25982bcd13d91ba2', ),
        (
            'https://dataset.bj.bcebos.com/wider_face/WIDER_val.zip',
            'dfa7d7e790efa35df3788964cf0bbaea', ),
        (
            'https://dataset.bj.bcebos.com/wider_face/wider_face_split.zip',
            'a4a898d6193db4b9ef3260a68bad0dc7', ),
    ], ["WIDER_train", "WIDER_val", "wider_face_split"]),
    'fruit': ([(
        'https://dataset.bj.bcebos.com/PaddleDetection_demo/fruit.tar',
        'baa8806617a54ccf3685fa7153388ae6', ), ],
              ['Annotations', 'JPEGImages']),
    'roadsign_voc': ([(
        'https://paddlemodels.bj.bcebos.com/object_detection/roadsign_voc.tar',
        '8d629c0f880dd8b48de9aeff44bf1f3e', ), ], ['annotations', 'images']),
    'roadsign_coco': ([(
        'https://paddlemodels.bj.bcebos.com/object_detection/roadsign_coco.tar',
        '49ce5a9b5ad0d6266163cd01de4b018e', ), ], ['annotations', 'images']),
    'mot': (),
    'objects365': ()
}

DOWNLOAD_RETRY_LIMIT = 3

PPDET_WEIGHTS_DOWNLOAD_URL_PREFIX = 'https://paddledet.bj.bcebos.com/'

def parse_url(url):
    url = url.replace("ppdet://", PPDET_WEIGHTS_DOWNLOAD_URL_PREFIX)
    return url
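
# Illustrative sketch (not part of the original module): parse_url expands the
# "ppdet://" scheme to the public BCE bucket prefix and passes any other URL
# through unchanged. The model filename below is hypothetical.
#
#   parse_url('ppdet://models/yolov3_darknet53.pdparams')
#   # -> 'https://paddledet.bj.bcebos.com/models/yolov3_darknet53.pdparams'
#   parse_url('https://example.com/weights.pdparams')
#   # -> 'https://example.com/weights.pdparams'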

def get_weights_path(url):
    """Get weights path from WEIGHTS_HOME; if it does not exist,
    download it from url.
    """
    url = parse_url(url)
    path, _ = get_path(url, WEIGHTS_HOME)
    return path
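
# Usage sketch (hedged; the URL below is hypothetical): the returned path lives
# under WEIGHTS_HOME (~/.cache/paddle/weights), and the file is downloaded only
# if it is not already cached with a matching md5.
#
#   weights = get_weights_path('ppdet://models/yolov3_darknet53.pdparams')
#   # e.g. ~/.cache/paddle/weights/yolov3_darknet53.pdparams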

def get_config_path(url):
    """Get config path from CONFIGS_HOME; if it does not exist,
    download it from url.
    """
    url = parse_url(url)
    path, _ = get_path(url, CONFIGS_HOME)
    _download_config(path, url, CONFIGS_HOME)
    return path


def _download_config(cfg_path, cfg_url, cur_dir):
    with open(cfg_path) as f:
        cfg = yaml.load(f, Loader=yaml.Loader)

    # download dependent base ymls
    if BASE_KEY in cfg:
        base_ymls = list(cfg[BASE_KEY])
        for base_yml in base_ymls:
            if base_yml.startswith("~"):
                base_yml = os.path.expanduser(base_yml)
                relpath = osp.relpath(base_yml, cfg_path)
            if not base_yml.startswith('/'):
                relpath = base_yml
                base_yml = os.path.join(os.path.dirname(cfg_path), base_yml)

            if osp.isfile(base_yml):
                logger.debug("Found _BASE_ config: {}".format(base_yml))
                continue

            # download to CONFIGS_HOME firstly
            base_yml_url = osp.join(osp.split(cfg_url)[0], relpath)
            path, _ = get_path(base_yml_url, CONFIGS_HOME)

            # move from CONFIGS_HOME to dst_path to restore config directory structure
            dst_path = osp.join(cur_dir, relpath)
            dst_dir = osp.split(dst_path)[0]
            if not osp.isdir(dst_dir):
                os.makedirs(dst_dir)
            shutil.move(path, dst_path)

            # perform base yml download recursively
            _download_config(dst_path, base_yml_url, osp.split(dst_path)[0])

def get_dataset_path(path, annotation, image_dir):
    """
    If path exists, return path.
    Otherwise, get the dataset path from DATASET_HOME; if it does not
    exist, download it.
    """
    if _dataset_exists(path, annotation, image_dir):
        return path

    logger.info(
        "Dataset {} is not valid for reason above, try searching {} or "
        "downloading dataset...".format(osp.realpath(path), DATASET_HOME))

    data_name = os.path.split(path.strip().lower())[-1]
    for name, dataset in DATASETS.items():
        if data_name == name:
            logger.debug("Parse dataset_dir {} as dataset "
                         "{}".format(path, name))
            if name == 'objects365':
                raise NotImplementedError(
                    "Dataset {} is not valid for automatic download. "
                    "Please apply and download the dataset from "
                    "https://www.objects365.org/download.html".format(name))
            data_dir = osp.join(DATASET_HOME, name)

            if name == 'mot':
                if osp.exists(path) or osp.exists(data_dir):
                    return data_dir
                else:
                    raise NotImplementedError(
                        "Dataset {} is not valid for automatic download. "
                        "Please apply and download the dataset following docs/tutorials/PrepareMOTDataSet.md".
                        format(name))

            # For voc, only check dirs VOCdevkit/VOC2012 and VOCdevkit/VOC2007
            if name in ['voc', 'fruit', 'roadsign_voc']:
                exists = True
                for sub_dir in dataset[1]:
                    check_dir = osp.join(data_dir, sub_dir)
                    if osp.exists(check_dir):
                        logger.info("Found {}".format(check_dir))
                    else:
                        exists = False
                if exists:
                    return data_dir

            # voc-style datasets were checked above; reaching here means they do not exist
            check_exist = name != 'voc' and name != 'fruit' and name != 'roadsign_voc'
            for url, md5sum in dataset[0]:
                get_path(url, data_dir, md5sum, check_exist)

            # voc should create the file list after download
            if name == 'voc':
                create_voc_list(data_dir)
            return data_dir

    # did not match any dataset in DATASETS
    raise ValueError(
        "Dataset {} is not valid and cannot parse dataset type "
        "'{}' for automatic downloading, which only supports "
        "'voc', 'coco', 'wider_face', 'fruit', 'roadsign_voc' and 'mot' currently".
        format(path, osp.split(path)[-1]))
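
# Resolution sketch (illustrative; the paths are hypothetical): a dataset_dir
# whose last path component matches a key in DATASETS is redirected into
# DATASET_HOME and auto-downloaded if missing; anything else raises ValueError.
#
#   get_dataset_path('dataset/roadsign_voc', 'train.txt', 'images')
#   # -> ~/.cache/paddle/dataset/roadsign_voc (downloading the tar if needed)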

def create_voc_list(data_dir, devkit_subdir='VOCdevkit'):
    logger.debug("Create voc file list...")
    devkit_dir = osp.join(data_dir, devkit_subdir)
    years = ['2007', '2012']

    # NOTE: since the VOC dataset is auto-downloaded, the default VOC
    # label list should be used, so do not generate label_list.txt here.
    # For the default labels, see ../data/source/voc.py
    create_list(devkit_dir, years, data_dir)
    logger.debug("Create voc file list finished")

def map_path(url, root_dir):
    # parse the path the download will be decompressed to under root_dir
    fname = osp.split(url)[-1]
    zip_formats = ['.zip', '.tar', '.gz']
    fpath = fname
    for zip_format in zip_formats:
        fpath = fpath.replace(zip_format, '')
    return osp.join(root_dir, fpath)
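
# Mapping sketch (illustrative): map_path strips known archive suffixes from the
# URL's file name and joins the result to root_dir, which is where get_path
# expects the decompressed content to end up.
#
#   map_path('http://images.cocodataset.org/zips/val2017.zip',
#            osp.join(DATASET_HOME, 'coco'))
#   # -> ~/.cache/paddle/dataset/coco/val2017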

def get_path(url, root_dir, md5sum=None, check_exist=True):
    """ Download from the given url to root_dir.
    If the file or directory specified by url already exists under
    root_dir, return the path directly; otherwise download from url,
    decompress it and return the path.

    url (str): download url
    root_dir (str): root dir for downloading, it should be
                    WEIGHTS_HOME or DATASET_HOME
    md5sum (str): md5 sum of the downloaded package
    """
    # parse the path the download will be decompressed to under root_dir
    fullpath = map_path(url, root_dir)

    # Some archives decompress to a directory whose name differs from the
    # archive file name; rename using the following map
    decompress_name_map = {
        "VOCtrainval_11-May-2012": "VOCdevkit/VOC2012",
        "VOCtrainval_06-Nov-2007": "VOCdevkit/VOC2007",
        "VOCtest_06-Nov-2007": "VOCdevkit/VOC2007",
        "annotations_trainval": "annotations"
    }
    for k, v in decompress_name_map.items():
        if fullpath.find(k) >= 0:
            fullpath = osp.join(osp.split(fullpath)[0], v)

    if osp.exists(fullpath) and check_exist:
        if not osp.isfile(fullpath) or \
                _check_exist_file_md5(fullpath, md5sum, url):
            logger.debug("Found {}".format(fullpath))
            return fullpath, True
        else:
            os.remove(fullpath)

    fullname = _download(url, root_dir, md5sum)

    # the new weights format with the 'pdparams' postfix does not
    # need to be decompressed
    if osp.splitext(fullname)[-1] not in ['.pdparams', '.yml']:
        _decompress(fullname)

    return fullpath, False
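
# Usage sketch (hedged; the md5 shown is the checksum listed in DATASETS above):
# get_path returns (local_path, already_existed). For archives the returned path
# is the decompressed directory, renamed via decompress_name_map where needed.
#
#   path, existed = get_path(
#       'http://images.cocodataset.org/zips/val2017.zip',
#       osp.join(DATASET_HOME, 'coco'),
#       md5sum='442b8da7639aecaf257c1dceb8ba8c80')
#   # path -> ~/.cache/paddle/dataset/coco/val2017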

def download_dataset(path, dataset=None):
    if dataset not in DATASETS.keys():
        logger.error("Unknown dataset {}, it should be "
                     "{}".format(dataset, DATASETS.keys()))
        return
    dataset_info = DATASETS[dataset][0]
    for info in dataset_info:
        get_path(info[0], path, info[1], False)
    logger.debug("Download dataset {} finished.".format(dataset))

def _dataset_exists(path, annotation, image_dir):
    """
    Check if the user-defined dataset exists
    """
    if not osp.exists(path):
        logger.warning("Config dataset_dir {} does not exist, "
                       "dataset config is not valid".format(path))
        return False

    if annotation:
        annotation_path = osp.join(path, annotation)
        if not osp.isfile(annotation_path):
            logger.warning("Config annotation {} is not a "
                           "file, dataset config is not "
                           "valid".format(annotation_path))
            return False
    if image_dir:
        image_path = osp.join(path, image_dir)
        if not osp.isdir(image_path):
            logger.warning("Config image_dir {} is not a "
                           "directory, dataset config is not "
                           "valid".format(image_path))
            return False
    return True

def _download(url, path, md5sum=None):
    """
    Download from url, save to path.

    url (str): download url
    path (str): download to given path
    """
    if not osp.exists(path):
        os.makedirs(path)

    fname = osp.split(url)[-1]
    fullname = osp.join(path, fname)
    retry_cnt = 0

    while not (osp.exists(fullname) and _check_exist_file_md5(fullname, md5sum,
                                                              url)):
        if retry_cnt < DOWNLOAD_RETRY_LIMIT:
            retry_cnt += 1
        else:
            raise RuntimeError("Download from {} failed. "
                               "Retry limit reached".format(url))

        logger.info("Downloading {} from {}".format(fname, url))

        # NOTE: windows path join may produce \, which is invalid in a url
        if sys.platform == "win32":
            url = url.replace('\\', '/')

        req = requests.get(url, stream=True)
        if req.status_code != 200:
            raise RuntimeError("Downloading from {} failed with code "
                               "{}!".format(url, req.status_code))

        # To guard against an interrupted download, write to
        # tmp_fullname first and move tmp_fullname to fullname
        # once the download has finished
        tmp_fullname = fullname + "_tmp"
        total_size = req.headers.get('content-length')
        with open(tmp_fullname, 'wb') as f:
            if total_size:
                for chunk in tqdm.tqdm(
                        req.iter_content(chunk_size=1024),
                        total=(int(total_size) + 1023) // 1024,
                        unit='KB'):
                    f.write(chunk)
            else:
                for chunk in req.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
        shutil.move(tmp_fullname, fullname)

    return fullname
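
# Behaviour sketch (illustrative; the URL below is hypothetical): _download
# retries up to DOWNLOAD_RETRY_LIMIT times, streaming to "<fname>_tmp" and only
# renaming to the final name after the full response is written, so a killed
# process never leaves a truncated file under the target name.
#
#   fullname = _download('https://paddledet.bj.bcebos.com/demo.pdparams',
#                        WEIGHTS_HOME)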

def _check_exist_file_md5(filename, md5sum, url):
    # if md5sum is None and the file to check is a weights file,
    # read the md5sum from the url response headers and check it;
    # otherwise check against md5sum directly
    return _md5check_from_url(filename, url) if md5sum is None \
        and filename.endswith('pdparams') \
        else _md5check(filename, md5sum)


def _md5check_from_url(filename, url):
    # For weights hosted on bcebos, the MD5 value is contained
    # in the response header as 'content-md5'
    req = requests.get(url, stream=True)
    content_md5 = req.headers.get('content-md5')
    req.close()
    if not content_md5 or _md5check(
            filename,
            binascii.hexlify(base64.b64decode(content_md5.strip('"'))).decode(
            )):
        return True
    else:
        return False

def _md5check(fullname, md5sum=None):
    if md5sum is None:
        return True

    logger.debug("File {} md5 checking...".format(fullname))
    md5 = hashlib.md5()
    with open(fullname, 'rb') as f:
        for chunk in iter(lambda: f.read(4096), b""):
            md5.update(chunk)
    calc_md5sum = md5.hexdigest()

    if calc_md5sum != md5sum:
        logger.warning("File {} md5 check failed, {}(calc) != "
                       "{}(base)".format(fullname, calc_md5sum, md5sum))
        return False
    return True
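
# Verification sketch (assumes a small local file; names are illustrative):
# _md5check streams the file in 4 KB chunks, so it is safe for multi-GB archives,
# and passing md5sum=None skips the check entirely.
#
#   import hashlib
#   expected = hashlib.md5(open('demo.tar', 'rb').read()).hexdigest()
#   _md5check('demo.tar', expected)   # -> True
#   _md5check('demo.tar', None)       # -> True (no checksum means no check)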

def _decompress(fname):
    """
    Decompress zip and tar files
    """
    logger.info("Decompressing {}...".format(fname))

    # To guard against an interrupted decompression,
    # decompress into the fpath_tmp directory first; if decompression
    # succeeds, move the decompressed files to fpath, then delete
    # fpath_tmp and remove the downloaded compressed file.
    fpath = osp.split(fname)[0]
    fpath_tmp = osp.join(fpath, 'tmp')
    if osp.isdir(fpath_tmp):
        shutil.rmtree(fpath_tmp)
    os.makedirs(fpath_tmp)

    if fname.find('tar') >= 0:
        with tarfile.open(fname) as tf:
            tf.extractall(path=fpath_tmp)
    elif fname.find('zip') >= 0:
        with zipfile.ZipFile(fname) as zf:
            zf.extractall(path=fpath_tmp)
    else:
        raise TypeError("Unsupported compress file type {}".format(fname))

    for f in os.listdir(fpath_tmp):
        src_dir = osp.join(fpath_tmp, f)
        dst_dir = osp.join(fpath, f)
        _move_and_merge_tree(src_dir, dst_dir)

    shutil.rmtree(fpath_tmp)
    os.remove(fname)

def _move_and_merge_tree(src, dst):
    """
    Move the src directory to dst; if dst already exists,
    merge src into dst
    """
    if not osp.exists(dst):
        shutil.move(src, dst)
    elif osp.isfile(src):
        shutil.move(src, dst)
    else:
        for fp in os.listdir(src):
            src_fp = osp.join(src, fp)
            dst_fp = osp.join(dst, fp)
            if osp.isdir(src_fp):
                if osp.isdir(dst_fp):
                    _move_and_merge_tree(src_fp, dst_fp)
                else:
                    shutil.move(src_fp, dst_fp)
            elif osp.isfile(src_fp) and \
                    not osp.isfile(dst_fp):
                shutil.move(src_fp, dst_fp)
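
# Minimal end-to-end sketch (not part of the original module; the dataset name
# and target directory are illustrative). Guarded so importing this module
# never triggers a download.
if __name__ == '__main__':
    # Fetch the small 'roadsign_voc' demo dataset into DATASET_HOME and print
    # the resolved local directory.
    target = osp.join(DATASET_HOME, 'roadsign_voc')
    download_dataset(target, dataset='roadsign_voc')
    print("roadsign_voc downloaded to {}".format(target))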