env.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import glob
import os
import os.path as osp
import random
import multiprocessing as mp

import numpy as np
import paddle

from . import logging


def get_environ_info():
    """Collect environment information (place and device count)."""
    env_info = dict()
    # TODO: is_compiled_with_cuda() has not been moved
    compiled_with_cuda = paddle.is_compiled_with_cuda()
    if compiled_with_cuda:
        # Count usable GPUs; get_world_size() reflects the number of
        # trainer processes started by the distributed launcher.
        if 'gpu' in paddle.get_device():
            gpu_nums = paddle.distributed.get_world_size()
        else:
            gpu_nums = 0
        if gpu_nums == 0:
            # CUDA build but no usable GPU: force CPU execution.
            os.environ['CUDA_VISIBLE_DEVICES'] = ''
    # `and` short-circuits, so gpu_nums is only read when CUDA is compiled in.
    place = 'gpu' if compiled_with_cuda and gpu_nums else 'cpu'
    env_info['place'] = place
    env_info['num'] = int(os.environ.get('CPU_NUM', 1))
    if place == 'gpu':
        env_info['num'] = gpu_nums
    return env_info
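
# Usage sketch (hedged; assumes this module is importable from its package,
# e.g. `from .env import get_environ_info`):
#
#     info = get_environ_info()
#     # e.g. {'place': 'gpu', 'num': 2} on a 2-GPU run,
#     #      {'place': 'cpu', 'num': 1} otherwise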


def get_num_workers(num_workers):
    # 'auto' resolves to half the CPU count, capped at 2 workers.
    if num_workers == 'auto':
        num_workers = min(mp.cpu_count() // 2, 2)
    return num_workers
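
# Usage sketch: get_num_workers('auto') resolves to min(mp.cpu_count() // 2, 2),
# while an explicit integer (e.g. get_num_workers(4)) is returned unchanged.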


def init_parallel_env():
    env = os.environ
    if 'FLAGS_allocator_strategy' not in os.environ:
        os.environ['FLAGS_allocator_strategy'] = 'auto_growth'
    # PADDLE_TRAINER_ID / PADDLE_TRAINERS_NUM are set by the distributed
    # launcher, so their presence indicates a multi-process run.
    dist = 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env
    if dist:
        trainer_id = int(env['PADDLE_TRAINER_ID'])
        # Give each trainer a distinct but deterministic seed.
        local_seed = 99 + trainer_id
        random.seed(local_seed)
        np.random.seed(local_seed)
        paddle.distributed.init_parallel_env()
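
# Usage sketch (hedged): when training is started with
# `python -m paddle.distributed.launch train.py`, the launcher sets
# PADDLE_TRAINER_ID / PADDLE_TRAINERS_NUM, so calling init_parallel_env()
# once per process at startup seeds each rank with 99 + rank and joins the
# parallel group; in a plain single-process run it only sets the allocator flag.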