check_distributed.py
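"""Sanity-check whether this machine can support multi-GPU distributed training with
PyTorch: verify CUDA and NCCL availability, count GPUs, and attempt to initialize a
single-process NCCL process group."""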
import torch
import torch.distributed as dist


def check_distributed_training():
    # Check if CUDA is available
    if not torch.cuda.is_available():
        print(
            "CUDA is not available. Distributed training is typically done with GPUs."
        )
        return False

    # Check if the NCCL backend is available (required for multi-GPU training)
    if not dist.is_nccl_available():
        print("NCCL backend is not available.")
        return False

    # Check whether the system has more than one GPU for multi-GPU training
    num_gpus = torch.cuda.device_count()
    if num_gpus < 2:
        print(
            f"Only {num_gpus} GPU(s) detected. Distributed training usually requires at least 2 GPUs."
        )
        return False
    print(f"{num_gpus} GPU(s) detected, suitable for distributed training.")

    # Check that the torch.distributed package can actually be initialized
    try:
        dist.init_process_group(
            backend="nccl", init_method="tcp://127.0.0.1:23456", rank=0, world_size=1
        )
        print("Distributed package initialized successfully.")
        # Clean up the process group so the script exits cleanly
        dist.destroy_process_group()
    except Exception as e:
        print(f"Error initializing distributed package: {e}")
        return False

    return True


if __name__ == "__main__":
    if check_distributed_training():
        print("Distributed training is possible on this machine.")
    else:
        print("Distributed training is not possible on this machine.")