From ae061538a724ba46a9d40984e475551ea7f63cbd Mon Sep 17 00:00:00 2001
From: youkaichao
Date: Fri, 27 Sep 2024 14:27:56 -0700
Subject: [PATCH] [misc][distributed] add VLLM_SKIP_P2P_CHECK flag (#8911)

---
 .../distributed/device_communicators/custom_all_reduce.py | 4 ++++
 vllm/envs.py                                               | 8 ++++++++
 2 files changed, 12 insertions(+)

diff --git a/vllm/distributed/device_communicators/custom_all_reduce.py b/vllm/distributed/device_communicators/custom_all_reduce.py
index d239d645edc14..c95192a5a1bcc 100644
--- a/vllm/distributed/device_communicators/custom_all_reduce.py
+++ b/vllm/distributed/device_communicators/custom_all_reduce.py
@@ -28,6 +28,10 @@ def _can_p2p(rank: int, world_size: int) -> bool:
     for i in range(world_size):
         if i == rank:
             continue
+        if envs.VLLM_SKIP_P2P_CHECK:
+            logger.info(
+                "Skipping P2P check and trusting the driver's P2P report.")
+            return torch.cuda.can_device_access_peer(rank, i)
         if not gpu_p2p_access_check(rank, i):
             return False
     return True
diff --git a/vllm/envs.py b/vllm/envs.py
index 07371bcf1f13e..9ab36f7bddfe1 100644
--- a/vllm/envs.py
+++ b/vllm/envs.py
@@ -64,6 +64,7 @@
     VLLM_USE_TRITON_AWQ: bool = False
     VLLM_ALLOW_RUNTIME_LORA_UPDATING: bool = False
     VLLM_ALLOW_DEPRECATED_BEAM_SEARCH: bool = False
+    VLLM_SKIP_P2P_CHECK: bool = False
 
 
 def get_default_cache_root():
@@ -424,6 +425,13 @@ def get_default_config_root():
     lambda:
     (os.environ.get("VLLM_ALLOW_RUNTIME_LORA_UPDATING",
                     "0").strip().lower() in ("1", "true")),
+
+    # By default, vLLM will check the peer-to-peer capability itself,
+    # in case of broken drivers. See https://github.com/vllm-project/vllm/blob/a9b15c606fea67a072416ea0ea115261a2756058/vllm/distributed/device_communicators/custom_all_reduce_utils.py#L101-L108 for details. # noqa
+    # If this env var is set to 1, vLLM will skip the peer-to-peer check,
+    # and trust the driver's peer-to-peer capability report.
+    "VLLM_SKIP_P2P_CHECK":
+    lambda: os.getenv("VLLM_SKIP_P2P_CHECK", "0") == "1",
 }
 
 # end-env-vars-definition
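
Note (not part of the patch): a minimal standalone sketch of the behavior this flag introduces, for reviewers. The function name _can_p2p_sketch is illustrative, and vLLM's own gpu_p2p_access_check probe (roughly, a real copy test between devices with cached results) is stood in for by a direct driver query here. Operators would enable the flag by exporting VLLM_SKIP_P2P_CHECK=1 in the environment before launching vLLM.

import os

import torch


def _can_p2p_sketch(rank: int, world_size: int) -> bool:
    # Mirrors the patched _can_p2p: when VLLM_SKIP_P2P_CHECK=1, trust the
    # CUDA driver's peer-to-peer report instead of running vLLM's own probe.
    skip_check = os.getenv("VLLM_SKIP_P2P_CHECK", "0") == "1"
    for i in range(world_size):
        if i == rank:
            continue
        if skip_check:
            # Ask the driver directly whether device `rank` can access peer `i`.
            return torch.cuda.can_device_access_peer(rank, i)
        # Stand-in for vLLM's gpu_p2p_access_check(rank, i); the real probe
        # verifies P2P with an actual data transfer in case of broken drivers.
        if not torch.cuda.can_device_access_peer(rank, i):
            return False
    return True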