[Fix] Fix multiprocessing failing to launch on Ascend NPU
zhouzaida authored and xuuyangg committed Aug 17, 2023
1 parent 488fddc commit 486a1d2
Showing 4 changed files with 9 additions and 5 deletions.
1 change: 1 addition & 0 deletions .github/workflows/merge_stage_test.yml
@@ -12,6 +12,7 @@ on:
      - "CONTRIBUTING_zh-CN.md"
      - ".pre-commit-config.yaml"
      - ".pre-commit-config-zh-cn.yaml"
+      - "examples/**"
    branches:
      - main

1 change: 1 addition & 0 deletions .github/workflows/pr_stage_test.yml
@@ -12,6 +12,7 @@ on:
      - "CONTRIBUTING_zh-CN.md"
      - ".pre-commit-config.yaml"
      - ".pre-commit-config-zh-cn.yaml"
+      - "examples/**"

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
5 changes: 0 additions & 5 deletions mmengine/device/utils.py
@@ -1,5 +1,4 @@
# Copyright (c) OpenMMLab. All rights reserved.
-import os
from typing import Optional

import torch
@@ -8,10 +7,6 @@
    import torch_npu # noqa: F401
    import torch_npu.npu.utils as npu_utils

-    # Enable operator support for dynamic shape and
-    # binary operator support on the NPU.
-    npu_jit_compile = bool(os.getenv('NPUJITCompile', False))
-    torch.npu.set_compile_mode(jit_compile=npu_jit_compile)
    IS_NPU_AVAILABLE = hasattr(torch, 'npu') and torch.npu.is_available()
except Exception:
    IS_NPU_AVAILABLE = False
7 changes: 7 additions & 0 deletions mmengine/model/base_model/base_model.py
@@ -1,4 +1,5 @@
# Copyright (c) OpenMMLab. All rights reserved.
+import os
from abc import abstractmethod
from collections import OrderedDict
from typing import Dict, Optional, Tuple, Union
@@ -267,6 +268,12 @@ def _set_device(self, device: torch.device) -> None:
        buffers in this module.
        """

+        if device.type == 'npu':
+            # Enable operator support for dynamic shape and
+            # binary operator support on the NPU.
+            npu_jit_compile = bool(os.getenv('NPUJITCompile', False))
+            torch.npu.set_compile_mode(jit_compile=npu_jit_compile)
+
        def apply_fn(module):
            if not isinstance(module, BaseDataPreprocessor):
                return
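The snippet below is a minimal, self-contained sketch (not part of this commit) of the per-process logic that the new block in BaseModel._set_device performs. It mirrors the import-time code removed from mmengine/device/utils.py: the NPU JIT compile mode is still driven by the NPUJITCompile environment variable, but it is now applied only when a model is actually moved to an NPU device, so importing mmengine in a spawned worker process no longer touches the NPU. The helper name and the hasattr guard are assumptions added here so the sketch also runs where torch_npu is not installed.

import os

import torch


def configure_npu_jit_compile() -> None:
    """Hypothetical helper mirroring the block added to BaseModel._set_device."""
    # NPUJITCompile is read as a truthy string; unset means JIT compile is disabled.
    npu_jit_compile = bool(os.getenv('NPUJITCompile', False))
    # Guarded so this sketch is a no-op on machines without torch_npu.
    if hasattr(torch, 'npu') and hasattr(torch.npu, 'set_compile_mode'):
        torch.npu.set_compile_mode(jit_compile=npu_jit_compile)


if __name__ == '__main__':
    # Each spawned worker can call this once its target device is known,
    # instead of paying the cost (and side effects) at import time.
    configure_npu_jit_compile()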
