Adopt generate binary build matrix to call via main (#1088)
The following improvements/modifications are made:

Related to this PR:
pytorch/pytorch#88997

1. Adopt passing arguments via main() so the script can be called this way:
```
# Example caller: invoke main() once per OS/package combination and capture
# the JSON matrix that the script prints to stdout.
# OperatingSystem, PackageType, CHANNEL, ENABLE and release_matrix are defined
# by the calling script; the import path of the module depends on the caller.
import io
import json
from contextlib import redirect_stdout

import generate_binary_build_matrix

for osys in OperatingSystem:
    release_matrix[osys] = {}
    for package in PackageType:
        command = ["--channel", CHANNEL, "--operating-system", osys.value, "--package-type", package.value]
        if osys == OperatingSystem.MACOS_ARM64 and package == PackageType.LIBTORCH:
            continue
        elif osys == OperatingSystem.LINUX and package == PackageType.WHEEL:
            command += ["--with-py311", ENABLE, "--with-pypi-cudnn", ENABLE]

        f = io.StringIO()
        with redirect_stdout(f):
            generate_binary_build_matrix.main(command)

        release_matrix[osys][package] = json.loads(f.getvalue())["include"]
```

2. Pass with-pypi-cudnn and with-py311 only to the wheel matrix generation, since these parameters apply to wheels only.
3. Add the --with-pypi-cudnn argument to main() (a standalone invocation sketch follows this list).
4. Fix macOS abi_versions = [PRE_CXX11_ABI, CXX11_ABI], since we do have these builds: [HUD](https://hud.pytorch.org/hud/pytorch/pytorch/nightly/1?per_page=50&name_filter=macos-binary-lib)
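The sketch below is illustrative only and not part of this commit: a single call to the refactored main() requesting the Linux wheel matrix with the wheel-only flags enabled. It assumes the module is importable as generate_binary_build_matrix and that the ENABLE constant equals the string "enable"; adjust both to the caller's environment.

```
# Hypothetical single invocation of main() exercising the new --with-pypi-cudnn flag.
import io
import json
from contextlib import redirect_stdout

import generate_binary_build_matrix  # assumed importable from tools/scripts

buf = io.StringIO()
with redirect_stdout(buf):
    generate_binary_build_matrix.main([
        "--channel", "nightly",
        "--operating-system", "linux",
        "--package-type", "wheel",
        "--with-py311", "enable",       # assumed value of the ENABLE constant
        "--with-pypi-cudnn", "enable",  # flag added by this commit
    ])

wheel_matrix = json.loads(buf.getvalue())["include"]
print(f"{len(wheel_matrix)} wheel build configurations")
```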
atalman authored Nov 16, 2022
1 parent d1630b7 commit e0dc952
Showing 1 changed file with 28 additions and 12 deletions.
40 changes: 28 additions & 12 deletions tools/scripts/generate_binary_build_matrix.py
```
@@ -169,7 +169,7 @@ def get_wheel_install_command(channel: str, gpu_arch_type: str, desired_cuda: st
     desired_cuda_pkg = f"{desired_cuda}_pypi_cudnn" if with_pypi else desired_cuda
     return f"{whl_install_command} --extra-index-url {get_base_download_url_for_repo('whl', channel, gpu_arch_type, desired_cuda_pkg)}"

-def generate_conda_matrix(os: str, channel: str, with_cuda: str, with_py311: str) -> List[Dict[str, str]]:
+def generate_conda_matrix(os: str, channel: str, with_cuda: str) -> List[Dict[str, str]]:
     ret: List[Dict[str, str]] = []
     arches = ["cpu"]
     python_versions = FULL_PYTHON_VERSIONS
@@ -215,7 +215,6 @@ def generate_libtorch_matrix(
     os: str,
     channel: str,
     with_cuda: str,
-    with_py311: str,
     abi_versions: Optional[List[str]] = None,
     arches: Optional[List[str]] = None,
     libtorch_variants: Optional[List[str]] = None,
@@ -239,6 +238,8 @@ def generate_libtorch_matrix(
             abi_versions = [RELEASE, DEBUG]
         elif os == "linux":
             abi_versions = [PRE_CXX11_ABI, CXX11_ABI]
+        elif os == "macos":
+            abi_versions = [PRE_CXX11_ABI, CXX11_ABI]

     if libtorch_variants is None:
         libtorch_variants = [
@@ -349,7 +350,7 @@ def generate_wheels_matrix(
                     "desired_cuda": translate_desired_cuda(
                         gpu_arch_type, gpu_arch_version
                     ),
-                    "container_image": WHEEL_CONTAINER_IMAGES[arch_version],
+                    "container_image": mod.WHEEL_CONTAINER_IMAGES[arch_version],
                     "package_type": package_type,
                     "pytorch_extra_install_requirements":
                         "nvidia-cuda-runtime-cu11;"
@@ -391,7 +392,7 @@ def generate_wheels_matrix(
     "libtorch": generate_libtorch_matrix,
 }

-def main() -> None:
+def main(args) -> None:
     parser = argparse.ArgumentParser()
     parser.add_argument(
         "--package-type",
@@ -427,8 +428,15 @@ def main() -> None:
         choices=[ENABLE, DISABLE],
         default=os.getenv("WITH_PY311", DISABLE),
     )
+    parser.add_argument(
+        "--with-pypi-cudnn",
+        help="Include PyPI cudnn builds",
+        type=str,
+        choices=[ENABLE, DISABLE],
+        default=os.getenv("WITH_PYPI_CUDNN", DISABLE),
+    )

-    options = parser.parse_args()
+    options = parser.parse_args(args)
     includes = []

     package_types = PACKAGE_TYPES if options.package_type == "all" else [options.package_type]
@@ -437,15 +445,23 @@ def main() -> None:
     for channel in channels:
         for package in package_types:
             initialize_globals(channel)
-            includes.extend(
-                GENERATING_FUNCTIONS_BY_PACKAGE_TYPE[package](options.operating_system,
-                                                              channel,
-                                                              options.with_cuda,
-                                                              options.with_py311)
-            )
+            if package == "wheel":
+                includes.extend(
+                    GENERATING_FUNCTIONS_BY_PACKAGE_TYPE[package](options.operating_system,
+                                                                  channel,
+                                                                  options.with_cuda,
+                                                                  options.with_py311,
+                                                                  options.with_pypi_cudnn)
+                )
+            else:
+                includes.extend(
+                    GENERATING_FUNCTIONS_BY_PACKAGE_TYPE[package](options.operating_system,
+                                                                  channel,
+                                                                  options.with_cuda)
+                )


     print(json.dumps({"include": includes}))

 if __name__ == "__main__":
-    main()
+    main(sys.argv[1:])
```
