From b6c0b47ce4386e1fab3369eb079e03be5137e576 Mon Sep 17 00:00:00 2001
From: Philip Meier
Date: Tue, 13 Dec 2022 15:09:52 +0100
Subject: [PATCH 01/22] improve prototype GPU CI config

---
 .../prototype-transforms-tests-linux-gpu.yml | 41 +++++++++++++------
 1 file changed, 29 insertions(+), 12 deletions(-)

diff --git a/.github/workflows/prototype-transforms-tests-linux-gpu.yml b/.github/workflows/prototype-transforms-tests-linux-gpu.yml
index 01f1c1cccf8..4fcc70be8b1 100644
--- a/.github/workflows/prototype-transforms-tests-linux-gpu.yml
+++ b/.github/workflows/prototype-transforms-tests-linux-gpu.yml
@@ -16,8 +16,10 @@ jobs:
   tests:
     strategy:
       matrix:
-        python_version: ["3.8"]
-        cuda_arch_version: ["11.6"]
+        python_version:
+          - "3.8"
+        cuda_arch_version:
+          - "11.6"
       fail-fast: false
     uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
     with:
@@ -27,43 +29,58 @@ jobs:
       gpu-arch-version: ${{ matrix.cuda_arch_version }}
       timeout: 120
       script: |
-        # Mark Build Directory Safe
+        echo '::group::Mark Build Directory Safe'
         git config --global --add safe.directory /__w/vision/vision
+        echo '::endgroup::'

-        # Set up Environment Variables
+        echo '::group::Set up Environment Variables'
         export PYTHON_VERSION="${{ matrix.python_version }}"
         export VERSION="${{ matrix.cuda_arch_version }}"
         export CUDATOOLKIT="pytorch-cuda=${VERSION}"
+        echo '::endgroup::'

-        # Set CHANNEL
+        echo '::group::Set CHANNEL'
         if [[ (${GITHUB_EVENT_NAME} = 'pull_request' && (${GITHUB_BASE_REF} = 'release'*)) || (${GITHUB_REF} = 'refs/heads/release'*) ]]; then
           export CHANNEL=test
         else
           export CHANNEL=nightly
         fi
+        echo '::endgroup::'

-        # Create Conda Env
-        conda create -yp ci_env python="${PYTHON_VERSION}" numpy libpng jpeg scipy
+        echo '::group::Create Conda Env'
+        conda create --prefix ci_env \
+          --quiet --yes \
+          python="${PYTHON_VERSION}" numpy libpng jpeg scipy
         conda activate /work/ci_env
+        echo '::endgroup::'

-        # Install PyTorch, Torchvision, and testing libraries
+        echo '::group::Install PyTorch'
         set -ex
         conda install \
-          --yes \
+          --quiet --yes \
           -c "pytorch-${CHANNEL}" \
           -c nvidia \
           pytorch \
           "${CUDATOOLKIT}"
-
         python3 -c "import torch; exit(not torch.cuda.is_available())"
+        echo '::endgroup::'
+        echo '::group::Install TorchVision'
         python3 setup.py develop
+        echo '::endgroup::'
+
+        echo '::group::Collect environment information'
+        python3 -m torch.utils.collect_env
+        echo '::endgroup::'
+
+        echo '::group::Install testing utilities'
         python3 -m pip install pytest pytest-mock pytest-cov
+        echo '::endgroup::'

-        # Run Tests
-        python3 -m torch.utils.collect_env
+        echo '::group::Run Tests'
         python3 -m pytest \
           --durations=20 \
           --cov=torchvision/prototype/transforms \
           --cov-report=term-missing \
           test/test_prototype_transforms*.py
+        echo '::endgroup::'

From 8a04e2b94a3b2c84763fa3675aaa831ccb1cab2f Mon Sep 17 00:00:00 2001
From: Philip Meier
Date: Tue, 13 Dec 2022 16:42:23 +0100
Subject: [PATCH 02/22] consolidate CPU and GPU prototype workflows

---
 .../prototype-transforms-tests-linux-gpu.yml | 96 +++++++++++--------
 1 file changed, 55 insertions(+), 41 deletions(-)

diff --git a/.github/workflows/prototype-transforms-tests-linux-gpu.yml b/.github/workflows/prototype-transforms-tests-linux-gpu.yml
index 4fcc70be8b1..0b3f9eaa417 100644
--- a/.github/workflows/prototype-transforms-tests-linux-gpu.yml
+++ b/.github/workflows/prototype-transforms-tests-linux-gpu.yml
@@ -6,81 +6,95 @@ on:
     branches:
       - nightly
       - main
-      - release/*
-  workflow_dispatch:
-
-env:
-  CHANNEL: "nightly"

 jobs:
   tests:
     strategy:
       matrix:
-        python_version:
python-version: + - "3.7" - "3.8" - cuda_arch_version: - - "11.6" + - "3.9" + - "3.10" + gpu-arch-type: + - cpu + gpu-arch-version: + - "" + runner: + - linux.2xlarge + include: + - python-version: "3.8" + gpu-arch-type: cuda + gpu-arch-version: "11.6" + runner: linux.4xlarge.nvidia.gpu fail-fast: false uses: pytorch/test-infra/.github/workflows/linux_job.yml@main with: - runner: linux.4xlarge.nvidia.gpu repository: pytorch/vision - gpu-arch-type: cuda - gpu-arch-version: ${{ matrix.cuda_arch_version }} - timeout: 120 + gpu-arch-type: ${{ matrix.gpu-arch-type }} + gpu-arch-version: ${{ matrix.gpu-arch-version }} + runner: ${{ matrix.python-version }} + job-name: Foo-${{ matrix.python-version }} + timeout: 45 script: | - echo '::group::Mark Build Directory Safe' + # Mark Build Directory Safe git config --global --add safe.directory /__w/vision/vision - echo '::endgroup::' - echo '::group::Set up Environment Variables' - export PYTHON_VERSION="${{ matrix.python_version }}" - export VERSION="${{ matrix.cuda_arch_version }}" - export CUDATOOLKIT="pytorch-cuda=${VERSION}" - echo '::endgroup::' - - echo '::group::Set CHANNEL' + echo '::group::Set PyTorch conda channel' if [[ (${GITHUB_EVENT_NAME} = 'pull_request' && (${GITHUB_BASE_REF} = 'release'*)) || (${GITHUB_REF} = 'refs/heads/release'*) ]]; then - export CHANNEL=test + POSTFIX=test else - export CHANNEL=nightly + POSTFIX=nightly fi + PYTORCH_CHANNEL=pytorch-"${POSTFIX}" + echo "${PYTORCH_CHANNEL}" echo '::endgroup::' - - echo '::group::Create Conda Env' - conda create --prefix ci_env \ + + echo '::group::Set PyTorch conda mutex' + if [[ ${{ matrix.gpu-arch-type }} = 'cuda' ]]; then + PYTORCH_MUTEX="pytorch-cuda=${{ matrix.gpu-arch-version }}" + else + PYTORCH_MUTEX=cpuonly + fi + echo $"{MUTEX}" + echo '::endgroup::' + + echo '::group::Create conda environment' + conda create --prefix $PWD/ci \ --quiet --yes \ - python="${PYTHON_VERSION}" numpy libpng jpeg scipy - conda activate /work/ci_env + python=${{ matrix.python-version }} \ + numpy libpng jpeg scipy + conda activate $PWD/ci echo '::endgroup::' echo '::group::Install PyTorch' - set -ex conda install \ --quiet --yes \ - -c "pytorch-${CHANNEL}" \ + -c "${PYTORCH_CHANNEL}" \ -c nvidia \ pytorch \ - "${CUDATOOLKIT}" - python3 -c "import torch; exit(not torch.cuda.is_available())" + "${PYTORCH_MUTEX}" + if [[ ${{ matrix.gpu-arch-type }} = 'cuda' ]]; then + python3 -c "import torch; exit(not torch.cuda.is_available())" + fi echo '::endgroup::' echo '::group::Install TorchVision' - python3 setup.py develop + python setup.py develop echo '::endgroup::' - echo '::group::Collect environment information' - python3 -m torch.utils.collect_env + echo '::group::Collect PyTorch environment information' + python -m torch.utils.collect_env echo '::endgroup::' echo '::group::Install testing utilities' - python3 -m pip install pytest pytest-mock pytest-cov + python -m pip install pytest pytest-mock pytest-cov echo '::endgroup::' echo '::group::Run Tests' - python3 -m pytest \ - --durations=20 \ - --cov=torchvision/prototype/transforms \ - --cov-report=term-missing \ - test/test_prototype_transforms*.py + pytest \ + --durations=20 \ + --cov=torchvision/prototype/transforms \ + --cov-report=term-missing \ + test/test_prototype_transforms*.py echo '::endgroup::' From c931ff2c27bbc2b04324b777ae39a6fc25e50b8a Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Tue, 13 Dec 2022 16:42:56 +0100 Subject: [PATCH 03/22] [REVERTME] deactivate other CI --- .circleci/.gitignore | 1 - .circleci/build_docs/commit_docs.sh 
| 35 - .circleci/config.yml | 2059 ----------------- .circleci/config.yml.in | 1130 --------- .circleci/regenerate.py | 370 --- .circleci/smoke_test/docker/Dockerfile | 34 - .../android/scripts/binary_android_build.sh | 27 - .../android/scripts/binary_android_upload.sh | 34 - .../android/scripts/install_gradle.sh | 19 - .../unittest/ios/scripts/binary_ios_build.sh | 47 - .../unittest/ios/scripts/binary_ios_upload.sh | 42 - .../unittest/linux/scripts/environment.yml | 16 - .circleci/unittest/linux/scripts/install.sh | 40 - .../unittest/linux/scripts/post_process.sh | 6 - .../linux/scripts/run-clang-format.py | 331 --- .circleci/unittest/linux/scripts/run_test.sh | 9 - .circleci/unittest/linux/scripts/setup_env.sh | 47 - .../unittest/windows/scripts/environment.yml | 19 - .circleci/unittest/windows/scripts/install.sh | 52 - .../windows/scripts/install_conda.bat | 1 - .../unittest/windows/scripts/post_process.sh | 6 - .../unittest/windows/scripts/run_test.sh | 12 - .../unittest/windows/scripts/set_cuda_envs.sh | 48 - .../unittest/windows/scripts/setup_env.sh | 45 - .../windows/scripts/vc_env_helper.bat | 39 - .github/workflows/build-conda-linux.yml | 45 - .github/workflows/build-conda-m1.yml | 46 - .github/workflows/build-conda-macos.yml | 46 - .github/workflows/build-wheels-linux.yml | 44 - .github/workflows/build-wheels-m1.yml | 45 - .github/workflows/build-wheels-macos.yml | 45 - .github/workflows/pr-labels.yml | 35 - .github/workflows/prototype-tests.yml | 73 - .github/workflows/test-linux-cpu.yml | 57 - .github/workflows/test-linux-gpu.yml | 61 - .github/workflows/test-m1.yml | 50 - .github/workflows/tests-schedule.yml | 57 - 37 files changed, 5073 deletions(-) delete mode 100644 .circleci/.gitignore delete mode 100755 .circleci/build_docs/commit_docs.sh delete mode 100644 .circleci/config.yml delete mode 100644 .circleci/config.yml.in delete mode 100755 .circleci/regenerate.py delete mode 100644 .circleci/smoke_test/docker/Dockerfile delete mode 100644 .circleci/unittest/android/scripts/binary_android_build.sh delete mode 100644 .circleci/unittest/android/scripts/binary_android_upload.sh delete mode 100755 .circleci/unittest/android/scripts/install_gradle.sh delete mode 100755 .circleci/unittest/ios/scripts/binary_ios_build.sh delete mode 100644 .circleci/unittest/ios/scripts/binary_ios_upload.sh delete mode 100644 .circleci/unittest/linux/scripts/environment.yml delete mode 100755 .circleci/unittest/linux/scripts/install.sh delete mode 100755 .circleci/unittest/linux/scripts/post_process.sh delete mode 100755 .circleci/unittest/linux/scripts/run-clang-format.py delete mode 100755 .circleci/unittest/linux/scripts/run_test.sh delete mode 100755 .circleci/unittest/linux/scripts/setup_env.sh delete mode 100644 .circleci/unittest/windows/scripts/environment.yml delete mode 100644 .circleci/unittest/windows/scripts/install.sh delete mode 100644 .circleci/unittest/windows/scripts/install_conda.bat delete mode 100644 .circleci/unittest/windows/scripts/post_process.sh delete mode 100644 .circleci/unittest/windows/scripts/run_test.sh delete mode 100644 .circleci/unittest/windows/scripts/set_cuda_envs.sh delete mode 100644 .circleci/unittest/windows/scripts/setup_env.sh delete mode 100644 .circleci/unittest/windows/scripts/vc_env_helper.bat delete mode 100644 .github/workflows/build-conda-linux.yml delete mode 100644 .github/workflows/build-conda-m1.yml delete mode 100644 .github/workflows/build-conda-macos.yml delete mode 100644 .github/workflows/build-wheels-linux.yml delete mode 100644 
.github/workflows/build-wheels-m1.yml delete mode 100644 .github/workflows/build-wheels-macos.yml delete mode 100644 .github/workflows/pr-labels.yml delete mode 100644 .github/workflows/prototype-tests.yml delete mode 100644 .github/workflows/test-linux-cpu.yml delete mode 100644 .github/workflows/test-linux-gpu.yml delete mode 100644 .github/workflows/test-m1.yml delete mode 100644 .github/workflows/tests-schedule.yml diff --git a/.circleci/.gitignore b/.circleci/.gitignore deleted file mode 100644 index 485dee64bcf..00000000000 --- a/.circleci/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.idea diff --git a/.circleci/build_docs/commit_docs.sh b/.circleci/build_docs/commit_docs.sh deleted file mode 100755 index 04e3538fefc..00000000000 --- a/.circleci/build_docs/commit_docs.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - -set -ex - - -if [ "$2" == "" ]; then - echo call as "$0" "" "" - echo where src is the root of the built documentation git checkout and - echo branch should be "main" or "1.7" or so - exit 1 -fi - -src=$1 -target=$2 - -echo "committing docs from ${src} to ${target}" - -pushd "${src}" -git checkout gh-pages -mkdir -p ./"${target}" -rm -rf ./"${target}"/* -cp -r "${src}/docs/build/html/"* ./"$target" -if [ "${target}" == "main" ]; then - mkdir -p ./_static - rm -rf ./_static/* - cp -r "${src}/docs/build/html/_static/"* ./_static - git add --all ./_static || true -fi -git add --all ./"${target}" || true -git config user.email "soumith+bot@pytorch.org" -git config user.name "pytorchbot" -# If there aren't changes, don't make a commit; push is no-op -git commit -m "auto-generating sphinx docs" || true -git remote add https https://github.com/pytorch/vision.git -git push -u https gh-pages diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index d15a20216bb..00000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,2059 +0,0 @@ -version: 2.1 - -# How to test the Linux jobs: -# - Install CircleCI local CLI: https://circleci.com/docs/2.0/local-cli/ -# - circleci config process .circleci/config.yml > gen.yml && circleci local execute -c gen.yml --job binary_linux_wheel_py3.7 -# - Replace binary_linux_wheel_py3.7 with the name of the job you want to test. -# Job names are 'name:' key. - -executors: - windows-cpu: - machine: - resource_class: windows.xlarge - image: windows-server-2019-vs2019:stable - shell: bash.exe - - windows-gpu: - machine: - resource_class: windows.gpu.nvidia.medium - image: windows-server-2019-nvidia:stable - shell: bash.exe - -commands: - checkout_merge: - description: "checkout merge branch" - steps: - - checkout -# - run: -# name: Checkout merge branch -# command: | -# set -ex -# BRANCH=$(git rev-parse --abbrev-ref HEAD) -# if [[ "$BRANCH" != "main" ]]; then -# git fetch --force origin ${CIRCLE_BRANCH}/merge:merged/${CIRCLE_BRANCH} -# git checkout "merged/$CIRCLE_BRANCH" -# fi - designate_upload_channel: - description: "inserts the correct upload channel into ${BASH_ENV}" - steps: - - run: - name: adding UPLOAD_CHANNEL to BASH_ENV - command: | - our_upload_channel=nightly - # On tags upload to test instead - if [[ -n "${CIRCLE_TAG}" ]]; then - our_upload_channel=test - fi - echo "export UPLOAD_CHANNEL=${our_upload_channel}" >> ${BASH_ENV} - - brew_update: - description: "Update Homebrew and install base formulae" - steps: - - run: - name: Update Homebrew - no_output_timeout: "10m" - command: | - set -ex - - # Update repositories manually. 
- # Running `brew update` produces a comparison between the - # current checkout and the updated checkout, which takes a - # very long time because the existing checkout is 2y old. - for path in $(find /usr/local/Homebrew -type d -name .git) - do - cd $path/.. - git fetch --depth=1 origin - git reset --hard origin/master - done - - export HOMEBREW_NO_AUTO_UPDATE=1 - - # Install expect and moreutils so that we can call `unbuffer` and `ts`. - # moreutils installs a `parallel` executable by default, which conflicts - # with the executable from the GNU `parallel`, so we must unlink GNU - # `parallel` first, and relink it afterwards. - brew install coreutils - brew unlink parallel - brew install moreutils - brew link parallel --overwrite - brew install expect - - brew_install: - description: "Install Homebrew formulae" - parameters: - formulae: - type: string - default: "" - steps: - - run: - name: Install << parameters.formulae >> - no_output_timeout: "10m" - command: | - set -ex - export HOMEBREW_NO_AUTO_UPDATE=1 - brew install << parameters.formulae >> - - run_brew_for_ios_build: - steps: - - brew_update - - brew_install: - formulae: libtool - - apt_install: - parameters: - args: - type: string - descr: - type: string - default: "" - update: - type: boolean - default: true - steps: - - run: - name: > - <<^ parameters.descr >> apt install << parameters.args >> <> - <<# parameters.descr >> << parameters.descr >> <> - command: | - <<# parameters.update >> sudo apt update -qy <> - sudo apt install << parameters.args >> - - pip_install: - parameters: - args: - type: string - descr: - type: string - default: "" - user: - type: boolean - default: true - steps: - - run: - name: > - <<^ parameters.descr >> pip install << parameters.args >> <> - <<# parameters.descr >> << parameters.descr >> <> - command: > - pip install - <<# parameters.user >> --user <> - --progress-bar=off - << parameters.args >> - - install_torchvision: - parameters: - editable: - type: boolean - default: true - steps: - - pip_install: - args: --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/cpu - descr: Install PyTorch from nightly releases - - pip_install: - args: --no-build-isolation <<# parameters.editable >> --editable <> . - descr: Install torchvision <<# parameters.editable >> in editable mode <> - - # Most of the test suite is handled by the `unittest` jobs, with completely different workflow and setup. - # This command can be used if only a selection of tests need to be run, for ad-hoc files. 
- run_tests_selective: - parameters: - file_or_dir: - type: string - steps: - - run: - name: Install test utilities - command: pip install --progress-bar=off pytest pytest-mock - - run: - name: Run tests - command: pytest --junitxml=test-results/junit.xml -v --durations 20 <> - - store_test_results: - path: test-results - - download_model_weights: - parameters: - extract_roots: - type: string - default: "torchvision/models" - background: - type: boolean - default: true - steps: - - apt_install: - args: parallel wget - descr: Install download utilitites - - run: - name: Download model weights - background: << parameters.background >> - command: | - mkdir -p ~/.cache/torch/hub/checkpoints - python scripts/collect_model_urls.py << parameters.extract_roots >> \ - | parallel -j0 'wget --no-verbose -O ~/.cache/torch/hub/checkpoints/`basename {}` {}\?source=ci' - -binary_common: &binary_common - parameters: - # Edit these defaults to do a release - build_version: - description: "version number of release binary; by default, build a nightly" - type: string - default: "" - pytorch_version: - description: "PyTorch version to build against; by default, use a nightly" - type: string - default: "" - # Don't edit these - python_version: - description: "Python version to build against (e.g., 3.7)" - type: string - cu_version: - description: "CUDA version to build against, in CU format (e.g., cpu or cu100)" - type: string - default: "cpu" - unicode_abi: - description: "Python 2.7 wheel only: whether or not we are cp27mu (default: no)" - type: string - default: "" - wheel_docker_image: - description: "Wheel only: what docker image to use" - type: string - default: "" - conda_docker_image: - description: "Conda only: what docker image to use" - type: string - default: "pytorch/conda-builder:cpu" - environment: - PYTHON_VERSION: << parameters.python_version >> - PYTORCH_VERSION: << parameters.pytorch_version >> - UNICODE_ABI: << parameters.unicode_abi >> - CU_VERSION: << parameters.cu_version >> - MACOSX_DEPLOYMENT_TARGET: 10.9 - -torchvision_ios_params: &torchvision_ios_params - parameters: - build_environment: - type: string - default: "" - ios_arch: - type: string - default: "" - ios_platform: - type: string - default: "" - environment: - BUILD_ENVIRONMENT: << parameters.build_environment >> - IOS_ARCH: << parameters.ios_arch >> - IOS_PLATFORM: << parameters.ios_platform >> - -torchvision_android_params: &torchvision_android_params - parameters: - build_environment: - type: string - default: "" - environment: - BUILD_ENVIRONMENT: << parameters.build_environment >> - -smoke_test_common: &smoke_test_common - <<: *binary_common - docker: - - image: torchvision/smoke_test:latest - -jobs: - circleci_consistency: - docker: - - image: cimg/python:3.7 - steps: - - checkout - - pip_install: - args: jinja2 pyyaml - - run: - name: Check CircleCI config consistency - command: | - python .circleci/regenerate.py - git diff --exit-code || (echo ".circleci/config.yml not in sync with config.yml.in! 
Run .circleci/regenerate.py to update config"; exit 1) - - lint_python_and_config: - docker: - - image: cimg/python:3.7 - steps: - - checkout - - pip_install: - args: pre-commit - descr: Install lint utilities - - run: - name: Install pre-commit hooks - command: pre-commit install-hooks - - run: - name: Lint Python code and config files - command: pre-commit run --all-files - - run: - name: Required lint modifications - when: on_fail - command: git --no-pager diff - - lint_c: - docker: - - image: cimg/python:3.7 - steps: - - apt_install: - args: libtinfo5 - descr: Install additional system libraries - - checkout - - run: - name: Install lint utilities - command: | - curl https://oss-clang-format.s3.us-east-2.amazonaws.com/linux64/clang-format-linux64 -o clang-format - chmod +x clang-format - sudo mv clang-format /opt/clang-format - - run: - name: Lint C code - command: ./.circleci/unittest/linux/scripts/run-clang-format.py -r torchvision/csrc --clang-format-executable /opt/clang-format - - run: - name: Required lint modifications - when: on_fail - command: git --no-pager diff - - type_check_python: - docker: - - image: cimg/python:3.7 - steps: - - checkout - - install_torchvision: - editable: true - - pip_install: - args: mypy - descr: Install Python type check utilities - - run: - name: Check Python types statically - command: mypy --install-types --non-interactive --config-file mypy.ini - - unittest_torchhub: - docker: - - image: cimg/python:3.7 - steps: - - checkout - - install_torchvision - - run_tests_selective: - file_or_dir: test/test_hub.py - - unittest_onnx: - docker: - - image: cimg/python:3.7 - steps: - - checkout - - install_torchvision - - pip_install: - args: onnx onnxruntime - descr: Install ONNX - - run_tests_selective: - file_or_dir: test/test_onnx.py - - unittest_extended: - docker: - - image: cimg/python:3.7 - resource_class: xlarge - steps: - - checkout - - download_model_weights - - install_torchvision - - run: - name: Enable extended tests - command: echo 'export PYTORCH_TEST_WITH_EXTENDED=1' >> $BASH_ENV - - run_tests_selective: - file_or_dir: test/test_extended_*.py - - binary_linux_wheel: - <<: *binary_common - docker: - - image: << parameters.wheel_docker_image >> - resource_class: 2xlarge+ - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Build conda packages - no_output_timeout: 30m - command: | - set -ex - packaging/build_wheel.sh - - store_artifacts: - path: dist - - persist_to_workspace: - root: dist - paths: - - "*" - - binary_linux_conda: - <<: *binary_common - docker: - - image: "<< parameters.conda_docker_image >>" - resource_class: 2xlarge+ - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Build conda packages - no_output_timeout: 30m - command: | - set -ex - packaging/build_conda.sh - - store_artifacts: - path: /opt/conda/conda-bld/linux-64 - - persist_to_workspace: - root: /opt/conda/conda-bld/linux-64 - paths: - - "*" - - store_test_results: - path: build_results/ - - binary_win_conda: - <<: *binary_common - executor: windows-cpu - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Build conda packages - no_output_timeout: 30m - command: | - set -ex - source packaging/windows/internal/vc_install_helper.sh - packaging/windows/internal/cuda_install.bat - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate base - conda install -yq conda-build "conda-package-handling!=1.5.0" - packaging/build_conda.sh - rm 
/C/tools/miniconda3/conda-bld/win-64/vs${VC_YEAR}*.tar.bz2 - - store_artifacts: - path: C:/tools/miniconda3/conda-bld/win-64 - - persist_to_workspace: - root: C:/tools/miniconda3/conda-bld/win-64 - paths: - - "*" - - store_test_results: - path: build_results/ - - binary_win_wheel: - <<: *binary_common - executor: windows-cpu - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Build wheel packages - no_output_timeout: 30m - command: | - set -ex - source packaging/windows/internal/vc_install_helper.sh - packaging/windows/internal/cuda_install.bat - packaging/build_wheel.sh - - store_artifacts: - path: dist - - persist_to_workspace: - root: dist - paths: - - "*" - - store_test_results: - path: build_results/ - - binary_macos_wheel: - <<: *binary_common - macos: - xcode: "14.0" - steps: - - checkout_merge - - designate_upload_channel - - run: - # Cannot easily deduplicate this as source'ing activate - # will set environment variables which we need to propagate - # to build_wheel.sh - command: | - curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - sh conda.sh -b - source $HOME/miniconda3/bin/activate - packaging/build_wheel.sh - - store_artifacts: - path: dist - - persist_to_workspace: - root: dist - paths: - - "*" - - binary_ios_build: - <<: *torchvision_ios_params - macos: - xcode: "14.0" - steps: - - attach_workspace: - at: ~/workspace - - checkout - - run_brew_for_ios_build - - run: - name: Build - no_output_timeout: "1h" - command: | - script="/Users/distiller/project/.circleci/unittest/ios/scripts/binary_ios_build.sh" - cat "$script" - source "$script" - - persist_to_workspace: - root: /Users/distiller/workspace/ - paths: ios - - binary_ios_upload: - <<: *torchvision_ios_params - macos: - xcode: "14.0" - steps: - - attach_workspace: - at: ~/workspace - - checkout - - run_brew_for_ios_build - - run: - name: Upload - no_output_timeout: "1h" - command: | - script="/Users/distiller/project/.circleci/unittest/ios/scripts/binary_ios_upload.sh" - cat "$script" - source "$script" - - binary_android_build: - <<: *torchvision_android_params - docker: - - image: cimg/android:2021.08-ndk - resource_class: xlarge - steps: - - attach_workspace: - at: ~/workspace - - checkout - - run: - name: Build - no_output_timeout: "1h" - command: | - script="/home/circleci/project/.circleci/unittest/android/scripts/binary_android_build.sh" - cat "$script" - source "$script" - - store_artifacts: - path: ~/workspace/artifacts - - binary_android_upload: - <<: *torchvision_android_params - docker: - - image: cimg/android:2021.08-ndk - resource_class: xlarge - steps: - - attach_workspace: - at: ~/workspace - - checkout - - run: - name: Upload - no_output_timeout: "1h" - command: | - script="/home/circleci/project/.circleci/unittest/android/scripts/binary_android_upload.sh" - cat "$script" - source "$script" - - binary_macos_conda: - <<: *binary_common - macos: - xcode: "14.0" - steps: - - checkout_merge - - designate_upload_channel - - run: - command: | - curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - sh conda.sh -b - source $HOME/miniconda3/bin/activate - conda install -yq conda-build - packaging/build_conda.sh - - store_artifacts: - path: /Users/distiller/miniconda3/conda-bld/osx-64 - - persist_to_workspace: - root: /Users/distiller/miniconda3/conda-bld/osx-64 - paths: - - "*" - - store_test_results: - path: build_results/ - - # Requires org-member context - binary_conda_upload: - docker: - - image: 
continuumio/miniconda - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - command: | - # Prevent credential from leaking - conda install -yq anaconda-client - set -x - anaconda -t "${CONDA_PYTORCHBOT_TOKEN}" upload ~/workspace/*.tar.bz2 -u "pytorch-${UPLOAD_CHANNEL}" --label main --no-progress --force - - # Requires org-member context - binary_wheel_upload: - parameters: - subfolder: - description: "What whl subfolder to upload to, e.g., blank or cu100/ (trailing slash is important)" - type: string - docker: - - image: cimg/python:3.7 - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - checkout - - pip_install: - args: awscli - - run: - command: | - export PATH="$HOME/.local/bin:$PATH" - # Prevent credential from leaking - set +x - export AWS_ACCESS_KEY_ID="${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}" - export AWS_SECRET_ACCESS_KEY="${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}" - set -x - for pkg in ~/workspace/*.whl; do - aws s3 cp "$pkg" "s3://pytorch/whl/${UPLOAD_CHANNEL}/<< parameters.subfolder >>" --acl public-read - done - - smoke_test_linux_conda: - <<: *smoke_test_common - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - name: install binaries - command: | - set -x - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - conda install -v -y -c pytorch-nightly pytorch - conda install -v -y $(ls ~/workspace/torchvision*.tar.bz2) - - run: - name: smoke test - command: | - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - python -c "import torchvision" - - smoke_test_linux_pip: - <<: *smoke_test_common - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - name: install binaries - command: | - set -x - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - - pip_install: - args: $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html - - run: - name: smoke test - command: | - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - python -c "import torchvision" - - smoke_test_docker_image_build: - machine: - image: ubuntu-2004:202104-01 - resource_class: large - environment: - image_name: torchvision/smoke_test - steps: - - checkout - - designate_upload_channel - - run: - name: Build and push Docker image - no_output_timeout: "1h" - command: | - set +x - echo "${DOCKER_HUB_TOKEN}" | docker login --username "${DOCKER_HUB_USERNAME}" --password-stdin - set -x - cd .circleci/smoke_test/docker && docker build . 
-t ${image_name}:${CIRCLE_WORKFLOW_ID} - docker tag ${image_name}:${CIRCLE_WORKFLOW_ID} ${image_name}:latest - docker push ${image_name}:${CIRCLE_WORKFLOW_ID} - docker push ${image_name}:latest - - smoke_test_win_conda: - <<: *binary_common - executor: - name: windows-cpu - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - name: install binaries - command: | - set -x - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda env remove -n python${PYTHON_VERSION} || true - conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} - conda activate python${PYTHON_VERSION} - conda install -v -y -c pytorch-nightly pytorch - conda install -v -y $(ls ~/workspace/torchvision*.tar.bz2) - - run: - name: smoke test - command: | - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate python${PYTHON_VERSION} - python -c "import torchvision" - - smoke_test_win_pip: - <<: *binary_common - executor: - name: windows-cpu - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - name: install binaries - command: | - set -x - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} - conda activate python${PYTHON_VERSION} - - pip_install: - args: $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html - - run: - name: smoke test - command: | - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate python${PYTHON_VERSION} - python -c "import torchvision" - - unittest_linux_cpu: - <<: *binary_common - docker: - - image: "pytorch/manylinux-cpu" - resource_class: 2xlarge+ - steps: - - checkout - - designate_upload_channel - - run: - name: Generate cache key - # This will refresh cache on Sundays, nightly build should generate new cache. - command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - - keys: - - env-v2-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - - run: - name: Setup - command: .circleci/unittest/linux/scripts/setup_env.sh - - save_cache: - - key: env-v2-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - paths: - - conda - - env - - run: - name: Install torchvision - command: .circleci/unittest/linux/scripts/install.sh - - run: - name: Run tests - command: .circleci/unittest/linux/scripts/run_test.sh - - run: - name: Post process - command: .circleci/unittest/linux/scripts/post_process.sh - - store_test_results: - path: test-results - - unittest_linux_gpu: - <<: *binary_common - machine: - image: ubuntu-2004-cuda-11.4:202110-01 - resource_class: gpu.nvidia.medium - environment: - image_name: "pytorch/manylinux-cuda116" - CU_VERSION: << parameters.cu_version >> - PYTHON_VERSION: << parameters.python_version >> - steps: - - checkout - - designate_upload_channel - - run: - name: Generate cache key - # This will refresh cache on Sundays, nightly build should generate new cache. 
- command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - - keys: - - env-v3-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - - run: - name: Setup - command: docker run -e PYTHON_VERSION -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/setup_env.sh - - save_cache: - - key: env-v3-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - paths: - - conda - - env - - run: - # Here we create an envlist file that contains some env variables that we want the docker container to be aware of. - # Normally, the CIRCLECI variable is set and available on all CI workflows: https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables. - # They're avaiable in all the other workflows (OSX and Windows). - # But here, we're running the unittest_linux_gpu workflows in a docker container, where those variables aren't accessible. - # So instead we dump the variables we need in env.list and we pass that file when invoking "docker run". - name: export CIRCLECI env var - command: echo "CIRCLECI=true" >> ./env.list - - run: - name: Install torchvision - command: docker run -t --gpus all -v $PWD:$PWD -w $PWD -e UPLOAD_CHANNEL -e CU_VERSION "${image_name}" .circleci/unittest/linux/scripts/install.sh - - run: - name: Run tests - command: docker run --env-file ./env.list -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/run_test.sh - - run: - name: Post Process - command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/post_process.sh - - store_test_results: - path: test-results - - unittest_windows_cpu: - <<: *binary_common - executor: - name: windows-cpu - steps: - - checkout - - designate_upload_channel - - run: - name: Generate cache key - # This will refresh cache on Sundays, nightly build should generate new cache. - command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - - keys: - - env-v2-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - - run: - name: Setup - command: .circleci/unittest/windows/scripts/setup_env.sh - - save_cache: - - key: env-v2-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - paths: - - conda - - env - - run: - name: Install torchvision - command: .circleci/unittest/windows/scripts/install.sh - - run: - name: Run tests - command: .circleci/unittest/windows/scripts/run_test.sh - - run: - name: Post process - command: .circleci/unittest/windows/scripts/post_process.sh - - store_test_results: - path: test-results - - unittest_windows_gpu: - <<: *binary_common - executor: - name: windows-gpu - environment: - CUDA_VERSION: "11.6" - PYTHON_VERSION: << parameters.python_version >> - steps: - - checkout - - designate_upload_channel - - run: - name: Generate cache key - # This will refresh cache on Sundays, nightly build should generate new cache. 
- command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - - keys: - - env-v1-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - - run: - name: Setup - command: .circleci/unittest/windows/scripts/setup_env.sh - - save_cache: - - key: env-v1-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - paths: - - conda - - env - - run: - name: Install CUDA - command: packaging/windows/internal/cuda_install.bat - - run: - name: Update CUDA driver - command: packaging/windows/internal/driver_update.bat - - run: - name: Install torchvision - command: .circleci/unittest/windows/scripts/install.sh - - run: - name: Run tests - command: .circleci/unittest/windows/scripts/run_test.sh - - run: - name: Post process - command: .circleci/unittest/windows/scripts/post_process.sh - - store_test_results: - path: test-results - - unittest_macos_cpu: - <<: *binary_common - macos: - xcode: "14.0" - resource_class: large - steps: - - checkout - - designate_upload_channel - - run: - name: Install wget - command: HOMEBREW_NO_AUTO_UPDATE=1 brew install wget - # Disable brew auto update which is very slow - - run: - name: Generate cache key - # This will refresh cache on Sundays, nightly build should generate new cache. - command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - - keys: - - env-v3-macos-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - - run: - name: Setup - command: .circleci/unittest/linux/scripts/setup_env.sh - - save_cache: - - key: env-v3-macos-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - - paths: - - conda - - env - - run: - name: Install torchvision - command: .circleci/unittest/linux/scripts/install.sh - - run: - name: Run tests - command: .circleci/unittest/linux/scripts/run_test.sh - - run: - name: Post process - command: .circleci/unittest/linux/scripts/post_process.sh - - store_test_results: - path: test-results - - cmake_linux_cpu: - <<: *binary_common - docker: - - image: "pytorch/manylinux-cpu" - resource_class: 2xlarge+ - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Setup conda - command: .circleci/unittest/linux/scripts/setup_env.sh - - run: packaging/build_cmake.sh - - cmake_linux_gpu: - <<: *binary_common - machine: - image: ubuntu-2004-cuda-11.4:202110-01 - resource_class: gpu.nvidia.small - environment: - PYTHON_VERSION: << parameters.python_version >> - PYTORCH_VERSION: << parameters.pytorch_version >> - UNICODE_ABI: << parameters.unicode_abi >> - CU_VERSION: << parameters.cu_version >> - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Setup conda - command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> .circleci/unittest/linux/scripts/setup_env.sh - - run: - name: Build torchvision C++ distribution and test - no_output_timeout: 30m - command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -e UPLOAD_CHANNEL -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> packaging/build_cmake.sh - - cmake_macos_cpu: - <<: *binary_common - macos: - xcode: "14.0" 
- steps: - - checkout_merge - - designate_upload_channel - - run: - command: | - curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - sh conda.sh -b - source $HOME/miniconda3/bin/activate - conda install -yq conda-build cmake - packaging/build_cmake.sh - - cmake_windows_cpu: - <<: *binary_common - executor: - name: windows-cpu - steps: - - checkout_merge - - designate_upload_channel - - run: - command: | - set -ex - source packaging/windows/internal/vc_install_helper.sh - packaging/build_cmake.sh - - cmake_windows_gpu: - <<: *binary_common - executor: - name: windows-gpu - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Update CUDA driver - command: packaging/windows/internal/driver_update.bat - - run: - command: | - set -ex - source packaging/windows/internal/vc_install_helper.sh - packaging/windows/internal/cuda_install.bat - packaging/build_cmake.sh - - build_docs: - <<: *binary_common - docker: - - image: cimg/python:3.7 - resource_class: 2xlarge+ - steps: - - attach_workspace: - at: ~/workspace - - checkout - - download_model_weights - - run: - name: Setup - command: .circleci/unittest/linux/scripts/setup_env.sh - - designate_upload_channel - - run: - name: Install torchvision - command: .circleci/unittest/linux/scripts/install.sh - - run: - name: Build docs - command: | - set -ex - # turn v1.12.0rc3 into 1.12.0 - tag=$(echo $CIRCLE_TAG | sed -e 's/v*\([0-9.]*\).*/\1/') - VERSION=${tag:-main} - eval "$(./conda/bin/conda shell.bash hook)" - conda activate ./env - pushd docs - pip install --progress-bar=off -r requirements.txt - make html - popd - - persist_to_workspace: - root: ./ - paths: - - "*" - - store_artifacts: - path: ./docs/build/html - destination: docs - - upload_docs: - <<: *binary_common - docker: - - image: "pytorch/manylinux-cuda100" - resource_class: 2xlarge+ - steps: - - attach_workspace: - at: ~/workspace - - run: - name: Generate netrc - command: | - # set credentials for https pushing - # requires the org-member context - cat > ~/.netrc \< gen.yml && circleci local execute -c gen.yml --job binary_linux_wheel_py3.7 -# - Replace binary_linux_wheel_py3.7 with the name of the job you want to test. -# Job names are 'name:' key. - -executors: - windows-cpu: - machine: - resource_class: windows.xlarge - image: windows-server-2019-vs2019:stable - shell: bash.exe - - windows-gpu: - machine: - resource_class: windows.gpu.nvidia.medium - image: windows-server-2019-nvidia:stable - shell: bash.exe - -commands: - checkout_merge: - description: "checkout merge branch" - steps: - - checkout -# - run: -# name: Checkout merge branch -# command: | -# set -ex -# BRANCH=$(git rev-parse --abbrev-ref HEAD) -# if [[ "$BRANCH" != "main" ]]; then -# git fetch --force origin ${CIRCLE_BRANCH}/merge:merged/${CIRCLE_BRANCH} -# git checkout "merged/$CIRCLE_BRANCH" -# fi - designate_upload_channel: - description: "inserts the correct upload channel into ${BASH_ENV}" - steps: - - run: - name: adding UPLOAD_CHANNEL to BASH_ENV - command: | - our_upload_channel=nightly - # On tags upload to test instead - if [[ -n "${CIRCLE_TAG}" ]]; then - our_upload_channel=test - fi - echo "export UPLOAD_CHANNEL=${our_upload_channel}" >> ${BASH_ENV} - - brew_update: - description: "Update Homebrew and install base formulae" - steps: - - run: - name: Update Homebrew - no_output_timeout: "10m" - command: | - set -ex - - # Update repositories manually. 
- # Running `brew update` produces a comparison between the - # current checkout and the updated checkout, which takes a - # very long time because the existing checkout is 2y old. - for path in $(find /usr/local/Homebrew -type d -name .git) - do - cd $path/.. - git fetch --depth=1 origin - git reset --hard origin/master - done - - export HOMEBREW_NO_AUTO_UPDATE=1 - - # Install expect and moreutils so that we can call `unbuffer` and `ts`. - # moreutils installs a `parallel` executable by default, which conflicts - # with the executable from the GNU `parallel`, so we must unlink GNU - # `parallel` first, and relink it afterwards. - brew install coreutils - brew unlink parallel - brew install moreutils - brew link parallel --overwrite - brew install expect - - brew_install: - description: "Install Homebrew formulae" - parameters: - formulae: - type: string - default: "" - steps: - - run: - name: Install << parameters.formulae >> - no_output_timeout: "10m" - command: | - set -ex - export HOMEBREW_NO_AUTO_UPDATE=1 - brew install << parameters.formulae >> - - run_brew_for_ios_build: - steps: - - brew_update - - brew_install: - formulae: libtool - - apt_install: - parameters: - args: - type: string - descr: - type: string - default: "" - update: - type: boolean - default: true - steps: - - run: - name: > - <<^ parameters.descr >> apt install << parameters.args >> <> - <<# parameters.descr >> << parameters.descr >> <> - command: | - <<# parameters.update >> sudo apt update -qy <> - sudo apt install << parameters.args >> - - pip_install: - parameters: - args: - type: string - descr: - type: string - default: "" - user: - type: boolean - default: true - steps: - - run: - name: > - <<^ parameters.descr >> pip install << parameters.args >> <> - <<# parameters.descr >> << parameters.descr >> <> - command: > - pip install - <<# parameters.user >> --user <> - --progress-bar=off - << parameters.args >> - - install_torchvision: - parameters: - editable: - type: boolean - default: true - steps: - - pip_install: - args: --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/cpu - descr: Install PyTorch from nightly releases - - pip_install: - args: --no-build-isolation <<# parameters.editable >> --editable <> . - descr: Install torchvision <<# parameters.editable >> in editable mode <> - - # Most of the test suite is handled by the `unittest` jobs, with completely different workflow and setup. - # This command can be used if only a selection of tests need to be run, for ad-hoc files. 
- run_tests_selective: - parameters: - file_or_dir: - type: string - steps: - - run: - name: Install test utilities - command: pip install --progress-bar=off pytest pytest-mock - - run: - name: Run tests - command: pytest --junitxml=test-results/junit.xml -v --durations 20 <> - - store_test_results: - path: test-results - - download_model_weights: - parameters: - extract_roots: - type: string - default: "torchvision/models" - background: - type: boolean - default: true - steps: - - apt_install: - args: parallel wget - descr: Install download utilitites - - run: - name: Download model weights - background: << parameters.background >> - command: | - mkdir -p ~/.cache/torch/hub/checkpoints - python scripts/collect_model_urls.py << parameters.extract_roots >> \ - | parallel -j0 'wget --no-verbose -O ~/.cache/torch/hub/checkpoints/`basename {}` {}\?source=ci' - -binary_common: &binary_common - parameters: - # Edit these defaults to do a release - build_version: - description: "version number of release binary; by default, build a nightly" - type: string - default: "" - pytorch_version: - description: "PyTorch version to build against; by default, use a nightly" - type: string - default: "" - # Don't edit these - python_version: - description: "Python version to build against (e.g., 3.7)" - type: string - cu_version: - description: "CUDA version to build against, in CU format (e.g., cpu or cu100)" - type: string - default: "cpu" - unicode_abi: - description: "Python 2.7 wheel only: whether or not we are cp27mu (default: no)" - type: string - default: "" - wheel_docker_image: - description: "Wheel only: what docker image to use" - type: string - default: "" - conda_docker_image: - description: "Conda only: what docker image to use" - type: string - default: "pytorch/conda-builder:cpu" - environment: - PYTHON_VERSION: << parameters.python_version >> - PYTORCH_VERSION: << parameters.pytorch_version >> - UNICODE_ABI: << parameters.unicode_abi >> - CU_VERSION: << parameters.cu_version >> - MACOSX_DEPLOYMENT_TARGET: 10.9 - -torchvision_ios_params: &torchvision_ios_params - parameters: - build_environment: - type: string - default: "" - ios_arch: - type: string - default: "" - ios_platform: - type: string - default: "" - environment: - BUILD_ENVIRONMENT: << parameters.build_environment >> - IOS_ARCH: << parameters.ios_arch >> - IOS_PLATFORM: << parameters.ios_platform >> - -torchvision_android_params: &torchvision_android_params - parameters: - build_environment: - type: string - default: "" - environment: - BUILD_ENVIRONMENT: << parameters.build_environment >> - -smoke_test_common: &smoke_test_common - <<: *binary_common - docker: - - image: torchvision/smoke_test:latest - -jobs: - circleci_consistency: - docker: - - image: cimg/python:3.7 - steps: - - checkout - - pip_install: - args: jinja2 pyyaml - - run: - name: Check CircleCI config consistency - command: | - python .circleci/regenerate.py - git diff --exit-code || (echo ".circleci/config.yml not in sync with config.yml.in! 
Run .circleci/regenerate.py to update config"; exit 1) - - lint_python_and_config: - docker: - - image: cimg/python:3.7 - steps: - - checkout - - pip_install: - args: pre-commit - descr: Install lint utilities - - run: - name: Install pre-commit hooks - command: pre-commit install-hooks - - run: - name: Lint Python code and config files - command: pre-commit run --all-files - - run: - name: Required lint modifications - when: on_fail - command: git --no-pager diff - - lint_c: - docker: - - image: cimg/python:3.7 - steps: - - apt_install: - args: libtinfo5 - descr: Install additional system libraries - - checkout - - run: - name: Install lint utilities - command: | - curl https://oss-clang-format.s3.us-east-2.amazonaws.com/linux64/clang-format-linux64 -o clang-format - chmod +x clang-format - sudo mv clang-format /opt/clang-format - - run: - name: Lint C code - command: ./.circleci/unittest/linux/scripts/run-clang-format.py -r torchvision/csrc --clang-format-executable /opt/clang-format - - run: - name: Required lint modifications - when: on_fail - command: git --no-pager diff - - type_check_python: - docker: - - image: cimg/python:3.7 - steps: - - checkout - - install_torchvision: - editable: true - - pip_install: - args: mypy - descr: Install Python type check utilities - - run: - name: Check Python types statically - command: mypy --install-types --non-interactive --config-file mypy.ini - - unittest_torchhub: - docker: - - image: cimg/python:3.7 - steps: - - checkout - - install_torchvision - - run_tests_selective: - file_or_dir: test/test_hub.py - - unittest_onnx: - docker: - - image: cimg/python:3.7 - steps: - - checkout - - install_torchvision - - pip_install: - args: onnx onnxruntime - descr: Install ONNX - - run_tests_selective: - file_or_dir: test/test_onnx.py - - unittest_extended: - docker: - - image: cimg/python:3.7 - resource_class: xlarge - steps: - - checkout - - download_model_weights - - install_torchvision - - run: - name: Enable extended tests - command: echo 'export PYTORCH_TEST_WITH_EXTENDED=1' >> $BASH_ENV - - run_tests_selective: - file_or_dir: test/test_extended_*.py - - binary_linux_wheel: - <<: *binary_common - docker: - - image: << parameters.wheel_docker_image >> - resource_class: 2xlarge+ - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Build conda packages - no_output_timeout: 30m - command: | - set -ex - packaging/build_wheel.sh - - store_artifacts: - path: dist - - persist_to_workspace: - root: dist - paths: - - "*" - - binary_linux_conda: - <<: *binary_common - docker: - - image: "<< parameters.conda_docker_image >>" - resource_class: 2xlarge+ - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Build conda packages - no_output_timeout: 30m - command: | - set -ex - packaging/build_conda.sh - - store_artifacts: - path: /opt/conda/conda-bld/linux-64 - - persist_to_workspace: - root: /opt/conda/conda-bld/linux-64 - paths: - - "*" - - store_test_results: - path: build_results/ - - binary_win_conda: - <<: *binary_common - executor: windows-cpu - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Build conda packages - no_output_timeout: 30m - command: | - set -ex - source packaging/windows/internal/vc_install_helper.sh - packaging/windows/internal/cuda_install.bat - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate base - conda install -yq conda-build "conda-package-handling!=1.5.0" - packaging/build_conda.sh - rm 
/C/tools/miniconda3/conda-bld/win-64/vs${VC_YEAR}*.tar.bz2 - - store_artifacts: - path: C:/tools/miniconda3/conda-bld/win-64 - - persist_to_workspace: - root: C:/tools/miniconda3/conda-bld/win-64 - paths: - - "*" - - store_test_results: - path: build_results/ - - binary_win_wheel: - <<: *binary_common - executor: windows-cpu - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Build wheel packages - no_output_timeout: 30m - command: | - set -ex - source packaging/windows/internal/vc_install_helper.sh - packaging/windows/internal/cuda_install.bat - packaging/build_wheel.sh - - store_artifacts: - path: dist - - persist_to_workspace: - root: dist - paths: - - "*" - - store_test_results: - path: build_results/ - - binary_macos_wheel: - <<: *binary_common - macos: - xcode: "14.0" - steps: - - checkout_merge - - designate_upload_channel - - run: - # Cannot easily deduplicate this as source'ing activate - # will set environment variables which we need to propagate - # to build_wheel.sh - command: | - curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - sh conda.sh -b - source $HOME/miniconda3/bin/activate - packaging/build_wheel.sh - - store_artifacts: - path: dist - - persist_to_workspace: - root: dist - paths: - - "*" - - binary_ios_build: - <<: *torchvision_ios_params - macos: - xcode: "14.0" - steps: - - attach_workspace: - at: ~/workspace - - checkout - - run_brew_for_ios_build - - run: - name: Build - no_output_timeout: "1h" - command: | - script="/Users/distiller/project/.circleci/unittest/ios/scripts/binary_ios_build.sh" - cat "$script" - source "$script" - - persist_to_workspace: - root: /Users/distiller/workspace/ - paths: ios - - binary_ios_upload: - <<: *torchvision_ios_params - macos: - xcode: "14.0" - steps: - - attach_workspace: - at: ~/workspace - - checkout - - run_brew_for_ios_build - - run: - name: Upload - no_output_timeout: "1h" - command: | - script="/Users/distiller/project/.circleci/unittest/ios/scripts/binary_ios_upload.sh" - cat "$script" - source "$script" - - binary_android_build: - <<: *torchvision_android_params - docker: - - image: cimg/android:2021.08-ndk - resource_class: xlarge - steps: - - attach_workspace: - at: ~/workspace - - checkout - - run: - name: Build - no_output_timeout: "1h" - command: | - script="/home/circleci/project/.circleci/unittest/android/scripts/binary_android_build.sh" - cat "$script" - source "$script" - - store_artifacts: - path: ~/workspace/artifacts - - binary_android_upload: - <<: *torchvision_android_params - docker: - - image: cimg/android:2021.08-ndk - resource_class: xlarge - steps: - - attach_workspace: - at: ~/workspace - - checkout - - run: - name: Upload - no_output_timeout: "1h" - command: | - script="/home/circleci/project/.circleci/unittest/android/scripts/binary_android_upload.sh" - cat "$script" - source "$script" - - binary_macos_conda: - <<: *binary_common - macos: - xcode: "14.0" - steps: - - checkout_merge - - designate_upload_channel - - run: - command: | - curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - sh conda.sh -b - source $HOME/miniconda3/bin/activate - conda install -yq conda-build - packaging/build_conda.sh - - store_artifacts: - path: /Users/distiller/miniconda3/conda-bld/osx-64 - - persist_to_workspace: - root: /Users/distiller/miniconda3/conda-bld/osx-64 - paths: - - "*" - - store_test_results: - path: build_results/ - - # Requires org-member context - binary_conda_upload: - docker: - - image: 
continuumio/miniconda - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - command: | - # Prevent credential from leaking - conda install -yq anaconda-client - set -x - anaconda -t "${CONDA_PYTORCHBOT_TOKEN}" upload ~/workspace/*.tar.bz2 -u "pytorch-${UPLOAD_CHANNEL}" --label main --no-progress --force - - # Requires org-member context - binary_wheel_upload: - parameters: - subfolder: - description: "What whl subfolder to upload to, e.g., blank or cu100/ (trailing slash is important)" - type: string - docker: - - image: cimg/python:3.7 - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - checkout - - pip_install: - args: awscli - - run: - command: | - export PATH="$HOME/.local/bin:$PATH" - # Prevent credential from leaking - set +x - export AWS_ACCESS_KEY_ID="${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}" - export AWS_SECRET_ACCESS_KEY="${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}" - set -x - for pkg in ~/workspace/*.whl; do - aws s3 cp "$pkg" "s3://pytorch/whl/${UPLOAD_CHANNEL}/<< parameters.subfolder >>" --acl public-read - done - - smoke_test_linux_conda: - <<: *smoke_test_common - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - name: install binaries - command: | - set -x - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - conda install -v -y -c pytorch-nightly pytorch - conda install -v -y $(ls ~/workspace/torchvision*.tar.bz2) - - run: - name: smoke test - command: | - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - python -c "import torchvision" - - smoke_test_linux_pip: - <<: *smoke_test_common - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - name: install binaries - command: | - set -x - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - - pip_install: - args: $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html - - run: - name: smoke test - command: | - source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} - python -c "import torchvision" - - smoke_test_docker_image_build: - machine: - image: ubuntu-2004:202104-01 - resource_class: large - environment: - image_name: torchvision/smoke_test - steps: - - checkout - - designate_upload_channel - - run: - name: Build and push Docker image - no_output_timeout: "1h" - command: | - set +x - echo "${DOCKER_HUB_TOKEN}" | docker login --username "${DOCKER_HUB_USERNAME}" --password-stdin - set -x - cd .circleci/smoke_test/docker && docker build . 
-t ${image_name}:${CIRCLE_WORKFLOW_ID} - docker tag ${image_name}:${CIRCLE_WORKFLOW_ID} ${image_name}:latest - docker push ${image_name}:${CIRCLE_WORKFLOW_ID} - docker push ${image_name}:latest - - smoke_test_win_conda: - <<: *binary_common - executor: - name: windows-cpu - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - name: install binaries - command: | - set -x - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda env remove -n python${PYTHON_VERSION} || true - conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} - conda activate python${PYTHON_VERSION} - conda install -v -y -c pytorch-nightly pytorch - conda install -v -y $(ls ~/workspace/torchvision*.tar.bz2) - - run: - name: smoke test - command: | - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate python${PYTHON_VERSION} - python -c "import torchvision" - - smoke_test_win_pip: - <<: *binary_common - executor: - name: windows-cpu - steps: - - attach_workspace: - at: ~/workspace - - designate_upload_channel - - run: - name: install binaries - command: | - set -x - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} - conda activate python${PYTHON_VERSION} - - pip_install: - args: $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html - - run: - name: smoke test - command: | - eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" - conda activate python${PYTHON_VERSION} - python -c "import torchvision" - - unittest_linux_cpu: - <<: *binary_common - docker: - - image: "pytorch/manylinux-cpu" - resource_class: 2xlarge+ - steps: - - checkout - - designate_upload_channel - - run: - name: Generate cache key - # This will refresh cache on Sundays, nightly build should generate new cache. - command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - {% raw %} - keys: - - env-v2-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - {% endraw %} - - run: - name: Setup - command: .circleci/unittest/linux/scripts/setup_env.sh - - save_cache: - {% raw %} - key: env-v2-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - {% endraw %} - paths: - - conda - - env - - run: - name: Install torchvision - command: .circleci/unittest/linux/scripts/install.sh - - run: - name: Run tests - command: .circleci/unittest/linux/scripts/run_test.sh - - run: - name: Post process - command: .circleci/unittest/linux/scripts/post_process.sh - - store_test_results: - path: test-results - - unittest_linux_gpu: - <<: *binary_common - machine: - image: ubuntu-2004-cuda-11.4:202110-01 - resource_class: gpu.nvidia.medium - environment: - image_name: "pytorch/manylinux-cuda116" - CU_VERSION: << parameters.cu_version >> - PYTHON_VERSION: << parameters.python_version >> - steps: - - checkout - - designate_upload_channel - - run: - name: Generate cache key - # This will refresh cache on Sundays, nightly build should generate new cache. 
- command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - {% raw %} - keys: - - env-v3-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - {% endraw %} - - run: - name: Setup - command: docker run -e PYTHON_VERSION -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/setup_env.sh - - save_cache: - {% raw %} - key: env-v3-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - {% endraw %} - paths: - - conda - - env - - run: - # Here we create an envlist file that contains some env variables that we want the docker container to be aware of. - # Normally, the CIRCLECI variable is set and available on all CI workflows: https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables. - # They're avaiable in all the other workflows (OSX and Windows). - # But here, we're running the unittest_linux_gpu workflows in a docker container, where those variables aren't accessible. - # So instead we dump the variables we need in env.list and we pass that file when invoking "docker run". - name: export CIRCLECI env var - command: echo "CIRCLECI=true" >> ./env.list - - run: - name: Install torchvision - command: docker run -t --gpus all -v $PWD:$PWD -w $PWD -e UPLOAD_CHANNEL -e CU_VERSION "${image_name}" .circleci/unittest/linux/scripts/install.sh - - run: - name: Run tests - command: docker run --env-file ./env.list -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/run_test.sh - - run: - name: Post Process - command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/post_process.sh - - store_test_results: - path: test-results - - unittest_windows_cpu: - <<: *binary_common - executor: - name: windows-cpu - steps: - - checkout - - designate_upload_channel - - run: - name: Generate cache key - # This will refresh cache on Sundays, nightly build should generate new cache. - command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - {% raw %} - keys: - - env-v2-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - {% endraw %} - - run: - name: Setup - command: .circleci/unittest/windows/scripts/setup_env.sh - - save_cache: - {% raw %} - key: env-v2-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - {% endraw %} - paths: - - conda - - env - - run: - name: Install torchvision - command: .circleci/unittest/windows/scripts/install.sh - - run: - name: Run tests - command: .circleci/unittest/windows/scripts/run_test.sh - - run: - name: Post process - command: .circleci/unittest/windows/scripts/post_process.sh - - store_test_results: - path: test-results - - unittest_windows_gpu: - <<: *binary_common - executor: - name: windows-gpu - environment: - CUDA_VERSION: "11.6" - PYTHON_VERSION: << parameters.python_version >> - steps: - - checkout - - designate_upload_channel - - run: - name: Generate cache key - # This will refresh cache on Sundays, nightly build should generate new cache. 
- command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - {% raw %} - keys: - - env-v1-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - {% endraw %} - - run: - name: Setup - command: .circleci/unittest/windows/scripts/setup_env.sh - - save_cache: - {% raw %} - key: env-v1-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - {% endraw %} - paths: - - conda - - env - - run: - name: Install CUDA - command: packaging/windows/internal/cuda_install.bat - - run: - name: Update CUDA driver - command: packaging/windows/internal/driver_update.bat - - run: - name: Install torchvision - command: .circleci/unittest/windows/scripts/install.sh - - run: - name: Run tests - command: .circleci/unittest/windows/scripts/run_test.sh - - run: - name: Post process - command: .circleci/unittest/windows/scripts/post_process.sh - - store_test_results: - path: test-results - - unittest_macos_cpu: - <<: *binary_common - macos: - xcode: "14.0" - resource_class: large - steps: - - checkout - - designate_upload_channel - - run: - name: Install wget - command: HOMEBREW_NO_AUTO_UPDATE=1 brew install wget - # Disable brew auto update which is very slow - - run: - name: Generate cache key - # This will refresh cache on Sundays, nightly build should generate new cache. - command: echo "$(date +"%Y-%U")" > .circleci-weekly - - restore_cache: - {% raw %} - keys: - - env-v3-macos-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - {% endraw %} - - run: - name: Setup - command: .circleci/unittest/linux/scripts/setup_env.sh - - save_cache: - {% raw %} - key: env-v3-macos-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} - {% endraw %} - paths: - - conda - - env - - run: - name: Install torchvision - command: .circleci/unittest/linux/scripts/install.sh - - run: - name: Run tests - command: .circleci/unittest/linux/scripts/run_test.sh - - run: - name: Post process - command: .circleci/unittest/linux/scripts/post_process.sh - - store_test_results: - path: test-results - - cmake_linux_cpu: - <<: *binary_common - docker: - - image: "pytorch/manylinux-cpu" - resource_class: 2xlarge+ - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Setup conda - command: .circleci/unittest/linux/scripts/setup_env.sh - - run: packaging/build_cmake.sh - - cmake_linux_gpu: - <<: *binary_common - machine: - image: ubuntu-2004-cuda-11.4:202110-01 - resource_class: gpu.nvidia.small - environment: - PYTHON_VERSION: << parameters.python_version >> - PYTORCH_VERSION: << parameters.pytorch_version >> - UNICODE_ABI: << parameters.unicode_abi >> - CU_VERSION: << parameters.cu_version >> - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Setup conda - command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> .circleci/unittest/linux/scripts/setup_env.sh - - run: - name: Build torchvision C++ distribution and test - no_output_timeout: 30m - command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -e UPLOAD_CHANNEL -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> 
packaging/build_cmake.sh - - cmake_macos_cpu: - <<: *binary_common - macos: - xcode: "14.0" - steps: - - checkout_merge - - designate_upload_channel - - run: - command: | - curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - sh conda.sh -b - source $HOME/miniconda3/bin/activate - conda install -yq conda-build cmake - packaging/build_cmake.sh - - cmake_windows_cpu: - <<: *binary_common - executor: - name: windows-cpu - steps: - - checkout_merge - - designate_upload_channel - - run: - command: | - set -ex - source packaging/windows/internal/vc_install_helper.sh - packaging/build_cmake.sh - - cmake_windows_gpu: - <<: *binary_common - executor: - name: windows-gpu - steps: - - checkout_merge - - designate_upload_channel - - run: - name: Update CUDA driver - command: packaging/windows/internal/driver_update.bat - - run: - command: | - set -ex - source packaging/windows/internal/vc_install_helper.sh - packaging/windows/internal/cuda_install.bat - packaging/build_cmake.sh - - build_docs: - <<: *binary_common - docker: - - image: cimg/python:3.7 - resource_class: 2xlarge+ - steps: - - attach_workspace: - at: ~/workspace - - checkout - - download_model_weights - - run: - name: Setup - command: .circleci/unittest/linux/scripts/setup_env.sh - - designate_upload_channel - - run: - name: Install torchvision - command: .circleci/unittest/linux/scripts/install.sh - - run: - name: Build docs - command: | - set -ex - # turn v1.12.0rc3 into 1.12.0 - tag=$(echo $CIRCLE_TAG | sed -e 's/v*\([0-9.]*\).*/\1/') - VERSION=${tag:-main} - eval "$(./conda/bin/conda shell.bash hook)" - conda activate ./env - pushd docs - pip install --progress-bar=off -r requirements.txt - make html - popd - - persist_to_workspace: - root: ./ - paths: - - "*" - - store_artifacts: - path: ./docs/build/html - destination: docs - - upload_docs: - <<: *binary_common - docker: - - image: "pytorch/manylinux-cuda100" - resource_class: 2xlarge+ - steps: - - attach_workspace: - at: ~/workspace - - run: - name: Generate netrc - command: | - # set credentials for https pushing - # requires the org-member context - cat > ~/.netrc \<> ~/.bashrc -CMD [ "/bin/bash"] diff --git a/.circleci/unittest/android/scripts/binary_android_build.sh b/.circleci/unittest/android/scripts/binary_android_build.sh deleted file mode 100644 index 0d8c0d47d8a..00000000000 --- a/.circleci/unittest/android/scripts/binary_android_build.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -set -ex -o pipefail - -echo "DIR: $(pwd)" -echo "ANDROID_HOME=${ANDROID_HOME}" -echo "ANDROID_NDK_HOME=${ANDROID_NDK_HOME}" -echo "JAVA_HOME=${JAVA_HOME}" - -WORKSPACE=/home/circleci/workspace -VISION_ANDROID=/home/circleci/project/android - -. /home/circleci/project/.circleci/unittest/android/scripts/install_gradle.sh - -GRADLE_LOCAL_PROPERTIES=${VISION_ANDROID}/local.properties -rm -f $GRADLE_LOCAL_PROPERTIES - -echo "sdk.dir=${ANDROID_HOME}" >> $GRADLE_LOCAL_PROPERTIES -echo "ndk.dir=${ANDROID_NDK_HOME}" >> $GRADLE_LOCAL_PROPERTIES - -echo "GRADLE_PATH $GRADLE_PATH" -echo "GRADLE_HOME $GRADLE_HOME" - -${GRADLE_PATH} --scan --stacktrace --debug --no-daemon -p ${VISION_ANDROID} assemble || true - -mkdir -p ~/workspace/artifacts -find . -type f -name *aar -print | xargs tar cfvz ~/workspace/artifacts/artifacts-aars.tgz -find . 
-type f -name *apk -print | xargs tar cfvz ~/workspace/artifacts/artifacts-apks.tgz diff --git a/.circleci/unittest/android/scripts/binary_android_upload.sh b/.circleci/unittest/android/scripts/binary_android_upload.sh deleted file mode 100644 index 1472a877d90..00000000000 --- a/.circleci/unittest/android/scripts/binary_android_upload.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash -set -ex -o pipefail - -echo "DIR: $(pwd)" -echo "ANDROID_HOME=${ANDROID_HOME}" -echo "ANDROID_NDK_HOME=${ANDROID_NDK_HOME}" -echo "JAVA_HOME=${JAVA_HOME}" - -WORKSPACE=/home/circleci/workspace -VISION_ANDROID=/home/circleci/project/android - -. /home/circleci/project/.circleci/unittest/android/scripts/install_gradle.sh - -GRADLE_LOCAL_PROPERTIES=${VISION_ANDROID}/local.properties -rm -f $GRADLE_LOCAL_PROPERTIES -GRADLE_PROPERTIES=/home/circleci/project/android/gradle.properties - -echo "sdk.dir=${ANDROID_HOME}" >> $GRADLE_LOCAL_PROPERTIES -echo "ndk.dir=${ANDROID_NDK_HOME}" >> $GRADLE_LOCAL_PROPERTIES - -echo "SONATYPE_NEXUS_USERNAME=${SONATYPE_NEXUS_USERNAME}" >> $GRADLE_PROPERTIES -echo "mavenCentralRepositoryUsername=${SONATYPE_NEXUS_USERNAME}" >> $GRADLE_PROPERTIES -echo "SONATYPE_NEXUS_PASSWORD=${SONATYPE_NEXUS_PASSWORD}" >> $GRADLE_PROPERTIES -echo "mavenCentralRepositoryPassword=${SONATYPE_NEXUS_PASSWORD}" >> $GRADLE_PROPERTIES - -echo "signing.keyId=${ANDROID_SIGN_KEY}" >> $GRADLE_PROPERTIES -echo "signing.password=${ANDROID_SIGN_PASS}" >> $GRADLE_PROPERTIES - -cat /home/circleci/project/android/gradle.properties | grep VERSION - -${GRADLE_PATH} --scan --stacktrace --debug --no-daemon -p ${VISION_ANDROID} ops:uploadArchives - -mkdir -p ~/workspace/artifacts -find . -type f -name *aar -print | xargs tar cfvz ~/workspace/artifacts/artifacts-aars.tgz diff --git a/.circleci/unittest/android/scripts/install_gradle.sh b/.circleci/unittest/android/scripts/install_gradle.sh deleted file mode 100755 index 5f803abfa94..00000000000 --- a/.circleci/unittest/android/scripts/install_gradle.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -set -ex - -_https_amazon_aws=https://ossci-android.s3.amazonaws.com -GRADLE_VERSION=6.8.3 - -_gradle_home=/opt/gradle -sudo rm -rf $gradle_home -sudo mkdir -p $_gradle_home - -curl --silent --output /tmp/gradle.zip --retry 3 $_https_amazon_aws/gradle-${GRADLE_VERSION}-bin.zip - -sudo unzip -q /tmp/gradle.zip -d $_gradle_home -rm /tmp/gradle.zip - -sudo chmod -R 777 $_gradle_home - -export GRADLE_HOME=$_gradle_home/gradle-$GRADLE_VERSION -export GRADLE_PATH=${GRADLE_HOME}/bin/gradle diff --git a/.circleci/unittest/ios/scripts/binary_ios_build.sh b/.circleci/unittest/ios/scripts/binary_ios_build.sh deleted file mode 100755 index e2ad7b0c55f..00000000000 --- a/.circleci/unittest/ios/scripts/binary_ios_build.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash -set -ex -o pipefail - -echo "" -echo "DIR: $(pwd)" -WORKSPACE=/Users/distiller/workspace -PROJ_ROOT_IOS=/Users/distiller/project/ios -PYTORCH_IOS_NIGHTLY_NAME=libtorch_ios_nightly_build.zip -export TCLLIBPATH="/usr/local/lib" - -# install conda -curl --retry 3 -o ~/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -chmod +x ~/conda.sh -/bin/bash ~/conda.sh -b -p ~/anaconda -export PATH="~/anaconda/bin:${PATH}" -source ~/anaconda/bin/activate - -# install dependencies -conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi requests typing_extensions wget --yes -conda install -c conda-forge valgrind --yes -export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} - -# sync 
submodules
-cd ${PROJ_ROOT_IOS}
-git submodule sync
-git submodule update --init --recursive
-
-# download pytorch-iOS nightly build and unzip it
-mkdir -p ${PROJ_ROOT_IOS}/lib
-mkdir -p ${PROJ_ROOT_IOS}/build
-mkdir -p ${PROJ_ROOT_IOS}/pytorch
-TORCH_ROOT="${PROJ_ROOT_IOS}/pytorch"
-
-cd ${TORCH_ROOT}
-wget https://ossci-ios-build.s3.amazonaws.com/${PYTORCH_IOS_NIGHTLY_NAME}
-mkdir -p ./build_ios
-unzip -d ./build_ios ./${PYTORCH_IOS_NIGHTLY_NAME}
-
-LIBTORCH_HEADER_ROOT="${TORCH_ROOT}/build_ios/install/include"
-cd ${PROJ_ROOT_IOS}
-IOS_ARCH=${IOS_ARCH} LIBTORCH_HEADER_ROOT=${LIBTORCH_HEADER_ROOT} ./build_ios.sh
-rm -rf ${TORCH_ROOT}
-
-# store the binary
-DEST_DIR=${WORKSPACE}/ios/${IOS_ARCH}
-mkdir -p ${DEST_DIR}
-cp ${PROJ_ROOT_IOS}/lib/*.a ${DEST_DIR}
diff --git a/.circleci/unittest/ios/scripts/binary_ios_upload.sh b/.circleci/unittest/ios/scripts/binary_ios_upload.sh
deleted file mode 100644
index ce56388e5da..00000000000
--- a/.circleci/unittest/ios/scripts/binary_ios_upload.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-set -ex -o pipefail
-
-echo ""
-echo "DIR: $(pwd)"
-
-WORKSPACE=/Users/distiller/workspace
-PROJ_ROOT=/Users/distiller/project
-ARTIFACTS_DIR=${WORKSPACE}/ios
-ls ${ARTIFACTS_DIR}
-ZIP_DIR=${WORKSPACE}/zip
-mkdir -p ${ZIP_DIR}/install/lib
-
-# build a FAT binary
-cd ${ZIP_DIR}/install/lib
-libs=("${ARTIFACTS_DIR}/x86_64/libtorchvision_ops.a" "${ARTIFACTS_DIR}/arm64/libtorchvision_ops.a")
-lipo -create "${libs[@]}" -o ${ZIP_DIR}/install/lib/libtorchvision_ops.a
-lipo -i ${ZIP_DIR}/install/lib/*.a
-
-# copy the license
-cp ${PROJ_ROOT}/LICENSE ${ZIP_DIR}/
-# zip the library
-ZIPFILE=libtorchvision_ops_ios_nightly_build.zip
-cd ${ZIP_DIR}
-#for testing
-touch version.txt
-echo $(date +%s) > version.txt
-zip -r ${ZIPFILE} install version.txt LICENSE
-
-# upload to aws
-# Install conda then 'conda install' awscli
-curl --retry 3 -o ~/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
-chmod +x ~/conda.sh
-/bin/bash ~/conda.sh -b -p ~/anaconda
-export PATH="~/anaconda/bin:${PATH}"
-source ~/anaconda/bin/activate
-conda install -c conda-forge awscli --yes
-set +x
-export AWS_ACCESS_KEY_ID=${AWS_S3_ACCESS_KEY_FOR_PYTORCH_BINARY_UPLOAD}
-export AWS_SECRET_ACCESS_KEY=${AWS_S3_ACCESS_SECRET_FOR_PYTORCH_BINARY_UPLOAD}
-set -x
-aws s3 cp ${ZIPFILE} s3://ossci-ios-build/ --acl public-read
diff --git a/.circleci/unittest/linux/scripts/environment.yml b/.circleci/unittest/linux/scripts/environment.yml
deleted file mode 100644
index fae96c5f93c..00000000000
--- a/.circleci/unittest/linux/scripts/environment.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-channels:
-  - pytorch
-  - defaults
-dependencies:
-  - pytest
-  - pytest-cov
-  - pytest-mock
-  - pip
-  - libpng
-  - jpeg
-  - ca-certificates
-  - h5py
-  - pip:
-      - future
-      - scipy
-      - av < 10
diff --git a/.circleci/unittest/linux/scripts/install.sh b/.circleci/unittest/linux/scripts/install.sh
deleted file mode 100755
index 54722842a74..00000000000
--- a/.circleci/unittest/linux/scripts/install.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env bash
-
-unset PYTORCH_VERSION
-# For unittest, nightly PyTorch is used as the following section,
-# so no need to set PYTORCH_VERSION.
-# In fact, keeping PYTORCH_VERSION forces us to hardcode PyTorch version in config.
- -set -e - -eval "$(./conda/bin/conda shell.bash hook)" -conda activate ./env - -if [ "${CU_VERSION:-}" == cpu ] ; then - cudatoolkit="cpuonly" - version="cpu" -else - if [[ ${#CU_VERSION} -eq 4 ]]; then - CUDA_VERSION="${CU_VERSION:2:1}.${CU_VERSION:3:1}" - elif [[ ${#CU_VERSION} -eq 5 ]]; then - CUDA_VERSION="${CU_VERSION:2:2}.${CU_VERSION:4:1}" - fi - echo "Using CUDA $CUDA_VERSION as determined by CU_VERSION: ${CU_VERSION} " - version="$(python -c "print('.'.join(\"${CUDA_VERSION}\".split('.')[:2]))")" - cudatoolkit="pytorch-cuda=${version}" -fi - -case "$(uname -s)" in - Darwin*) os=MacOSX;; - *) os=Linux -esac - -printf "Installing PyTorch with %s\n" "${cudatoolkit}" -if [ "${os}" == "MacOSX" ]; then - conda install -y -c "pytorch-${UPLOAD_CHANNEL}" "pytorch-${UPLOAD_CHANNEL}"::pytorch "${cudatoolkit}" -else - conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c nvidia "pytorch-${UPLOAD_CHANNEL}"::pytorch[build="*${version}*"] "${cudatoolkit}" -fi - -printf "* Installing torchvision\n" -python setup.py develop diff --git a/.circleci/unittest/linux/scripts/post_process.sh b/.circleci/unittest/linux/scripts/post_process.sh deleted file mode 100755 index e97bf2a7b1b..00000000000 --- a/.circleci/unittest/linux/scripts/post_process.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env bash - -set -e - -eval "$(./conda/bin/conda shell.bash hook)" -conda activate ./env diff --git a/.circleci/unittest/linux/scripts/run-clang-format.py b/.circleci/unittest/linux/scripts/run-clang-format.py deleted file mode 100755 index 5c61b2519e0..00000000000 --- a/.circleci/unittest/linux/scripts/run-clang-format.py +++ /dev/null @@ -1,331 +0,0 @@ -#!/usr/bin/env python -""" -MIT License - -Copyright (c) 2017 Guillaume Papin - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -A wrapper script around clang-format, suitable for linting multiple files -and to use for continuous integration. - -This is an alternative API for the clang-format command line. -It runs over multiple files and directories in parallel. -A diff output is produced and a sensible exit code is returned. 
- -""" - -import argparse -import difflib -import fnmatch -import multiprocessing -import os -import signal -import subprocess -import sys -import traceback -from functools import partial - -try: - from subprocess import DEVNULL # py3k -except ImportError: - DEVNULL = open(os.devnull, "wb") - - -DEFAULT_EXTENSIONS = "c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx,cu" - - -class ExitStatus: - SUCCESS = 0 - DIFF = 1 - TROUBLE = 2 - - -def list_files(files, recursive=False, extensions=None, exclude=None): - if extensions is None: - extensions = [] - if exclude is None: - exclude = [] - - out = [] - for file in files: - if recursive and os.path.isdir(file): - for dirpath, dnames, fnames in os.walk(file): - fpaths = [os.path.join(dirpath, fname) for fname in fnames] - for pattern in exclude: - # os.walk() supports trimming down the dnames list - # by modifying it in-place, - # to avoid unnecessary directory listings. - dnames[:] = [x for x in dnames if not fnmatch.fnmatch(os.path.join(dirpath, x), pattern)] - fpaths = [x for x in fpaths if not fnmatch.fnmatch(x, pattern)] - for f in fpaths: - ext = os.path.splitext(f)[1][1:] - if ext in extensions: - out.append(f) - else: - out.append(file) - return out - - -def make_diff(file, original, reformatted): - return list( - difflib.unified_diff( - original, reformatted, fromfile=f"{file}\t(original)", tofile=f"{file}\t(reformatted)", n=3 - ) - ) - - -class DiffError(Exception): - def __init__(self, message, errs=None): - super().__init__(message) - self.errs = errs or [] - - -class UnexpectedError(Exception): - def __init__(self, message, exc=None): - super().__init__(message) - self.formatted_traceback = traceback.format_exc() - self.exc = exc - - -def run_clang_format_diff_wrapper(args, file): - try: - ret = run_clang_format_diff(args, file) - return ret - except DiffError: - raise - except Exception as e: - raise UnexpectedError(f"{file}: {e.__class__.__name__}: {e}", e) - - -def run_clang_format_diff(args, file): - try: - with open(file, encoding="utf-8") as f: - original = f.readlines() - except OSError as exc: - raise DiffError(str(exc)) - invocation = [args.clang_format_executable, file] - - # Use of utf-8 to decode the process output. - # - # Hopefully, this is the correct thing to do. - # - # It's done due to the following assumptions (which may be incorrect): - # - clang-format will returns the bytes read from the files as-is, - # without conversion, and it is already assumed that the files use utf-8. - # - if the diagnostics were internationalized, they would use utf-8: - # > Adding Translations to Clang - # > - # > Not possible yet! - # > Diagnostic strings should be written in UTF-8, - # > the client can translate to the relevant code page if needed. - # > Each translation completely replaces the format string - # > for the diagnostic. 
- # > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation - - try: - proc = subprocess.Popen( - invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, encoding="utf-8" - ) - except OSError as exc: - raise DiffError(f"Command '{subprocess.list2cmdline(invocation)}' failed to start: {exc}") - proc_stdout = proc.stdout - proc_stderr = proc.stderr - - # hopefully the stderr pipe won't get full and block the process - outs = list(proc_stdout.readlines()) - errs = list(proc_stderr.readlines()) - proc.wait() - if proc.returncode: - raise DiffError( - "Command '{}' returned non-zero exit status {}".format( - subprocess.list2cmdline(invocation), proc.returncode - ), - errs, - ) - return make_diff(file, original, outs), errs - - -def bold_red(s): - return "\x1b[1m\x1b[31m" + s + "\x1b[0m" - - -def colorize(diff_lines): - def bold(s): - return "\x1b[1m" + s + "\x1b[0m" - - def cyan(s): - return "\x1b[36m" + s + "\x1b[0m" - - def green(s): - return "\x1b[32m" + s + "\x1b[0m" - - def red(s): - return "\x1b[31m" + s + "\x1b[0m" - - for line in diff_lines: - if line[:4] in ["--- ", "+++ "]: - yield bold(line) - elif line.startswith("@@ "): - yield cyan(line) - elif line.startswith("+"): - yield green(line) - elif line.startswith("-"): - yield red(line) - else: - yield line - - -def print_diff(diff_lines, use_color): - if use_color: - diff_lines = colorize(diff_lines) - sys.stdout.writelines(diff_lines) - - -def print_trouble(prog, message, use_colors): - error_text = "error:" - if use_colors: - error_text = bold_red(error_text) - print(f"{prog}: {error_text} {message}", file=sys.stderr) - - -def main(): - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument( - "--clang-format-executable", - metavar="EXECUTABLE", - help="path to the clang-format executable", - default="clang-format", - ) - parser.add_argument( - "--extensions", - help=f"comma separated list of file extensions (default: {DEFAULT_EXTENSIONS})", - default=DEFAULT_EXTENSIONS, - ) - parser.add_argument("-r", "--recursive", action="store_true", help="run recursively over directories") - parser.add_argument("files", metavar="file", nargs="+") - parser.add_argument("-q", "--quiet", action="store_true") - parser.add_argument( - "-j", - metavar="N", - type=int, - default=0, - help="run N clang-format jobs in parallel (default number of cpus + 1)", - ) - parser.add_argument( - "--color", default="auto", choices=["auto", "always", "never"], help="show colored diff (default: auto)" - ) - parser.add_argument( - "-e", - "--exclude", - metavar="PATTERN", - action="append", - default=[], - help="exclude paths matching the given glob-like pattern(s) from recursive search", - ) - - args = parser.parse_args() - - # use default signal handling, like diff return SIGINT value on ^C - # https://bugs.python.org/issue14229#msg156446 - signal.signal(signal.SIGINT, signal.SIG_DFL) - try: - signal.SIGPIPE - except AttributeError: - # compatibility, SIGPIPE does not exist on Windows - pass - else: - signal.signal(signal.SIGPIPE, signal.SIG_DFL) - - colored_stdout = False - colored_stderr = False - if args.color == "always": - colored_stdout = True - colored_stderr = True - elif args.color == "auto": - colored_stdout = sys.stdout.isatty() - colored_stderr = sys.stderr.isatty() - - version_invocation = [args.clang_format_executable, "--version"] - try: - subprocess.check_call(version_invocation, stdout=DEVNULL) - except subprocess.CalledProcessError as e: - print_trouble(parser.prog, str(e), 
use_colors=colored_stderr) - return ExitStatus.TROUBLE - except OSError as e: - print_trouble( - parser.prog, - f"Command '{subprocess.list2cmdline(version_invocation)}' failed to start: {e}", - use_colors=colored_stderr, - ) - return ExitStatus.TROUBLE - - retcode = ExitStatus.SUCCESS - files = list_files( - args.files, recursive=args.recursive, exclude=args.exclude, extensions=args.extensions.split(",") - ) - - if not files: - return - - njobs = args.j - if njobs == 0: - njobs = multiprocessing.cpu_count() + 1 - njobs = min(len(files), njobs) - - if njobs == 1: - # execute directly instead of in a pool, - # less overhead, simpler stacktraces - it = (run_clang_format_diff_wrapper(args, file) for file in files) - pool = None - else: - pool = multiprocessing.Pool(njobs) - it = pool.imap_unordered(partial(run_clang_format_diff_wrapper, args), files) - while True: - try: - outs, errs = next(it) - except StopIteration: - break - except DiffError as e: - print_trouble(parser.prog, str(e), use_colors=colored_stderr) - retcode = ExitStatus.TROUBLE - sys.stderr.writelines(e.errs) - except UnexpectedError as e: - print_trouble(parser.prog, str(e), use_colors=colored_stderr) - sys.stderr.write(e.formatted_traceback) - retcode = ExitStatus.TROUBLE - # stop at the first unexpected error, - # something could be very wrong, - # don't process all files unnecessarily - if pool: - pool.terminate() - break - else: - sys.stderr.writelines(errs) - if outs == []: - continue - if not args.quiet: - print_diff(outs, use_color=colored_stdout) - if retcode == ExitStatus.SUCCESS: - retcode = ExitStatus.DIFF - return retcode - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/.circleci/unittest/linux/scripts/run_test.sh b/.circleci/unittest/linux/scripts/run_test.sh deleted file mode 100755 index 8f6b8cb8485..00000000000 --- a/.circleci/unittest/linux/scripts/run_test.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash - -set -e - -eval "$(./conda/bin/conda shell.bash hook)" -conda activate ./env - -python -m torch.utils.collect_env -pytest --junitxml=test-results/junit.xml -v --durations 20 diff --git a/.circleci/unittest/linux/scripts/setup_env.sh b/.circleci/unittest/linux/scripts/setup_env.sh deleted file mode 100755 index 0574cdff1cf..00000000000 --- a/.circleci/unittest/linux/scripts/setup_env.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -# This script is for setting up environment in which unit test is ran. -# To speed up the CI time, the resulting environment is cached. -# -# Do not install PyTorch and torchvision here, otherwise they also get cached. - -set -e - -this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -# Avoid error: "fatal: unsafe repository" -git config --global --add safe.directory '*' -root_dir="$(git rev-parse --show-toplevel)" -conda_dir="${root_dir}/conda" -env_dir="${root_dir}/env" - -cd "${root_dir}" - -case "$(uname -s)" in - Darwin*) os=MacOSX;; - *) os=Linux -esac - -# 1. Install conda at ./conda -if [ ! -d "${conda_dir}" ]; then - printf "* Installing conda\n" - wget -O miniconda.sh "http://repo.continuum.io/miniconda/Miniconda3-latest-${os}-x86_64.sh" - bash ./miniconda.sh -b -f -p "${conda_dir}" -fi -eval "$(${conda_dir}/bin/conda shell.bash hook)" - -# 2. Create test environment at ./env -if [ ! -d "${env_dir}" ]; then - printf "* Creating a test environment\n" - conda create --prefix "${env_dir}" -y python="$PYTHON_VERSION" -fi -conda activate "${env_dir}" - -# 3. 
Install Conda dependencies -printf "* Installing dependencies (except PyTorch)\n" -FFMPEG_PIN="=4.2" -if [[ "${PYTHON_VERSION}" = "3.9" ]]; then - FFMPEG_PIN=">=4.2" -fi - -conda install -y -c pytorch "ffmpeg${FFMPEG_PIN}" -conda env update --file "${this_dir}/environment.yml" --prune diff --git a/.circleci/unittest/windows/scripts/environment.yml b/.circleci/unittest/windows/scripts/environment.yml deleted file mode 100644 index d229aafb41a..00000000000 --- a/.circleci/unittest/windows/scripts/environment.yml +++ /dev/null @@ -1,19 +0,0 @@ -channels: - - pytorch - - defaults -dependencies: - - pytest - - pytest-cov - - pytest-mock - - pip - - libpng - - jpeg - - ca-certificates - - hdf5 - - setuptools - - pip: - - future - - scipy - - av !=9.1.1, <10 - - dataclasses - - h5py diff --git a/.circleci/unittest/windows/scripts/install.sh b/.circleci/unittest/windows/scripts/install.sh deleted file mode 100644 index 85920abb8da..00000000000 --- a/.circleci/unittest/windows/scripts/install.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env bash - -unset PYTORCH_VERSION -# For unittest, nightly PyTorch is used as the following section, -# so no need to set PYTORCH_VERSION. -# In fact, keeping PYTORCH_VERSION forces us to hardcode PyTorch version in config. - -set -ex - -this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" - -eval "$(./conda/Scripts/conda.exe 'shell.bash' 'hook')" -conda activate ./env - -# TODO, refactor the below logic to make it easy to understand how to get correct cuda_version. -if [ "${CU_VERSION:-}" == cpu ] ; then - cudatoolkit="cpuonly" - version="cpu" -else - if [[ ${#CU_VERSION} -eq 4 ]]; then - CUDA_VERSION="${CU_VERSION:2:1}.${CU_VERSION:3:1}" - elif [[ ${#CU_VERSION} -eq 5 ]]; then - CUDA_VERSION="${CU_VERSION:2:2}.${CU_VERSION:4:1}" - fi - - cuda_toolkit_pckg="cudatoolkit" - if [[ $CUDA_VERSION == 11.6 || $CUDA_VERSION == 11.7 ]]; then - cuda_toolkit_pckg="pytorch-cuda" - fi - - echo "Using CUDA $CUDA_VERSION as determined by CU_VERSION" - version="$(python -c "print('.'.join(\"${CUDA_VERSION}\".split('.')[:2]))")" - cudatoolkit="${cuda_toolkit_pckg}=${version}" -fi - -printf "Installing PyTorch with %s\n" "${cudatoolkit}" -conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c nvidia "pytorch-${UPLOAD_CHANNEL}"::pytorch[build="*${version}*"] "${cudatoolkit}" - -torch_cuda=$(python -c "import torch; print(torch.cuda.is_available())") -echo torch.cuda.is_available is $torch_cuda - -if [ ! 
-z "${CUDA_VERSION:-}" ] ; then
-    if [ "$torch_cuda" == "False" ]; then
-        echo "torch with cuda installed but torch.cuda.is_available() is False"
-        exit 1
-    fi
-fi
-
-source "$this_dir/set_cuda_envs.sh"
-
-printf "* Installing torchvision\n"
-"$this_dir/vc_env_helper.bat" python setup.py develop
diff --git a/.circleci/unittest/windows/scripts/install_conda.bat b/.circleci/unittest/windows/scripts/install_conda.bat
deleted file mode 100644
index 6052ad08b10..00000000000
--- a/.circleci/unittest/windows/scripts/install_conda.bat
+++ /dev/null
@@ -1 +0,0 @@
-start /wait "" "%miniconda_exe%" /S /InstallationType=JustMe /RegisterPython=0 /AddToPath=0 /D=%tmp_conda%
diff --git a/.circleci/unittest/windows/scripts/post_process.sh b/.circleci/unittest/windows/scripts/post_process.sh
deleted file mode 100644
index 5c5cbb758a9..00000000000
--- a/.circleci/unittest/windows/scripts/post_process.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-eval "$(./conda/Scripts/conda.exe 'shell.bash' 'hook')"
-conda activate ./env
diff --git a/.circleci/unittest/windows/scripts/run_test.sh b/.circleci/unittest/windows/scripts/run_test.sh
deleted file mode 100644
index 802ad37f511..00000000000
--- a/.circleci/unittest/windows/scripts/run_test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-eval "$(./conda/Scripts/conda.exe 'shell.bash' 'hook')"
-conda activate ./env
-
-this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-source "$this_dir/set_cuda_envs.sh"
-
-python -m torch.utils.collect_env
-pytest --junitxml=test-results/junit.xml -v --durations 20
diff --git a/.circleci/unittest/windows/scripts/set_cuda_envs.sh b/.circleci/unittest/windows/scripts/set_cuda_envs.sh
deleted file mode 100644
index 7db3137b594..00000000000
--- a/.circleci/unittest/windows/scripts/set_cuda_envs.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env bash
-set -ex
-
-echo CU_VERSION is "${CU_VERSION}"
-echo CUDA_VERSION is "${CUDA_VERSION}"
-
-# Currently, CU_VERSION and CUDA_VERSION are not consistent.
-# To understand this code, see https://github.com/pytorch/vision/issues/4443
-version="cpu"
-if [[ ! -z "${CUDA_VERSION}" ]] ; then
-    version="$CUDA_VERSION"
-else
-    if [[ ${#CU_VERSION} -eq 5 ]]; then
-        version="${CU_VERSION:2:2}.${CU_VERSION:4:1}"
-    fi
-fi
-
-# Don't use if [[ "$version" == "cpu" ]]; then exit 0 fi.
-# It would exit the shell. One result is that cpu tests would not run if the shell exits.
-# Unless there's an error, don't exit.
-if [[ "$version" != "cpu" ]]; then
-    # set cuda envs
-    export PATH="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${version}/bin:/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${version}/libnvvp:$PATH"
-    export CUDA_PATH_V${version/./_}="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v${version}"
-    export CUDA_PATH="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v${version}"
-
-    if [ ! -d "$CUDA_PATH" ]; then
-        echo "$CUDA_PATH" does not exist
-        exit 1
-    fi
-
-    if [ !
-f "${CUDA_PATH}\include\nvjpeg.h" ]; then - echo "nvjpeg does not exist" - exit 1 - fi - - # check cuda driver version - for path in '/c/Program Files/NVIDIA Corporation/NVSMI/nvidia-smi.exe' /c/Windows/System32/nvidia-smi.exe; do - if [[ -x "$path" ]]; then - "$path" || echo "true"; - break - fi - done - - which nvcc - nvcc --version - env | grep CUDA -fi diff --git a/.circleci/unittest/windows/scripts/setup_env.sh b/.circleci/unittest/windows/scripts/setup_env.sh deleted file mode 100644 index 5eeb2e17b48..00000000000 --- a/.circleci/unittest/windows/scripts/setup_env.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash - -# This script is for setting up environment in which unit test is ran. -# To speed up the CI time, the resulting environment is cached. -# -# Do not install PyTorch and torchvision here, otherwise they also get cached. - -set -e - -this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -root_dir="$(git rev-parse --show-toplevel)" -conda_dir="${root_dir}/conda" -env_dir="${root_dir}/env" - -cd "${root_dir}" - -# 1. Install conda at ./conda -if [ ! -d "${conda_dir}" ]; then - printf "* Installing conda\n" - export tmp_conda="$(echo $conda_dir | tr '/' '\\')" - export miniconda_exe="$(echo $root_dir | tr '/' '\\')\\miniconda.exe" - curl --output miniconda.exe https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe -O - "$this_dir/install_conda.bat" - unset tmp_conda - unset miniconda_exe -fi - -eval "$(${conda_dir}/Scripts/conda.exe 'shell.bash' 'hook')" - -# 2. Create test environment at ./env -if [ ! -d "${env_dir}" ]; then - printf "* Creating a test environment\n" - conda create --prefix "${env_dir}" -y python="$PYTHON_VERSION" -fi -conda activate "${env_dir}" - -# 3. Install Conda dependencies -printf "* Installing dependencies (except PyTorch)\n" -conda env update --file "${this_dir}/environment.yml" --prune - -# 4. Downgrade setuptools on Python 3.7. -# See https://github.com/pytorch/vision/pull/5868 -if [[ "${PYTHON_VERSION}" == '3.7' ]]; then - pip install --upgrade setuptools==58.0.4 -fi diff --git a/.circleci/unittest/windows/scripts/vc_env_helper.bat b/.circleci/unittest/windows/scripts/vc_env_helper.bat deleted file mode 100644 index 9410135677a..00000000000 --- a/.circleci/unittest/windows/scripts/vc_env_helper.bat +++ /dev/null @@ -1,39 +0,0 @@ -@echo on - -set VC_VERSION_LOWER=16 -set VC_VERSION_UPPER=17 - -for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [%VC_VERSION_LOWER%^,%VC_VERSION_UPPER%^) -property installationPath`) do ( - if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" ( - set "VS15INSTALLDIR=%%i" - set "VS15VCVARSALL=%%i\VC\Auxiliary\Build\vcvarsall.bat" - goto vswhere - ) -) - -:vswhere -if "%VSDEVCMD_ARGS%" == "" ( - call "%VS15VCVARSALL%" x64 || exit /b 1 -) else ( - call "%VS15VCVARSALL%" x64 %VSDEVCMD_ARGS% || exit /b 1 -) - -@echo on - -set DISTUTILS_USE_SDK=1 - -set args=%1 -shift -:start -if [%1] == [] goto done -set args=%args% %1 -shift -goto start - -:done -if "%args%" == "" ( - echo Usage: vc_env_helper.bat [command] [args] - echo e.g. 
vc_env_helper.bat cl /c test.cpp -) - -%args% || exit /b 1 diff --git a/.github/workflows/build-conda-linux.yml b/.github/workflows/build-conda-linux.yml deleted file mode 100644 index d38ba9fa2aa..00000000000 --- a/.github/workflows/build-conda-linux.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: Build Linux Conda - -on: - pull_request: - push: - branches: - - nightly - workflow_dispatch: - -jobs: - generate-matrix: - uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main - with: - package-type: conda - os: linux - test-infra-repository: pytorch/test-infra - test-infra-ref: main - build: - needs: generate-matrix - strategy: - fail-fast: false - matrix: - include: - - repository: pytorch/vision - pre-script: "" - post-script: "" - conda-package-directory: packaging/torchvision - smoke-test-script: test/smoke_test.py - package-name: torchvision - name: ${{ matrix.repository }} - uses: pytorch/test-infra/.github/workflows/build_conda_linux.yml@main - with: - conda-package-directory: ${{ matrix.conda-package-directory }} - repository: ${{ matrix.repository }} - ref: "" - test-infra-repository: pytorch/test-infra - test-infra-ref: main - build-matrix: ${{ needs.generate-matrix.outputs.matrix }} - pre-script: ${{ matrix.pre-script }} - post-script: ${{ matrix.post-script }} - package-name: ${{ matrix.package-name }} - smoke-test-script: ${{ matrix.smoke-test-script }} - trigger-event: ${{ github.event_name }} - secrets: - CONDA_PYTORCHBOT_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }} diff --git a/.github/workflows/build-conda-m1.yml b/.github/workflows/build-conda-m1.yml deleted file mode 100644 index cb0b687f0b0..00000000000 --- a/.github/workflows/build-conda-m1.yml +++ /dev/null @@ -1,46 +0,0 @@ -name: Build M1 Conda - -on: - pull_request: - push: - branches: - - nightly - workflow_dispatch: - -jobs: - generate-matrix: - uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main - with: - package-type: conda - os: macos-arm64 - test-infra-repository: pytorch/test-infra - test-infra-ref: main - build: - needs: generate-matrix - strategy: - fail-fast: false - matrix: - include: - - repository: pytorch/vision - pre-script: "" - post-script: "" - conda-package-directory: packaging/torchvision - smoke-test-script: test/smoke_test.py - package-name: torchvision - name: ${{ matrix.repository }} - uses: pytorch/test-infra/.github/workflows/build_conda_macos.yml@main - with: - conda-package-directory: ${{ matrix.conda-package-directory }} - repository: ${{ matrix.repository }} - ref: "" - test-infra-repository: pytorch/test-infra - test-infra-ref: main - build-matrix: ${{ needs.generate-matrix.outputs.matrix }} - pre-script: ${{ matrix.pre-script }} - post-script: ${{ matrix.post-script }} - package-name: ${{ matrix.package-name }} - smoke-test-script: ${{ matrix.smoke-test-script }} - runner-type: macos-m1-12 - trigger-event: ${{ github.event_name }} - secrets: - CONDA_PYTORCHBOT_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }} diff --git a/.github/workflows/build-conda-macos.yml b/.github/workflows/build-conda-macos.yml deleted file mode 100644 index 4b43528a326..00000000000 --- a/.github/workflows/build-conda-macos.yml +++ /dev/null @@ -1,46 +0,0 @@ -name: Build Macos Conda - -on: - pull_request: - push: - branches: - - nightly - workflow_dispatch: - -jobs: - generate-matrix: - uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main - with: - package-type: conda - os: macos - test-infra-repository: pytorch/test-infra - test-infra-ref: 
main - build: - needs: generate-matrix - strategy: - fail-fast: false - matrix: - include: - - repository: pytorch/vision - pre-script: "" - post-script: "" - conda-package-directory: packaging/torchvision - smoke-test-script: test/smoke_test.py - package-name: torchvision - name: ${{ matrix.repository }} - uses: pytorch/test-infra/.github/workflows/build_conda_macos.yml@main - with: - conda-package-directory: ${{ matrix.conda-package-directory }} - repository: ${{ matrix.repository }} - ref: "" - test-infra-repository: pytorch/test-infra - test-infra-ref: main - build-matrix: ${{ needs.generate-matrix.outputs.matrix }} - pre-script: ${{ matrix.pre-script }} - post-script: ${{ matrix.post-script }} - package-name: ${{ matrix.package-name }} - smoke-test-script: ${{ matrix.smoke-test-script }} - runner-type: macos-12 - trigger-event: ${{ github.event_name }} - secrets: - CONDA_PYTORCHBOT_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }} diff --git a/.github/workflows/build-wheels-linux.yml b/.github/workflows/build-wheels-linux.yml deleted file mode 100644 index 806b5331e24..00000000000 --- a/.github/workflows/build-wheels-linux.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: Build Linux Wheels - -on: - pull_request: - push: - branches: - - nightly - workflow_dispatch: - -jobs: - generate-matrix: - uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main - with: - package-type: wheel - os: linux - test-infra-repository: pytorch/test-infra - test-infra-ref: main - build: - needs: generate-matrix - strategy: - fail-fast: false - matrix: - include: - - repository: pytorch/vision - pre-script: packaging/pre_build_script.sh - post-script: packaging/post_build_script.sh - smoke-test-script: test/smoke_test.py - package-name: torchvision - name: ${{ matrix.repository }} - uses: pytorch/test-infra/.github/workflows/build_wheels_linux.yml@main - with: - repository: ${{ matrix.repository }} - ref: "" - test-infra-repository: pytorch/test-infra - test-infra-ref: main - build-matrix: ${{ needs.generate-matrix.outputs.matrix }} - pre-script: ${{ matrix.pre-script }} - post-script: ${{ matrix.post-script }} - package-name: ${{ matrix.package-name }} - smoke-test-script: ${{ matrix.smoke-test-script }} - trigger-event: ${{ github.event_name }} - secrets: - AWS_PYTORCH_UPLOADER_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_UPLOADER_ACCESS_KEY_ID }} - AWS_PYTORCH_UPLOADER_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_UPLOADER_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/build-wheels-m1.yml b/.github/workflows/build-wheels-m1.yml deleted file mode 100644 index 7f5446ebbf0..00000000000 --- a/.github/workflows/build-wheels-m1.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: Build M1 Wheels - -on: - pull_request: - push: - branches: - - nightly - workflow_dispatch: - -jobs: - generate-matrix: - uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main - with: - package-type: wheel - os: macos-arm64 - test-infra-repository: pytorch/test-infra - test-infra-ref: main - build: - needs: generate-matrix - strategy: - fail-fast: false - matrix: - include: - - repository: pytorch/vision - pre-script: packaging/pre_build_script.sh - post-script: packaging/post_build_script.sh - smoke-test-script: test/smoke_test.py - package-name: torchvision - name: ${{ matrix.repository }} - uses: pytorch/test-infra/.github/workflows/build_wheels_macos.yml@main - with: - repository: ${{ matrix.repository }} - ref: "" - test-infra-repository: pytorch/test-infra - test-infra-ref: main - build-matrix: ${{ 
needs.generate-matrix.outputs.matrix }} - pre-script: ${{ matrix.pre-script }} - post-script: ${{ matrix.post-script }} - package-name: ${{ matrix.package-name }} - runner-type: macos-m1-12 - smoke-test-script: ${{ matrix.smoke-test-script }} - trigger-event: ${{ github.event_name }} - secrets: - AWS_PYTORCH_UPLOADER_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_UPLOADER_ACCESS_KEY_ID }} - AWS_PYTORCH_UPLOADER_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_UPLOADER_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/build-wheels-macos.yml b/.github/workflows/build-wheels-macos.yml deleted file mode 100644 index 78677961d05..00000000000 --- a/.github/workflows/build-wheels-macos.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: Build Macos Wheels - -on: - pull_request: - push: - branches: - - nightly - workflow_dispatch: - -jobs: - generate-matrix: - uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main - with: - package-type: wheel - os: macos - test-infra-repository: pytorch/test-infra - test-infra-ref: main - build: - needs: generate-matrix - strategy: - fail-fast: false - matrix: - include: - - repository: pytorch/vision - pre-script: packaging/pre_build_script.sh - post-script: packaging/post_build_script.sh - smoke-test-script: test/smoke_test.py - package-name: torchvision - name: ${{ matrix.repository }} - uses: pytorch/test-infra/.github/workflows/build_wheels_macos.yml@main - with: - repository: ${{ matrix.repository }} - ref: "" - test-infra-repository: pytorch/test-infra - test-infra-ref: main - build-matrix: ${{ needs.generate-matrix.outputs.matrix }} - pre-script: ${{ matrix.pre-script }} - post-script: ${{ matrix.post-script }} - package-name: ${{ matrix.package-name }} - runner-type: macos-12 - smoke-test-script: ${{ matrix.smoke-test-script }} - trigger-event: ${{ github.event_name }} - secrets: - AWS_PYTORCH_UPLOADER_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_UPLOADER_ACCESS_KEY_ID }} - AWS_PYTORCH_UPLOADER_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_UPLOADER_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/pr-labels.yml b/.github/workflows/pr-labels.yml deleted file mode 100644 index 20c37e4fd88..00000000000 --- a/.github/workflows/pr-labels.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: pr-labels - -on: - push: - branches: - - main - -jobs: - is-properly-labeled: - runs-on: ubuntu-latest - - steps: - - name: Set up python - uses: actions/setup-python@v2 - - - name: Install requests - run: pip install requests - - - name: Checkout repository - uses: actions/checkout@v2 - - - name: Process commit and find merger responsible for labeling - id: commit - run: echo "::set-output name=merger::$(python .github/process_commit.py ${{ github.sha }})" - - - name: Ping merger responsible for labeling if necessary - if: ${{ steps.commit.outputs.merger != '' }} - uses: mshick/add-pr-comment@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - message: | - Hey ${{ steps.commit.outputs.merger }}! - - You merged this PR, but no labels were added. 
The list of valid labels is available at https://github.com/pytorch/vision/blob/main/.github/process_commit.py diff --git a/.github/workflows/prototype-tests.yml b/.github/workflows/prototype-tests.yml deleted file mode 100644 index d3a9fbf1e3e..00000000000 --- a/.github/workflows/prototype-tests.yml +++ /dev/null @@ -1,73 +0,0 @@ -name: tests - -on: - pull_request: - -jobs: - prototype: - strategy: - matrix: - os: - - ubuntu-latest - - windows-latest - - macos-latest - fail-fast: false - - runs-on: ${{ matrix.os }} - - steps: - - name: Set up python - uses: actions/setup-python@v3 - with: - python-version: 3.7 - - - name: Upgrade system packages - run: python -m pip install --upgrade pip setuptools wheel - - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Install PyTorch nightly builds - run: pip install --progress-bar=off --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/cpu/ - - - name: Install torchvision - run: pip install --progress-bar=off --no-build-isolation --editable . - - - name: Install other prototype dependencies - run: pip install --progress-bar=off scipy pycocotools h5py - - - name: Install test requirements - run: pip install --progress-bar=off pytest pytest-mock pytest-cov - - - name: Mark setup as complete - id: setup - run: exit 0 - - - name: Run prototype datapoints tests - shell: bash - run: | - pytest \ - --durations=20 \ - --cov=torchvision/prototype/datapoints \ - --cov-report=term-missing \ - test/test_prototype_datapoints*.py - - - name: Run prototype transforms tests - if: success() || ( failure() && steps.setup.conclusion == 'success' ) - shell: bash - run: | - pytest \ - --durations=20 \ - --cov=torchvision/prototype/transforms \ - --cov-report=term-missing \ - test/test_prototype_transforms*.py - - - name: Run prototype models tests - if: success() || ( failure() && steps.setup.conclusion == 'success' ) - shell: bash - run: | - pytest \ - --durations=20 \ - --cov=torchvision/prototype/models \ - --cov-report=term-missing \ - test/test_prototype_models*.py diff --git a/.github/workflows/test-linux-cpu.yml b/.github/workflows/test-linux-cpu.yml deleted file mode 100644 index 234ad97f4d6..00000000000 --- a/.github/workflows/test-linux-cpu.yml +++ /dev/null @@ -1,57 +0,0 @@ -name: Unit-tests on Linux CPU - -on: - pull_request: - push: - branches: - - nightly - - main - - release/* - workflow_dispatch: - -env: - CHANNEL: "nightly" - -jobs: - tests: - strategy: - matrix: - python_version: ["3.7", "3.8", "3.9", "3.10"] - fail-fast: false - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - with: - runner: linux.12xlarge - repository: pytorch/vision - script: | - # Mark Build Directory Safe - git config --global --add safe.directory /__w/vision/vision - - # Set up Environment Variables - export PYTHON_VERSION="${{ matrix.python_version }}" - export VERSION="cpu" - export CUDATOOLKIT="cpuonly" - - # Set CHANNEL - if [[ (${GITHUB_EVENT_NAME} = 'pull_request' && (${GITHUB_BASE_REF} = 'release'*)) || (${GITHUB_REF} = 'refs/heads/release'*) ]]; then - export CHANNEL=test - else - export CHANNEL=nightly - fi - - # Create Conda Env - conda create -yp ci_env python="${PYTHON_VERSION}" numpy libpng jpeg scipy - conda activate /work/ci_env - - # Install PyTorch, Torchvision, and testing libraries - set -ex - conda install \ - --yes \ - -c "pytorch-${CHANNEL}" \ - -c nvidia "pytorch-${CHANNEL}"::pytorch[build="*${VERSION}*"] \ - "${CUDATOOLKIT}" - python3 setup.py develop - python3 -m pip install pytest pytest-mock 'av<10' 
- - # Run Tests - python3 -m torch.utils.collect_env - python3 -m pytest --junitxml=test-results/junit.xml -v --durations 20 diff --git a/.github/workflows/test-linux-gpu.yml b/.github/workflows/test-linux-gpu.yml deleted file mode 100644 index a4d938f23ed..00000000000 --- a/.github/workflows/test-linux-gpu.yml +++ /dev/null @@ -1,61 +0,0 @@ -name: Unit-tests on Linux GPU - -on: - pull_request: - push: - branches: - - nightly - - main - - release/* - workflow_dispatch: - -env: - CHANNEL: "nightly" - -jobs: - tests: - strategy: - matrix: - python_version: ["3.8"] - cuda_arch_version: ["11.6"] - fail-fast: false - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - with: - runner: linux.g5.4xlarge.nvidia.gpu - repository: pytorch/vision - gpu-arch-type: cuda - gpu-arch-version: ${{ matrix.cuda_arch_version }} - timeout: 120 - script: | - # Mark Build Directory Safe - git config --global --add safe.directory /__w/vision/vision - - # Set up Environment Variables - export PYTHON_VERSION="${{ matrix.python_version }}" - export VERSION="${{ matrix.cuda_arch_version }}" - export CUDATOOLKIT="pytorch-cuda=${VERSION}" - - # Set CHANNEL - if [[ (${GITHUB_EVENT_NAME} = 'pull_request' && (${GITHUB_BASE_REF} = 'release'*)) || (${GITHUB_REF} = 'refs/heads/release'*) ]]; then - export CHANNEL=test - else - export CHANNEL=nightly - fi - - # Create Conda Env - conda create -yp ci_env python="${PYTHON_VERSION}" numpy libpng jpeg scipy - conda activate /work/ci_env - - # Install PyTorch, Torchvision, and testing libraries - set -ex - conda install \ - --yes \ - -c "pytorch-${CHANNEL}" \ - -c nvidia "pytorch-${CHANNEL}"::pytorch[build="*${VERSION}*"] \ - "${CUDATOOLKIT}" - python3 setup.py develop - python3 -m pip install pytest pytest-mock 'av<10' - - # Run Tests - python3 -m torch.utils.collect_env - python3 -m pytest --junitxml=test-results/junit.xml -v --durations 20 diff --git a/.github/workflows/test-m1.yml b/.github/workflows/test-m1.yml deleted file mode 100644 index c03fa9f76e4..00000000000 --- a/.github/workflows/test-m1.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Unit-tests on M1 -on: - pull_request: - push: - branches: - - nightly - - main - - release/* - workflow_dispatch: -env: - CHANNEL: "nightly" -jobs: - tests: - name: "Unit-tests on M1" - runs-on: macos-m1-12 - strategy: - matrix: - py_vers: [ "3.8"] - - steps: - - name: Checkout repository - uses: actions/checkout@v2 - - name: Set Release CHANNEL (for release) - if: ${{ (github.event_name == 'pull_request' && startsWith(github.base_ref, 'release')) || startsWith(github.ref, 'refs/heads/release') }} - run: | - echo "CHANNEL=test" >> "$GITHUB_ENV" - - name: Install TorchVision - shell: arch -arch arm64 bash {0} - env: - ENV_NAME: conda-env-${{ github.run_id }} - PY_VERS: ${{ matrix.py_vers }} - run: | - . ~/miniconda3/etc/profile.d/conda.sh - # Needed for JPEG library detection as setup.py detects conda presence by running `shutil.which('conda')` - export PATH=~/miniconda3/bin:$PATH - set -ex - conda create -yp ${ENV_NAME} python=${PY_VERS} numpy libpng jpeg scipy - conda run -p ${ENV_NAME} python3 -mpip install --pre torch --extra-index-url=https://download.pytorch.org/whl/${CHANNEL} - conda run -p ${ENV_NAME} python3 setup.py develop - conda run -p ${ENV_NAME} python3 -mpip install pytest pytest-mock 'av<10' - - name: Run tests - shell: arch -arch arm64 bash {0} - env: - ENV_NAME: conda-env-${{ github.run_id }} - PY_VERS: ${{ matrix.py_vers }} - run: | - . 
~/miniconda3/etc/profile.d/conda.sh - set -ex - conda run -p ${ENV_NAME} --no-capture-output python3 -u -mpytest -v --tb=long --durations 20 - conda env remove -p ${ENV_NAME} diff --git a/.github/workflows/tests-schedule.yml b/.github/workflows/tests-schedule.yml deleted file mode 100644 index 1e5a78207fe..00000000000 --- a/.github/workflows/tests-schedule.yml +++ /dev/null @@ -1,57 +0,0 @@ -name: tests - -on: - pull_request: - paths: - - "test/test_datasets_download.py" - - ".github/failed_schedule_issue_template.md" - - ".github/workflows/tests-schedule.yml" - - schedule: - - cron: "0 9 * * *" - -jobs: - download: - runs-on: ubuntu-latest - - steps: - - name: Set up python - uses: actions/setup-python@v2 - with: - python-version: 3.7 - - - name: Upgrade system packages - run: python -m pip install --upgrade pip setuptools wheel - - - name: SSL - run: python -c 'import ssl; print(ssl.OPENSSL_VERSION)' - - - name: Checkout repository - uses: actions/checkout@v2 - - - name: Install torch nightly build - run: pip install --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html - - - name: Install torchvision - run: pip install --no-build-isolation --editable . - - - name: Install all optional dataset requirements - run: pip install scipy pycocotools lmdb requests - - - name: Install tests requirements - run: pip install pytest - - - name: Run tests - run: pytest -ra -v test/test_datasets_download.py - - - uses: JasonEtco/create-an-issue@v2.4.0 - name: Create issue if download tests failed - if: failure() && github.event_name == 'schedule' - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - REPO: ${{ github.repository }} - WORKFLOW: ${{ github.workflow }} - JOB: ${{ github.job }} - ID: ${{ github.run_id }} - with: - filename: .github/failed_schedule_issue_template.md From 5f5f5a1bc7c0d3e4d88fb5c90726fad9b4940470 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Tue, 13 Dec 2022 16:44:04 +0100 Subject: [PATCH 04/22] syntax --- .../workflows/prototype-transforms-tests-linux-gpu.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/prototype-transforms-tests-linux-gpu.yml b/.github/workflows/prototype-transforms-tests-linux-gpu.yml index 0b3f9eaa417..c6a25fabf8c 100644 --- a/.github/workflows/prototype-transforms-tests-linux-gpu.yml +++ b/.github/workflows/prototype-transforms-tests-linux-gpu.yml @@ -22,11 +22,11 @@ jobs: - "" runner: - linux.2xlarge - include: - - python-version: "3.8" - gpu-arch-type: cuda - gpu-arch-version: "11.6" - runner: linux.4xlarge.nvidia.gpu + include: + - python-version: "3.8" + gpu-arch-type: cuda + gpu-arch-version: "11.6" + runner: linux.4xlarge.nvidia.gpu fail-fast: false uses: pytorch/test-infra/.github/workflows/linux_job.yml@main with: From 63170f255e5dfd226e9d0879a93cf16cb6942f09 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Tue, 13 Dec 2022 16:47:23 +0100 Subject: [PATCH 05/22] fix name --- .github/workflows/prototype-transforms-tests-linux-gpu.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/prototype-transforms-tests-linux-gpu.yml b/.github/workflows/prototype-transforms-tests-linux-gpu.yml index c6a25fabf8c..5e0dd771192 100644 --- a/.github/workflows/prototype-transforms-tests-linux-gpu.yml +++ b/.github/workflows/prototype-transforms-tests-linux-gpu.yml @@ -1,4 +1,4 @@ -name: Prototype transforms unit-tests on Linux GPU +name: Prototype Unit-tests on Linux on: pull_request: @@ -34,7 +34,6 @@ jobs: gpu-arch-type: ${{ matrix.gpu-arch-type }} 
gpu-arch-version: ${{ matrix.gpu-arch-version }} runner: ${{ matrix.python-version }} - job-name: Foo-${{ matrix.python-version }} timeout: 45 script: | # Mark Build Directory Safe From 666cec91031a82c7215e9b13a8b336c9b5c85f89 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Tue, 13 Dec 2022 16:54:34 +0100 Subject: [PATCH 06/22] fix echo mutex --- .github/workflows/prototype-transforms-tests-linux-gpu.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/prototype-transforms-tests-linux-gpu.yml b/.github/workflows/prototype-transforms-tests-linux-gpu.yml index 5e0dd771192..81c0257f834 100644 --- a/.github/workflows/prototype-transforms-tests-linux-gpu.yml +++ b/.github/workflows/prototype-transforms-tests-linux-gpu.yml @@ -55,7 +55,7 @@ jobs: else PYTORCH_MUTEX=cpuonly fi - echo $"{MUTEX}" + echo $"{PYTORCH_MUTEX}" echo '::endgroup::' echo '::group::Create conda environment' From 2abda7a37aa627bdf52a320da40cbbdee3e119d3 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Tue, 13 Dec 2022 16:55:30 +0100 Subject: [PATCH 07/22] style --- .../workflows/prototype-transforms-tests-linux-gpu.yml | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/.github/workflows/prototype-transforms-tests-linux-gpu.yml b/.github/workflows/prototype-transforms-tests-linux-gpu.yml index 81c0257f834..f643d3fd022 100644 --- a/.github/workflows/prototype-transforms-tests-linux-gpu.yml +++ b/.github/workflows/prototype-transforms-tests-linux-gpu.yml @@ -16,12 +16,9 @@ jobs: - "3.8" - "3.9" - "3.10" - gpu-arch-type: - - cpu - gpu-arch-version: - - "" - runner: - - linux.2xlarge + gpu-arch-type: ["cpu"] + gpu-arch-version: [""] + runner: ["linux.2xlarge"] include: - python-version: "3.8" gpu-arch-type: cuda From 5a18e7dc546f95b4b08a5d14b3af0cd2d589c346 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Wed, 14 Dec 2022 12:01:11 +0100 Subject: [PATCH 08/22] fix runner --- .github/workflows/prototype-transforms-tests-linux-gpu.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/prototype-transforms-tests-linux-gpu.yml b/.github/workflows/prototype-transforms-tests-linux-gpu.yml index f643d3fd022..8b2eb29e9eb 100644 --- a/.github/workflows/prototype-transforms-tests-linux-gpu.yml +++ b/.github/workflows/prototype-transforms-tests-linux-gpu.yml @@ -30,7 +30,7 @@ jobs: repository: pytorch/vision gpu-arch-type: ${{ matrix.gpu-arch-type }} gpu-arch-version: ${{ matrix.gpu-arch-version }} - runner: ${{ matrix.python-version }} + runner: ${{ matrix.runner }} timeout: 45 script: | # Mark Build Directory Safe From 1edf8a992951bf7ed7ed31dd7fe6f520e177492f Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Wed, 14 Dec 2022 12:05:31 +0100 Subject: [PATCH 09/22] add jobname --- .github/workflows/prototype-transforms-tests-linux-gpu.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/prototype-transforms-tests-linux-gpu.yml b/.github/workflows/prototype-transforms-tests-linux-gpu.yml index 8b2eb29e9eb..02333e7e98b 100644 --- a/.github/workflows/prototype-transforms-tests-linux-gpu.yml +++ b/.github/workflows/prototype-transforms-tests-linux-gpu.yml @@ -27,6 +27,7 @@ jobs: fail-fast: false uses: pytorch/test-infra/.github/workflows/linux_job.yml@main with: + job-name: Python ${{ matrix.python-version }}, ${{ matrix.gpu-arch-type }} repository: pytorch/vision gpu-arch-type: ${{ matrix.gpu-arch-type }} gpu-arch-version: ${{ matrix.gpu-arch-version }} From 903b7358f7c55e9cd2c747fe6bc704f20ed3262f Mon Sep 17 00:00:00 2001 From: 
Philip Meier Date: Wed, 14 Dec 2022 12:10:07 +0100 Subject: [PATCH 10/22] run all prototype tests --- .../workflows/prototype-transforms-tests-linux-gpu.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/prototype-transforms-tests-linux-gpu.yml b/.github/workflows/prototype-transforms-tests-linux-gpu.yml index 02333e7e98b..9070eccc9eb 100644 --- a/.github/workflows/prototype-transforms-tests-linux-gpu.yml +++ b/.github/workflows/prototype-transforms-tests-linux-gpu.yml @@ -88,10 +88,10 @@ jobs: python -m pip install pytest pytest-mock pytest-cov echo '::endgroup::' - echo '::group::Run Tests' + echo '::group::Run prototype tests' pytest \ - --durations=20 \ - --cov=torchvision/prototype/transforms \ + --durations=25 \ + --cov=torchvision/prototype \ --cov-report=term-missing \ - test/test_prototype_transforms*.py + test/test_prototype*.py echo '::endgroup::' From bdfef64daaaa597fe0124cce35000d2d93db9192 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Wed, 14 Dec 2022 12:38:00 +0100 Subject: [PATCH 11/22] fix mutex echo --- .github/workflows/prototype-transforms-tests-linux-gpu.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/prototype-transforms-tests-linux-gpu.yml b/.github/workflows/prototype-transforms-tests-linux-gpu.yml index 9070eccc9eb..aacad82a242 100644 --- a/.github/workflows/prototype-transforms-tests-linux-gpu.yml +++ b/.github/workflows/prototype-transforms-tests-linux-gpu.yml @@ -53,7 +53,7 @@ jobs: else PYTORCH_MUTEX=cpuonly fi - echo $"{PYTORCH_MUTEX}" + echo "${PYTORCH_MUTEX}" echo '::endgroup::' echo '::group::Create conda environment' From 8d040e7ea31ce03f99f89d4d7dedb1e626385a81 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Wed, 14 Dec 2022 12:41:07 +0100 Subject: [PATCH 12/22] cleanup --- .github/workflows/prototype-transforms-tests-linux-gpu.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.github/workflows/prototype-transforms-tests-linux-gpu.yml b/.github/workflows/prototype-transforms-tests-linux-gpu.yml index aacad82a242..6856ce78191 100644 --- a/.github/workflows/prototype-transforms-tests-linux-gpu.yml +++ b/.github/workflows/prototype-transforms-tests-linux-gpu.yml @@ -2,10 +2,6 @@ name: Prototype Unit-tests on Linux on: pull_request: - push: - branches: - - nightly - - main jobs: tests: @@ -85,7 +81,7 @@ jobs: echo '::endgroup::' echo '::group::Install testing utilities' - python -m pip install pytest pytest-mock pytest-cov + pip install --progress-bar=off pytest pytest-mock pytest-cov echo '::endgroup::' echo '::group::Run prototype tests' From caf5a25589761e867bf4a9da7f33a6df5ba1ae3d Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Wed, 14 Dec 2022 12:43:12 +0100 Subject: [PATCH 13/22] Revert "[REVERTME] deactivate other CI" This reverts commit c931ff2c27bbc2b04324b777ae39a6fc25e50b8a. 
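(Note: the command below is a sketch of how a revert like this is usually produced, not a record of how this particular commit was created; it assumes a clean checkout of the branch carrying the commit to undo.)

    git revert c931ff2c27bbc2b04324b777ae39a6fc25e50b8a

Reverting that commit restores, verbatim, the CircleCI configuration and GitHub Actions workflow files it had removed, which is why every file in the diffstat below reappears as a new file.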
--- .circleci/.gitignore | 1 + .circleci/build_docs/commit_docs.sh | 35 + .circleci/config.yml | 2059 +++++++++++++++++ .circleci/config.yml.in | 1130 +++++++++ .circleci/regenerate.py | 370 +++ .circleci/smoke_test/docker/Dockerfile | 34 + .../android/scripts/binary_android_build.sh | 27 + .../android/scripts/binary_android_upload.sh | 34 + .../android/scripts/install_gradle.sh | 19 + .../unittest/ios/scripts/binary_ios_build.sh | 47 + .../unittest/ios/scripts/binary_ios_upload.sh | 42 + .../unittest/linux/scripts/environment.yml | 16 + .circleci/unittest/linux/scripts/install.sh | 40 + .../unittest/linux/scripts/post_process.sh | 6 + .../linux/scripts/run-clang-format.py | 331 +++ .circleci/unittest/linux/scripts/run_test.sh | 9 + .circleci/unittest/linux/scripts/setup_env.sh | 47 + .../unittest/windows/scripts/environment.yml | 19 + .circleci/unittest/windows/scripts/install.sh | 52 + .../windows/scripts/install_conda.bat | 1 + .../unittest/windows/scripts/post_process.sh | 6 + .../unittest/windows/scripts/run_test.sh | 12 + .../unittest/windows/scripts/set_cuda_envs.sh | 48 + .../unittest/windows/scripts/setup_env.sh | 45 + .../windows/scripts/vc_env_helper.bat | 39 + .github/workflows/build-conda-linux.yml | 45 + .github/workflows/build-conda-m1.yml | 46 + .github/workflows/build-conda-macos.yml | 46 + .github/workflows/build-wheels-linux.yml | 44 + .github/workflows/build-wheels-m1.yml | 45 + .github/workflows/build-wheels-macos.yml | 45 + .github/workflows/pr-labels.yml | 35 + .github/workflows/prototype-tests.yml | 73 + .github/workflows/test-linux-cpu.yml | 57 + .github/workflows/test-linux-gpu.yml | 61 + .github/workflows/test-m1.yml | 50 + .github/workflows/tests-schedule.yml | 57 + 37 files changed, 5073 insertions(+) create mode 100644 .circleci/.gitignore create mode 100755 .circleci/build_docs/commit_docs.sh create mode 100644 .circleci/config.yml create mode 100644 .circleci/config.yml.in create mode 100755 .circleci/regenerate.py create mode 100644 .circleci/smoke_test/docker/Dockerfile create mode 100644 .circleci/unittest/android/scripts/binary_android_build.sh create mode 100644 .circleci/unittest/android/scripts/binary_android_upload.sh create mode 100755 .circleci/unittest/android/scripts/install_gradle.sh create mode 100755 .circleci/unittest/ios/scripts/binary_ios_build.sh create mode 100644 .circleci/unittest/ios/scripts/binary_ios_upload.sh create mode 100644 .circleci/unittest/linux/scripts/environment.yml create mode 100755 .circleci/unittest/linux/scripts/install.sh create mode 100755 .circleci/unittest/linux/scripts/post_process.sh create mode 100755 .circleci/unittest/linux/scripts/run-clang-format.py create mode 100755 .circleci/unittest/linux/scripts/run_test.sh create mode 100755 .circleci/unittest/linux/scripts/setup_env.sh create mode 100644 .circleci/unittest/windows/scripts/environment.yml create mode 100644 .circleci/unittest/windows/scripts/install.sh create mode 100644 .circleci/unittest/windows/scripts/install_conda.bat create mode 100644 .circleci/unittest/windows/scripts/post_process.sh create mode 100644 .circleci/unittest/windows/scripts/run_test.sh create mode 100644 .circleci/unittest/windows/scripts/set_cuda_envs.sh create mode 100644 .circleci/unittest/windows/scripts/setup_env.sh create mode 100644 .circleci/unittest/windows/scripts/vc_env_helper.bat create mode 100644 .github/workflows/build-conda-linux.yml create mode 100644 .github/workflows/build-conda-m1.yml create mode 100644 .github/workflows/build-conda-macos.yml create mode 100644 
.github/workflows/build-wheels-linux.yml create mode 100644 .github/workflows/build-wheels-m1.yml create mode 100644 .github/workflows/build-wheels-macos.yml create mode 100644 .github/workflows/pr-labels.yml create mode 100644 .github/workflows/prototype-tests.yml create mode 100644 .github/workflows/test-linux-cpu.yml create mode 100644 .github/workflows/test-linux-gpu.yml create mode 100644 .github/workflows/test-m1.yml create mode 100644 .github/workflows/tests-schedule.yml diff --git a/.circleci/.gitignore b/.circleci/.gitignore new file mode 100644 index 00000000000..485dee64bcf --- /dev/null +++ b/.circleci/.gitignore @@ -0,0 +1 @@ +.idea diff --git a/.circleci/build_docs/commit_docs.sh b/.circleci/build_docs/commit_docs.sh new file mode 100755 index 00000000000..04e3538fefc --- /dev/null +++ b/.circleci/build_docs/commit_docs.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +set -ex + + +if [ "$2" == "" ]; then + echo call as "$0" "" "" + echo where src is the root of the built documentation git checkout and + echo branch should be "main" or "1.7" or so + exit 1 +fi + +src=$1 +target=$2 + +echo "committing docs from ${src} to ${target}" + +pushd "${src}" +git checkout gh-pages +mkdir -p ./"${target}" +rm -rf ./"${target}"/* +cp -r "${src}/docs/build/html/"* ./"$target" +if [ "${target}" == "main" ]; then + mkdir -p ./_static + rm -rf ./_static/* + cp -r "${src}/docs/build/html/_static/"* ./_static + git add --all ./_static || true +fi +git add --all ./"${target}" || true +git config user.email "soumith+bot@pytorch.org" +git config user.name "pytorchbot" +# If there aren't changes, don't make a commit; push is no-op +git commit -m "auto-generating sphinx docs" || true +git remote add https https://github.com/pytorch/vision.git +git push -u https gh-pages diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 00000000000..d15a20216bb --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,2059 @@ +version: 2.1 + +# How to test the Linux jobs: +# - Install CircleCI local CLI: https://circleci.com/docs/2.0/local-cli/ +# - circleci config process .circleci/config.yml > gen.yml && circleci local execute -c gen.yml --job binary_linux_wheel_py3.7 +# - Replace binary_linux_wheel_py3.7 with the name of the job you want to test. +# Job names are 'name:' key. + +executors: + windows-cpu: + machine: + resource_class: windows.xlarge + image: windows-server-2019-vs2019:stable + shell: bash.exe + + windows-gpu: + machine: + resource_class: windows.gpu.nvidia.medium + image: windows-server-2019-nvidia:stable + shell: bash.exe + +commands: + checkout_merge: + description: "checkout merge branch" + steps: + - checkout +# - run: +# name: Checkout merge branch +# command: | +# set -ex +# BRANCH=$(git rev-parse --abbrev-ref HEAD) +# if [[ "$BRANCH" != "main" ]]; then +# git fetch --force origin ${CIRCLE_BRANCH}/merge:merged/${CIRCLE_BRANCH} +# git checkout "merged/$CIRCLE_BRANCH" +# fi + designate_upload_channel: + description: "inserts the correct upload channel into ${BASH_ENV}" + steps: + - run: + name: adding UPLOAD_CHANNEL to BASH_ENV + command: | + our_upload_channel=nightly + # On tags upload to test instead + if [[ -n "${CIRCLE_TAG}" ]]; then + our_upload_channel=test + fi + echo "export UPLOAD_CHANNEL=${our_upload_channel}" >> ${BASH_ENV} + + brew_update: + description: "Update Homebrew and install base formulae" + steps: + - run: + name: Update Homebrew + no_output_timeout: "10m" + command: | + set -ex + + # Update repositories manually. 
+ # Running `brew update` produces a comparison between the + # current checkout and the updated checkout, which takes a + # very long time because the existing checkout is 2y old. + for path in $(find /usr/local/Homebrew -type d -name .git) + do + cd $path/.. + git fetch --depth=1 origin + git reset --hard origin/master + done + + export HOMEBREW_NO_AUTO_UPDATE=1 + + # Install expect and moreutils so that we can call `unbuffer` and `ts`. + # moreutils installs a `parallel` executable by default, which conflicts + # with the executable from the GNU `parallel`, so we must unlink GNU + # `parallel` first, and relink it afterwards. + brew install coreutils + brew unlink parallel + brew install moreutils + brew link parallel --overwrite + brew install expect + + brew_install: + description: "Install Homebrew formulae" + parameters: + formulae: + type: string + default: "" + steps: + - run: + name: Install << parameters.formulae >> + no_output_timeout: "10m" + command: | + set -ex + export HOMEBREW_NO_AUTO_UPDATE=1 + brew install << parameters.formulae >> + + run_brew_for_ios_build: + steps: + - brew_update + - brew_install: + formulae: libtool + + apt_install: + parameters: + args: + type: string + descr: + type: string + default: "" + update: + type: boolean + default: true + steps: + - run: + name: > + <<^ parameters.descr >> apt install << parameters.args >> <> + <<# parameters.descr >> << parameters.descr >> <> + command: | + <<# parameters.update >> sudo apt update -qy <> + sudo apt install << parameters.args >> + + pip_install: + parameters: + args: + type: string + descr: + type: string + default: "" + user: + type: boolean + default: true + steps: + - run: + name: > + <<^ parameters.descr >> pip install << parameters.args >> <> + <<# parameters.descr >> << parameters.descr >> <> + command: > + pip install + <<# parameters.user >> --user <> + --progress-bar=off + << parameters.args >> + + install_torchvision: + parameters: + editable: + type: boolean + default: true + steps: + - pip_install: + args: --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/cpu + descr: Install PyTorch from nightly releases + - pip_install: + args: --no-build-isolation <<# parameters.editable >> --editable <> . + descr: Install torchvision <<# parameters.editable >> in editable mode <> + + # Most of the test suite is handled by the `unittest` jobs, with completely different workflow and setup. + # This command can be used if only a selection of tests need to be run, for ad-hoc files. 
+ run_tests_selective: + parameters: + file_or_dir: + type: string + steps: + - run: + name: Install test utilities + command: pip install --progress-bar=off pytest pytest-mock + - run: + name: Run tests + command: pytest --junitxml=test-results/junit.xml -v --durations 20 <> + - store_test_results: + path: test-results + + download_model_weights: + parameters: + extract_roots: + type: string + default: "torchvision/models" + background: + type: boolean + default: true + steps: + - apt_install: + args: parallel wget + descr: Install download utilitites + - run: + name: Download model weights + background: << parameters.background >> + command: | + mkdir -p ~/.cache/torch/hub/checkpoints + python scripts/collect_model_urls.py << parameters.extract_roots >> \ + | parallel -j0 'wget --no-verbose -O ~/.cache/torch/hub/checkpoints/`basename {}` {}\?source=ci' + +binary_common: &binary_common + parameters: + # Edit these defaults to do a release + build_version: + description: "version number of release binary; by default, build a nightly" + type: string + default: "" + pytorch_version: + description: "PyTorch version to build against; by default, use a nightly" + type: string + default: "" + # Don't edit these + python_version: + description: "Python version to build against (e.g., 3.7)" + type: string + cu_version: + description: "CUDA version to build against, in CU format (e.g., cpu or cu100)" + type: string + default: "cpu" + unicode_abi: + description: "Python 2.7 wheel only: whether or not we are cp27mu (default: no)" + type: string + default: "" + wheel_docker_image: + description: "Wheel only: what docker image to use" + type: string + default: "" + conda_docker_image: + description: "Conda only: what docker image to use" + type: string + default: "pytorch/conda-builder:cpu" + environment: + PYTHON_VERSION: << parameters.python_version >> + PYTORCH_VERSION: << parameters.pytorch_version >> + UNICODE_ABI: << parameters.unicode_abi >> + CU_VERSION: << parameters.cu_version >> + MACOSX_DEPLOYMENT_TARGET: 10.9 + +torchvision_ios_params: &torchvision_ios_params + parameters: + build_environment: + type: string + default: "" + ios_arch: + type: string + default: "" + ios_platform: + type: string + default: "" + environment: + BUILD_ENVIRONMENT: << parameters.build_environment >> + IOS_ARCH: << parameters.ios_arch >> + IOS_PLATFORM: << parameters.ios_platform >> + +torchvision_android_params: &torchvision_android_params + parameters: + build_environment: + type: string + default: "" + environment: + BUILD_ENVIRONMENT: << parameters.build_environment >> + +smoke_test_common: &smoke_test_common + <<: *binary_common + docker: + - image: torchvision/smoke_test:latest + +jobs: + circleci_consistency: + docker: + - image: cimg/python:3.7 + steps: + - checkout + - pip_install: + args: jinja2 pyyaml + - run: + name: Check CircleCI config consistency + command: | + python .circleci/regenerate.py + git diff --exit-code || (echo ".circleci/config.yml not in sync with config.yml.in! 
Run .circleci/regenerate.py to update config"; exit 1) + + lint_python_and_config: + docker: + - image: cimg/python:3.7 + steps: + - checkout + - pip_install: + args: pre-commit + descr: Install lint utilities + - run: + name: Install pre-commit hooks + command: pre-commit install-hooks + - run: + name: Lint Python code and config files + command: pre-commit run --all-files + - run: + name: Required lint modifications + when: on_fail + command: git --no-pager diff + + lint_c: + docker: + - image: cimg/python:3.7 + steps: + - apt_install: + args: libtinfo5 + descr: Install additional system libraries + - checkout + - run: + name: Install lint utilities + command: | + curl https://oss-clang-format.s3.us-east-2.amazonaws.com/linux64/clang-format-linux64 -o clang-format + chmod +x clang-format + sudo mv clang-format /opt/clang-format + - run: + name: Lint C code + command: ./.circleci/unittest/linux/scripts/run-clang-format.py -r torchvision/csrc --clang-format-executable /opt/clang-format + - run: + name: Required lint modifications + when: on_fail + command: git --no-pager diff + + type_check_python: + docker: + - image: cimg/python:3.7 + steps: + - checkout + - install_torchvision: + editable: true + - pip_install: + args: mypy + descr: Install Python type check utilities + - run: + name: Check Python types statically + command: mypy --install-types --non-interactive --config-file mypy.ini + + unittest_torchhub: + docker: + - image: cimg/python:3.7 + steps: + - checkout + - install_torchvision + - run_tests_selective: + file_or_dir: test/test_hub.py + + unittest_onnx: + docker: + - image: cimg/python:3.7 + steps: + - checkout + - install_torchvision + - pip_install: + args: onnx onnxruntime + descr: Install ONNX + - run_tests_selective: + file_or_dir: test/test_onnx.py + + unittest_extended: + docker: + - image: cimg/python:3.7 + resource_class: xlarge + steps: + - checkout + - download_model_weights + - install_torchvision + - run: + name: Enable extended tests + command: echo 'export PYTORCH_TEST_WITH_EXTENDED=1' >> $BASH_ENV + - run_tests_selective: + file_or_dir: test/test_extended_*.py + + binary_linux_wheel: + <<: *binary_common + docker: + - image: << parameters.wheel_docker_image >> + resource_class: 2xlarge+ + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Build conda packages + no_output_timeout: 30m + command: | + set -ex + packaging/build_wheel.sh + - store_artifacts: + path: dist + - persist_to_workspace: + root: dist + paths: + - "*" + + binary_linux_conda: + <<: *binary_common + docker: + - image: "<< parameters.conda_docker_image >>" + resource_class: 2xlarge+ + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Build conda packages + no_output_timeout: 30m + command: | + set -ex + packaging/build_conda.sh + - store_artifacts: + path: /opt/conda/conda-bld/linux-64 + - persist_to_workspace: + root: /opt/conda/conda-bld/linux-64 + paths: + - "*" + - store_test_results: + path: build_results/ + + binary_win_conda: + <<: *binary_common + executor: windows-cpu + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Build conda packages + no_output_timeout: 30m + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/windows/internal/cuda_install.bat + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda activate base + conda install -yq conda-build "conda-package-handling!=1.5.0" + packaging/build_conda.sh + rm 
/C/tools/miniconda3/conda-bld/win-64/vs${VC_YEAR}*.tar.bz2 + - store_artifacts: + path: C:/tools/miniconda3/conda-bld/win-64 + - persist_to_workspace: + root: C:/tools/miniconda3/conda-bld/win-64 + paths: + - "*" + - store_test_results: + path: build_results/ + + binary_win_wheel: + <<: *binary_common + executor: windows-cpu + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Build wheel packages + no_output_timeout: 30m + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/windows/internal/cuda_install.bat + packaging/build_wheel.sh + - store_artifacts: + path: dist + - persist_to_workspace: + root: dist + paths: + - "*" + - store_test_results: + path: build_results/ + + binary_macos_wheel: + <<: *binary_common + macos: + xcode: "14.0" + steps: + - checkout_merge + - designate_upload_channel + - run: + # Cannot easily deduplicate this as source'ing activate + # will set environment variables which we need to propagate + # to build_wheel.sh + command: | + curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + sh conda.sh -b + source $HOME/miniconda3/bin/activate + packaging/build_wheel.sh + - store_artifacts: + path: dist + - persist_to_workspace: + root: dist + paths: + - "*" + + binary_ios_build: + <<: *torchvision_ios_params + macos: + xcode: "14.0" + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run_brew_for_ios_build + - run: + name: Build + no_output_timeout: "1h" + command: | + script="/Users/distiller/project/.circleci/unittest/ios/scripts/binary_ios_build.sh" + cat "$script" + source "$script" + - persist_to_workspace: + root: /Users/distiller/workspace/ + paths: ios + + binary_ios_upload: + <<: *torchvision_ios_params + macos: + xcode: "14.0" + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run_brew_for_ios_build + - run: + name: Upload + no_output_timeout: "1h" + command: | + script="/Users/distiller/project/.circleci/unittest/ios/scripts/binary_ios_upload.sh" + cat "$script" + source "$script" + + binary_android_build: + <<: *torchvision_android_params + docker: + - image: cimg/android:2021.08-ndk + resource_class: xlarge + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run: + name: Build + no_output_timeout: "1h" + command: | + script="/home/circleci/project/.circleci/unittest/android/scripts/binary_android_build.sh" + cat "$script" + source "$script" + - store_artifacts: + path: ~/workspace/artifacts + + binary_android_upload: + <<: *torchvision_android_params + docker: + - image: cimg/android:2021.08-ndk + resource_class: xlarge + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run: + name: Upload + no_output_timeout: "1h" + command: | + script="/home/circleci/project/.circleci/unittest/android/scripts/binary_android_upload.sh" + cat "$script" + source "$script" + + binary_macos_conda: + <<: *binary_common + macos: + xcode: "14.0" + steps: + - checkout_merge + - designate_upload_channel + - run: + command: | + curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + sh conda.sh -b + source $HOME/miniconda3/bin/activate + conda install -yq conda-build + packaging/build_conda.sh + - store_artifacts: + path: /Users/distiller/miniconda3/conda-bld/osx-64 + - persist_to_workspace: + root: /Users/distiller/miniconda3/conda-bld/osx-64 + paths: + - "*" + - store_test_results: + path: build_results/ + + # Requires org-member context + binary_conda_upload: + docker: + - image: 
continuumio/miniconda + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + command: | + # Prevent credential from leaking + conda install -yq anaconda-client + set -x + anaconda -t "${CONDA_PYTORCHBOT_TOKEN}" upload ~/workspace/*.tar.bz2 -u "pytorch-${UPLOAD_CHANNEL}" --label main --no-progress --force + + # Requires org-member context + binary_wheel_upload: + parameters: + subfolder: + description: "What whl subfolder to upload to, e.g., blank or cu100/ (trailing slash is important)" + type: string + docker: + - image: cimg/python:3.7 + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - checkout + - pip_install: + args: awscli + - run: + command: | + export PATH="$HOME/.local/bin:$PATH" + # Prevent credential from leaking + set +x + export AWS_ACCESS_KEY_ID="${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}" + export AWS_SECRET_ACCESS_KEY="${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}" + set -x + for pkg in ~/workspace/*.whl; do + aws s3 cp "$pkg" "s3://pytorch/whl/${UPLOAD_CHANNEL}/<< parameters.subfolder >>" --acl public-read + done + + smoke_test_linux_conda: + <<: *smoke_test_common + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + conda install -v -y -c pytorch-nightly pytorch + conda install -v -y $(ls ~/workspace/torchvision*.tar.bz2) + - run: + name: smoke test + command: | + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + smoke_test_linux_pip: + <<: *smoke_test_common + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + - pip_install: + args: $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html + - run: + name: smoke test + command: | + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + smoke_test_docker_image_build: + machine: + image: ubuntu-2004:202104-01 + resource_class: large + environment: + image_name: torchvision/smoke_test + steps: + - checkout + - designate_upload_channel + - run: + name: Build and push Docker image + no_output_timeout: "1h" + command: | + set +x + echo "${DOCKER_HUB_TOKEN}" | docker login --username "${DOCKER_HUB_USERNAME}" --password-stdin + set -x + cd .circleci/smoke_test/docker && docker build . 
-t ${image_name}:${CIRCLE_WORKFLOW_ID} + docker tag ${image_name}:${CIRCLE_WORKFLOW_ID} ${image_name}:latest + docker push ${image_name}:${CIRCLE_WORKFLOW_ID} + docker push ${image_name}:latest + + smoke_test_win_conda: + <<: *binary_common + executor: + name: windows-cpu + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda env remove -n python${PYTHON_VERSION} || true + conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} + conda activate python${PYTHON_VERSION} + conda install -v -y -c pytorch-nightly pytorch + conda install -v -y $(ls ~/workspace/torchvision*.tar.bz2) + - run: + name: smoke test + command: | + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + smoke_test_win_pip: + <<: *binary_common + executor: + name: windows-cpu + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} + conda activate python${PYTHON_VERSION} + - pip_install: + args: $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html + - run: + name: smoke test + command: | + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + unittest_linux_cpu: + <<: *binary_common + docker: + - image: "pytorch/manylinux-cpu" + resource_class: 2xlarge+ + steps: + - checkout + - designate_upload_channel + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. + command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + + keys: + - env-v2-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + - run: + name: Setup + command: .circleci/unittest/linux/scripts/setup_env.sh + - save_cache: + + key: env-v2-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + paths: + - conda + - env + - run: + name: Install torchvision + command: .circleci/unittest/linux/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/linux/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/linux/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_linux_gpu: + <<: *binary_common + machine: + image: ubuntu-2004-cuda-11.4:202110-01 + resource_class: gpu.nvidia.medium + environment: + image_name: "pytorch/manylinux-cuda116" + CU_VERSION: << parameters.cu_version >> + PYTHON_VERSION: << parameters.python_version >> + steps: + - checkout + - designate_upload_channel + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. 
+ command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + + keys: + - env-v3-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + - run: + name: Setup + command: docker run -e PYTHON_VERSION -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/setup_env.sh + - save_cache: + + key: env-v3-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + paths: + - conda + - env + - run: + # Here we create an envlist file that contains some env variables that we want the docker container to be aware of. + # Normally, the CIRCLECI variable is set and available on all CI workflows: https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables. + # They're avaiable in all the other workflows (OSX and Windows). + # But here, we're running the unittest_linux_gpu workflows in a docker container, where those variables aren't accessible. + # So instead we dump the variables we need in env.list and we pass that file when invoking "docker run". + name: export CIRCLECI env var + command: echo "CIRCLECI=true" >> ./env.list + - run: + name: Install torchvision + command: docker run -t --gpus all -v $PWD:$PWD -w $PWD -e UPLOAD_CHANNEL -e CU_VERSION "${image_name}" .circleci/unittest/linux/scripts/install.sh + - run: + name: Run tests + command: docker run --env-file ./env.list -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/run_test.sh + - run: + name: Post Process + command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_windows_cpu: + <<: *binary_common + executor: + name: windows-cpu + steps: + - checkout + - designate_upload_channel + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. + command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + + keys: + - env-v2-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + - run: + name: Setup + command: .circleci/unittest/windows/scripts/setup_env.sh + - save_cache: + + key: env-v2-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + paths: + - conda + - env + - run: + name: Install torchvision + command: .circleci/unittest/windows/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/windows/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/windows/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_windows_gpu: + <<: *binary_common + executor: + name: windows-gpu + environment: + CUDA_VERSION: "11.6" + PYTHON_VERSION: << parameters.python_version >> + steps: + - checkout + - designate_upload_channel + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. 
+ command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + + keys: + - env-v1-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + - run: + name: Setup + command: .circleci/unittest/windows/scripts/setup_env.sh + - save_cache: + + key: env-v1-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + paths: + - conda + - env + - run: + name: Install CUDA + command: packaging/windows/internal/cuda_install.bat + - run: + name: Update CUDA driver + command: packaging/windows/internal/driver_update.bat + - run: + name: Install torchvision + command: .circleci/unittest/windows/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/windows/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/windows/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_macos_cpu: + <<: *binary_common + macos: + xcode: "14.0" + resource_class: large + steps: + - checkout + - designate_upload_channel + - run: + name: Install wget + command: HOMEBREW_NO_AUTO_UPDATE=1 brew install wget + # Disable brew auto update which is very slow + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. + command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + + keys: + - env-v3-macos-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + - run: + name: Setup + command: .circleci/unittest/linux/scripts/setup_env.sh + - save_cache: + + key: env-v3-macos-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + paths: + - conda + - env + - run: + name: Install torchvision + command: .circleci/unittest/linux/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/linux/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/linux/scripts/post_process.sh + - store_test_results: + path: test-results + + cmake_linux_cpu: + <<: *binary_common + docker: + - image: "pytorch/manylinux-cpu" + resource_class: 2xlarge+ + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Setup conda + command: .circleci/unittest/linux/scripts/setup_env.sh + - run: packaging/build_cmake.sh + + cmake_linux_gpu: + <<: *binary_common + machine: + image: ubuntu-2004-cuda-11.4:202110-01 + resource_class: gpu.nvidia.small + environment: + PYTHON_VERSION: << parameters.python_version >> + PYTORCH_VERSION: << parameters.pytorch_version >> + UNICODE_ABI: << parameters.unicode_abi >> + CU_VERSION: << parameters.cu_version >> + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Setup conda + command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> .circleci/unittest/linux/scripts/setup_env.sh + - run: + name: Build torchvision C++ distribution and test + no_output_timeout: 30m + command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -e UPLOAD_CHANNEL -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> packaging/build_cmake.sh + + cmake_macos_cpu: + <<: *binary_common + macos: + xcode: "14.0" 
+ steps: + - checkout_merge + - designate_upload_channel + - run: + command: | + curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + sh conda.sh -b + source $HOME/miniconda3/bin/activate + conda install -yq conda-build cmake + packaging/build_cmake.sh + + cmake_windows_cpu: + <<: *binary_common + executor: + name: windows-cpu + steps: + - checkout_merge + - designate_upload_channel + - run: + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/build_cmake.sh + + cmake_windows_gpu: + <<: *binary_common + executor: + name: windows-gpu + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Update CUDA driver + command: packaging/windows/internal/driver_update.bat + - run: + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/windows/internal/cuda_install.bat + packaging/build_cmake.sh + + build_docs: + <<: *binary_common + docker: + - image: cimg/python:3.7 + resource_class: 2xlarge+ + steps: + - attach_workspace: + at: ~/workspace + - checkout + - download_model_weights + - run: + name: Setup + command: .circleci/unittest/linux/scripts/setup_env.sh + - designate_upload_channel + - run: + name: Install torchvision + command: .circleci/unittest/linux/scripts/install.sh + - run: + name: Build docs + command: | + set -ex + # turn v1.12.0rc3 into 1.12.0 + tag=$(echo $CIRCLE_TAG | sed -e 's/v*\([0-9.]*\).*/\1/') + VERSION=${tag:-main} + eval "$(./conda/bin/conda shell.bash hook)" + conda activate ./env + pushd docs + pip install --progress-bar=off -r requirements.txt + make html + popd + - persist_to_workspace: + root: ./ + paths: + - "*" + - store_artifacts: + path: ./docs/build/html + destination: docs + + upload_docs: + <<: *binary_common + docker: + - image: "pytorch/manylinux-cuda100" + resource_class: 2xlarge+ + steps: + - attach_workspace: + at: ~/workspace + - run: + name: Generate netrc + command: | + # set credentials for https pushing + # requires the org-member context + cat > ~/.netrc \< gen.yml && circleci local execute -c gen.yml --job binary_linux_wheel_py3.7 +# - Replace binary_linux_wheel_py3.7 with the name of the job you want to test. +# Job names are 'name:' key. + +executors: + windows-cpu: + machine: + resource_class: windows.xlarge + image: windows-server-2019-vs2019:stable + shell: bash.exe + + windows-gpu: + machine: + resource_class: windows.gpu.nvidia.medium + image: windows-server-2019-nvidia:stable + shell: bash.exe + +commands: + checkout_merge: + description: "checkout merge branch" + steps: + - checkout +# - run: +# name: Checkout merge branch +# command: | +# set -ex +# BRANCH=$(git rev-parse --abbrev-ref HEAD) +# if [[ "$BRANCH" != "main" ]]; then +# git fetch --force origin ${CIRCLE_BRANCH}/merge:merged/${CIRCLE_BRANCH} +# git checkout "merged/$CIRCLE_BRANCH" +# fi + designate_upload_channel: + description: "inserts the correct upload channel into ${BASH_ENV}" + steps: + - run: + name: adding UPLOAD_CHANNEL to BASH_ENV + command: | + our_upload_channel=nightly + # On tags upload to test instead + if [[ -n "${CIRCLE_TAG}" ]]; then + our_upload_channel=test + fi + echo "export UPLOAD_CHANNEL=${our_upload_channel}" >> ${BASH_ENV} + + brew_update: + description: "Update Homebrew and install base formulae" + steps: + - run: + name: Update Homebrew + no_output_timeout: "10m" + command: | + set -ex + + # Update repositories manually. 
+ # Running `brew update` produces a comparison between the + # current checkout and the updated checkout, which takes a + # very long time because the existing checkout is 2y old. + for path in $(find /usr/local/Homebrew -type d -name .git) + do + cd $path/.. + git fetch --depth=1 origin + git reset --hard origin/master + done + + export HOMEBREW_NO_AUTO_UPDATE=1 + + # Install expect and moreutils so that we can call `unbuffer` and `ts`. + # moreutils installs a `parallel` executable by default, which conflicts + # with the executable from the GNU `parallel`, so we must unlink GNU + # `parallel` first, and relink it afterwards. + brew install coreutils + brew unlink parallel + brew install moreutils + brew link parallel --overwrite + brew install expect + + brew_install: + description: "Install Homebrew formulae" + parameters: + formulae: + type: string + default: "" + steps: + - run: + name: Install << parameters.formulae >> + no_output_timeout: "10m" + command: | + set -ex + export HOMEBREW_NO_AUTO_UPDATE=1 + brew install << parameters.formulae >> + + run_brew_for_ios_build: + steps: + - brew_update + - brew_install: + formulae: libtool + + apt_install: + parameters: + args: + type: string + descr: + type: string + default: "" + update: + type: boolean + default: true + steps: + - run: + name: > + <<^ parameters.descr >> apt install << parameters.args >> <> + <<# parameters.descr >> << parameters.descr >> <> + command: | + <<# parameters.update >> sudo apt update -qy <> + sudo apt install << parameters.args >> + + pip_install: + parameters: + args: + type: string + descr: + type: string + default: "" + user: + type: boolean + default: true + steps: + - run: + name: > + <<^ parameters.descr >> pip install << parameters.args >> <> + <<# parameters.descr >> << parameters.descr >> <> + command: > + pip install + <<# parameters.user >> --user <> + --progress-bar=off + << parameters.args >> + + install_torchvision: + parameters: + editable: + type: boolean + default: true + steps: + - pip_install: + args: --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/cpu + descr: Install PyTorch from nightly releases + - pip_install: + args: --no-build-isolation <<# parameters.editable >> --editable <> . + descr: Install torchvision <<# parameters.editable >> in editable mode <> + + # Most of the test suite is handled by the `unittest` jobs, with completely different workflow and setup. + # This command can be used if only a selection of tests need to be run, for ad-hoc files. 
+ run_tests_selective: + parameters: + file_or_dir: + type: string + steps: + - run: + name: Install test utilities + command: pip install --progress-bar=off pytest pytest-mock + - run: + name: Run tests + command: pytest --junitxml=test-results/junit.xml -v --durations 20 <> + - store_test_results: + path: test-results + + download_model_weights: + parameters: + extract_roots: + type: string + default: "torchvision/models" + background: + type: boolean + default: true + steps: + - apt_install: + args: parallel wget + descr: Install download utilitites + - run: + name: Download model weights + background: << parameters.background >> + command: | + mkdir -p ~/.cache/torch/hub/checkpoints + python scripts/collect_model_urls.py << parameters.extract_roots >> \ + | parallel -j0 'wget --no-verbose -O ~/.cache/torch/hub/checkpoints/`basename {}` {}\?source=ci' + +binary_common: &binary_common + parameters: + # Edit these defaults to do a release + build_version: + description: "version number of release binary; by default, build a nightly" + type: string + default: "" + pytorch_version: + description: "PyTorch version to build against; by default, use a nightly" + type: string + default: "" + # Don't edit these + python_version: + description: "Python version to build against (e.g., 3.7)" + type: string + cu_version: + description: "CUDA version to build against, in CU format (e.g., cpu or cu100)" + type: string + default: "cpu" + unicode_abi: + description: "Python 2.7 wheel only: whether or not we are cp27mu (default: no)" + type: string + default: "" + wheel_docker_image: + description: "Wheel only: what docker image to use" + type: string + default: "" + conda_docker_image: + description: "Conda only: what docker image to use" + type: string + default: "pytorch/conda-builder:cpu" + environment: + PYTHON_VERSION: << parameters.python_version >> + PYTORCH_VERSION: << parameters.pytorch_version >> + UNICODE_ABI: << parameters.unicode_abi >> + CU_VERSION: << parameters.cu_version >> + MACOSX_DEPLOYMENT_TARGET: 10.9 + +torchvision_ios_params: &torchvision_ios_params + parameters: + build_environment: + type: string + default: "" + ios_arch: + type: string + default: "" + ios_platform: + type: string + default: "" + environment: + BUILD_ENVIRONMENT: << parameters.build_environment >> + IOS_ARCH: << parameters.ios_arch >> + IOS_PLATFORM: << parameters.ios_platform >> + +torchvision_android_params: &torchvision_android_params + parameters: + build_environment: + type: string + default: "" + environment: + BUILD_ENVIRONMENT: << parameters.build_environment >> + +smoke_test_common: &smoke_test_common + <<: *binary_common + docker: + - image: torchvision/smoke_test:latest + +jobs: + circleci_consistency: + docker: + - image: cimg/python:3.7 + steps: + - checkout + - pip_install: + args: jinja2 pyyaml + - run: + name: Check CircleCI config consistency + command: | + python .circleci/regenerate.py + git diff --exit-code || (echo ".circleci/config.yml not in sync with config.yml.in! 
Run .circleci/regenerate.py to update config"; exit 1) + + lint_python_and_config: + docker: + - image: cimg/python:3.7 + steps: + - checkout + - pip_install: + args: pre-commit + descr: Install lint utilities + - run: + name: Install pre-commit hooks + command: pre-commit install-hooks + - run: + name: Lint Python code and config files + command: pre-commit run --all-files + - run: + name: Required lint modifications + when: on_fail + command: git --no-pager diff + + lint_c: + docker: + - image: cimg/python:3.7 + steps: + - apt_install: + args: libtinfo5 + descr: Install additional system libraries + - checkout + - run: + name: Install lint utilities + command: | + curl https://oss-clang-format.s3.us-east-2.amazonaws.com/linux64/clang-format-linux64 -o clang-format + chmod +x clang-format + sudo mv clang-format /opt/clang-format + - run: + name: Lint C code + command: ./.circleci/unittest/linux/scripts/run-clang-format.py -r torchvision/csrc --clang-format-executable /opt/clang-format + - run: + name: Required lint modifications + when: on_fail + command: git --no-pager diff + + type_check_python: + docker: + - image: cimg/python:3.7 + steps: + - checkout + - install_torchvision: + editable: true + - pip_install: + args: mypy + descr: Install Python type check utilities + - run: + name: Check Python types statically + command: mypy --install-types --non-interactive --config-file mypy.ini + + unittest_torchhub: + docker: + - image: cimg/python:3.7 + steps: + - checkout + - install_torchvision + - run_tests_selective: + file_or_dir: test/test_hub.py + + unittest_onnx: + docker: + - image: cimg/python:3.7 + steps: + - checkout + - install_torchvision + - pip_install: + args: onnx onnxruntime + descr: Install ONNX + - run_tests_selective: + file_or_dir: test/test_onnx.py + + unittest_extended: + docker: + - image: cimg/python:3.7 + resource_class: xlarge + steps: + - checkout + - download_model_weights + - install_torchvision + - run: + name: Enable extended tests + command: echo 'export PYTORCH_TEST_WITH_EXTENDED=1' >> $BASH_ENV + - run_tests_selective: + file_or_dir: test/test_extended_*.py + + binary_linux_wheel: + <<: *binary_common + docker: + - image: << parameters.wheel_docker_image >> + resource_class: 2xlarge+ + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Build conda packages + no_output_timeout: 30m + command: | + set -ex + packaging/build_wheel.sh + - store_artifacts: + path: dist + - persist_to_workspace: + root: dist + paths: + - "*" + + binary_linux_conda: + <<: *binary_common + docker: + - image: "<< parameters.conda_docker_image >>" + resource_class: 2xlarge+ + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Build conda packages + no_output_timeout: 30m + command: | + set -ex + packaging/build_conda.sh + - store_artifacts: + path: /opt/conda/conda-bld/linux-64 + - persist_to_workspace: + root: /opt/conda/conda-bld/linux-64 + paths: + - "*" + - store_test_results: + path: build_results/ + + binary_win_conda: + <<: *binary_common + executor: windows-cpu + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Build conda packages + no_output_timeout: 30m + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/windows/internal/cuda_install.bat + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda activate base + conda install -yq conda-build "conda-package-handling!=1.5.0" + packaging/build_conda.sh + rm 
/C/tools/miniconda3/conda-bld/win-64/vs${VC_YEAR}*.tar.bz2 + - store_artifacts: + path: C:/tools/miniconda3/conda-bld/win-64 + - persist_to_workspace: + root: C:/tools/miniconda3/conda-bld/win-64 + paths: + - "*" + - store_test_results: + path: build_results/ + + binary_win_wheel: + <<: *binary_common + executor: windows-cpu + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Build wheel packages + no_output_timeout: 30m + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/windows/internal/cuda_install.bat + packaging/build_wheel.sh + - store_artifacts: + path: dist + - persist_to_workspace: + root: dist + paths: + - "*" + - store_test_results: + path: build_results/ + + binary_macos_wheel: + <<: *binary_common + macos: + xcode: "14.0" + steps: + - checkout_merge + - designate_upload_channel + - run: + # Cannot easily deduplicate this as source'ing activate + # will set environment variables which we need to propagate + # to build_wheel.sh + command: | + curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + sh conda.sh -b + source $HOME/miniconda3/bin/activate + packaging/build_wheel.sh + - store_artifacts: + path: dist + - persist_to_workspace: + root: dist + paths: + - "*" + + binary_ios_build: + <<: *torchvision_ios_params + macos: + xcode: "14.0" + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run_brew_for_ios_build + - run: + name: Build + no_output_timeout: "1h" + command: | + script="/Users/distiller/project/.circleci/unittest/ios/scripts/binary_ios_build.sh" + cat "$script" + source "$script" + - persist_to_workspace: + root: /Users/distiller/workspace/ + paths: ios + + binary_ios_upload: + <<: *torchvision_ios_params + macos: + xcode: "14.0" + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run_brew_for_ios_build + - run: + name: Upload + no_output_timeout: "1h" + command: | + script="/Users/distiller/project/.circleci/unittest/ios/scripts/binary_ios_upload.sh" + cat "$script" + source "$script" + + binary_android_build: + <<: *torchvision_android_params + docker: + - image: cimg/android:2021.08-ndk + resource_class: xlarge + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run: + name: Build + no_output_timeout: "1h" + command: | + script="/home/circleci/project/.circleci/unittest/android/scripts/binary_android_build.sh" + cat "$script" + source "$script" + - store_artifacts: + path: ~/workspace/artifacts + + binary_android_upload: + <<: *torchvision_android_params + docker: + - image: cimg/android:2021.08-ndk + resource_class: xlarge + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run: + name: Upload + no_output_timeout: "1h" + command: | + script="/home/circleci/project/.circleci/unittest/android/scripts/binary_android_upload.sh" + cat "$script" + source "$script" + + binary_macos_conda: + <<: *binary_common + macos: + xcode: "14.0" + steps: + - checkout_merge + - designate_upload_channel + - run: + command: | + curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + sh conda.sh -b + source $HOME/miniconda3/bin/activate + conda install -yq conda-build + packaging/build_conda.sh + - store_artifacts: + path: /Users/distiller/miniconda3/conda-bld/osx-64 + - persist_to_workspace: + root: /Users/distiller/miniconda3/conda-bld/osx-64 + paths: + - "*" + - store_test_results: + path: build_results/ + + # Requires org-member context + binary_conda_upload: + docker: + - image: 
continuumio/miniconda + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + command: | + # Prevent credential from leaking + conda install -yq anaconda-client + set -x + anaconda -t "${CONDA_PYTORCHBOT_TOKEN}" upload ~/workspace/*.tar.bz2 -u "pytorch-${UPLOAD_CHANNEL}" --label main --no-progress --force + + # Requires org-member context + binary_wheel_upload: + parameters: + subfolder: + description: "What whl subfolder to upload to, e.g., blank or cu100/ (trailing slash is important)" + type: string + docker: + - image: cimg/python:3.7 + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - checkout + - pip_install: + args: awscli + - run: + command: | + export PATH="$HOME/.local/bin:$PATH" + # Prevent credential from leaking + set +x + export AWS_ACCESS_KEY_ID="${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}" + export AWS_SECRET_ACCESS_KEY="${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}" + set -x + for pkg in ~/workspace/*.whl; do + aws s3 cp "$pkg" "s3://pytorch/whl/${UPLOAD_CHANNEL}/<< parameters.subfolder >>" --acl public-read + done + + smoke_test_linux_conda: + <<: *smoke_test_common + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + conda install -v -y -c pytorch-nightly pytorch + conda install -v -y $(ls ~/workspace/torchvision*.tar.bz2) + - run: + name: smoke test + command: | + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + smoke_test_linux_pip: + <<: *smoke_test_common + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + - pip_install: + args: $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html + - run: + name: smoke test + command: | + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + smoke_test_docker_image_build: + machine: + image: ubuntu-2004:202104-01 + resource_class: large + environment: + image_name: torchvision/smoke_test + steps: + - checkout + - designate_upload_channel + - run: + name: Build and push Docker image + no_output_timeout: "1h" + command: | + set +x + echo "${DOCKER_HUB_TOKEN}" | docker login --username "${DOCKER_HUB_USERNAME}" --password-stdin + set -x + cd .circleci/smoke_test/docker && docker build . 
-t ${image_name}:${CIRCLE_WORKFLOW_ID} + docker tag ${image_name}:${CIRCLE_WORKFLOW_ID} ${image_name}:latest + docker push ${image_name}:${CIRCLE_WORKFLOW_ID} + docker push ${image_name}:latest + + smoke_test_win_conda: + <<: *binary_common + executor: + name: windows-cpu + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda env remove -n python${PYTHON_VERSION} || true + conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} + conda activate python${PYTHON_VERSION} + conda install -v -y -c pytorch-nightly pytorch + conda install -v -y $(ls ~/workspace/torchvision*.tar.bz2) + - run: + name: smoke test + command: | + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + smoke_test_win_pip: + <<: *binary_common + executor: + name: windows-cpu + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} + conda activate python${PYTHON_VERSION} + - pip_install: + args: $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html + - run: + name: smoke test + command: | + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + unittest_linux_cpu: + <<: *binary_common + docker: + - image: "pytorch/manylinux-cpu" + resource_class: 2xlarge+ + steps: + - checkout + - designate_upload_channel + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. + command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + {% raw %} + keys: + - env-v2-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + - run: + name: Setup + command: .circleci/unittest/linux/scripts/setup_env.sh + - save_cache: + {% raw %} + key: env-v2-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + paths: + - conda + - env + - run: + name: Install torchvision + command: .circleci/unittest/linux/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/linux/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/linux/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_linux_gpu: + <<: *binary_common + machine: + image: ubuntu-2004-cuda-11.4:202110-01 + resource_class: gpu.nvidia.medium + environment: + image_name: "pytorch/manylinux-cuda116" + CU_VERSION: << parameters.cu_version >> + PYTHON_VERSION: << parameters.python_version >> + steps: + - checkout + - designate_upload_channel + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. 
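+          # Note (added for clarity): `date +"%Y-%U"` prints the year and the week number, with
+          # Sunday as the first day of the week, so the checksum of .circleci-weekly below
+          # changes exactly once per week.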
+ command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + {% raw %} + keys: + - env-v3-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + - run: + name: Setup + command: docker run -e PYTHON_VERSION -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/setup_env.sh + - save_cache: + {% raw %} + key: env-v3-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + paths: + - conda + - env + - run: + # Here we create an envlist file that contains some env variables that we want the docker container to be aware of. + # Normally, the CIRCLECI variable is set and available on all CI workflows: https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables. + # They're avaiable in all the other workflows (OSX and Windows). + # But here, we're running the unittest_linux_gpu workflows in a docker container, where those variables aren't accessible. + # So instead we dump the variables we need in env.list and we pass that file when invoking "docker run". + name: export CIRCLECI env var + command: echo "CIRCLECI=true" >> ./env.list + - run: + name: Install torchvision + command: docker run -t --gpus all -v $PWD:$PWD -w $PWD -e UPLOAD_CHANNEL -e CU_VERSION "${image_name}" .circleci/unittest/linux/scripts/install.sh + - run: + name: Run tests + command: docker run --env-file ./env.list -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/run_test.sh + - run: + name: Post Process + command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_windows_cpu: + <<: *binary_common + executor: + name: windows-cpu + steps: + - checkout + - designate_upload_channel + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. + command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + {% raw %} + keys: + - env-v2-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + - run: + name: Setup + command: .circleci/unittest/windows/scripts/setup_env.sh + - save_cache: + {% raw %} + key: env-v2-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + paths: + - conda + - env + - run: + name: Install torchvision + command: .circleci/unittest/windows/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/windows/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/windows/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_windows_gpu: + <<: *binary_common + executor: + name: windows-gpu + environment: + CUDA_VERSION: "11.6" + PYTHON_VERSION: << parameters.python_version >> + steps: + - checkout + - designate_upload_channel + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. 
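+          # Note (an assumption about intent): the `env-v1-windows-...` version prefix in the
+          # cache key below presumably acts as a manual cache-buster; bumping it forces a fresh
+          # environment to be built instead of restoring the cached one.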
+ command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + {% raw %} + keys: + - env-v1-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + - run: + name: Setup + command: .circleci/unittest/windows/scripts/setup_env.sh + - save_cache: + {% raw %} + key: env-v1-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + paths: + - conda + - env + - run: + name: Install CUDA + command: packaging/windows/internal/cuda_install.bat + - run: + name: Update CUDA driver + command: packaging/windows/internal/driver_update.bat + - run: + name: Install torchvision + command: .circleci/unittest/windows/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/windows/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/windows/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_macos_cpu: + <<: *binary_common + macos: + xcode: "14.0" + resource_class: large + steps: + - checkout + - designate_upload_channel + - run: + name: Install wget + command: HOMEBREW_NO_AUTO_UPDATE=1 brew install wget + # Disable brew auto update which is very slow + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. + command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + {% raw %} + keys: + - env-v3-macos-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + - run: + name: Setup + command: .circleci/unittest/linux/scripts/setup_env.sh + - save_cache: + {% raw %} + key: env-v3-macos-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + paths: + - conda + - env + - run: + name: Install torchvision + command: .circleci/unittest/linux/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/linux/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/linux/scripts/post_process.sh + - store_test_results: + path: test-results + + cmake_linux_cpu: + <<: *binary_common + docker: + - image: "pytorch/manylinux-cpu" + resource_class: 2xlarge+ + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Setup conda + command: .circleci/unittest/linux/scripts/setup_env.sh + - run: packaging/build_cmake.sh + + cmake_linux_gpu: + <<: *binary_common + machine: + image: ubuntu-2004-cuda-11.4:202110-01 + resource_class: gpu.nvidia.small + environment: + PYTHON_VERSION: << parameters.python_version >> + PYTORCH_VERSION: << parameters.pytorch_version >> + UNICODE_ABI: << parameters.unicode_abi >> + CU_VERSION: << parameters.cu_version >> + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Setup conda + command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> .circleci/unittest/linux/scripts/setup_env.sh + - run: + name: Build torchvision C++ distribution and test + no_output_timeout: 30m + command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -e UPLOAD_CHANNEL -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> 
packaging/build_cmake.sh + + cmake_macos_cpu: + <<: *binary_common + macos: + xcode: "14.0" + steps: + - checkout_merge + - designate_upload_channel + - run: + command: | + curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + sh conda.sh -b + source $HOME/miniconda3/bin/activate + conda install -yq conda-build cmake + packaging/build_cmake.sh + + cmake_windows_cpu: + <<: *binary_common + executor: + name: windows-cpu + steps: + - checkout_merge + - designate_upload_channel + - run: + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/build_cmake.sh + + cmake_windows_gpu: + <<: *binary_common + executor: + name: windows-gpu + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Update CUDA driver + command: packaging/windows/internal/driver_update.bat + - run: + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/windows/internal/cuda_install.bat + packaging/build_cmake.sh + + build_docs: + <<: *binary_common + docker: + - image: cimg/python:3.7 + resource_class: 2xlarge+ + steps: + - attach_workspace: + at: ~/workspace + - checkout + - download_model_weights + - run: + name: Setup + command: .circleci/unittest/linux/scripts/setup_env.sh + - designate_upload_channel + - run: + name: Install torchvision + command: .circleci/unittest/linux/scripts/install.sh + - run: + name: Build docs + command: | + set -ex + # turn v1.12.0rc3 into 1.12.0 + tag=$(echo $CIRCLE_TAG | sed -e 's/v*\([0-9.]*\).*/\1/') + VERSION=${tag:-main} + eval "$(./conda/bin/conda shell.bash hook)" + conda activate ./env + pushd docs + pip install --progress-bar=off -r requirements.txt + make html + popd + - persist_to_workspace: + root: ./ + paths: + - "*" + - store_artifacts: + path: ./docs/build/html + destination: docs + + upload_docs: + <<: *binary_common + docker: + - image: "pytorch/manylinux-cuda100" + resource_class: 2xlarge+ + steps: + - attach_workspace: + at: ~/workspace + - run: + name: Generate netrc + command: | + # set credentials for https pushing + # requires the org-member context + cat > ~/.netrc \<> ~/.bashrc +CMD [ "/bin/bash"] diff --git a/.circleci/unittest/android/scripts/binary_android_build.sh b/.circleci/unittest/android/scripts/binary_android_build.sh new file mode 100644 index 00000000000..0d8c0d47d8a --- /dev/null +++ b/.circleci/unittest/android/scripts/binary_android_build.sh @@ -0,0 +1,27 @@ +#!/bin/bash +set -ex -o pipefail + +echo "DIR: $(pwd)" +echo "ANDROID_HOME=${ANDROID_HOME}" +echo "ANDROID_NDK_HOME=${ANDROID_NDK_HOME}" +echo "JAVA_HOME=${JAVA_HOME}" + +WORKSPACE=/home/circleci/workspace +VISION_ANDROID=/home/circleci/project/android + +. /home/circleci/project/.circleci/unittest/android/scripts/install_gradle.sh + +GRADLE_LOCAL_PROPERTIES=${VISION_ANDROID}/local.properties +rm -f $GRADLE_LOCAL_PROPERTIES + +echo "sdk.dir=${ANDROID_HOME}" >> $GRADLE_LOCAL_PROPERTIES +echo "ndk.dir=${ANDROID_NDK_HOME}" >> $GRADLE_LOCAL_PROPERTIES + +echo "GRADLE_PATH $GRADLE_PATH" +echo "GRADLE_HOME $GRADLE_HOME" + +${GRADLE_PATH} --scan --stacktrace --debug --no-daemon -p ${VISION_ANDROID} assemble || true + +mkdir -p ~/workspace/artifacts +find . -type f -name *aar -print | xargs tar cfvz ~/workspace/artifacts/artifacts-aars.tgz +find . 
-type f -name *apk -print | xargs tar cfvz ~/workspace/artifacts/artifacts-apks.tgz diff --git a/.circleci/unittest/android/scripts/binary_android_upload.sh b/.circleci/unittest/android/scripts/binary_android_upload.sh new file mode 100644 index 00000000000..1472a877d90 --- /dev/null +++ b/.circleci/unittest/android/scripts/binary_android_upload.sh @@ -0,0 +1,34 @@ +#!/bin/bash +set -ex -o pipefail + +echo "DIR: $(pwd)" +echo "ANDROID_HOME=${ANDROID_HOME}" +echo "ANDROID_NDK_HOME=${ANDROID_NDK_HOME}" +echo "JAVA_HOME=${JAVA_HOME}" + +WORKSPACE=/home/circleci/workspace +VISION_ANDROID=/home/circleci/project/android + +. /home/circleci/project/.circleci/unittest/android/scripts/install_gradle.sh + +GRADLE_LOCAL_PROPERTIES=${VISION_ANDROID}/local.properties +rm -f $GRADLE_LOCAL_PROPERTIES +GRADLE_PROPERTIES=/home/circleci/project/android/gradle.properties + +echo "sdk.dir=${ANDROID_HOME}" >> $GRADLE_LOCAL_PROPERTIES +echo "ndk.dir=${ANDROID_NDK_HOME}" >> $GRADLE_LOCAL_PROPERTIES + +echo "SONATYPE_NEXUS_USERNAME=${SONATYPE_NEXUS_USERNAME}" >> $GRADLE_PROPERTIES +echo "mavenCentralRepositoryUsername=${SONATYPE_NEXUS_USERNAME}" >> $GRADLE_PROPERTIES +echo "SONATYPE_NEXUS_PASSWORD=${SONATYPE_NEXUS_PASSWORD}" >> $GRADLE_PROPERTIES +echo "mavenCentralRepositoryPassword=${SONATYPE_NEXUS_PASSWORD}" >> $GRADLE_PROPERTIES + +echo "signing.keyId=${ANDROID_SIGN_KEY}" >> $GRADLE_PROPERTIES +echo "signing.password=${ANDROID_SIGN_PASS}" >> $GRADLE_PROPERTIES + +cat /home/circleci/project/android/gradle.properties | grep VERSION + +${GRADLE_PATH} --scan --stacktrace --debug --no-daemon -p ${VISION_ANDROID} ops:uploadArchives + +mkdir -p ~/workspace/artifacts +find . -type f -name *aar -print | xargs tar cfvz ~/workspace/artifacts/artifacts-aars.tgz diff --git a/.circleci/unittest/android/scripts/install_gradle.sh b/.circleci/unittest/android/scripts/install_gradle.sh new file mode 100755 index 00000000000..5f803abfa94 --- /dev/null +++ b/.circleci/unittest/android/scripts/install_gradle.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -ex + +_https_amazon_aws=https://ossci-android.s3.amazonaws.com +GRADLE_VERSION=6.8.3 + +_gradle_home=/opt/gradle +sudo rm -rf $gradle_home +sudo mkdir -p $_gradle_home + +curl --silent --output /tmp/gradle.zip --retry 3 $_https_amazon_aws/gradle-${GRADLE_VERSION}-bin.zip + +sudo unzip -q /tmp/gradle.zip -d $_gradle_home +rm /tmp/gradle.zip + +sudo chmod -R 777 $_gradle_home + +export GRADLE_HOME=$_gradle_home/gradle-$GRADLE_VERSION +export GRADLE_PATH=${GRADLE_HOME}/bin/gradle diff --git a/.circleci/unittest/ios/scripts/binary_ios_build.sh b/.circleci/unittest/ios/scripts/binary_ios_build.sh new file mode 100755 index 00000000000..e2ad7b0c55f --- /dev/null +++ b/.circleci/unittest/ios/scripts/binary_ios_build.sh @@ -0,0 +1,47 @@ +#!/bin/bash +set -ex -o pipefail + +echo "" +echo "DIR: $(pwd)" +WORKSPACE=/Users/distiller/workspace +PROJ_ROOT_IOS=/Users/distiller/project/ios +PYTORCH_IOS_NIGHTLY_NAME=libtorch_ios_nightly_build.zip +export TCLLIBPATH="/usr/local/lib" + +# install conda +curl --retry 3 -o ~/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh +chmod +x ~/conda.sh +/bin/bash ~/conda.sh -b -p ~/anaconda +export PATH="~/anaconda/bin:${PATH}" +source ~/anaconda/bin/activate + +# install dependencies +conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi requests typing_extensions wget --yes +conda install -c conda-forge valgrind --yes +export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} + +# sync submodules +cd 
${PROJ_ROOT_IOS} +git submodule sync +git submodule update --init --recursive + +# download pytorch-iOS nightly build and unzip it +mkdir -p ${PROJ_ROOT_IOS}/lib +mkdir -p ${PROJ_ROOT_IOS}/build +mkdir -p ${PROJ_ROOT_IOS}/pytorch +TORCH_ROOT="${PROJ_ROOT_IOS}/pytorch" + +cd ${TORCH_ROOT} +wget https://ossci-ios-build.s3.amazonaws.com/${PYTORCH_IOS_NIGHTLY_NAME} +mkdir -p ./build_ios +unzip -d ./build_ios ./${PYTORCH_IOS_NIGHTLY_NAME} + +LIBTORCH_HEADER_ROOT="${TORCH_ROOT}/build_ios/install/include" +cd ${PROJ_ROOT_IOS} +IOS_ARCH=${IOS_ARCH} LIBTORCH_HEADER_ROOT=${LIBTORCH_HEADER_ROOT} ./build_ios.sh +rm -rf ${TORCH_ROOT} + +# store the binary +DEST_DIR=${WORKSPACE}/ios/${IOS_ARCH} +mkdir -p ${DEST_DIR} +cp ${PROJ_ROOT_IOS}/lib/*.a ${DEST_DIR} diff --git a/.circleci/unittest/ios/scripts/binary_ios_upload.sh b/.circleci/unittest/ios/scripts/binary_ios_upload.sh new file mode 100644 index 00000000000..ce56388e5da --- /dev/null +++ b/.circleci/unittest/ios/scripts/binary_ios_upload.sh @@ -0,0 +1,42 @@ +#!/bin/bash +set -ex -o pipefail + +echo "" +echo "DIR: $(pwd)" + +WORKSPACE=/Users/distiller/workspace +PROJ_ROOT=/Users/distiller/project +ARTIFACTS_DIR=${WORKSPACE}/ios +ls ${ARTIFACTS_DIR} +ZIP_DIR=${WORKSPACE}/zip +mkdir -p ${ZIP_DIR}/install/lib + +# build a FAT bianry +cd ${ZIP_DIR}/install/lib +libs=("${ARTIFACTS_DIR}/x86_64/libtorchvision_ops.a" "${ARTIFACTS_DIR}/arm64/libtorchvision_ops.a") +lipo -create "${libs[@]}" -o ${ZIP_DIR}/install/lib/libtorchvision_ops.a +lipo -i ${ZIP_DIR}/install/lib/*.a + +# copy the license +cp ${PROJ_ROOT}/LICENSE ${ZIP_DIR}/ +# zip the library +ZIPFILE=libtorchvision_ops_ios_nightly_build.zip +cd ${ZIP_DIR} +#for testing +touch version.txt +echo $(date +%s) > version.txt +zip -r ${ZIPFILE} install version.txt LICENSE + +# upload to aws +# Install conda then 'conda install' awscli +curl --retry 3 -o ~/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh +chmod +x ~/conda.sh +/bin/bash ~/conda.sh -b -p ~/anaconda +export PATH="~/anaconda/bin:${PATH}" +source ~/anaconda/bin/activate +conda install -c conda-forge awscli --yes +set +x +export AWS_ACCESS_KEY_ID=${AWS_S3_ACCESS_KEY_FOR_PYTORCH_BINARY_UPLOAD} +export AWS_SECRET_ACCESS_KEY=${AWS_S3_ACCESS_SECRET_FOR_PYTORCH_BINARY_UPLOAD} +set -x +aws s3 cp ${ZIPFILE} s3://ossci-ios-build/ --acl public-read diff --git a/.circleci/unittest/linux/scripts/environment.yml b/.circleci/unittest/linux/scripts/environment.yml new file mode 100644 index 00000000000..fae96c5f93c --- /dev/null +++ b/.circleci/unittest/linux/scripts/environment.yml @@ -0,0 +1,16 @@ +channels: + - pytorch + - defaults +dependencies: + - pytest + - pytest-cov + - pytest-mock + - pip + - libpng + - jpeg + - ca-certificates + - h5py + - pip: + - future + - scipy + - av < 10 diff --git a/.circleci/unittest/linux/scripts/install.sh b/.circleci/unittest/linux/scripts/install.sh new file mode 100755 index 00000000000..54722842a74 --- /dev/null +++ b/.circleci/unittest/linux/scripts/install.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +unset PYTORCH_VERSION +# For unittest, nightly PyTorch is used as the following section, +# so no need to set PYTORCH_VERSION. +# In fact, keeping PYTORCH_VERSION forces us to hardcode PyTorch version in config. 
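+#
+# Rough sketch of the mapping computed below (illustrative values only):
+#   CU_VERSION=cpu    ->  cudatoolkit="cpuonly"
+#   CU_VERSION=cu116  ->  CUDA_VERSION=11.6  ->  cudatoolkit="pytorch-cuda=11.6"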
+ +set -e + +eval "$(./conda/bin/conda shell.bash hook)" +conda activate ./env + +if [ "${CU_VERSION:-}" == cpu ] ; then + cudatoolkit="cpuonly" + version="cpu" +else + if [[ ${#CU_VERSION} -eq 4 ]]; then + CUDA_VERSION="${CU_VERSION:2:1}.${CU_VERSION:3:1}" + elif [[ ${#CU_VERSION} -eq 5 ]]; then + CUDA_VERSION="${CU_VERSION:2:2}.${CU_VERSION:4:1}" + fi + echo "Using CUDA $CUDA_VERSION as determined by CU_VERSION: ${CU_VERSION} " + version="$(python -c "print('.'.join(\"${CUDA_VERSION}\".split('.')[:2]))")" + cudatoolkit="pytorch-cuda=${version}" +fi + +case "$(uname -s)" in + Darwin*) os=MacOSX;; + *) os=Linux +esac + +printf "Installing PyTorch with %s\n" "${cudatoolkit}" +if [ "${os}" == "MacOSX" ]; then + conda install -y -c "pytorch-${UPLOAD_CHANNEL}" "pytorch-${UPLOAD_CHANNEL}"::pytorch "${cudatoolkit}" +else + conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c nvidia "pytorch-${UPLOAD_CHANNEL}"::pytorch[build="*${version}*"] "${cudatoolkit}" +fi + +printf "* Installing torchvision\n" +python setup.py develop diff --git a/.circleci/unittest/linux/scripts/post_process.sh b/.circleci/unittest/linux/scripts/post_process.sh new file mode 100755 index 00000000000..e97bf2a7b1b --- /dev/null +++ b/.circleci/unittest/linux/scripts/post_process.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -e + +eval "$(./conda/bin/conda shell.bash hook)" +conda activate ./env diff --git a/.circleci/unittest/linux/scripts/run-clang-format.py b/.circleci/unittest/linux/scripts/run-clang-format.py new file mode 100755 index 00000000000..5c61b2519e0 --- /dev/null +++ b/.circleci/unittest/linux/scripts/run-clang-format.py @@ -0,0 +1,331 @@ +#!/usr/bin/env python +""" +MIT License + +Copyright (c) 2017 Guillaume Papin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +A wrapper script around clang-format, suitable for linting multiple files +and to use for continuous integration. + +This is an alternative API for the clang-format command line. +It runs over multiple files and directories in parallel. +A diff output is produced and a sensible exit code is returned. 
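+
+Example invocation (mirroring how the lint_c CI job above calls this script):
+
+    run-clang-format.py -r torchvision/csrc --clang-format-executable /opt/clang-format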
+ +""" + +import argparse +import difflib +import fnmatch +import multiprocessing +import os +import signal +import subprocess +import sys +import traceback +from functools import partial + +try: + from subprocess import DEVNULL # py3k +except ImportError: + DEVNULL = open(os.devnull, "wb") + + +DEFAULT_EXTENSIONS = "c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx,cu" + + +class ExitStatus: + SUCCESS = 0 + DIFF = 1 + TROUBLE = 2 + + +def list_files(files, recursive=False, extensions=None, exclude=None): + if extensions is None: + extensions = [] + if exclude is None: + exclude = [] + + out = [] + for file in files: + if recursive and os.path.isdir(file): + for dirpath, dnames, fnames in os.walk(file): + fpaths = [os.path.join(dirpath, fname) for fname in fnames] + for pattern in exclude: + # os.walk() supports trimming down the dnames list + # by modifying it in-place, + # to avoid unnecessary directory listings. + dnames[:] = [x for x in dnames if not fnmatch.fnmatch(os.path.join(dirpath, x), pattern)] + fpaths = [x for x in fpaths if not fnmatch.fnmatch(x, pattern)] + for f in fpaths: + ext = os.path.splitext(f)[1][1:] + if ext in extensions: + out.append(f) + else: + out.append(file) + return out + + +def make_diff(file, original, reformatted): + return list( + difflib.unified_diff( + original, reformatted, fromfile=f"{file}\t(original)", tofile=f"{file}\t(reformatted)", n=3 + ) + ) + + +class DiffError(Exception): + def __init__(self, message, errs=None): + super().__init__(message) + self.errs = errs or [] + + +class UnexpectedError(Exception): + def __init__(self, message, exc=None): + super().__init__(message) + self.formatted_traceback = traceback.format_exc() + self.exc = exc + + +def run_clang_format_diff_wrapper(args, file): + try: + ret = run_clang_format_diff(args, file) + return ret + except DiffError: + raise + except Exception as e: + raise UnexpectedError(f"{file}: {e.__class__.__name__}: {e}", e) + + +def run_clang_format_diff(args, file): + try: + with open(file, encoding="utf-8") as f: + original = f.readlines() + except OSError as exc: + raise DiffError(str(exc)) + invocation = [args.clang_format_executable, file] + + # Use of utf-8 to decode the process output. + # + # Hopefully, this is the correct thing to do. + # + # It's done due to the following assumptions (which may be incorrect): + # - clang-format will returns the bytes read from the files as-is, + # without conversion, and it is already assumed that the files use utf-8. + # - if the diagnostics were internationalized, they would use utf-8: + # > Adding Translations to Clang + # > + # > Not possible yet! + # > Diagnostic strings should be written in UTF-8, + # > the client can translate to the relevant code page if needed. + # > Each translation completely replaces the format string + # > for the diagnostic. 
+ # > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation + + try: + proc = subprocess.Popen( + invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, encoding="utf-8" + ) + except OSError as exc: + raise DiffError(f"Command '{subprocess.list2cmdline(invocation)}' failed to start: {exc}") + proc_stdout = proc.stdout + proc_stderr = proc.stderr + + # hopefully the stderr pipe won't get full and block the process + outs = list(proc_stdout.readlines()) + errs = list(proc_stderr.readlines()) + proc.wait() + if proc.returncode: + raise DiffError( + "Command '{}' returned non-zero exit status {}".format( + subprocess.list2cmdline(invocation), proc.returncode + ), + errs, + ) + return make_diff(file, original, outs), errs + + +def bold_red(s): + return "\x1b[1m\x1b[31m" + s + "\x1b[0m" + + +def colorize(diff_lines): + def bold(s): + return "\x1b[1m" + s + "\x1b[0m" + + def cyan(s): + return "\x1b[36m" + s + "\x1b[0m" + + def green(s): + return "\x1b[32m" + s + "\x1b[0m" + + def red(s): + return "\x1b[31m" + s + "\x1b[0m" + + for line in diff_lines: + if line[:4] in ["--- ", "+++ "]: + yield bold(line) + elif line.startswith("@@ "): + yield cyan(line) + elif line.startswith("+"): + yield green(line) + elif line.startswith("-"): + yield red(line) + else: + yield line + + +def print_diff(diff_lines, use_color): + if use_color: + diff_lines = colorize(diff_lines) + sys.stdout.writelines(diff_lines) + + +def print_trouble(prog, message, use_colors): + error_text = "error:" + if use_colors: + error_text = bold_red(error_text) + print(f"{prog}: {error_text} {message}", file=sys.stderr) + + +def main(): + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--clang-format-executable", + metavar="EXECUTABLE", + help="path to the clang-format executable", + default="clang-format", + ) + parser.add_argument( + "--extensions", + help=f"comma separated list of file extensions (default: {DEFAULT_EXTENSIONS})", + default=DEFAULT_EXTENSIONS, + ) + parser.add_argument("-r", "--recursive", action="store_true", help="run recursively over directories") + parser.add_argument("files", metavar="file", nargs="+") + parser.add_argument("-q", "--quiet", action="store_true") + parser.add_argument( + "-j", + metavar="N", + type=int, + default=0, + help="run N clang-format jobs in parallel (default number of cpus + 1)", + ) + parser.add_argument( + "--color", default="auto", choices=["auto", "always", "never"], help="show colored diff (default: auto)" + ) + parser.add_argument( + "-e", + "--exclude", + metavar="PATTERN", + action="append", + default=[], + help="exclude paths matching the given glob-like pattern(s) from recursive search", + ) + + args = parser.parse_args() + + # use default signal handling, like diff return SIGINT value on ^C + # https://bugs.python.org/issue14229#msg156446 + signal.signal(signal.SIGINT, signal.SIG_DFL) + try: + signal.SIGPIPE + except AttributeError: + # compatibility, SIGPIPE does not exist on Windows + pass + else: + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + + colored_stdout = False + colored_stderr = False + if args.color == "always": + colored_stdout = True + colored_stderr = True + elif args.color == "auto": + colored_stdout = sys.stdout.isatty() + colored_stderr = sys.stderr.isatty() + + version_invocation = [args.clang_format_executable, "--version"] + try: + subprocess.check_call(version_invocation, stdout=DEVNULL) + except subprocess.CalledProcessError as e: + print_trouble(parser.prog, str(e), 
use_colors=colored_stderr) + return ExitStatus.TROUBLE + except OSError as e: + print_trouble( + parser.prog, + f"Command '{subprocess.list2cmdline(version_invocation)}' failed to start: {e}", + use_colors=colored_stderr, + ) + return ExitStatus.TROUBLE + + retcode = ExitStatus.SUCCESS + files = list_files( + args.files, recursive=args.recursive, exclude=args.exclude, extensions=args.extensions.split(",") + ) + + if not files: + return + + njobs = args.j + if njobs == 0: + njobs = multiprocessing.cpu_count() + 1 + njobs = min(len(files), njobs) + + if njobs == 1: + # execute directly instead of in a pool, + # less overhead, simpler stacktraces + it = (run_clang_format_diff_wrapper(args, file) for file in files) + pool = None + else: + pool = multiprocessing.Pool(njobs) + it = pool.imap_unordered(partial(run_clang_format_diff_wrapper, args), files) + while True: + try: + outs, errs = next(it) + except StopIteration: + break + except DiffError as e: + print_trouble(parser.prog, str(e), use_colors=colored_stderr) + retcode = ExitStatus.TROUBLE + sys.stderr.writelines(e.errs) + except UnexpectedError as e: + print_trouble(parser.prog, str(e), use_colors=colored_stderr) + sys.stderr.write(e.formatted_traceback) + retcode = ExitStatus.TROUBLE + # stop at the first unexpected error, + # something could be very wrong, + # don't process all files unnecessarily + if pool: + pool.terminate() + break + else: + sys.stderr.writelines(errs) + if outs == []: + continue + if not args.quiet: + print_diff(outs, use_color=colored_stdout) + if retcode == ExitStatus.SUCCESS: + retcode = ExitStatus.DIFF + return retcode + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/.circleci/unittest/linux/scripts/run_test.sh b/.circleci/unittest/linux/scripts/run_test.sh new file mode 100755 index 00000000000..8f6b8cb8485 --- /dev/null +++ b/.circleci/unittest/linux/scripts/run_test.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +set -e + +eval "$(./conda/bin/conda shell.bash hook)" +conda activate ./env + +python -m torch.utils.collect_env +pytest --junitxml=test-results/junit.xml -v --durations 20 diff --git a/.circleci/unittest/linux/scripts/setup_env.sh b/.circleci/unittest/linux/scripts/setup_env.sh new file mode 100755 index 00000000000..0574cdff1cf --- /dev/null +++ b/.circleci/unittest/linux/scripts/setup_env.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +# This script is for setting up environment in which unit test is ran. +# To speed up the CI time, the resulting environment is cached. +# +# Do not install PyTorch and torchvision here, otherwise they also get cached. + +set -e + +this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +# Avoid error: "fatal: unsafe repository" +git config --global --add safe.directory '*' +root_dir="$(git rev-parse --show-toplevel)" +conda_dir="${root_dir}/conda" +env_dir="${root_dir}/env" + +cd "${root_dir}" + +case "$(uname -s)" in + Darwin*) os=MacOSX;; + *) os=Linux +esac + +# 1. Install conda at ./conda +if [ ! -d "${conda_dir}" ]; then + printf "* Installing conda\n" + wget -O miniconda.sh "http://repo.continuum.io/miniconda/Miniconda3-latest-${os}-x86_64.sh" + bash ./miniconda.sh -b -f -p "${conda_dir}" +fi +eval "$(${conda_dir}/bin/conda shell.bash hook)" + +# 2. Create test environment at ./env +if [ ! -d "${env_dir}" ]; then + printf "* Creating a test environment\n" + conda create --prefix "${env_dir}" -y python="$PYTHON_VERSION" +fi +conda activate "${env_dir}" + +# 3. 
Install Conda dependencies +printf "* Installing dependencies (except PyTorch)\n" +FFMPEG_PIN="=4.2" +if [[ "${PYTHON_VERSION}" = "3.9" ]]; then + FFMPEG_PIN=">=4.2" +fi + +conda install -y -c pytorch "ffmpeg${FFMPEG_PIN}" +conda env update --file "${this_dir}/environment.yml" --prune diff --git a/.circleci/unittest/windows/scripts/environment.yml b/.circleci/unittest/windows/scripts/environment.yml new file mode 100644 index 00000000000..d229aafb41a --- /dev/null +++ b/.circleci/unittest/windows/scripts/environment.yml @@ -0,0 +1,19 @@ +channels: + - pytorch + - defaults +dependencies: + - pytest + - pytest-cov + - pytest-mock + - pip + - libpng + - jpeg + - ca-certificates + - hdf5 + - setuptools + - pip: + - future + - scipy + - av !=9.1.1, <10 + - dataclasses + - h5py diff --git a/.circleci/unittest/windows/scripts/install.sh b/.circleci/unittest/windows/scripts/install.sh new file mode 100644 index 00000000000..85920abb8da --- /dev/null +++ b/.circleci/unittest/windows/scripts/install.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +unset PYTORCH_VERSION +# For unittest, nightly PyTorch is used as the following section, +# so no need to set PYTORCH_VERSION. +# In fact, keeping PYTORCH_VERSION forces us to hardcode PyTorch version in config. + +set -ex + +this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +eval "$(./conda/Scripts/conda.exe 'shell.bash' 'hook')" +conda activate ./env + +# TODO, refactor the below logic to make it easy to understand how to get correct cuda_version. +if [ "${CU_VERSION:-}" == cpu ] ; then + cudatoolkit="cpuonly" + version="cpu" +else + if [[ ${#CU_VERSION} -eq 4 ]]; then + CUDA_VERSION="${CU_VERSION:2:1}.${CU_VERSION:3:1}" + elif [[ ${#CU_VERSION} -eq 5 ]]; then + CUDA_VERSION="${CU_VERSION:2:2}.${CU_VERSION:4:1}" + fi + + cuda_toolkit_pckg="cudatoolkit" + if [[ $CUDA_VERSION == 11.6 || $CUDA_VERSION == 11.7 ]]; then + cuda_toolkit_pckg="pytorch-cuda" + fi + + echo "Using CUDA $CUDA_VERSION as determined by CU_VERSION" + version="$(python -c "print('.'.join(\"${CUDA_VERSION}\".split('.')[:2]))")" + cudatoolkit="${cuda_toolkit_pckg}=${version}" +fi + +printf "Installing PyTorch with %s\n" "${cudatoolkit}" +conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c nvidia "pytorch-${UPLOAD_CHANNEL}"::pytorch[build="*${version}*"] "${cudatoolkit}" + +torch_cuda=$(python -c "import torch; print(torch.cuda.is_available())") +echo torch.cuda.is_available is $torch_cuda + +if [ ! 
-z "${CUDA_VERSION:-}" ] ; then + if [ "$torch_cuda" == "False" ]; then + echo "torch with cuda installed but torch.cuda.is_available() is False" + exit 1 + fi +fi + +source "$this_dir/set_cuda_envs.sh" + +printf "* Installing torchvision\n" +"$this_dir/vc_env_helper.bat" python setup.py develop diff --git a/.circleci/unittest/windows/scripts/install_conda.bat b/.circleci/unittest/windows/scripts/install_conda.bat new file mode 100644 index 00000000000..6052ad08b10 --- /dev/null +++ b/.circleci/unittest/windows/scripts/install_conda.bat @@ -0,0 +1 @@ +start /wait "" "%miniconda_exe%" /S /InstallationType=JustMe /RegisterPython=0 /AddToPath=0 /D=%tmp_conda% diff --git a/.circleci/unittest/windows/scripts/post_process.sh b/.circleci/unittest/windows/scripts/post_process.sh new file mode 100644 index 00000000000..5c5cbb758a9 --- /dev/null +++ b/.circleci/unittest/windows/scripts/post_process.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -e + +eval "$(./conda/Scripts/conda.exe 'shell.bash' 'hook')" +conda activate ./env diff --git a/.circleci/unittest/windows/scripts/run_test.sh b/.circleci/unittest/windows/scripts/run_test.sh new file mode 100644 index 00000000000..802ad37f511 --- /dev/null +++ b/.circleci/unittest/windows/scripts/run_test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e + +eval "$(./conda/Scripts/conda.exe 'shell.bash' 'hook')" +conda activate ./env + +this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +source "$this_dir/set_cuda_envs.sh" + +python -m torch.utils.collect_env +pytest --junitxml=test-results/junit.xml -v --durations 20 diff --git a/.circleci/unittest/windows/scripts/set_cuda_envs.sh b/.circleci/unittest/windows/scripts/set_cuda_envs.sh new file mode 100644 index 00000000000..7db3137b594 --- /dev/null +++ b/.circleci/unittest/windows/scripts/set_cuda_envs.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +set -ex + +echo CU_VERSION is "${CU_VERSION}" +echo CUDA_VERSION is "${CUDA_VERSION}" + +# Currenly, CU_VERSION and CUDA_VERSION are not consistent. +# to understand this code, see https://github.com/pytorch/vision/issues/4443 +version="cpu" +if [[ ! -z "${CUDA_VERSION}" ]] ; then + version="$CUDA_VERSION" +else + if [[ ${#CU_VERSION} -eq 5 ]]; then + version="${CU_VERSION:2:2}.${CU_VERSION:4:1}" + fi +fi + +# Don't use if [[ "$version" == "cpu" ]]; then exit 0 fi. +# It would exit the shell. One result is cpu tests would not run if the shell exit. +# Unless there's an error, Don't exit. +if [[ "$version" != "cpu" ]]; then + # set cuda envs + export PATH="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${version}/bin:/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${version}/libnvvp:$PATH" + export CUDA_PATH_V${version/./_}="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v${version}" + export CUDA_PATH="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v${version}" + + if [ ! -d "$CUDA_PATH" ]; then + echo "$CUDA_PATH" does not exist + exit 1 + fi + + if [ ! 
-f "${CUDA_PATH}\include\nvjpeg.h" ]; then + echo "nvjpeg does not exist" + exit 1 + fi + + # check cuda driver version + for path in '/c/Program Files/NVIDIA Corporation/NVSMI/nvidia-smi.exe' /c/Windows/System32/nvidia-smi.exe; do + if [[ -x "$path" ]]; then + "$path" || echo "true"; + break + fi + done + + which nvcc + nvcc --version + env | grep CUDA +fi diff --git a/.circleci/unittest/windows/scripts/setup_env.sh b/.circleci/unittest/windows/scripts/setup_env.sh new file mode 100644 index 00000000000..5eeb2e17b48 --- /dev/null +++ b/.circleci/unittest/windows/scripts/setup_env.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# This script is for setting up environment in which unit test is ran. +# To speed up the CI time, the resulting environment is cached. +# +# Do not install PyTorch and torchvision here, otherwise they also get cached. + +set -e + +this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +root_dir="$(git rev-parse --show-toplevel)" +conda_dir="${root_dir}/conda" +env_dir="${root_dir}/env" + +cd "${root_dir}" + +# 1. Install conda at ./conda +if [ ! -d "${conda_dir}" ]; then + printf "* Installing conda\n" + export tmp_conda="$(echo $conda_dir | tr '/' '\\')" + export miniconda_exe="$(echo $root_dir | tr '/' '\\')\\miniconda.exe" + curl --output miniconda.exe https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe -O + "$this_dir/install_conda.bat" + unset tmp_conda + unset miniconda_exe +fi + +eval "$(${conda_dir}/Scripts/conda.exe 'shell.bash' 'hook')" + +# 2. Create test environment at ./env +if [ ! -d "${env_dir}" ]; then + printf "* Creating a test environment\n" + conda create --prefix "${env_dir}" -y python="$PYTHON_VERSION" +fi +conda activate "${env_dir}" + +# 3. Install Conda dependencies +printf "* Installing dependencies (except PyTorch)\n" +conda env update --file "${this_dir}/environment.yml" --prune + +# 4. Downgrade setuptools on Python 3.7. +# See https://github.com/pytorch/vision/pull/5868 +if [[ "${PYTHON_VERSION}" == '3.7' ]]; then + pip install --upgrade setuptools==58.0.4 +fi diff --git a/.circleci/unittest/windows/scripts/vc_env_helper.bat b/.circleci/unittest/windows/scripts/vc_env_helper.bat new file mode 100644 index 00000000000..9410135677a --- /dev/null +++ b/.circleci/unittest/windows/scripts/vc_env_helper.bat @@ -0,0 +1,39 @@ +@echo on + +set VC_VERSION_LOWER=16 +set VC_VERSION_UPPER=17 + +for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [%VC_VERSION_LOWER%^,%VC_VERSION_UPPER%^) -property installationPath`) do ( + if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" ( + set "VS15INSTALLDIR=%%i" + set "VS15VCVARSALL=%%i\VC\Auxiliary\Build\vcvarsall.bat" + goto vswhere + ) +) + +:vswhere +if "%VSDEVCMD_ARGS%" == "" ( + call "%VS15VCVARSALL%" x64 || exit /b 1 +) else ( + call "%VS15VCVARSALL%" x64 %VSDEVCMD_ARGS% || exit /b 1 +) + +@echo on + +set DISTUTILS_USE_SDK=1 + +set args=%1 +shift +:start +if [%1] == [] goto done +set args=%args% %1 +shift +goto start + +:done +if "%args%" == "" ( + echo Usage: vc_env_helper.bat [command] [args] + echo e.g. 
vc_env_helper.bat cl /c test.cpp +) + +%args% || exit /b 1 diff --git a/.github/workflows/build-conda-linux.yml b/.github/workflows/build-conda-linux.yml new file mode 100644 index 00000000000..d38ba9fa2aa --- /dev/null +++ b/.github/workflows/build-conda-linux.yml @@ -0,0 +1,45 @@ +name: Build Linux Conda + +on: + pull_request: + push: + branches: + - nightly + workflow_dispatch: + +jobs: + generate-matrix: + uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main + with: + package-type: conda + os: linux + test-infra-repository: pytorch/test-infra + test-infra-ref: main + build: + needs: generate-matrix + strategy: + fail-fast: false + matrix: + include: + - repository: pytorch/vision + pre-script: "" + post-script: "" + conda-package-directory: packaging/torchvision + smoke-test-script: test/smoke_test.py + package-name: torchvision + name: ${{ matrix.repository }} + uses: pytorch/test-infra/.github/workflows/build_conda_linux.yml@main + with: + conda-package-directory: ${{ matrix.conda-package-directory }} + repository: ${{ matrix.repository }} + ref: "" + test-infra-repository: pytorch/test-infra + test-infra-ref: main + build-matrix: ${{ needs.generate-matrix.outputs.matrix }} + pre-script: ${{ matrix.pre-script }} + post-script: ${{ matrix.post-script }} + package-name: ${{ matrix.package-name }} + smoke-test-script: ${{ matrix.smoke-test-script }} + trigger-event: ${{ github.event_name }} + secrets: + CONDA_PYTORCHBOT_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }} diff --git a/.github/workflows/build-conda-m1.yml b/.github/workflows/build-conda-m1.yml new file mode 100644 index 00000000000..cb0b687f0b0 --- /dev/null +++ b/.github/workflows/build-conda-m1.yml @@ -0,0 +1,46 @@ +name: Build M1 Conda + +on: + pull_request: + push: + branches: + - nightly + workflow_dispatch: + +jobs: + generate-matrix: + uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main + with: + package-type: conda + os: macos-arm64 + test-infra-repository: pytorch/test-infra + test-infra-ref: main + build: + needs: generate-matrix + strategy: + fail-fast: false + matrix: + include: + - repository: pytorch/vision + pre-script: "" + post-script: "" + conda-package-directory: packaging/torchvision + smoke-test-script: test/smoke_test.py + package-name: torchvision + name: ${{ matrix.repository }} + uses: pytorch/test-infra/.github/workflows/build_conda_macos.yml@main + with: + conda-package-directory: ${{ matrix.conda-package-directory }} + repository: ${{ matrix.repository }} + ref: "" + test-infra-repository: pytorch/test-infra + test-infra-ref: main + build-matrix: ${{ needs.generate-matrix.outputs.matrix }} + pre-script: ${{ matrix.pre-script }} + post-script: ${{ matrix.post-script }} + package-name: ${{ matrix.package-name }} + smoke-test-script: ${{ matrix.smoke-test-script }} + runner-type: macos-m1-12 + trigger-event: ${{ github.event_name }} + secrets: + CONDA_PYTORCHBOT_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }} diff --git a/.github/workflows/build-conda-macos.yml b/.github/workflows/build-conda-macos.yml new file mode 100644 index 00000000000..4b43528a326 --- /dev/null +++ b/.github/workflows/build-conda-macos.yml @@ -0,0 +1,46 @@ +name: Build Macos Conda + +on: + pull_request: + push: + branches: + - nightly + workflow_dispatch: + +jobs: + generate-matrix: + uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main + with: + package-type: conda + os: macos + test-infra-repository: pytorch/test-infra + test-infra-ref: main + 
build: + needs: generate-matrix + strategy: + fail-fast: false + matrix: + include: + - repository: pytorch/vision + pre-script: "" + post-script: "" + conda-package-directory: packaging/torchvision + smoke-test-script: test/smoke_test.py + package-name: torchvision + name: ${{ matrix.repository }} + uses: pytorch/test-infra/.github/workflows/build_conda_macos.yml@main + with: + conda-package-directory: ${{ matrix.conda-package-directory }} + repository: ${{ matrix.repository }} + ref: "" + test-infra-repository: pytorch/test-infra + test-infra-ref: main + build-matrix: ${{ needs.generate-matrix.outputs.matrix }} + pre-script: ${{ matrix.pre-script }} + post-script: ${{ matrix.post-script }} + package-name: ${{ matrix.package-name }} + smoke-test-script: ${{ matrix.smoke-test-script }} + runner-type: macos-12 + trigger-event: ${{ github.event_name }} + secrets: + CONDA_PYTORCHBOT_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }} diff --git a/.github/workflows/build-wheels-linux.yml b/.github/workflows/build-wheels-linux.yml new file mode 100644 index 00000000000..806b5331e24 --- /dev/null +++ b/.github/workflows/build-wheels-linux.yml @@ -0,0 +1,44 @@ +name: Build Linux Wheels + +on: + pull_request: + push: + branches: + - nightly + workflow_dispatch: + +jobs: + generate-matrix: + uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main + with: + package-type: wheel + os: linux + test-infra-repository: pytorch/test-infra + test-infra-ref: main + build: + needs: generate-matrix + strategy: + fail-fast: false + matrix: + include: + - repository: pytorch/vision + pre-script: packaging/pre_build_script.sh + post-script: packaging/post_build_script.sh + smoke-test-script: test/smoke_test.py + package-name: torchvision + name: ${{ matrix.repository }} + uses: pytorch/test-infra/.github/workflows/build_wheels_linux.yml@main + with: + repository: ${{ matrix.repository }} + ref: "" + test-infra-repository: pytorch/test-infra + test-infra-ref: main + build-matrix: ${{ needs.generate-matrix.outputs.matrix }} + pre-script: ${{ matrix.pre-script }} + post-script: ${{ matrix.post-script }} + package-name: ${{ matrix.package-name }} + smoke-test-script: ${{ matrix.smoke-test-script }} + trigger-event: ${{ github.event_name }} + secrets: + AWS_PYTORCH_UPLOADER_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_UPLOADER_ACCESS_KEY_ID }} + AWS_PYTORCH_UPLOADER_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_UPLOADER_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/build-wheels-m1.yml b/.github/workflows/build-wheels-m1.yml new file mode 100644 index 00000000000..7f5446ebbf0 --- /dev/null +++ b/.github/workflows/build-wheels-m1.yml @@ -0,0 +1,45 @@ +name: Build M1 Wheels + +on: + pull_request: + push: + branches: + - nightly + workflow_dispatch: + +jobs: + generate-matrix: + uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main + with: + package-type: wheel + os: macos-arm64 + test-infra-repository: pytorch/test-infra + test-infra-ref: main + build: + needs: generate-matrix + strategy: + fail-fast: false + matrix: + include: + - repository: pytorch/vision + pre-script: packaging/pre_build_script.sh + post-script: packaging/post_build_script.sh + smoke-test-script: test/smoke_test.py + package-name: torchvision + name: ${{ matrix.repository }} + uses: pytorch/test-infra/.github/workflows/build_wheels_macos.yml@main + with: + repository: ${{ matrix.repository }} + ref: "" + test-infra-repository: pytorch/test-infra + test-infra-ref: main + build-matrix: ${{ 
needs.generate-matrix.outputs.matrix }} + pre-script: ${{ matrix.pre-script }} + post-script: ${{ matrix.post-script }} + package-name: ${{ matrix.package-name }} + runner-type: macos-m1-12 + smoke-test-script: ${{ matrix.smoke-test-script }} + trigger-event: ${{ github.event_name }} + secrets: + AWS_PYTORCH_UPLOADER_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_UPLOADER_ACCESS_KEY_ID }} + AWS_PYTORCH_UPLOADER_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_UPLOADER_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/build-wheels-macos.yml b/.github/workflows/build-wheels-macos.yml new file mode 100644 index 00000000000..78677961d05 --- /dev/null +++ b/.github/workflows/build-wheels-macos.yml @@ -0,0 +1,45 @@ +name: Build Macos Wheels + +on: + pull_request: + push: + branches: + - nightly + workflow_dispatch: + +jobs: + generate-matrix: + uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main + with: + package-type: wheel + os: macos + test-infra-repository: pytorch/test-infra + test-infra-ref: main + build: + needs: generate-matrix + strategy: + fail-fast: false + matrix: + include: + - repository: pytorch/vision + pre-script: packaging/pre_build_script.sh + post-script: packaging/post_build_script.sh + smoke-test-script: test/smoke_test.py + package-name: torchvision + name: ${{ matrix.repository }} + uses: pytorch/test-infra/.github/workflows/build_wheels_macos.yml@main + with: + repository: ${{ matrix.repository }} + ref: "" + test-infra-repository: pytorch/test-infra + test-infra-ref: main + build-matrix: ${{ needs.generate-matrix.outputs.matrix }} + pre-script: ${{ matrix.pre-script }} + post-script: ${{ matrix.post-script }} + package-name: ${{ matrix.package-name }} + runner-type: macos-12 + smoke-test-script: ${{ matrix.smoke-test-script }} + trigger-event: ${{ github.event_name }} + secrets: + AWS_PYTORCH_UPLOADER_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_UPLOADER_ACCESS_KEY_ID }} + AWS_PYTORCH_UPLOADER_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_UPLOADER_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/pr-labels.yml b/.github/workflows/pr-labels.yml new file mode 100644 index 00000000000..20c37e4fd88 --- /dev/null +++ b/.github/workflows/pr-labels.yml @@ -0,0 +1,35 @@ +name: pr-labels + +on: + push: + branches: + - main + +jobs: + is-properly-labeled: + runs-on: ubuntu-latest + + steps: + - name: Set up python + uses: actions/setup-python@v2 + + - name: Install requests + run: pip install requests + + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Process commit and find merger responsible for labeling + id: commit + run: echo "::set-output name=merger::$(python .github/process_commit.py ${{ github.sha }})" + + - name: Ping merger responsible for labeling if necessary + if: ${{ steps.commit.outputs.merger != '' }} + uses: mshick/add-pr-comment@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + message: | + Hey ${{ steps.commit.outputs.merger }}! + + You merged this PR, but no labels were added. 
The list of valid labels is available at https://github.com/pytorch/vision/blob/main/.github/process_commit.py diff --git a/.github/workflows/prototype-tests.yml b/.github/workflows/prototype-tests.yml new file mode 100644 index 00000000000..d3a9fbf1e3e --- /dev/null +++ b/.github/workflows/prototype-tests.yml @@ -0,0 +1,73 @@ +name: tests + +on: + pull_request: + +jobs: + prototype: + strategy: + matrix: + os: + - ubuntu-latest + - windows-latest + - macos-latest + fail-fast: false + + runs-on: ${{ matrix.os }} + + steps: + - name: Set up python + uses: actions/setup-python@v3 + with: + python-version: 3.7 + + - name: Upgrade system packages + run: python -m pip install --upgrade pip setuptools wheel + + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Install PyTorch nightly builds + run: pip install --progress-bar=off --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/cpu/ + + - name: Install torchvision + run: pip install --progress-bar=off --no-build-isolation --editable . + + - name: Install other prototype dependencies + run: pip install --progress-bar=off scipy pycocotools h5py + + - name: Install test requirements + run: pip install --progress-bar=off pytest pytest-mock pytest-cov + + - name: Mark setup as complete + id: setup + run: exit 0 + + - name: Run prototype datapoints tests + shell: bash + run: | + pytest \ + --durations=20 \ + --cov=torchvision/prototype/datapoints \ + --cov-report=term-missing \ + test/test_prototype_datapoints*.py + + - name: Run prototype transforms tests + if: success() || ( failure() && steps.setup.conclusion == 'success' ) + shell: bash + run: | + pytest \ + --durations=20 \ + --cov=torchvision/prototype/transforms \ + --cov-report=term-missing \ + test/test_prototype_transforms*.py + + - name: Run prototype models tests + if: success() || ( failure() && steps.setup.conclusion == 'success' ) + shell: bash + run: | + pytest \ + --durations=20 \ + --cov=torchvision/prototype/models \ + --cov-report=term-missing \ + test/test_prototype_models*.py diff --git a/.github/workflows/test-linux-cpu.yml b/.github/workflows/test-linux-cpu.yml new file mode 100644 index 00000000000..234ad97f4d6 --- /dev/null +++ b/.github/workflows/test-linux-cpu.yml @@ -0,0 +1,57 @@ +name: Unit-tests on Linux CPU + +on: + pull_request: + push: + branches: + - nightly + - main + - release/* + workflow_dispatch: + +env: + CHANNEL: "nightly" + +jobs: + tests: + strategy: + matrix: + python_version: ["3.7", "3.8", "3.9", "3.10"] + fail-fast: false + uses: pytorch/test-infra/.github/workflows/linux_job.yml@main + with: + runner: linux.12xlarge + repository: pytorch/vision + script: | + # Mark Build Directory Safe + git config --global --add safe.directory /__w/vision/vision + + # Set up Environment Variables + export PYTHON_VERSION="${{ matrix.python_version }}" + export VERSION="cpu" + export CUDATOOLKIT="cpuonly" + + # Set CHANNEL + if [[ (${GITHUB_EVENT_NAME} = 'pull_request' && (${GITHUB_BASE_REF} = 'release'*)) || (${GITHUB_REF} = 'refs/heads/release'*) ]]; then + export CHANNEL=test + else + export CHANNEL=nightly + fi + + # Create Conda Env + conda create -yp ci_env python="${PYTHON_VERSION}" numpy libpng jpeg scipy + conda activate /work/ci_env + + # Install PyTorch, Torchvision, and testing libraries + set -ex + conda install \ + --yes \ + -c "pytorch-${CHANNEL}" \ + -c nvidia "pytorch-${CHANNEL}"::pytorch[build="*${VERSION}*"] \ + "${CUDATOOLKIT}" + python3 setup.py develop + python3 -m pip install pytest pytest-mock 'av<10' + + # 
Run Tests + python3 -m torch.utils.collect_env + python3 -m pytest --junitxml=test-results/junit.xml -v --durations 20 diff --git a/.github/workflows/test-linux-gpu.yml b/.github/workflows/test-linux-gpu.yml new file mode 100644 index 00000000000..a4d938f23ed --- /dev/null +++ b/.github/workflows/test-linux-gpu.yml @@ -0,0 +1,61 @@ +name: Unit-tests on Linux GPU + +on: + pull_request: + push: + branches: + - nightly + - main + - release/* + workflow_dispatch: + +env: + CHANNEL: "nightly" + +jobs: + tests: + strategy: + matrix: + python_version: ["3.8"] + cuda_arch_version: ["11.6"] + fail-fast: false + uses: pytorch/test-infra/.github/workflows/linux_job.yml@main + with: + runner: linux.g5.4xlarge.nvidia.gpu + repository: pytorch/vision + gpu-arch-type: cuda + gpu-arch-version: ${{ matrix.cuda_arch_version }} + timeout: 120 + script: | + # Mark Build Directory Safe + git config --global --add safe.directory /__w/vision/vision + + # Set up Environment Variables + export PYTHON_VERSION="${{ matrix.python_version }}" + export VERSION="${{ matrix.cuda_arch_version }}" + export CUDATOOLKIT="pytorch-cuda=${VERSION}" + + # Set CHANNEL + if [[ (${GITHUB_EVENT_NAME} = 'pull_request' && (${GITHUB_BASE_REF} = 'release'*)) || (${GITHUB_REF} = 'refs/heads/release'*) ]]; then + export CHANNEL=test + else + export CHANNEL=nightly + fi + + # Create Conda Env + conda create -yp ci_env python="${PYTHON_VERSION}" numpy libpng jpeg scipy + conda activate /work/ci_env + + # Install PyTorch, Torchvision, and testing libraries + set -ex + conda install \ + --yes \ + -c "pytorch-${CHANNEL}" \ + -c nvidia "pytorch-${CHANNEL}"::pytorch[build="*${VERSION}*"] \ + "${CUDATOOLKIT}" + python3 setup.py develop + python3 -m pip install pytest pytest-mock 'av<10' + + # Run Tests + python3 -m torch.utils.collect_env + python3 -m pytest --junitxml=test-results/junit.xml -v --durations 20 diff --git a/.github/workflows/test-m1.yml b/.github/workflows/test-m1.yml new file mode 100644 index 00000000000..c03fa9f76e4 --- /dev/null +++ b/.github/workflows/test-m1.yml @@ -0,0 +1,50 @@ +name: Unit-tests on M1 +on: + pull_request: + push: + branches: + - nightly + - main + - release/* + workflow_dispatch: +env: + CHANNEL: "nightly" +jobs: + tests: + name: "Unit-tests on M1" + runs-on: macos-m1-12 + strategy: + matrix: + py_vers: [ "3.8"] + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + - name: Set Release CHANNEL (for release) + if: ${{ (github.event_name == 'pull_request' && startsWith(github.base_ref, 'release')) || startsWith(github.ref, 'refs/heads/release') }} + run: | + echo "CHANNEL=test" >> "$GITHUB_ENV" + - name: Install TorchVision + shell: arch -arch arm64 bash {0} + env: + ENV_NAME: conda-env-${{ github.run_id }} + PY_VERS: ${{ matrix.py_vers }} + run: | + . ~/miniconda3/etc/profile.d/conda.sh + # Needed for JPEG library detection as setup.py detects conda presence by running `shutil.which('conda')` + export PATH=~/miniconda3/bin:$PATH + set -ex + conda create -yp ${ENV_NAME} python=${PY_VERS} numpy libpng jpeg scipy + conda run -p ${ENV_NAME} python3 -mpip install --pre torch --extra-index-url=https://download.pytorch.org/whl/${CHANNEL} + conda run -p ${ENV_NAME} python3 setup.py develop + conda run -p ${ENV_NAME} python3 -mpip install pytest pytest-mock 'av<10' + - name: Run tests + shell: arch -arch arm64 bash {0} + env: + ENV_NAME: conda-env-${{ github.run_id }} + PY_VERS: ${{ matrix.py_vers }} + run: | + . 
~/miniconda3/etc/profile.d/conda.sh + set -ex + conda run -p ${ENV_NAME} --no-capture-output python3 -u -mpytest -v --tb=long --durations 20 + conda env remove -p ${ENV_NAME} diff --git a/.github/workflows/tests-schedule.yml b/.github/workflows/tests-schedule.yml new file mode 100644 index 00000000000..1e5a78207fe --- /dev/null +++ b/.github/workflows/tests-schedule.yml @@ -0,0 +1,57 @@ +name: tests + +on: + pull_request: + paths: + - "test/test_datasets_download.py" + - ".github/failed_schedule_issue_template.md" + - ".github/workflows/tests-schedule.yml" + + schedule: + - cron: "0 9 * * *" + +jobs: + download: + runs-on: ubuntu-latest + + steps: + - name: Set up python + uses: actions/setup-python@v2 + with: + python-version: 3.7 + + - name: Upgrade system packages + run: python -m pip install --upgrade pip setuptools wheel + + - name: SSL + run: python -c 'import ssl; print(ssl.OPENSSL_VERSION)' + + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Install torch nightly build + run: pip install --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html + + - name: Install torchvision + run: pip install --no-build-isolation --editable . + + - name: Install all optional dataset requirements + run: pip install scipy pycocotools lmdb requests + + - name: Install tests requirements + run: pip install pytest + + - name: Run tests + run: pytest -ra -v test/test_datasets_download.py + + - uses: JasonEtco/create-an-issue@v2.4.0 + name: Create issue if download tests failed + if: failure() && github.event_name == 'schedule' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + REPO: ${{ github.repository }} + WORKFLOW: ${{ github.workflow }} + JOB: ${{ github.job }} + ID: ${{ github.run_id }} + with: + filename: .github/failed_schedule_issue_template.md From 719a6b417a06b33e41affc3b26ed1b775ca88933 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Wed, 14 Dec 2022 13:01:55 +0100 Subject: [PATCH 14/22] remove old workflows --- ...-gpu.yml => prototype-tests-linux-gpu.yml} | 5 +- .github/workflows/prototype-tests.yml | 73 ------------------- 2 files changed, 4 insertions(+), 74 deletions(-) rename .github/workflows/{prototype-transforms-tests-linux-gpu.yml => prototype-tests-linux-gpu.yml} (90%) delete mode 100644 .github/workflows/prototype-tests.yml diff --git a/.github/workflows/prototype-transforms-tests-linux-gpu.yml b/.github/workflows/prototype-tests-linux-gpu.yml similarity index 90% rename from .github/workflows/prototype-transforms-tests-linux-gpu.yml rename to .github/workflows/prototype-tests-linux-gpu.yml index 6856ce78191..fa56480ee16 100644 --- a/.github/workflows/prototype-transforms-tests-linux-gpu.yml +++ b/.github/workflows/prototype-tests-linux-gpu.yml @@ -1,4 +1,4 @@ -name: Prototype Unit-tests on Linux +name: Prototype tests on Linux on: pull_request: @@ -85,6 +85,9 @@ jobs: echo '::endgroup::' echo '::group::Run prototype tests' + # We don't want to run the prototype datasets tests. Since the positional glob into `pytest`, i.e. + # `test/test_prototype*.py` takes the highest priority, neither `--ignore` nor `--ignore-glob` can help us here. 
+ rm test/test_prototype_datasets*.py pytest \ --durations=25 \ --cov=torchvision/prototype \ diff --git a/.github/workflows/prototype-tests.yml b/.github/workflows/prototype-tests.yml deleted file mode 100644 index d3a9fbf1e3e..00000000000 --- a/.github/workflows/prototype-tests.yml +++ /dev/null @@ -1,73 +0,0 @@ -name: tests - -on: - pull_request: - -jobs: - prototype: - strategy: - matrix: - os: - - ubuntu-latest - - windows-latest - - macos-latest - fail-fast: false - - runs-on: ${{ matrix.os }} - - steps: - - name: Set up python - uses: actions/setup-python@v3 - with: - python-version: 3.7 - - - name: Upgrade system packages - run: python -m pip install --upgrade pip setuptools wheel - - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Install PyTorch nightly builds - run: pip install --progress-bar=off --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/cpu/ - - - name: Install torchvision - run: pip install --progress-bar=off --no-build-isolation --editable . - - - name: Install other prototype dependencies - run: pip install --progress-bar=off scipy pycocotools h5py - - - name: Install test requirements - run: pip install --progress-bar=off pytest pytest-mock pytest-cov - - - name: Mark setup as complete - id: setup - run: exit 0 - - - name: Run prototype datapoints tests - shell: bash - run: | - pytest \ - --durations=20 \ - --cov=torchvision/prototype/datapoints \ - --cov-report=term-missing \ - test/test_prototype_datapoints*.py - - - name: Run prototype transforms tests - if: success() || ( failure() && steps.setup.conclusion == 'success' ) - shell: bash - run: | - pytest \ - --durations=20 \ - --cov=torchvision/prototype/transforms \ - --cov-report=term-missing \ - test/test_prototype_transforms*.py - - - name: Run prototype models tests - if: success() || ( failure() && steps.setup.conclusion == 'success' ) - shell: bash - run: | - pytest \ - --durations=20 \ - --cov=torchvision/prototype/models \ - --cov-report=term-missing \ - test/test_prototype_models*.py From a209b31387d9d14bc6446743587db244bccc2106 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Wed, 14 Dec 2022 14:34:50 +0100 Subject: [PATCH 15/22] use free GHA runner for CPU tests --- .github/workflows/prototype-tests-linux-gpu.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/prototype-tests-linux-gpu.yml b/.github/workflows/prototype-tests-linux-gpu.yml index fa56480ee16..497d44802c7 100644 --- a/.github/workflows/prototype-tests-linux-gpu.yml +++ b/.github/workflows/prototype-tests-linux-gpu.yml @@ -14,7 +14,7 @@ jobs: - "3.10" gpu-arch-type: ["cpu"] gpu-arch-version: [""] - runner: ["linux.2xlarge"] + runner: ["ubuntu-latest"] include: - python-version: "3.8" gpu-arch-type: cuda From 2db1a1a3d184ae688feb15bae338360e55c8acbe Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Wed, 14 Dec 2022 14:40:30 +0100 Subject: [PATCH 16/22] Revert "use free GHA runner for CPU tests" This reverts commit a209b31387d9d14bc6446743587db244bccc2106. 
--- .github/workflows/prototype-tests-linux-gpu.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/prototype-tests-linux-gpu.yml b/.github/workflows/prototype-tests-linux-gpu.yml index 497d44802c7..fa56480ee16 100644 --- a/.github/workflows/prototype-tests-linux-gpu.yml +++ b/.github/workflows/prototype-tests-linux-gpu.yml @@ -14,7 +14,7 @@ jobs: - "3.10" gpu-arch-type: ["cpu"] gpu-arch-version: [""] - runner: ["ubuntu-latest"] + runner: ["linux.2xlarge"] include: - python-version: "3.8" gpu-arch-type: cuda From e159ddc086ece7e2dd3fc10bfc3ab4fbad2b97c2 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Wed, 14 Dec 2022 20:17:46 +0100 Subject: [PATCH 17/22] [REVERTME] turn warnings into errors --- .github/workflows/prototype-tests-linux-gpu.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/prototype-tests-linux-gpu.yml b/.github/workflows/prototype-tests-linux-gpu.yml index fa56480ee16..78ee4206507 100644 --- a/.github/workflows/prototype-tests-linux-gpu.yml +++ b/.github/workflows/prototype-tests-linux-gpu.yml @@ -89,6 +89,7 @@ jobs: # `test/test_prototype*.py` takes the highest priority, neither `--ignore` nor `--ignore-glob` can help us here. rm test/test_prototype_datasets*.py pytest \ + -Werror \ --durations=25 \ --cov=torchvision/prototype \ --cov-report=term-missing \ From 72cd31cf92a1b9e2173c735747c5e2f703e71444 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Wed, 14 Dec 2022 21:07:18 +0100 Subject: [PATCH 18/22] try fix test warnings --- test/test_prototype_transforms_functional.py | 8 ++++---- torchvision/prototype/transforms/functional/_geometry.py | 2 ++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/test/test_prototype_transforms_functional.py b/test/test_prototype_transforms_functional.py index f33992234ad..1ca9add1924 100644 --- a/test/test_prototype_transforms_functional.py +++ b/test/test_prototype_transforms_functional.py @@ -625,10 +625,10 @@ def _compute_expected_bbox(bbox, angle_, expand_, center_): ) transformed_points = np.matmul(points, affine_matrix.T) out_bbox = [ - np.min(transformed_points[:4, 0]), - np.min(transformed_points[:4, 1]), - np.max(transformed_points[:4, 0]), - np.max(transformed_points[:4, 1]), + int(np.min(transformed_points[:4, 0])), + int(np.min(transformed_points[:4, 1])), + int(np.max(transformed_points[:4, 0])), + int(np.max(transformed_points[:4, 1])), ] if expand_: tr_x = np.min(transformed_points[4:, 0]) diff --git a/torchvision/prototype/transforms/functional/_geometry.py b/torchvision/prototype/transforms/functional/_geometry.py index ba417a0ce84..7313d4ff756 100644 --- a/torchvision/prototype/transforms/functional/_geometry.py +++ b/torchvision/prototype/transforms/functional/_geometry.py @@ -1053,6 +1053,8 @@ def _pad_with_vector_fill( output = _pad_with_scalar_fill(image, torch_padding, fill=0, padding_mode="constant") left, right, top, bottom = torch_padding + if not image.is_floating_point(): + fill = [int(f) for f in fill] fill = torch.tensor(fill, dtype=image.dtype, device=image.device).reshape(-1, 1, 1) if top > 0: From c52a3a8d83292dcb0362cd284299607b3cd30583 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Thu, 15 Dec 2022 11:41:16 +0100 Subject: [PATCH 19/22] fix module import in tests --- test/test_prototype_transforms_consistency.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/test/test_prototype_transforms_consistency.py b/test/test_prototype_transforms_consistency.py index f562649be91..8cda11008f5 100644 --- 
a/test/test_prototype_transforms_consistency.py +++ b/test/test_prototype_transforms_consistency.py @@ -1,9 +1,10 @@ import enum +import importlib.machinery +import importlib.util import inspect import random import re from collections import defaultdict -from importlib.machinery import SourceFileLoader from pathlib import Path import numpy as np @@ -890,8 +891,16 @@ def test_aa(self, inpt, interpolation): def import_transforms_from_references(reference): - ref_det_filepath = Path(__file__).parent.parent / "references" / reference / "transforms.py" - return SourceFileLoader(ref_det_filepath.stem, ref_det_filepath.as_posix()).load_module() + HERE = Path(__file__).parent + PROJECT_ROOT = HERE.parent + + loader = importlib.machinery.SourceFileLoader( + "transforms", str(PROJECT_ROOT / "references" / reference / "transforms.py") + ) + spec = importlib.util.spec_from_loader("transforms", loader) + module = importlib.util.module_from_spec(spec) + loader.exec_module(module) + return module det_transforms = import_transforms_from_references("detection") From 1577d2b8c4fa3c174063c12dbbe3b31b5631d842 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Thu, 15 Dec 2022 15:51:28 +0100 Subject: [PATCH 20/22] revert --- .github/workflows/prototype-tests-linux-gpu.yml | 1 - torchvision/prototype/transforms/functional/_geometry.py | 2 -- 2 files changed, 3 deletions(-) diff --git a/.github/workflows/prototype-tests-linux-gpu.yml b/.github/workflows/prototype-tests-linux-gpu.yml index 78ee4206507..fa56480ee16 100644 --- a/.github/workflows/prototype-tests-linux-gpu.yml +++ b/.github/workflows/prototype-tests-linux-gpu.yml @@ -89,7 +89,6 @@ jobs: # `test/test_prototype*.py` takes the highest priority, neither `--ignore` nor `--ignore-glob` can help us here. rm test/test_prototype_datasets*.py pytest \ - -Werror \ --durations=25 \ --cov=torchvision/prototype \ --cov-report=term-missing \ diff --git a/torchvision/prototype/transforms/functional/_geometry.py b/torchvision/prototype/transforms/functional/_geometry.py index 7313d4ff756..ba417a0ce84 100644 --- a/torchvision/prototype/transforms/functional/_geometry.py +++ b/torchvision/prototype/transforms/functional/_geometry.py @@ -1053,8 +1053,6 @@ def _pad_with_vector_fill( output = _pad_with_scalar_fill(image, torch_padding, fill=0, padding_mode="constant") left, right, top, bottom = torch_padding - if not image.is_floating_point(): - fill = [int(f) for f in fill] fill = torch.tensor(fill, dtype=image.dtype, device=image.device).reshape(-1, 1, 1) if top > 0: From ed8d9ab4b59e26d50fcf6e01d9c686a2434aa2f9 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Fri, 16 Dec 2022 13:07:47 +0100 Subject: [PATCH 21/22] int -> float --- test/test_prototype_transforms_functional.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/test_prototype_transforms_functional.py b/test/test_prototype_transforms_functional.py index 1ca9add1924..d199625df0f 100644 --- a/test/test_prototype_transforms_functional.py +++ b/test/test_prototype_transforms_functional.py @@ -625,10 +625,10 @@ def _compute_expected_bbox(bbox, angle_, expand_, center_): ) transformed_points = np.matmul(points, affine_matrix.T) out_bbox = [ - int(np.min(transformed_points[:4, 0])), - int(np.min(transformed_points[:4, 1])), - int(np.max(transformed_points[:4, 0])), - int(np.max(transformed_points[:4, 1])), + float(np.min(transformed_points[:4, 0])), + float(np.min(transformed_points[:4, 1])), + float(np.max(transformed_points[:4, 0])), + float(np.max(transformed_points[:4, 1])), ] if 
expand_: tr_x = np.min(transformed_points[4:, 0]) From 119dc41b3ba01464f4bf2127fa5620084b74bae1 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Mon, 19 Dec 2022 11:05:39 +0100 Subject: [PATCH 22/22] [REVERTME] why do we have to mark as safe? --- .github/workflows/prototype-tests-linux-gpu.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/prototype-tests-linux-gpu.yml b/.github/workflows/prototype-tests-linux-gpu.yml index fa56480ee16..fe1e46ab354 100644 --- a/.github/workflows/prototype-tests-linux-gpu.yml +++ b/.github/workflows/prototype-tests-linux-gpu.yml @@ -31,7 +31,6 @@ jobs: timeout: 45 script: | # Mark Build Directory Safe - git config --global --add safe.directory /__w/vision/vision echo '::group::Set PyTorch conda channel' if [[ (${GITHUB_EVENT_NAME} = 'pull_request' && (${GITHUB_BASE_REF} = 'release'*)) || (${GITHUB_REF} = 'refs/heads/release'*) ]]; then