diff --git a/tools/ci_build/github/azure-pipelines/c-api-noopenmp-packaging-pipelines.yml b/tools/ci_build/github/azure-pipelines/c-api-noopenmp-packaging-pipelines.yml index b13bd25e61d47..0967e6ed0f570 100644 --- a/tools/ci_build/github/azure-pipelines/c-api-noopenmp-packaging-pipelines.yml +++ b/tools/ci_build/github/azure-pipelines/c-api-noopenmp-packaging-pipelines.yml @@ -305,7 +305,7 @@ jobs: --docker-build-args "--network=host --build-arg POLICY=manylinux2014 --build-arg PLATFORM=x86_64 --build-arg DEVTOOLSET_ROOTPATH=/opt/rh/devtoolset-11/root --build-arg PREPEND_PATH=/opt/rh/devtoolset-11/root/usr/bin: --build-arg LD_LIBRARY_PATH_ARG=/opt/rh/devtoolset-11/root/usr/lib64:/opt/rh/devtoolset-11/root/usr/lib:/opt/rh/devtoolset-11/root/usr/lib64/dyninst:/opt/rh/devtoolset-11/root/usr/lib/dyninst:/usr/local/lib64 --build-arg BUILD_UID=$( id -u )" \ --container-registry onnxruntimebuildcache \ --repository onnxruntimecuda116xtrt85build - displayName: "Getonnxruntimecuda116xtrt84build image for tools/ci_build/github/linux/docker/Dockerfile.manylinux2014_cuda11_6_tensorrt8_4" + displayName: "Get onnxruntimecuda116xtrt85build image for tools/ci_build/github/linux/docker/Dockerfile.manylinux2014_cuda11_6_tensorrt8_5" workingDirectory: $(Build.SourcesDirectory)/onnxruntime ContainerRegistry: onnxruntimebuildcache diff --git a/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-daily-perf-pipeline.yml b/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-daily-perf-pipeline.yml index 5c52bfb3f8f4c..2d93185718596 100644 --- a/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-daily-perf-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/linux-gpu-tensorrt-daily-perf-pipeline.yml @@ -8,7 +8,8 @@ parameters: - name: TrtVersion displayName: TensorRT Version type: string - default: 8.4.1.5 + default: 8.5.1.1 values: + - 8.5.1.1 - 8.2.1.8 - 8.0.1.6 diff --git a/tools/ci_build/github/azure-pipelines/templates/py-linux-gpu.yml 
b/tools/ci_build/github/azure-pipelines/templates/py-linux-gpu.yml index d9c4e94304023..7c1a8af644d92 100644 --- a/tools/ci_build/github/azure-pipelines/templates/py-linux-gpu.yml +++ b/tools/ci_build/github/azure-pipelines/templates/py-linux-gpu.yml @@ -23,10 +23,10 @@ jobs: - template: get-docker-image-steps.yml parameters: - Dockerfile: tools/ci_build/github/linux/docker/Dockerfile.manylinux2014_cuda11_6_tensorrt8_4 + Dockerfile: tools/ci_build/github/linux/docker/Dockerfile.manylinux2014_cuda11_6_tensorrt8_5 Context: tools/ci_build/github/linux/docker DockerBuildArgs: "--network=host --build-arg POLICY=manylinux2014 --build-arg PLATFORM=x86_64 --build-arg DEVTOOLSET_ROOTPATH=/opt/rh/devtoolset-11/root --build-arg PREPEND_PATH=/opt/rh/devtoolset-11/root/usr/bin: --build-arg LD_LIBRARY_PATH_ARG=/opt/rh/devtoolset-11/root/usr/lib64:/opt/rh/devtoolset-11/root/usr/lib:/opt/rh/devtoolset-11/root/usr/lib64/dyninst:/opt/rh/devtoolset-11/root/usr/lib/dyninst:/usr/local/lib64 --build-arg BUILD_UID=$( id -u ) --build-arg PLATFORM=${{ parameters.arch }}" - Repository: onnxruntimecuda116xtrt84build${{ parameters.arch }} + Repository: onnxruntimecuda116xtrt85build${{ parameters.arch }} - task: Bash@3 @@ -34,7 +34,7 @@ jobs: inputs: targetType: filePath filePath: tools/ci_build/github/linux/run_python_dockerbuild.sh - arguments: -i onnxruntimecuda116xtrt84build${{ parameters.arch }} -x "-d GPU" + arguments: -i onnxruntimecuda116xtrt85build${{ parameters.arch }} -x "-d GPU" - task: PublishBuildArtifacts@1 displayName: 'Publish Artifact: ONNXRuntime python wheel' diff --git a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_tensorrt b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_tensorrt index e260a20b2db98..900267b9bb8e5 100644 --- a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_tensorrt +++ b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_tensorrt @@ -2,7 +2,7 @@ -# Label: com.nvidia.cuda.version: 11.6.1 +# Label: com.nvidia.cuda.version: 11.8.0 # Label: com.nvidia.cudnn.version: 8.4.0 # 
Ubuntu 20.04 -FROM nvidia/cuda:11.6.1-cudnn8-devel-ubuntu20.04 +FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 ARG PYTHON_VERSION=3.8 ARG DEBIAN_FRONTEND=noninteractive @@ -11,12 +11,12 @@ ADD scripts /tmp/scripts RUN /tmp/scripts/install_ubuntu.sh -p $PYTHON_VERSION && /tmp/scripts/install_os_deps.sh && /tmp/scripts/install_python_deps.sh -p $PYTHON_VERSION && rm -rf /tmp/scripts # Install TensorRT -RUN v="8.4.1-1+cuda11.6" &&\ +RUN v="8.5.1-1+cuda11.8" &&\ apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/7fa2af80.pub &&\ apt-get update &&\ sudo apt-get install -y libnvinfer8=${v} libnvonnxparsers8=${v} libnvparsers8=${v} libnvinfer-plugin8=${v} \ libnvinfer-dev=${v} libnvonnxparsers-dev=${v} libnvparsers-dev=${v} libnvinfer-plugin-dev=${v} \ - python3-libnvinfer=${v} + python3-libnvinfer=${v} libnvinfer-samples=${v} WORKDIR /root diff --git a/tools/ci_build/github/linux/run_dockerbuild.sh b/tools/ci_build/github/linux/run_dockerbuild.sh index 2f64dc4a55f02..5aa28f052257c 100755 --- a/tools/ci_build/github/linux/run_dockerbuild.sh +++ b/tools/ci_build/github/linux/run_dockerbuild.sh @@ -96,7 +96,7 @@ elif [ $BUILD_DEVICE = "gpu" ]; then --docker-build-args="--build-arg BASEIMAGE=nvcr.io/nvidia/cuda:11.6.2-cudnn8-devel-${BUILD_OS} --build-arg BUILD_USER=onnxruntimedev --build-arg BUILD_UID=$(id -u) --build-arg PYTHON_VERSION=${PYTHON_VER} --build-arg INSTALL_DEPS_EXTRA_ARGS=\"${INSTALL_DEPS_EXTRA_ARGS}\" --build-arg USE_CONDA=${USE_CONDA} --network=host" \ --dockerfile Dockerfile.ubuntu_gpu_training --context . elif [[ $BUILD_DEVICE = "tensorrt"* ]]; then - IMAGE="$BUILD_OS-cuda11.6-cudnn8.4-tensorrt8.4" + IMAGE="$BUILD_OS-cuda11.8-cudnn8.4-tensorrt8.5" DOCKER_FILE=Dockerfile.ubuntu_tensorrt $GET_DOCKER_IMAGE_CMD --repository "onnxruntime-$IMAGE" \