Support using onnxruntime 1.16.0 with CUDA 11.4 on Jetson Orin NX (Linux arm64 GPU). #1630

Merged · 8 commits · Dec 19, 2024
21 changes: 15 additions & 6 deletions .github/workflows/aarch64-linux-gnu-shared.yaml
@@ -34,12 +34,20 @@ concurrency:
jobs:
aarch64_linux_gnu_shared:
runs-on: ${{ matrix.os }}
name: aarch64 shared GPU ${{ matrix.gpu }}
name: aarch64 shared GPU ${{ matrix.gpu }} ${{ matrix.onnxruntime_version }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
gpu: [ON, OFF]
include:
- os: ubuntu-latest
gpu: ON
onnxruntime_version: "1.11.0"
- os: ubuntu-latest
gpu: ON
onnxruntime_version: "1.16.0"
- os: ubuntu-latest
gpu: OFF
onnxruntime_version: ""

steps:
- uses: actions/checkout@v4
@@ -62,7 +70,7 @@ jobs:
if: steps.cache-qemu.outputs.cache-hit != 'true'
run: |
sudo apt-get update
sudo apt-get install autoconf automake autotools-dev ninja-build
sudo apt-get install autoconf automake autotools-dev ninja-build libglib2.0-dev

- name: checkout-qemu
if: steps.cache-qemu.outputs.cache-hit != 'true'
@@ -159,6 +167,7 @@ jobs:

export BUILD_SHARED_LIBS=ON
export SHERPA_ONNX_ENABLE_GPU=${{ matrix.gpu }}
export SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=${{ matrix.onnxruntime_version }}

./build-aarch64-linux-gnu.sh

@@ -199,7 +208,7 @@ jobs:
if [[ ${{ matrix.gpu }} == OFF ]]; then
dst=${dst}-cpu
else
dst=${dst}-gpu
dst=${dst}-gpu-onnxruntime-${{ matrix.onnxruntime_version }}
fi
mkdir $dst

@@ -223,7 +232,7 @@

- uses: actions/upload-artifact@v4
with:
name: sherpa-onnx-linux-aarch64-shared-gpu-${{ matrix.gpu }}
name: sherpa-onnx-linux-aarch64-shared-gpu-${{ matrix.gpu }}-onnxruntime-${{ matrix.onnxruntime_version }}
path: sherpa-onnx-*linux-aarch64-shared*.tar.bz2

# https://huggingface.co/docs/hub/spaces-github-actions
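To reproduce the GPU variant of this workflow locally on an aarch64 board, the build boils down to exporting the same variables the CI job sets before calling the build script. A minimal sketch, assuming the board already has the matching CUDA toolkit installed:

    # Mirror the environment the CI job exports above.
    export BUILD_SHARED_LIBS=ON
    export SHERPA_ONNX_ENABLE_GPU=ON
    # 1.16.0 targets CUDA 11.4 (Jetson Orin NX); use 1.11.0 for CUDA 10.2 (Jetson Nano B01).
    export SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.16.0
    ./build-aarch64-linux-gnu.sh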
2 changes: 1 addition & 1 deletion .github/workflows/aarch64-linux-gnu-static.yaml
@@ -61,7 +61,7 @@ jobs:
if: steps.cache-qemu.outputs.cache-hit != 'true'
run: |
sudo apt-get update
sudo apt-get install autoconf automake autotools-dev ninja-build
sudo apt-get install autoconf automake autotools-dev ninja-build libglib2.0-dev

- name: checkout-qemu
if: steps.cache-qemu.outputs.cache-hit != 'true'
2 changes: 1 addition & 1 deletion .github/workflows/arm-linux-gnueabihf.yaml
@@ -62,7 +62,7 @@ jobs:
if: steps.cache-qemu.outputs.cache-hit != 'true'
run: |
sudo apt-get update
sudo apt-get install autoconf automake autotools-dev ninja-build
sudo apt-get install autoconf automake autotools-dev ninja-build libglib2.0-dev

- name: checkout-qemu
if: steps.cache-qemu.outputs.cache-hit != 'true'
3 changes: 3 additions & 0 deletions CMakeLists.txt
@@ -46,6 +46,9 @@ option(SHERPA_ONNX_USE_PRE_INSTALLED_ONNXRUNTIME_IF_AVAILABLE "True to use pre-i
option(SHERPA_ONNX_ENABLE_SANITIZER "Whether to enable ubsan and asan" OFF)
option(SHERPA_ONNX_BUILD_C_API_EXAMPLES "Whether to enable C API examples" ON)

set(SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION "1.11.0" CACHE STRING "Used only for Linux ARM64 GPU. If you use Jetson nano b01, then please set it to 1.11.0. If you use Jetson Orin NX, then set it to 1.16.0")


set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin")
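The new cache variable can also be passed to cmake directly instead of through the environment. A sketch only: the build directory name and the reduced option set are assumptions, while the toolchain file path is the one used by build-aarch64-linux-gnu.sh:

    mkdir -p build-aarch64-linux-gnu
    cd build-aarch64-linux-gnu
    cmake \
      -DBUILD_SHARED_LIBS=ON \
      -DSHERPA_ONNX_ENABLE_GPU=ON \
      -DSHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.16.0 \
      -DCMAKE_TOOLCHAIN_FILE=../toolchains/aarch64-linux-gnu.toolchain.cmake \
      ..
    make -j4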
32 changes: 27 additions & 5 deletions build-aarch64-linux-gnu.sh
@@ -1,4 +1,25 @@
#!/usr/bin/env bash
#
# Usage of this file
#
# (1) Build CPU version of sherpa-onnx
# ./build-aarch64-linux-gnu.sh
#
# (2) Build GPU version of sherpa-onnx
#
# (a) Make sure your board has NVIDIA GPU(s)
#
# (b) For Jetson Nano B01 (using CUDA 10.2)
#
# export SHERPA_ONNX_ENABLE_GPU=ON
# export SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.11.0
# ./build-aarch64-linux-gnu.sh
#
# (c) For Jetson Orin NX (using CUDA 11.4)
#
# export SHERPA_ONNX_ENABLE_GPU=ON
# export SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.16.0
# ./build-aarch64-linux-gnu.sh

if command -v aarch64-none-linux-gnu-gcc &> /dev/null; then
ln -svf $(which aarch64-none-linux-gnu-gcc) ./aarch64-linux-gnu-gcc
@@ -47,18 +68,18 @@ fi
if [[ x"$SHERPA_ONNX_ENABLE_GPU" == x"" ]]; then
# By default, use CPU
SHERPA_ONNX_ENABLE_GPU=OFF

# If you use GPU, then please make sure you have NVIDIA GPUs on your board.
# It uses onnxruntime 1.11.0.
#
# Tested on Jetson Nano B01
fi

if [[ x"$SHERPA_ONNX_ENABLE_GPU" == x"ON" ]]; then
# Build shared libs if building GPU is enabled.
BUILD_SHARED_LIBS=ON
fi

if [[ x"$SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION" == x"" ]]; then
# Used only when SHERPA_ONNX_ENABLE_GPU is ON
SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION="1.11.0"
fi

cmake \
-DBUILD_PIPER_PHONMIZE_EXE=OFF \
-DBUILD_PIPER_PHONMIZE_TESTS=OFF \
@@ -75,6 +96,7 @@ cmake \
-DSHERPA_ONNX_ENABLE_JNI=OFF \
-DSHERPA_ONNX_ENABLE_C_API=ON \
-DSHERPA_ONNX_ENABLE_WEBSOCKET=ON \
-DSHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=$SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION \
-DCMAKE_TOOLCHAIN_FILE=../toolchains/aarch64-linux-gnu.toolchain.cmake \
..

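After a build, a quick way to check that the intended onnxruntime version was pulled in is to look for the versioned shared library in the build tree. This is a hypothetical verification step; the directory layout below is an assumption about where the script places its output:

    # The GPU package ships a versioned library, e.g. libonnxruntime.so.1.16.0.
    find build-aarch64-linux-gnu -name 'libonnxruntime*'
    # On the target board, confirm the CUDA toolkit matches the chosen version:
    nvcc --version   # 11.4 for Jetson Orin NX, 10.2 for Jetson Nano B01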
34 changes: 26 additions & 8 deletions cmake/onnxruntime-linux-aarch64-gpu.cmake
@@ -18,19 +18,37 @@ if(NOT SHERPA_ONNX_ENABLE_GPU)
message(FATAL_ERROR "This file is for NVIDIA GPU only. Given SHERPA_ONNX_ENABLE_GPU: ${SHERPA_ONNX_ENABLE_GPU}")
endif()

set(onnxruntime_URL "https://github.com/csukuangfj/onnxruntime-libs/releases/download/v1.11.0/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2")
set(onnxruntime_URL2 "https://hf-mirror.com/csukuangfj/onnxruntime-libs/resolve/main/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2")
set(onnxruntime_HASH "SHA256=36eded935551e23aead09d4173bdf0bd1e7b01fdec15d77f97d6e34029aa60d7")
message(WARNING "\
SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION: ${SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION}
If you use Jetson nano b01, then please pass
-DSHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.11.0
to cmake (You need to make sure CUDA 10.2 is available on your board).

If you use Jetson Orin NX, then please pass
-DSHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.16.0
to cmake (You need to make sure CUDA 11.4 is available on your board).
")

set(v ${SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION})

set(onnxruntime_URL "https://github.com/csukuangfj/onnxruntime-libs/releases/download/v${v}/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2")
set(onnxruntime_URL2 "https://hf-mirror.com/csukuangfj/onnxruntime-libs/resolve/main/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2")

if(v STREQUAL "1.11.0")
set(onnxruntime_HASH "SHA256=36eded935551e23aead09d4173bdf0bd1e7b01fdec15d77f97d6e34029aa60d7")
else()
set(onnxruntime_HASH "SHA256=4c09d5acf2c2682b4eab1dc2f1ad98fc1fde5f5f1960063e337983ba59379a4b")
endif()

# If you don't have access to the Internet,
# please download onnxruntime to one of the following locations.
# You can add more if you want.
set(possible_file_locations
$ENV{HOME}/Downloads/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2
${CMAKE_SOURCE_DIR}/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2
${CMAKE_BINARY_DIR}/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2
/tmp/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2
/star-fj/fangjun/download/github/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2
$ENV{HOME}/Downloads/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
${CMAKE_SOURCE_DIR}/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
${CMAKE_BINARY_DIR}/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
/tmp/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
/star-fj/fangjun/download/github/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
)

foreach(f IN LISTS possible_file_locations)
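For boards without Internet access, the archive for the selected version can be downloaded on another machine and dropped into one of the locations listed above. A sketch using the 1.16.0 release; the URL and SHA256 are the values hard-coded in this file:

    v=1.16.0
    wget -O ~/Downloads/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2 \
      https://github.com/csukuangfj/onnxruntime-libs/releases/download/v${v}/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
    # Check the download against the hash cmake expects for 1.16.0.
    echo "4c09d5acf2c2682b4eab1dc2f1ad98fc1fde5f5f1960063e337983ba59379a4b  $HOME/Downloads/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2" \
      | sha256sum -c -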