From 469cfd11d79dfc2847948e814e9b5e47b8ebd973 Mon Sep 17 00:00:00 2001 From: Katherine Yang Date: Thu, 20 Jul 2023 12:52:05 -0700 Subject: [PATCH 01/12] add test --- qa/L0_backend_python/env/test.sh | 63 +++++++++++++++++++++++++++++++- 1 file changed, 62 insertions(+), 1 deletion(-) diff --git a/qa/L0_backend_python/env/test.sh b/qa/L0_backend_python/env/test.sh index 657a1a57f8..3cb956df0b 100755 --- a/qa/L0_backend_python/env/test.sh +++ b/qa/L0_backend_python/env/test.sh @@ -126,6 +126,67 @@ cp python3.10.tar.gz models/python_3_10/python_3_10_environment.tar.gz sed -i "s/^name:.*/name: \"python_3_10\"/" config.pbtxt && \ echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}" >> config.pbtxt) cp ../../python_models/python_version/model.py ./models/python_3_10/1/ + +# Create a model with python 3.8 version +# Successful execution of the Python model indicates that the environment has +# been setup correctly. +create_conda_env "3.8" "python-3-8" +conda install -c conda-forge libstdcxx-ng=12 -y +conda install numpy=1.23.4 -y +conda install tensorflow=2.10.0 -y +PY38_VERSION_STRING="Python version is 3.8, NumPy version is 1.23.4, and Tensorflow version is 2.10.0" + +create_python_backend_stub +conda-pack -o python3.8.tar.gz +path_to_conda_pack=`pwd`/python3.8.tar.gz +mkdir -p models/python_3_8/1/ +cp ../../python_models/python_version/config.pbtxt ./models/python_3_8 +(cd models/python_3_8 && \ + sed -i "s/^name:.*/name: \"python_3_8\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) +cp ../../python_models/python_version/model.py ./models/python_3_8/1/ +cp python_backend/builddir/triton_python_backend_stub ./models/python_3_8 + +# Create a model with python 3.9 version +# Successful execution of the Python model indicates that the environment has +# been setup correctly. 
+create_conda_env "3.9" "python-3-9" +conda install -c conda-forge libstdcxx-ng=12 -y +conda install numpy=1.23.4 -y +conda install tensorflow=2.10.0 -y +PY39_VERSION_STRING="Python version is 3.9, NumPy version is 1.23.4, and Tensorflow version is 2.10.0" + +create_python_backend_stub +conda-pack -o python3.9.tar.gz +path_to_conda_pack=`pwd`/python3.9.tar.gz +mkdir -p models/python_3_9/1/ +cp ../../python_models/python_version/config.pbtxt ./models/python_3_9 +(cd models/python_3_9 && \ + sed -i "s/^name:.*/name: \"python_3_9\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) +cp ../../python_models/python_version/model.py ./models/python_3_9/1/ +cp python_backend/builddir/triton_python_backend_stub ./models/python_3_9 + +# Create a model with python 3.11 version +# Successful execution of the Python model indicates that the environment has +# been setup correctly. +create_conda_env "3.11" "python-3-11" +conda install -c conda-forge libstdcxx-ng=12 -y +conda install numpy=1.23.4 -y +conda install tensorflow=2.10.0 -y +PY311_VERSION_STRING="Python version is 3.11, NumPy version is 1.23.4, and Tensorflow version is 2.10.0" + +create_python_backend_stub +conda-pack -o python3.11.tar.gz +path_to_conda_pack=`pwd`/python3.11.tar.gz +mkdir -p models/python_3_11/1/ +cp ../../python_models/python_version/config.pbtxt ./models/python_3_11 +(cd models/python_3_11 && \ + sed -i "s/^name:.*/name: \"python_3_11\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) +cp ../../python_models/python_version/model.py ./models/python_3_11/1/ +cp python_backend/builddir/triton_python_backend_stub ./models/python_3_11 + rm -rf ./miniconda run_server @@ -139,7 +200,7 @@ kill $SERVER_PID wait $SERVER_PID set +e -for EXPECTED_VERSION_STRING in "$PY36_VERSION_STRING" "$PY37_VERSION_STRING" "$PY37_1_VERSION_STRING" 
"$PY310_VERSION_STRING"; do +for EXPECTED_VERSION_STRING in "$PY36_VERSION_STRING" "$PY37_VERSION_STRING" "$PY37_1_VERSION_STRING" "$PY310_VERSION_STRING" "$PY38_VERSION_STRING" "$PY39_VERSION_STRING" "$PY311_VERSION_STRING"; do grep "$EXPECTED_VERSION_STRING" $SERVER_LOG if [ $? -ne 0 ]; then cat $SERVER_LOG From 7e71bc03d1cd8828c754672d23fcc6b29b6d20c8 Mon Sep 17 00:00:00 2001 From: Katherine Yang Date: Thu, 20 Jul 2023 19:36:00 -0700 Subject: [PATCH 02/12] update test --- qa/L0_backend_python/env/test.sh | 47 ++++++++++++++++++++++++++------ 1 file changed, 39 insertions(+), 8 deletions(-) diff --git a/qa/L0_backend_python/env/test.sh b/qa/L0_backend_python/env/test.sh index 3cb956df0b..c245c53a1b 100755 --- a/qa/L0_backend_python/env/test.sh +++ b/qa/L0_backend_python/env/test.sh @@ -127,9 +127,33 @@ cp python3.10.tar.gz models/python_3_10/python_3_10_environment.tar.gz echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}" >> config.pbtxt) cp ../../python_models/python_version/model.py ./models/python_3_10/1/ +run_server +if [ "$SERVER_PID" == "0" ]; then + echo -e "\n***\n*** Failed to start $SERVER\n***" + cat $SERVER_LOG + exit 1 +fi + +kill $SERVER_PID +wait $SERVER_PID + +set +e +for EXPECTED_VERSION_STRING in "$PY36_VERSION_STRING" "$PY37_VERSION_STRING" "$PY37_1_VERSION_STRING" "$PY310_VERSION_STRING"; do + grep "$EXPECTED_VERSION_STRING" $SERVER_LOG + if [ $? -ne 0 ]; then + cat $SERVER_LOG + echo -e "\n***\n*** $EXPECTED_VERSION_STRING was not found in Triton logs. \n***" + RET=1 + fi +done + +# Test other python versions +conda update -n base -c defaults conda -y +rm $SERVER_LOG # Create a model with python 3.8 version # Successful execution of the Python model indicates that the environment has # been setup correctly. 
+ create_conda_env "3.8" "python-3-8" conda install -c conda-forge libstdcxx-ng=12 -y conda install numpy=1.23.4 -y @@ -138,7 +162,9 @@ PY38_VERSION_STRING="Python version is 3.8, NumPy version is 1.23.4, and Tensorf create_python_backend_stub conda-pack -o python3.8.tar.gz -path_to_conda_pack=`pwd`/python3.8.tar.gz +path_to_conda_pack="$PWD/python-3-8" +mkdir -p $path_to_conda_pack +tar -xzf python3.8.tar.gz -C $path_to_conda_pack mkdir -p models/python_3_8/1/ cp ../../python_models/python_version/config.pbtxt ./models/python_3_8 (cd models/python_3_8 && \ @@ -155,10 +181,11 @@ conda install -c conda-forge libstdcxx-ng=12 -y conda install numpy=1.23.4 -y conda install tensorflow=2.10.0 -y PY39_VERSION_STRING="Python version is 3.9, NumPy version is 1.23.4, and Tensorflow version is 2.10.0" - create_python_backend_stub conda-pack -o python3.9.tar.gz -path_to_conda_pack=`pwd`/python3.9.tar.gz +path_to_conda_pack="$PWD/python-3-9" +mkdir -p $path_to_conda_pack +tar -xzf python3.9.tar.gz -C $path_to_conda_pack mkdir -p models/python_3_9/1/ cp ../../python_models/python_version/config.pbtxt ./models/python_3_9 (cd models/python_3_9 && \ @@ -171,15 +198,19 @@ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_9 # Successful execution of the Python model indicates that the environment has # been setup correctly. 
create_conda_env "3.11" "python-3-11" +# tensorflow needs to be installed before numpy so pip does not mess up conda +# enviroment +pip install tensorflow==2.12.0 conda install -c conda-forge libstdcxx-ng=12 -y -conda install numpy=1.23.4 -y -conda install tensorflow=2.10.0 -y -PY311_VERSION_STRING="Python version is 3.11, NumPy version is 1.23.4, and Tensorflow version is 2.10.0" +conda install numpy=1.23.5 -y +PY311_VERSION_STRING="Python version is 3.11, NumPy version is 1.23.5, and Tensorflow version is 2.12.0" create_python_backend_stub conda-pack -o python3.11.tar.gz path_to_conda_pack=`pwd`/python3.11.tar.gz -mkdir -p models/python_3_11/1/ +path_to_conda_pack="$PWD/python-3-11" +mkdir -p $path_to_conda_pack +tar -xzf python3.11.tar.gz -C $path_to_conda_pack cp ../../python_models/python_version/config.pbtxt ./models/python_3_11 (cd models/python_3_11 && \ sed -i "s/^name:.*/name: \"python_3_11\"/" config.pbtxt && \ @@ -200,7 +231,7 @@ kill $SERVER_PID wait $SERVER_PID set +e -for EXPECTED_VERSION_STRING in "$PY36_VERSION_STRING" "$PY37_VERSION_STRING" "$PY37_1_VERSION_STRING" "$PY310_VERSION_STRING" "$PY38_VERSION_STRING" "$PY39_VERSION_STRING" "$PY311_VERSION_STRING"; do +for EXPECTED_VERSION_STRING in "$PY38_VERSION_STRING" "$PY39_VERSION_STRING" "$PY311_VERSION_STRING"; do grep "$EXPECTED_VERSION_STRING" $SERVER_LOG if [ $? 
-ne 0 ]; then cat $SERVER_LOG From 25b67605caa6ba50744a04516f3419553e9fc039 Mon Sep 17 00:00:00 2001 From: Katherine Yang Date: Wed, 26 Jul 2023 19:13:31 -0700 Subject: [PATCH 03/12] fix test --- qa/L0_backend_python/env/test.sh | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/qa/L0_backend_python/env/test.sh b/qa/L0_backend_python/env/test.sh index c245c53a1b..3211a467e1 100755 --- a/qa/L0_backend_python/env/test.sh +++ b/qa/L0_backend_python/env/test.sh @@ -151,7 +151,7 @@ done conda update -n base -c defaults conda -y rm $SERVER_LOG # Create a model with python 3.8 version -# Successful execution of the Python model indicates that the environment has +# Successful execution of the Python model indicates that the environment has # been setup correctly. create_conda_env "3.8" "python-3-8" @@ -174,7 +174,7 @@ cp ../../python_models/python_version/model.py ./models/python_3_8/1/ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_8 # Create a model with python 3.9 version -# Successful execution of the Python model indicates that the environment has +# Successful execution of the Python model indicates that the environment has # been setup correctly. create_conda_env "3.9" "python-3-9" conda install -c conda-forge libstdcxx-ng=12 -y @@ -195,12 +195,12 @@ cp ../../python_models/python_version/model.py ./models/python_3_9/1/ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_9 # Create a model with python 3.11 version -# Successful execution of the Python model indicates that the environment has +# Successful execution of the Python model indicates that the environment has # been setup correctly. 
create_conda_env "3.11" "python-3-11" -# tensorflow needs to be installed before numpy so pip does not mess up conda -# enviroment -pip install tensorflow==2.12.0 +# tensorflow needs to be installed before numpy so pip does not mess up conda +# environment +pip install tensorflow==2.12.0 conda install -c conda-forge libstdcxx-ng=12 -y conda install numpy=1.23.5 -y PY311_VERSION_STRING="Python version is 3.11, NumPy version is 1.23.5, and Tensorflow version is 2.12.0" @@ -211,6 +211,7 @@ path_to_conda_pack=`pwd`/python3.11.tar.gz path_to_conda_pack="$PWD/python-3-11" mkdir -p $path_to_conda_pack tar -xzf python3.11.tar.gz -C $path_to_conda_pack +mkdir -p models/python_3_11/1/ cp ../../python_models/python_version/config.pbtxt ./models/python_3_11 (cd models/python_3_11 && \ sed -i "s/^name:.*/name: \"python_3_11\"/" config.pbtxt && \ From d4f0a564e92b994b13db314f0e67fe862211483f Mon Sep 17 00:00:00 2001 From: Katherine Yang Date: Thu, 27 Jul 2023 15:15:49 -0700 Subject: [PATCH 04/12] add all around tests for python version 3.8-3.11 --- qa/L0_backend_python/env/test.sh | 13 + qa/L0_backend_python/test.sh | 495 ++++++++++++++++--------------- 2 files changed, 269 insertions(+), 239 deletions(-) diff --git a/qa/L0_backend_python/env/test.sh b/qa/L0_backend_python/env/test.sh index 3211a467e1..e049da2994 100755 --- a/qa/L0_backend_python/env/test.sh +++ b/qa/L0_backend_python/env/test.sh @@ -172,6 +172,9 @@ cp ../../python_models/python_version/config.pbtxt ./models/python_3_8 echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) cp ../../python_models/python_version/model.py ./models/python_3_8/1/ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_8 +# copy the stub out to /opt/tritonserver/backends/python/3-8 +mkdir -p /opt/tritonserver/backends/python/3-8 +cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-8 # Create a model with python 3.9 version # 
Successful execution of the Python model indicates that the environment has @@ -193,6 +196,9 @@ cp ../../python_models/python_version/config.pbtxt ./models/python_3_9 echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) cp ../../python_models/python_version/model.py ./models/python_3_9/1/ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_9 +# copy the stub out to /opt/tritonserver/backends/python/3-9 +mkdir -p /opt/tritonserver/backends/python/3-9 +cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-9 # Create a model with python 3.11 version # Successful execution of the Python model indicates that the environment has @@ -218,6 +224,13 @@ cp ../../python_models/python_version/config.pbtxt ./models/python_3_11 echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) cp ../../python_models/python_version/model.py ./models/python_3_11/1/ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_11 +# copy the stub out to /opt/tritonserver/backends/python/3-11 +mkdir -p /opt/tritonserver/backends/python/3-11 +cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-11 + +# copy the stub out to /opt/tritonserver/backends/python/3-10 +mkdir -p /opt/tritonserver/backends/python/3-10 +cp /opt/tritonserver/backends/python/triton_python_backend_stub /opt/tritonserver/backends/python/3-10 rm -rf ./miniconda diff --git a/qa/L0_backend_python/test.sh b/qa/L0_backend_python/test.sh index 9453420a3c..7725525c6d 100755 --- a/qa/L0_backend_python/test.sh +++ b/qa/L0_backend_python/test.sh @@ -132,255 +132,297 @@ mkdir -p models/dlpack_identity/1/ cp ../python_models/dlpack_identity/model.py ./models/dlpack_identity/1/ cp ../python_models/dlpack_identity/config.pbtxt ./models/dlpack_identity -# Skip torch install on Jetson since it is already installed. 
+# Env test should be run first so we can get the stubs for each of the +# environments +# Disable env test for Jetson since cloud storage repos are not supported if [ "$TEST_JETSON" == "0" ]; then - pip3 install torch==1.13.0+cpu -f https://download.pytorch.org/whl/torch_stable.html -else - # GPU tensor tests are disabled on jetson - EXPECTED_NUM_TESTS=9 + # In 'env' test we use miniconda for dependency management. No need to run + # the test in a virtual environment. + (cd env && bash -ex test.sh) + if [ $? -ne 0 ]; then + echo "Subtest env FAILED" + RET=1 + fi fi -prev_num_pages=`get_shm_pages` -run_server -if [ "$SERVER_PID" == "0" ]; then - cat $SERVER_LOG - echo -e "\n***\n*** Failed to start $SERVER\n***" - exit 1 -fi +PYTHON_ENV_VERSION="8 9 10 11" +for PYTHON_ENV in $PYTHON_ENV_VERSION; do + echo "python environment 3.${PYTHON_ENV}" + # Set up environment and stub for each test + add-apt-repository ppa:deadsnakes/ppa - + apt-get update + apt-get install "python3.${PYTHON_ENV}" -y + rm -f /usr/bin/python3 && \ + ln -s "/usr/bin/python3.${PYTHON_ENV}" /usr/bin/python3 + PYTHON_STUB_LOCATION=/opt/tritonserver/backend/python/3-${PYTHON_ENV}/triton_python_backend_stub + cp ${PYTHON_STUB_LOCATION} /opt/tritonserver/backend/python/ + + # Skip torch install on Jetson since it is already installed. + if [ "$TEST_JETSON" == "0" ]; then + pip3 install torch==1.13.0+cpu -f https://download.pytorch.org/whl/torch_stable.html + else + # GPU tensor tests are disabled on jetson + EXPECTED_NUM_TESTS=9 + fi -set +e -python3 $CLIENT_PY >> $CLIENT_LOG 2>&1 -if [ $? -ne 0 ]; then - cat $CLIENT_LOG - RET=1 -else - check_test_results $TEST_RESULT_FILE $EXPECTED_NUM_TESTS + prev_num_pages=`get_shm_pages` + run_server + if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + exit 1 + fi + + set +e + python3 $CLIENT_PY >> $CLIENT_LOG 2>&1 if [ $? 
-ne 0 ]; then cat $CLIENT_LOG - echo -e "\n***\n*** Test Result Verification Failed\n***" RET=1 + else + check_test_results $TEST_RESULT_FILE $EXPECTED_NUM_TESTS + if [ $? -ne 0 ]; then + cat $CLIENT_LOG + echo -e "\n***\n*** Test Result Verification Failed\n***" + RET=1 + fi fi -fi -set -e - -kill $SERVER_PID -wait $SERVER_PID - -current_num_pages=`get_shm_pages` -if [ $current_num_pages -ne $prev_num_pages ]; then - ls /dev/shm - cat $CLIENT_LOG - echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. -Shared memory pages before starting triton equals to $prev_num_pages -and shared memory pages after starting triton equals to $current_num_pages \n***" - RET=1 -fi + set -e -prev_num_pages=`get_shm_pages` -# Triton non-graceful exit -run_server -if [ "$SERVER_PID" == "0" ]; then - cat $SERVER_LOG - echo -e "\n***\n*** Failed to start $SERVER\n***" - exit 1 -fi + kill $SERVER_PID + wait $SERVER_PID -sleep 5 + current_num_pages=`get_shm_pages` + if [ $current_num_pages -ne $prev_num_pages ]; then + ls /dev/shm + cat $CLIENT_LOG + echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. + Shared memory pages before starting triton equals to $prev_num_pages + and shared memory pages after starting triton equals to $current_num_pages \n***" + RET=1 + fi -triton_procs=`pgrep --parent $SERVER_PID` + prev_num_pages=`get_shm_pages` + # Triton non-graceful exit + run_server + if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + exit 1 + fi -set +e + sleep 5 -# Trigger non-graceful termination of Triton -kill -9 $SERVER_PID + triton_procs=`pgrep --parent $SERVER_PID` -# Wait 10 seconds so that Python stub can detect non-graceful exit -sleep 10 + set +e -for triton_proc in $triton_procs; do - kill -0 $triton_proc > /dev/null 2>&1 - if [ $? 
-eq 0 ]; then - cat $CLIENT_LOG - echo -e "\n***\n*** Python backend non-graceful exit test failed \n***" - RET=1 - break - fi -done -set -e + # Trigger non-graceful termination of Triton + kill -9 $SERVER_PID -# -# Test KIND_GPU -# Disable env test for Jetson since GPU Tensors are not supported -if [ "$TEST_JETSON" == "0" ]; then - rm -rf models/ - mkdir -p models/add_sub_gpu/1/ - cp ../python_models/add_sub/model.py ./models/add_sub_gpu/1/ - cp ../python_models/add_sub_gpu/config.pbtxt ./models/add_sub_gpu/ - - prev_num_pages=`get_shm_pages` - run_server - if [ "$SERVER_PID" == "0" ]; then - cat $SERVER_LOG - echo -e "\n***\n*** Failed to start $SERVER\n***" - exit 1 - fi - - if [ $? -ne 0 ]; then - cat $SERVER_LOG - echo -e "\n***\n*** KIND_GPU model test failed \n***" - RET=1 - fi - - kill $SERVER_PID - wait $SERVER_PID - - current_num_pages=`get_shm_pages` - if [ $current_num_pages -ne $prev_num_pages ]; then - cat $CLIENT_LOG - ls /dev/shm - echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. - Shared memory pages before starting triton equals to $prev_num_pages - and shared memory pages after starting triton equals to $current_num_pages \n***" - exit 1 - fi -fi + # Wait 10 seconds so that Python stub can detect non-graceful exit + sleep 10 -# Test Multi file models -rm -rf models/ -mkdir -p models/multi_file/1/ -cp ../python_models/multi_file/*.py ./models/multi_file/1/ -cp ../python_models/identity_fp32/config.pbtxt ./models/multi_file/ -(cd models/multi_file && \ - sed -i "s/^name:.*/name: \"multi_file\"/" config.pbtxt) - -prev_num_pages=`get_shm_pages` -run_server -if [ "$SERVER_PID" == "0" ]; then - cat $SERVER_LOG - echo -e "\n***\n*** Failed to start $SERVER\n***" - exit 1 -fi + for triton_proc in $triton_procs; do + kill -0 $triton_proc > /dev/null 2>&1 + if [ $? 
-eq 0 ]; then + cat $CLIENT_LOG + echo -e "\n***\n*** Python backend non-graceful exit test failed \n***" + RET=1 + break + fi + done + set -e + + # + # Test KIND_GPU + # Disable env test for Jetson since GPU Tensors are not supported + if [ "$TEST_JETSON" == "0" ]; then + rm -rf models/ + mkdir -p models/add_sub_gpu/1/ + cp ../python_models/add_sub/model.py ./models/add_sub_gpu/1/ + cp ../python_models/add_sub_gpu/config.pbtxt ./models/add_sub_gpu/ + + prev_num_pages=`get_shm_pages` + run_server + if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + exit 1 + fi -if [ $? -ne 0 ]; then - cat $SERVER_LOG - echo -e "\n***\n*** multi-file model test failed \n***" - RET=1 -fi + if [ $? -ne 0 ]; then + cat $SERVER_LOG + echo -e "\n***\n*** KIND_GPU model test failed \n***" + RET=1 + fi -kill $SERVER_PID -wait $SERVER_PID + kill $SERVER_PID + wait $SERVER_PID -current_num_pages=`get_shm_pages` -if [ $current_num_pages -ne $prev_num_pages ]; then - cat $SERVER_LOG - ls /dev/shm - echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. -Shared memory pages before starting triton equals to $prev_num_pages -and shared memory pages after starting triton equals to $current_num_pages \n***" - exit 1 -fi + current_num_pages=`get_shm_pages` + if [ $current_num_pages -ne $prev_num_pages ]; then + cat $CLIENT_LOG + ls /dev/shm + echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. 
+ Shared memory pages before starting triton equals to $prev_num_pages + and shared memory pages after starting triton equals to $current_num_pages \n***" + exit 1 + fi + fi -# Test environment variable propagation -rm -rf models/ -mkdir -p models/model_env/1/ -cp ../python_models/model_env/model.py ./models/model_env/1/ -cp ../python_models/model_env/config.pbtxt ./models/model_env/ - -export MY_ENV="MY_ENV" -prev_num_pages=`get_shm_pages` -run_server -if [ "$SERVER_PID" == "0" ]; then - cat $SERVER_LOG - echo -e "\n***\n*** Failed to start $SERVER\n***" - echo -e "\n***\n*** Environment variable test failed \n***" - exit 1 -fi + # Test Multi file models + rm -rf models/ + mkdir -p models/multi_file/1/ + cp ../python_models/multi_file/*.py ./models/multi_file/1/ + cp ../python_models/identity_fp32/config.pbtxt ./models/multi_file/ + (cd models/multi_file && \ + sed -i "s/^name:.*/name: \"multi_file\"/" config.pbtxt) + + prev_num_pages=`get_shm_pages` + run_server + if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + exit 1 + fi -kill $SERVER_PID -wait $SERVER_PID + if [ $? -ne 0 ]; then + cat $SERVER_LOG + echo -e "\n***\n*** multi-file model test failed \n***" + RET=1 + fi -current_num_pages=`get_shm_pages` -if [ $current_num_pages -ne $prev_num_pages ]; then - cat $CLIENT_LOG - ls /dev/shm - echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. 
-Shared memory pages before starting triton equals to $prev_num_pages -and shared memory pages after starting triton equals to $current_num_pages \n***" - exit 1 -fi + kill $SERVER_PID + wait $SERVER_PID -rm -fr ./models -mkdir -p models/identity_fp32/1/ -cp ../python_models/identity_fp32/model.py ./models/identity_fp32/1/model.py -cp ../python_models/identity_fp32/config.pbtxt ./models/identity_fp32/config.pbtxt + current_num_pages=`get_shm_pages` + if [ $current_num_pages -ne $prev_num_pages ]; then + cat $SERVER_LOG + ls /dev/shm + echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. + Shared memory pages before starting triton equals to $prev_num_pages + and shared memory pages after starting triton equals to $current_num_pages \n***" + exit 1 + fi -shm_default_byte_size=$((1024*1024*4)) -SERVER_ARGS="$BASE_SERVER_ARGS --backend-config=python,shm-default-byte-size=$shm_default_byte_size" + # Test environment variable propagation + rm -rf models/ + mkdir -p models/model_env/1/ + cp ../python_models/model_env/model.py ./models/model_env/1/ + cp ../python_models/model_env/config.pbtxt ./models/model_env/ + + export MY_ENV="MY_ENV" + prev_num_pages=`get_shm_pages` + run_server + if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + echo -e "\n***\n*** Environment variable test failed \n***" + exit 1 + fi -run_server -if [ "$SERVER_PID" == "0" ]; then - cat $SERVER_LOG - echo -e "\n***\n*** Failed to start $SERVER\n***" - exit 1 -fi + kill $SERVER_PID + wait $SERVER_PID -for shm_page in `ls /dev/shm/`; do - if [[ $shm_page != triton_python_backend_shm* ]]; then - continue - fi - page_size=`ls -l /dev/shm/$shm_page 2>&1 | awk '{print $5}'` - if [ $page_size -ne $shm_default_byte_size ]; then - echo -e "Shared memory region size is not equal to -$shm_default_byte_size for page $shm_page. Region size is -$page_size." 
- RET=1 + current_num_pages=`get_shm_pages` + if [ $current_num_pages -ne $prev_num_pages ]; then + cat $CLIENT_LOG + ls /dev/shm + echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. + Shared memory pages before starting triton equals to $prev_num_pages + and shared memory pages after starting triton equals to $current_num_pages \n***" + exit 1 fi -done -kill $SERVER_PID -wait $SERVER_PID + rm -fr ./models + mkdir -p models/identity_fp32/1/ + cp ../python_models/identity_fp32/model.py ./models/identity_fp32/1/model.py + cp ../python_models/identity_fp32/config.pbtxt ./models/identity_fp32/config.pbtxt -# Test model getting killed during initialization -rm -fr ./models -mkdir -p models/init_exit/1/ -cp ../python_models/init_exit/model.py ./models/init_exit/1/model.py -cp ../python_models/init_exit/config.pbtxt ./models/init_exit/config.pbtxt + shm_default_byte_size=$((1024*1024*4)) + SERVER_ARGS="$BASE_SERVER_ARGS --backend-config=python,shm-default-byte-size=$shm_default_byte_size" -ERROR_MESSAGE="Stub process 'init_exit_0_0' is not healthy." + run_server + if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + exit 1 + fi + + for shm_page in `ls /dev/shm/`; do + if [[ $shm_page != triton_python_backend_shm* ]]; then + continue + fi + page_size=`ls -l /dev/shm/$shm_page 2>&1 | awk '{print $5}'` + if [ $page_size -ne $shm_default_byte_size ]; then + echo -e "Shared memory region size is not equal to + $shm_default_byte_size for page $shm_page. Region size is + $page_size." 
+ RET=1 + fi + done -prev_num_pages=`get_shm_pages` -run_server -if [ "$SERVER_PID" != "0" ]; then - echo -e "*** FAILED: unexpected success starting $SERVER" >> $CLIENT_LOG - RET=1 kill $SERVER_PID wait $SERVER_PID -else - if grep "$ERROR_MESSAGE" $SERVER_LOG; then - echo -e "Found \"$ERROR_MESSAGE\"" >> $CLIENT_LOG - else - echo $CLIENT_LOG - echo -e "Not found \"$ERROR_MESSAGE\"" >> $CLIENT_LOG + + # Test model getting killed during initialization + rm -fr ./models + mkdir -p models/init_exit/1/ + cp ../python_models/init_exit/model.py ./models/init_exit/1/model.py + cp ../python_models/init_exit/config.pbtxt ./models/init_exit/config.pbtxt + + ERROR_MESSAGE="Stub process 'init_exit_0_0' is not healthy." + + prev_num_pages=`get_shm_pages` + run_server + if [ "$SERVER_PID" != "0" ]; then + echo -e "*** FAILED: unexpected success starting $SERVER" >> $CLIENT_LOG RET=1 + kill $SERVER_PID + wait $SERVER_PID + else + if grep "$ERROR_MESSAGE" $SERVER_LOG; then + echo -e "Found \"$ERROR_MESSAGE\"" >> $CLIENT_LOG + else + echo $CLIENT_LOG + echo -e "Not found \"$ERROR_MESSAGE\"" >> $CLIENT_LOG + RET=1 + fi fi -fi -current_num_pages=`get_shm_pages` -if [ $current_num_pages -ne $prev_num_pages ]; then - cat $SERVER_LOG - ls /dev/shm - echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. -Shared memory pages before starting triton equals to $prev_num_pages -and shared memory pages after starting triton equals to $current_num_pages \n***" - exit 1 -fi + current_num_pages=`get_shm_pages` + if [ $current_num_pages -ne $prev_num_pages ]; then + cat $SERVER_LOG + ls /dev/shm + echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. 
+ Shared memory pages before starting triton equals to $prev_num_pages + and shared memory pages after starting triton equals to $current_num_pages \n***" + exit 1 + fi -# Disable env test for Jetson since cloud storage repos are not supported -# Disable ensemble, io and bls tests for Jetson since GPU Tensors are not supported -# Disable variants test for Jetson since already built without GPU Tensor support -# Disable decoupled test because it uses GPU tensors -if [ "$TEST_JETSON" == "0" ]; then - SUBTESTS="ensemble io bls decoupled variants" + # Disable ensemble, io and bls tests for Jetson since GPU Tensors are not supported + # Disable variants test for Jetson since already built without GPU Tensor support + # Disable decoupled test because it uses GPU tensors + if [ "$TEST_JETSON" == "0" ]; then + SUBTESTS="ensemble io bls decoupled variants" + for TEST in ${SUBTESTS}; do + # Run each subtest in a separate virtual environment to avoid conflicts + # between dependencies. + virtualenv --system-site-packages venv + source venv/bin/activate + + (cd ${TEST} && bash -ex test.sh) + if [ $? -ne 0 ]; then + echo "Subtest ${TEST} FAILED" + RET=1 + fi + + deactivate + rm -fr venv + done + fi + + SUBTESTS="lifecycle restart model_control examples argument_validation logging custom_metrics" for TEST in ${SUBTESTS}; do # Run each subtest in a separate virtual environment to avoid conflicts # between dependencies. @@ -388,40 +430,15 @@ if [ "$TEST_JETSON" == "0" ]; then source venv/bin/activate (cd ${TEST} && bash -ex test.sh) + if [ $? -ne 0 ]; then - echo "Subtest ${TEST} FAILED" - RET=1 + echo "Subtest ${TEST} FAILED" + RET=1 fi deactivate rm -fr venv done - - # In 'env' test we use miniconda for dependency management. No need to run - # the test in a virtual environment. - (cd env && bash -ex test.sh) - if [ $? 
-ne 0 ]; then - echo "Subtest env FAILED" - RET=1 - fi -fi - -SUBTESTS="lifecycle restart model_control examples argument_validation logging custom_metrics" -for TEST in ${SUBTESTS}; do - # Run each subtest in a separate virtual environment to avoid conflicts - # between dependencies. - virtualenv --system-site-packages venv - source venv/bin/activate - - (cd ${TEST} && bash -ex test.sh) - - if [ $? -ne 0 ]; then - echo "Subtest ${TEST} FAILED" - RET=1 - fi - - deactivate - rm -fr venv done if [ $RET -eq 0 ]; then From 2bb78a7371e2e3a46217cc5d486be65036d3be89 Mon Sep 17 00:00:00 2001 From: Katherine Yang Date: Fri, 28 Jul 2023 15:17:33 -0700 Subject: [PATCH 05/12] fix test --- qa/L0_backend_python/test.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/qa/L0_backend_python/test.sh b/qa/L0_backend_python/test.sh index 7725525c6d..ca76701b1d 100755 --- a/qa/L0_backend_python/test.sh +++ b/qa/L0_backend_python/test.sh @@ -149,9 +149,8 @@ PYTHON_ENV_VERSION="8 9 10 11" for PYTHON_ENV in $PYTHON_ENV_VERSION; do echo "python environment 3.${PYTHON_ENV}" # Set up environment and stub for each test - add-apt-repository ppa:deadsnakes/ppa - - apt-get update - apt-get install "python3.${PYTHON_ENV}" -y + add-apt-repository ppa:deadsnakes/ppa -y + apt-get update && apt-get -y install "python3.${PYTHON_ENV}" rm -f /usr/bin/python3 && \ ln -s "/usr/bin/python3.${PYTHON_ENV}" /usr/bin/python3 PYTHON_STUB_LOCATION=/opt/tritonserver/backend/python/3-${PYTHON_ENV}/triton_python_backend_stub From 72bafa12abb1852650bc6c29b32fd0f7896b77fc Mon Sep 17 00:00:00 2001 From: Katherine Yang Date: Tue, 1 Aug 2023 17:57:51 -0700 Subject: [PATCH 06/12] add input to test for python versions --- qa/L0_backend_python/test.sh | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/qa/L0_backend_python/test.sh b/qa/L0_backend_python/test.sh index ca76701b1d..41e958ea4a 100755 --- a/qa/L0_backend_python/test.sh +++ b/qa/L0_backend_python/test.sh @@ -44,6 
+44,7 @@ SERVER=${TRITON_DIR}/bin/tritonserver export BACKEND_DIR=${TRITON_DIR}/backends export TEST_JETSON=${TEST_JETSON:=0} export CUDA_VISIBLE_DEVICES=0 +PYTHON_ENV_VERSION=${PYTHON_ENV_VERSION:="8 9 10 11"} BASE_SERVER_ARGS="--model-repository=`pwd`/models --backend-directory=${BACKEND_DIR} --log-verbose=1" # Set the default byte size to 5MBs to avoid going out of shared memory. The @@ -145,16 +146,16 @@ if [ "$TEST_JETSON" == "0" ]; then fi fi -PYTHON_ENV_VERSION="8 9 10 11" -for PYTHON_ENV in $PYTHON_ENV_VERSION; do +for PYTHON_ENV in ${PYTHON_ENV_VERSION}; do echo "python environment 3.${PYTHON_ENV}" # Set up environment and stub for each test add-apt-repository ppa:deadsnakes/ppa -y apt-get update && apt-get -y install "python3.${PYTHON_ENV}" rm -f /usr/bin/python3 && \ ln -s "/usr/bin/python3.${PYTHON_ENV}" /usr/bin/python3 - PYTHON_STUB_LOCATION=/opt/tritonserver/backend/python/3-${PYTHON_ENV}/triton_python_backend_stub - cp ${PYTHON_STUB_LOCATION} /opt/tritonserver/backend/python/ + + PYTHON_STUB_LOCATION=/opt/tritonserver/backends/python/3-${PYTHON_ENV}/triton_python_backend_stub + cp ${PYTHON_STUB_LOCATION} /opt/tritonserver/backends/python/triton_python_backend_stub # Skip torch install on Jetson since it is already installed. 
if [ "$TEST_JETSON" == "0" ]; then From c7aa5f7961c73d736d5bdaddc3f71fe7e9cf157f Mon Sep 17 00:00:00 2001 From: Katherine Yang Date: Wed, 2 Aug 2023 19:13:33 -0700 Subject: [PATCH 07/12] fix test --- qa/L0_backend_python/env/test.sh | 8 ++++---- qa/L0_backend_python/test.sh | 4 +++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/qa/L0_backend_python/env/test.sh b/qa/L0_backend_python/env/test.sh index e049da2994..bc7ef921e2 100755 --- a/qa/L0_backend_python/env/test.sh +++ b/qa/L0_backend_python/env/test.sh @@ -174,7 +174,7 @@ cp ../../python_models/python_version/model.py ./models/python_3_8/1/ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_8 # copy the stub out to /opt/tritonserver/backends/python/3-8 mkdir -p /opt/tritonserver/backends/python/3-8 -cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-8 +cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-8/triton_python_backend_stub # Create a model with python 3.9 version # Successful execution of the Python model indicates that the environment has @@ -198,7 +198,7 @@ cp ../../python_models/python_version/model.py ./models/python_3_9/1/ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_9 # copy the stub out to /opt/tritonserver/backends/python/3-9 mkdir -p /opt/tritonserver/backends/python/3-9 -cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-9 +cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-9/triton_python_backend_stub # Create a model with python 3.11 version # Successful execution of the Python model indicates that the environment has @@ -226,11 +226,11 @@ cp ../../python_models/python_version/model.py ./models/python_3_11/1/ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_11 # copy the stub out to /opt/tritonserver/backends/python/3-11 mkdir -p 
/opt/tritonserver/backends/python/3-11 -cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-11 +cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-11/triton_python_backend_stub # copy the stub out to /opt/tritonserver/backends/python/3-10 mkdir -p /opt/tritonserver/backends/python/3-10 -cp /opt/tritonserver/backends/python/triton_python_backend_stub /opt/tritonserver/backends/python/3-10 +cp /opt/tritonserver/backends/python/triton_python_backend_stub /opt/tritonserver/backends/python/3-10/triton_python_backend_stub rm -rf ./miniconda diff --git a/qa/L0_backend_python/test.sh b/qa/L0_backend_python/test.sh index 41e958ea4a..dd3df89224 100755 --- a/qa/L0_backend_python/test.sh +++ b/qa/L0_backend_python/test.sh @@ -150,7 +150,9 @@ for PYTHON_ENV in ${PYTHON_ENV_VERSION}; do echo "python environment 3.${PYTHON_ENV}" # Set up environment and stub for each test add-apt-repository ppa:deadsnakes/ppa -y - apt-get update && apt-get -y install "python3.${PYTHON_ENV}" + apt-get update && apt-get -y install \ + "python3.${PYTHON_ENV}-dev" \ + "python3.${PYTHON_ENV}-distutils" rm -f /usr/bin/python3 && \ ln -s "/usr/bin/python3.${PYTHON_ENV}" /usr/bin/python3 From 48b44a3d20f4e88136a9c5ddef0e308806628e95 Mon Sep 17 00:00:00 2001 From: Katherine Yang Date: Wed, 2 Aug 2023 19:25:40 -0700 Subject: [PATCH 08/12] add conda deactivate everywhere --- qa/L0_backend_python/env/test.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/qa/L0_backend_python/env/test.sh b/qa/L0_backend_python/env/test.sh index bc7ef921e2..b0cb190bf9 100755 --- a/qa/L0_backend_python/env/test.sh +++ b/qa/L0_backend_python/env/test.sh @@ -108,6 +108,7 @@ cp python3.6.tar.gz models/python_3_6/python_3_6_environment.tar.gz echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}" >> config.pbtxt) cp ../../python_models/python_version/model.py ./models/python_3_6/1/ cp 
python_backend/builddir/triton_python_backend_stub ./models/python_3_6 +conda deactivate # Test conda env without custom Python backend stub This environment should # always use the default Python version shipped in the container. For Ubuntu 22.04 @@ -126,6 +127,7 @@ cp python3.10.tar.gz models/python_3_10/python_3_10_environment.tar.gz sed -i "s/^name:.*/name: \"python_3_10\"/" config.pbtxt && \ echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}" >> config.pbtxt) cp ../../python_models/python_version/model.py ./models/python_3_10/1/ +conda deactivate run_server if [ "$SERVER_PID" == "0" ]; then @@ -175,6 +177,7 @@ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_8 # copy the stub out to /opt/tritonserver/backends/python/3-8 mkdir -p /opt/tritonserver/backends/python/3-8 cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-8/triton_python_backend_stub +conda deactivate # Create a model with python 3.9 version # Successful execution of the Python model indicates that the environment has @@ -199,6 +202,7 @@ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_9 # copy the stub out to /opt/tritonserver/backends/python/3-9 mkdir -p /opt/tritonserver/backends/python/3-9 cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-9/triton_python_backend_stub +conda deactivate # Create a model with python 3.11 version # Successful execution of the Python model indicates that the environment has @@ -227,6 +231,7 @@ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_11 # copy the stub out to /opt/tritonserver/backends/python/3-11 mkdir -p /opt/tritonserver/backends/python/3-11 cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-11/triton_python_backend_stub +conda deactivate # copy the stub out to /opt/tritonserver/backends/python/3-10 mkdir -p 
/opt/tritonserver/backends/python/3-10 From d645a459973cd7b04f1f13e9423ee928ecde8358 Mon Sep 17 00:00:00 2001 From: Katherine Yang Date: Thu, 3 Aug 2023 15:47:42 -0700 Subject: [PATCH 09/12] move to non for loop, add numpy to python installation --- qa/L0_backend_python/env/test.sh | 245 ++++++++-------- qa/L0_backend_python/test.sh | 490 +++++++++++++++---------------- 2 files changed, 372 insertions(+), 363 deletions(-) diff --git a/qa/L0_backend_python/env/test.sh b/qa/L0_backend_python/env/test.sh index b0cb190bf9..b1db62ea78 100755 --- a/qa/L0_backend_python/env/test.sh +++ b/qa/L0_backend_python/env/test.sh @@ -42,6 +42,125 @@ rm -rf *.tar.gz install_build_deps install_conda +if [ ${PYTHON_ENV_VERSION} != "10" ]; then + # Test other python versions + conda update -n base -c defaults conda -y + rm $SERVER_LOG + # Create a model with python 3.8 version + # Successful execution of the Python model indicates that the environment has + # been setup correctly. + + create_conda_env "3.8" "python-3-8" + conda install -c conda-forge libstdcxx-ng=12 -y + conda install numpy=1.23.4 -y + conda install tensorflow=2.10.0 -y + PY38_VERSION_STRING="Python version is 3.8, NumPy version is 1.23.4, and Tensorflow version is 2.10.0" + + create_python_backend_stub + conda-pack -o python3.8.tar.gz + path_to_conda_pack="$PWD/python-3-8" + mkdir -p $path_to_conda_pack + tar -xzf python3.8.tar.gz -C $path_to_conda_pack + mkdir -p models/python_3_8/1/ + cp ../../python_models/python_version/config.pbtxt ./models/python_3_8 + (cd models/python_3_8 && \ + sed -i "s/^name:.*/name: \"python_3_8\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) + cp ../../python_models/python_version/model.py ./models/python_3_8/1/ + cp python_backend/builddir/triton_python_backend_stub ./models/python_3_8 + # copy the stub out to /opt/tritonserver/backends/python/3-8 + mkdir -p /opt/tritonserver/backends/python/3-8 + cp 
python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-8/triton_python_backend_stub + conda deactivate + + # Create a model with python 3.9 version + # Successful execution of the Python model indicates that the environment has + # been setup correctly. + create_conda_env "3.9" "python-3-9" + conda install -c conda-forge libstdcxx-ng=12 -y + conda install numpy=1.23.4 -y + conda install tensorflow=2.10.0 -y + PY39_VERSION_STRING="Python version is 3.9, NumPy version is 1.23.4, and Tensorflow version is 2.10.0" + create_python_backend_stub + conda-pack -o python3.9.tar.gz + path_to_conda_pack="$PWD/python-3-9" + mkdir -p $path_to_conda_pack + tar -xzf python3.9.tar.gz -C $path_to_conda_pack + mkdir -p models/python_3_9/1/ + cp ../../python_models/python_version/config.pbtxt ./models/python_3_9 + (cd models/python_3_9 && \ + sed -i "s/^name:.*/name: \"python_3_9\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) + cp ../../python_models/python_version/model.py ./models/python_3_9/1/ + cp python_backend/builddir/triton_python_backend_stub ./models/python_3_9 + # copy the stub out to /opt/tritonserver/backends/python/3-9 + mkdir -p /opt/tritonserver/backends/python/3-9 + cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-9/triton_python_backend_stub + conda deactivate + + # Create a model with python 3.11 version + # Successful execution of the Python model indicates that the environment has + # been setup correctly. 
+ create_conda_env "3.11" "python-3-11" + # tensorflow needs to be installed before numpy so pip does not mess up conda + # environment + pip install tensorflow==2.12.0 + conda install -c conda-forge libstdcxx-ng=12 -y + conda install numpy=1.23.5 -y + PY311_VERSION_STRING="Python version is 3.11, NumPy version is 1.23.5, and Tensorflow version is 2.12.0" + + create_python_backend_stub + conda-pack -o python3.11.tar.gz + path_to_conda_pack=`pwd`/python3.11.tar.gz + path_to_conda_pack="$PWD/python-3-11" + mkdir -p $path_to_conda_pack + tar -xzf python3.11.tar.gz -C $path_to_conda_pack + mkdir -p models/python_3_11/1/ + cp ../../python_models/python_version/config.pbtxt ./models/python_3_11 + (cd models/python_3_11 && \ + sed -i "s/^name:.*/name: \"python_3_11\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) + cp ../../python_models/python_version/model.py ./models/python_3_11/1/ + cp python_backend/builddir/triton_python_backend_stub ./models/python_3_11 + # copy the stub out to /opt/tritonserver/backends/python/3-11 + mkdir -p /opt/tritonserver/backends/python/3-11 + cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-11/triton_python_backend_stub + conda deactivate + + # copy the stub out to /opt/tritonserver/backends/python/3-10 + mkdir -p /opt/tritonserver/backends/python/3-10 + cp /opt/tritonserver/backends/python/triton_python_backend_stub /opt/tritonserver/backends/python/3-10/triton_python_backend_stub + + rm -rf ./miniconda + + run_server + if [ "$SERVER_PID" == "0" ]; then + echo -e "\n***\n*** Failed to start $SERVER\n***" + cat $SERVER_LOG + exit 1 + fi + + kill $SERVER_PID + wait $SERVER_PID + + set +e + for EXPECTED_VERSION_STRING in "$PY38_VERSION_STRING" "$PY39_VERSION_STRING" "$PY311_VERSION_STRING"; do + grep "$EXPECTED_VERSION_STRING" $SERVER_LOG + if [ $? 
-ne 0 ]; then + cat $SERVER_LOG + echo -e "\n***\n*** $EXPECTED_VERSION_STRING was not found in Triton logs. \n***" + RET=1 + fi + done + if [ $RET -eq 0 ]; then + echo -e "\n***\n*** Finished creating environments.\n***" + else + cat $SERVER_LOG + echo -e "\n***\n*** Failed to create python environments.\n***" + fi + + exit $RET +fi # Tensorflow 2.1.0 only works with Python 3.4 - 3.7. Successful execution of # the Python model indicates that the environment has been setup correctly. # Create a model with python 3.7 version @@ -57,8 +176,8 @@ path_to_conda_pack=`pwd`/python3.7.tar.gz mkdir -p models/python_3_7/1/ cp ../../python_models/python_version/config.pbtxt ./models/python_3_7 (cd models/python_3_7 && \ - sed -i "s/^name:.*/name: \"python_3_7\"/" config.pbtxt && \ - echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) + sed -i "s/^name:.*/name: \"python_3_7\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) cp ../../python_models/python_version/model.py ./models/python_3_7/1/ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_7 conda deactivate @@ -79,8 +198,8 @@ create_python_backend_stub mkdir -p models/python_3_7_1/1/ cp ../../python_models/python_version/config.pbtxt ./models/python_3_7_1 (cd models/python_3_7_1 && \ - sed -i "s/^name:.*/name: \"python_3_7_1\"/" config.pbtxt && \ - echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) + sed -i "s/^name:.*/name: \"python_3_7_1\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) cp ../../python_models/python_version/model.py ./models/python_3_7_1/1/ # Copy activate script to folder cp $path_to_conda_pack/lib/python3.7/site-packages/conda_pack/scripts/posix/activate $path_to_conda_pack/bin/. 
@@ -104,8 +223,8 @@ mkdir -p models/python_3_6/1/ cp ../../python_models/python_version/config.pbtxt ./models/python_3_6 cp python3.6.tar.gz models/python_3_6/python_3_6_environment.tar.gz (cd models/python_3_6 && \ - sed -i "s/^name:.*/name: \"python_3_6\"/" config.pbtxt && \ - echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}" >> config.pbtxt) + sed -i "s/^name:.*/name: \"python_3_6\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}" >> config.pbtxt) cp ../../python_models/python_version/model.py ./models/python_3_6/1/ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_6 conda deactivate @@ -124,8 +243,8 @@ mkdir -p models/python_3_10/1/ cp ../../python_models/python_version/config.pbtxt ./models/python_3_10 cp python3.10.tar.gz models/python_3_10/python_3_10_environment.tar.gz (cd models/python_3_10 && \ - sed -i "s/^name:.*/name: \"python_3_10\"/" config.pbtxt && \ - echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}" >> config.pbtxt) + sed -i "s/^name:.*/name: \"python_3_10\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}" >> config.pbtxt) cp ../../python_models/python_version/model.py ./models/python_3_10/1/ conda deactivate @@ -149,116 +268,6 @@ for EXPECTED_VERSION_STRING in "$PY36_VERSION_STRING" "$PY37_VERSION_STRING" "$P fi done -# Test other python versions -conda update -n base -c defaults conda -y -rm $SERVER_LOG -# Create a model with python 3.8 version -# Successful execution of the Python model indicates that the environment has -# been setup correctly. 
- -create_conda_env "3.8" "python-3-8" -conda install -c conda-forge libstdcxx-ng=12 -y -conda install numpy=1.23.4 -y -conda install tensorflow=2.10.0 -y -PY38_VERSION_STRING="Python version is 3.8, NumPy version is 1.23.4, and Tensorflow version is 2.10.0" - -create_python_backend_stub -conda-pack -o python3.8.tar.gz -path_to_conda_pack="$PWD/python-3-8" -mkdir -p $path_to_conda_pack -tar -xzf python3.8.tar.gz -C $path_to_conda_pack -mkdir -p models/python_3_8/1/ -cp ../../python_models/python_version/config.pbtxt ./models/python_3_8 -(cd models/python_3_8 && \ - sed -i "s/^name:.*/name: \"python_3_8\"/" config.pbtxt && \ - echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) -cp ../../python_models/python_version/model.py ./models/python_3_8/1/ -cp python_backend/builddir/triton_python_backend_stub ./models/python_3_8 -# copy the stub out to /opt/tritonserver/backends/python/3-8 -mkdir -p /opt/tritonserver/backends/python/3-8 -cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-8/triton_python_backend_stub -conda deactivate - -# Create a model with python 3.9 version -# Successful execution of the Python model indicates that the environment has -# been setup correctly. 
-create_conda_env "3.9" "python-3-9" -conda install -c conda-forge libstdcxx-ng=12 -y -conda install numpy=1.23.4 -y -conda install tensorflow=2.10.0 -y -PY39_VERSION_STRING="Python version is 3.9, NumPy version is 1.23.4, and Tensorflow version is 2.10.0" -create_python_backend_stub -conda-pack -o python3.9.tar.gz -path_to_conda_pack="$PWD/python-3-9" -mkdir -p $path_to_conda_pack -tar -xzf python3.9.tar.gz -C $path_to_conda_pack -mkdir -p models/python_3_9/1/ -cp ../../python_models/python_version/config.pbtxt ./models/python_3_9 -(cd models/python_3_9 && \ - sed -i "s/^name:.*/name: \"python_3_9\"/" config.pbtxt && \ - echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) -cp ../../python_models/python_version/model.py ./models/python_3_9/1/ -cp python_backend/builddir/triton_python_backend_stub ./models/python_3_9 -# copy the stub out to /opt/tritonserver/backends/python/3-9 -mkdir -p /opt/tritonserver/backends/python/3-9 -cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-9/triton_python_backend_stub -conda deactivate - -# Create a model with python 3.11 version -# Successful execution of the Python model indicates that the environment has -# been setup correctly. 
-create_conda_env "3.11" "python-3-11" -# tensorflow needs to be installed before numpy so pip does not mess up conda -# environment -pip install tensorflow==2.12.0 -conda install -c conda-forge libstdcxx-ng=12 -y -conda install numpy=1.23.5 -y -PY311_VERSION_STRING="Python version is 3.11, NumPy version is 1.23.5, and Tensorflow version is 2.12.0" - -create_python_backend_stub -conda-pack -o python3.11.tar.gz -path_to_conda_pack=`pwd`/python3.11.tar.gz -path_to_conda_pack="$PWD/python-3-11" -mkdir -p $path_to_conda_pack -tar -xzf python3.11.tar.gz -C $path_to_conda_pack -mkdir -p models/python_3_11/1/ -cp ../../python_models/python_version/config.pbtxt ./models/python_3_11 -(cd models/python_3_11 && \ - sed -i "s/^name:.*/name: \"python_3_11\"/" config.pbtxt && \ - echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) -cp ../../python_models/python_version/model.py ./models/python_3_11/1/ -cp python_backend/builddir/triton_python_backend_stub ./models/python_3_11 -# copy the stub out to /opt/tritonserver/backends/python/3-11 -mkdir -p /opt/tritonserver/backends/python/3-11 -cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-11/triton_python_backend_stub -conda deactivate - -# copy the stub out to /opt/tritonserver/backends/python/3-10 -mkdir -p /opt/tritonserver/backends/python/3-10 -cp /opt/tritonserver/backends/python/triton_python_backend_stub /opt/tritonserver/backends/python/3-10/triton_python_backend_stub - -rm -rf ./miniconda - -run_server -if [ "$SERVER_PID" == "0" ]; then - echo -e "\n***\n*** Failed to start $SERVER\n***" - cat $SERVER_LOG - exit 1 -fi - -kill $SERVER_PID -wait $SERVER_PID - -set +e -for EXPECTED_VERSION_STRING in "$PY38_VERSION_STRING" "$PY39_VERSION_STRING" "$PY311_VERSION_STRING"; do - grep "$EXPECTED_VERSION_STRING" $SERVER_LOG - if [ $? 
-ne 0 ]; then - cat $SERVER_LOG - echo -e "\n***\n*** $EXPECTED_VERSION_STRING was not found in Triton logs. \n***" - RET=1 - fi -done - # Test default (non set) locale in python stub processes # NOTE: In certain pybind versions, the locale settings may not be propagated from parent to # stub processes correctly. See https://github.com/triton-inference-server/python_backend/pull/260. diff --git a/qa/L0_backend_python/test.sh b/qa/L0_backend_python/test.sh index dd3df89224..36bcacc36a 100755 --- a/qa/L0_backend_python/test.sh +++ b/qa/L0_backend_python/test.sh @@ -44,7 +44,7 @@ SERVER=${TRITON_DIR}/bin/tritonserver export BACKEND_DIR=${TRITON_DIR}/backends export TEST_JETSON=${TEST_JETSON:=0} export CUDA_VISIBLE_DEVICES=0 -PYTHON_ENV_VERSION=${PYTHON_ENV_VERSION:="8 9 10 11"} +export PYTHON_ENV_VERSION=${PYTHON_ENV_VERSION:="10"} BASE_SERVER_ARGS="--model-repository=`pwd`/models --backend-directory=${BACKEND_DIR} --log-verbose=1" # Set the default byte size to 5MBs to avoid going out of shared memory. The @@ -146,285 +146,267 @@ if [ "$TEST_JETSON" == "0" ]; then fi fi -for PYTHON_ENV in ${PYTHON_ENV_VERSION}; do - echo "python environment 3.${PYTHON_ENV}" - # Set up environment and stub for each test - add-apt-repository ppa:deadsnakes/ppa -y - apt-get update && apt-get -y install \ - "python3.${PYTHON_ENV}-dev" \ - "python3.${PYTHON_ENV}-distutils" - rm -f /usr/bin/python3 && \ - ln -s "/usr/bin/python3.${PYTHON_ENV}" /usr/bin/python3 - - PYTHON_STUB_LOCATION=/opt/tritonserver/backends/python/3-${PYTHON_ENV}/triton_python_backend_stub - cp ${PYTHON_STUB_LOCATION} /opt/tritonserver/backends/python/triton_python_backend_stub - - # Skip torch install on Jetson since it is already installed. 
- if [ "$TEST_JETSON" == "0" ]; then - pip3 install torch==1.13.0+cpu -f https://download.pytorch.org/whl/torch_stable.html - else - # GPU tensor tests are disabled on jetson - EXPECTED_NUM_TESTS=9 - fi +echo "python environment 3.${PYTHON_ENV_VERSION}" +# Set up environment and stub for each test +add-apt-repository ppa:deadsnakes/ppa -y +apt-get update && apt-get -y install \ + "python3.${PYTHON_ENV_VERSION}-dev" \ + "python3.${PYTHON_ENV_VERSION}-distutils" \ + numpy +rm -f /usr/bin/python3 && \ +ln -s "/usr/bin/python3.${PYTHON_ENV_VERSION}" /usr/bin/python3 - prev_num_pages=`get_shm_pages` - run_server - if [ "$SERVER_PID" == "0" ]; then - cat $SERVER_LOG - echo -e "\n***\n*** Failed to start $SERVER\n***" - exit 1 - fi +PYTHON_STUB_LOCATION=/opt/tritonserver/backends/python/3-${PYTHON_ENV_VERSION}/triton_python_backend_stub +cp ${PYTHON_STUB_LOCATION} /opt/tritonserver/backends/python/triton_python_backend_stub + +# Skip torch install on Jetson since it is already installed. +if [ "$TEST_JETSON" == "0" ]; then +pip3 install torch==1.13.0+cpu -f https://download.pytorch.org/whl/torch_stable.html +else +# GPU tensor tests are disabled on jetson +EXPECTED_NUM_TESTS=9 +fi - set +e - python3 $CLIENT_PY >> $CLIENT_LOG 2>&1 +prev_num_pages=`get_shm_pages` +run_server +if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + exit 1 +fi + +set +e +python3 $CLIENT_PY >> $CLIENT_LOG 2>&1 +if [ $? -ne 0 ]; then + cat $CLIENT_LOG + RET=1 +else + check_test_results $TEST_RESULT_FILE $EXPECTED_NUM_TESTS if [ $? -ne 0 ]; then cat $CLIENT_LOG + echo -e "\n***\n*** Test Result Verification Failed\n***" RET=1 - else - check_test_results $TEST_RESULT_FILE $EXPECTED_NUM_TESTS - if [ $? 
-ne 0 ]; then - cat $CLIENT_LOG - echo -e "\n***\n*** Test Result Verification Failed\n***" - RET=1 - fi fi - set -e +fi +set -e + +kill $SERVER_PID +wait $SERVER_PID + +current_num_pages=`get_shm_pages` +if [ $current_num_pages -ne $prev_num_pages ]; then + ls /dev/shm + cat $CLIENT_LOG + echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. +Shared memory pages before starting triton equals to $prev_num_pages +and shared memory pages after starting triton equals to $current_num_pages \n***" + RET=1 +fi - kill $SERVER_PID - wait $SERVER_PID +prev_num_pages=`get_shm_pages` +# Triton non-graceful exit +run_server +if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + exit 1 +fi - current_num_pages=`get_shm_pages` - if [ $current_num_pages -ne $prev_num_pages ]; then - ls /dev/shm +sleep 5 + +triton_procs=`pgrep --parent $SERVER_PID` + +set +e + +# Trigger non-graceful termination of Triton +kill -9 $SERVER_PID + +# Wait 10 seconds so that Python stub can detect non-graceful exit +sleep 10 + +for triton_proc in $triton_procs; do + kill -0 $triton_proc > /dev/null 2>&1 + if [ $? -eq 0 ]; then cat $CLIENT_LOG - echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. 
- Shared memory pages before starting triton equals to $prev_num_pages - and shared memory pages after starting triton equals to $current_num_pages \n***" + echo -e "\n***\n*** Python backend non-graceful exit test failed \n***" RET=1 + break fi +done +set -e - prev_num_pages=`get_shm_pages` - # Triton non-graceful exit - run_server - if [ "$SERVER_PID" == "0" ]; then - cat $SERVER_LOG - echo -e "\n***\n*** Failed to start $SERVER\n***" - exit 1 - fi +# +# Test KIND_GPU +# Disable env test for Jetson since GPU Tensors are not supported +if [ "$TEST_JETSON" == "0" ]; then +rm -rf models/ +mkdir -p models/add_sub_gpu/1/ +cp ../python_models/add_sub/model.py ./models/add_sub_gpu/1/ +cp ../python_models/add_sub_gpu/config.pbtxt ./models/add_sub_gpu/ + +prev_num_pages=`get_shm_pages` +run_server +if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + exit 1 +fi - sleep 5 +if [ $? -ne 0 ]; then + cat $SERVER_LOG + echo -e "\n***\n*** KIND_GPU model test failed \n***" + RET=1 +fi - triton_procs=`pgrep --parent $SERVER_PID` +kill $SERVER_PID +wait $SERVER_PID - set +e +current_num_pages=`get_shm_pages` +if [ $current_num_pages -ne $prev_num_pages ]; then + cat $CLIENT_LOG + ls /dev/shm + echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. 
+Shared memory pages before starting triton equals to $prev_num_pages +and shared memory pages after starting triton equals to $current_num_pages \n***" + exit 1 +fi +fi - # Trigger non-graceful termination of Triton - kill -9 $SERVER_PID +# Test Multi file models +rm -rf models/ +mkdir -p models/multi_file/1/ +cp ../python_models/multi_file/*.py ./models/multi_file/1/ +cp ../python_models/identity_fp32/config.pbtxt ./models/multi_file/ +(cd models/multi_file && \ + sed -i "s/^name:.*/name: \"multi_file\"/" config.pbtxt) + +prev_num_pages=`get_shm_pages` +run_server +if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + exit 1 +fi - # Wait 10 seconds so that Python stub can detect non-graceful exit - sleep 10 +if [ $? -ne 0 ]; then + cat $SERVER_LOG + echo -e "\n***\n*** multi-file model test failed \n***" + RET=1 +fi - for triton_proc in $triton_procs; do - kill -0 $triton_proc > /dev/null 2>&1 - if [ $? -eq 0 ]; then - cat $CLIENT_LOG - echo -e "\n***\n*** Python backend non-graceful exit test failed \n***" - RET=1 - break - fi - done - set -e - - # - # Test KIND_GPU - # Disable env test for Jetson since GPU Tensors are not supported - if [ "$TEST_JETSON" == "0" ]; then - rm -rf models/ - mkdir -p models/add_sub_gpu/1/ - cp ../python_models/add_sub/model.py ./models/add_sub_gpu/1/ - cp ../python_models/add_sub_gpu/config.pbtxt ./models/add_sub_gpu/ - - prev_num_pages=`get_shm_pages` - run_server - if [ "$SERVER_PID" == "0" ]; then - cat $SERVER_LOG - echo -e "\n***\n*** Failed to start $SERVER\n***" - exit 1 - fi +kill $SERVER_PID +wait $SERVER_PID - if [ $? -ne 0 ]; then - cat $SERVER_LOG - echo -e "\n***\n*** KIND_GPU model test failed \n***" - RET=1 - fi +current_num_pages=`get_shm_pages` +if [ $current_num_pages -ne $prev_num_pages ]; then + cat $SERVER_LOG + ls /dev/shm + echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. 
+Shared memory pages before starting triton equals to $prev_num_pages +and shared memory pages after starting triton equals to $current_num_pages \n***" + exit 1 +fi - kill $SERVER_PID - wait $SERVER_PID +# Test environment variable propagation +rm -rf models/ +mkdir -p models/model_env/1/ +cp ../python_models/model_env/model.py ./models/model_env/1/ +cp ../python_models/model_env/config.pbtxt ./models/model_env/ + +export MY_ENV="MY_ENV" +prev_num_pages=`get_shm_pages` +run_server +if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + echo -e "\n***\n*** Environment variable test failed \n***" + exit 1 +fi - current_num_pages=`get_shm_pages` - if [ $current_num_pages -ne $prev_num_pages ]; then - cat $CLIENT_LOG - ls /dev/shm - echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. - Shared memory pages before starting triton equals to $prev_num_pages - and shared memory pages after starting triton equals to $current_num_pages \n***" - exit 1 - fi - fi +kill $SERVER_PID +wait $SERVER_PID - # Test Multi file models - rm -rf models/ - mkdir -p models/multi_file/1/ - cp ../python_models/multi_file/*.py ./models/multi_file/1/ - cp ../python_models/identity_fp32/config.pbtxt ./models/multi_file/ - (cd models/multi_file && \ - sed -i "s/^name:.*/name: \"multi_file\"/" config.pbtxt) - - prev_num_pages=`get_shm_pages` - run_server - if [ "$SERVER_PID" == "0" ]; then - cat $SERVER_LOG - echo -e "\n***\n*** Failed to start $SERVER\n***" - exit 1 - fi +current_num_pages=`get_shm_pages` +if [ $current_num_pages -ne $prev_num_pages ]; then + cat $CLIENT_LOG + ls /dev/shm + echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. +Shared memory pages before starting triton equals to $prev_num_pages +and shared memory pages after starting triton equals to $current_num_pages \n***" + exit 1 +fi - if [ $? 
-ne 0 ]; then - cat $SERVER_LOG - echo -e "\n***\n*** multi-file model test failed \n***" - RET=1 - fi +rm -fr ./models +mkdir -p models/identity_fp32/1/ +cp ../python_models/identity_fp32/model.py ./models/identity_fp32/1/model.py +cp ../python_models/identity_fp32/config.pbtxt ./models/identity_fp32/config.pbtxt - kill $SERVER_PID - wait $SERVER_PID +shm_default_byte_size=$((1024*1024*4)) +SERVER_ARGS="$BASE_SERVER_ARGS --backend-config=python,shm-default-byte-size=$shm_default_byte_size" - current_num_pages=`get_shm_pages` - if [ $current_num_pages -ne $prev_num_pages ]; then - cat $SERVER_LOG - ls /dev/shm - echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. - Shared memory pages before starting triton equals to $prev_num_pages - and shared memory pages after starting triton equals to $current_num_pages \n***" - exit 1 - fi +run_server +if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + exit 1 +fi - # Test environment variable propagation - rm -rf models/ - mkdir -p models/model_env/1/ - cp ../python_models/model_env/model.py ./models/model_env/1/ - cp ../python_models/model_env/config.pbtxt ./models/model_env/ - - export MY_ENV="MY_ENV" - prev_num_pages=`get_shm_pages` - run_server - if [ "$SERVER_PID" == "0" ]; then - cat $SERVER_LOG - echo -e "\n***\n*** Failed to start $SERVER\n***" - echo -e "\n***\n*** Environment variable test failed \n***" - exit 1 +for shm_page in `ls /dev/shm/`; do + if [[ $shm_page != triton_python_backend_shm* ]]; then + continue fi - - kill $SERVER_PID - wait $SERVER_PID - - current_num_pages=`get_shm_pages` - if [ $current_num_pages -ne $prev_num_pages ]; then - cat $CLIENT_LOG - ls /dev/shm - echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. 
- Shared memory pages before starting triton equals to $prev_num_pages - and shared memory pages after starting triton equals to $current_num_pages \n***" - exit 1 + page_size=`ls -l /dev/shm/$shm_page 2>&1 | awk '{print $5}'` + if [ $page_size -ne $shm_default_byte_size ]; then + echo -e "Shared memory region size is not equal to +$shm_default_byte_size for page $shm_page. Region size is +$page_size." + RET=1 fi +done - rm -fr ./models - mkdir -p models/identity_fp32/1/ - cp ../python_models/identity_fp32/model.py ./models/identity_fp32/1/model.py - cp ../python_models/identity_fp32/config.pbtxt ./models/identity_fp32/config.pbtxt - - shm_default_byte_size=$((1024*1024*4)) - SERVER_ARGS="$BASE_SERVER_ARGS --backend-config=python,shm-default-byte-size=$shm_default_byte_size" +kill $SERVER_PID +wait $SERVER_PID - run_server - if [ "$SERVER_PID" == "0" ]; then - cat $SERVER_LOG - echo -e "\n***\n*** Failed to start $SERVER\n***" - exit 1 - fi +# Test model getting killed during initialization +rm -fr ./models +mkdir -p models/init_exit/1/ +cp ../python_models/init_exit/model.py ./models/init_exit/1/model.py +cp ../python_models/init_exit/config.pbtxt ./models/init_exit/config.pbtxt - for shm_page in `ls /dev/shm/`; do - if [[ $shm_page != triton_python_backend_shm* ]]; then - continue - fi - page_size=`ls -l /dev/shm/$shm_page 2>&1 | awk '{print $5}'` - if [ $page_size -ne $shm_default_byte_size ]; then - echo -e "Shared memory region size is not equal to - $shm_default_byte_size for page $shm_page. Region size is - $page_size." - RET=1 - fi - done +ERROR_MESSAGE="Stub process 'init_exit_0_0' is not healthy." 
+prev_num_pages=`get_shm_pages` +run_server +if [ "$SERVER_PID" != "0" ]; then + echo -e "*** FAILED: unexpected success starting $SERVER" >> $CLIENT_LOG + RET=1 kill $SERVER_PID wait $SERVER_PID - - # Test model getting killed during initialization - rm -fr ./models - mkdir -p models/init_exit/1/ - cp ../python_models/init_exit/model.py ./models/init_exit/1/model.py - cp ../python_models/init_exit/config.pbtxt ./models/init_exit/config.pbtxt - - ERROR_MESSAGE="Stub process 'init_exit_0_0' is not healthy." - - prev_num_pages=`get_shm_pages` - run_server - if [ "$SERVER_PID" != "0" ]; then - echo -e "*** FAILED: unexpected success starting $SERVER" >> $CLIENT_LOG - RET=1 - kill $SERVER_PID - wait $SERVER_PID +else + if grep "$ERROR_MESSAGE" $SERVER_LOG; then + echo -e "Found \"$ERROR_MESSAGE\"" >> $CLIENT_LOG else - if grep "$ERROR_MESSAGE" $SERVER_LOG; then - echo -e "Found \"$ERROR_MESSAGE\"" >> $CLIENT_LOG - else - echo $CLIENT_LOG - echo -e "Not found \"$ERROR_MESSAGE\"" >> $CLIENT_LOG - RET=1 - fi - fi - - current_num_pages=`get_shm_pages` - if [ $current_num_pages -ne $prev_num_pages ]; then - cat $SERVER_LOG - ls /dev/shm - echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. - Shared memory pages before starting triton equals to $prev_num_pages - and shared memory pages after starting triton equals to $current_num_pages \n***" - exit 1 + echo $CLIENT_LOG + echo -e "Not found \"$ERROR_MESSAGE\"" >> $CLIENT_LOG + RET=1 fi +fi - # Disable ensemble, io and bls tests for Jetson since GPU Tensors are not supported - # Disable variants test for Jetson since already built without GPU Tensor support - # Disable decoupled test because it uses GPU tensors - if [ "$TEST_JETSON" == "0" ]; then - SUBTESTS="ensemble io bls decoupled variants" - for TEST in ${SUBTESTS}; do - # Run each subtest in a separate virtual environment to avoid conflicts - # between dependencies. 
- virtualenv --system-site-packages venv - source venv/bin/activate - - (cd ${TEST} && bash -ex test.sh) - if [ $? -ne 0 ]; then - echo "Subtest ${TEST} FAILED" - RET=1 - fi - - deactivate - rm -fr venv - done - fi +current_num_pages=`get_shm_pages` +if [ $current_num_pages -ne $prev_num_pages ]; then + cat $SERVER_LOG + ls /dev/shm + echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. +Shared memory pages before starting triton equals to $prev_num_pages +and shared memory pages after starting triton equals to $current_num_pages \n***" + exit 1 +fi - SUBTESTS="lifecycle restart model_control examples argument_validation logging custom_metrics" +# Disable ensemble, io and bls tests for Jetson since GPU Tensors are not supported +# Disable variants test for Jetson since already built without GPU Tensor support +# Disable decoupled test because it uses GPU tensors +if [ "$TEST_JETSON" == "0" ]; then + SUBTESTS="ensemble io bls decoupled variants" for TEST in ${SUBTESTS}; do # Run each subtest in a separate virtual environment to avoid conflicts # between dependencies. @@ -432,17 +414,35 @@ for PYTHON_ENV in ${PYTHON_ENV_VERSION}; do source venv/bin/activate (cd ${TEST} && bash -ex test.sh) - if [ $? -ne 0 ]; then - echo "Subtest ${TEST} FAILED" - RET=1 + echo "Subtest ${TEST} FAILED" + RET=1 fi deactivate rm -fr venv done +fi + +SUBTESTS="lifecycle restart model_control examples argument_validation logging custom_metrics" +for TEST in ${SUBTESTS}; do + # Run each subtest in a separate virtual environment to avoid conflicts + # between dependencies. + virtualenv --system-site-packages venv + source venv/bin/activate + + (cd ${TEST} && bash -ex test.sh) + + if [ $? 
-ne 0 ]; then + echo "Subtest ${TEST} FAILED" + RET=1 + fi + + deactivate + rm -fr venv done + if [ $RET -eq 0 ]; then echo -e "\n***\n*** Test Passed\n***" else From 36339ad5546cdb20424d15153b33f46e4646fe54 Mon Sep 17 00:00:00 2001 From: Katherine Yang Date: Thu, 3 Aug 2023 19:17:53 -0700 Subject: [PATCH 10/12] add enviroment specific installation of shm --- qa/L0_backend_python/env/test.sh | 136 +------------- .../setup_python_enviroment.sh | 172 ++++++++++++++++++ qa/L0_backend_python/test.sh | 110 +++++------ 3 files changed, 229 insertions(+), 189 deletions(-) create mode 100755 qa/L0_backend_python/setup_python_enviroment.sh diff --git a/qa/L0_backend_python/env/test.sh b/qa/L0_backend_python/env/test.sh index b1db62ea78..aeba2691cf 100755 --- a/qa/L0_backend_python/env/test.sh +++ b/qa/L0_backend_python/env/test.sh @@ -42,125 +42,6 @@ rm -rf *.tar.gz install_build_deps install_conda -if [ ${PYTHON_ENV_VERSION} != "10" ]; then - # Test other python versions - conda update -n base -c defaults conda -y - rm $SERVER_LOG - # Create a model with python 3.8 version - # Successful execution of the Python model indicates that the environment has - # been setup correctly. 
- - create_conda_env "3.8" "python-3-8" - conda install -c conda-forge libstdcxx-ng=12 -y - conda install numpy=1.23.4 -y - conda install tensorflow=2.10.0 -y - PY38_VERSION_STRING="Python version is 3.8, NumPy version is 1.23.4, and Tensorflow version is 2.10.0" - - create_python_backend_stub - conda-pack -o python3.8.tar.gz - path_to_conda_pack="$PWD/python-3-8" - mkdir -p $path_to_conda_pack - tar -xzf python3.8.tar.gz -C $path_to_conda_pack - mkdir -p models/python_3_8/1/ - cp ../../python_models/python_version/config.pbtxt ./models/python_3_8 - (cd models/python_3_8 && \ - sed -i "s/^name:.*/name: \"python_3_8\"/" config.pbtxt && \ - echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) - cp ../../python_models/python_version/model.py ./models/python_3_8/1/ - cp python_backend/builddir/triton_python_backend_stub ./models/python_3_8 - # copy the stub out to /opt/tritonserver/backends/python/3-8 - mkdir -p /opt/tritonserver/backends/python/3-8 - cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-8/triton_python_backend_stub - conda deactivate - - # Create a model with python 3.9 version - # Successful execution of the Python model indicates that the environment has - # been setup correctly. 
- create_conda_env "3.9" "python-3-9" - conda install -c conda-forge libstdcxx-ng=12 -y - conda install numpy=1.23.4 -y - conda install tensorflow=2.10.0 -y - PY39_VERSION_STRING="Python version is 3.9, NumPy version is 1.23.4, and Tensorflow version is 2.10.0" - create_python_backend_stub - conda-pack -o python3.9.tar.gz - path_to_conda_pack="$PWD/python-3-9" - mkdir -p $path_to_conda_pack - tar -xzf python3.9.tar.gz -C $path_to_conda_pack - mkdir -p models/python_3_9/1/ - cp ../../python_models/python_version/config.pbtxt ./models/python_3_9 - (cd models/python_3_9 && \ - sed -i "s/^name:.*/name: \"python_3_9\"/" config.pbtxt && \ - echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) - cp ../../python_models/python_version/model.py ./models/python_3_9/1/ - cp python_backend/builddir/triton_python_backend_stub ./models/python_3_9 - # copy the stub out to /opt/tritonserver/backends/python/3-9 - mkdir -p /opt/tritonserver/backends/python/3-9 - cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-9/triton_python_backend_stub - conda deactivate - - # Create a model with python 3.11 version - # Successful execution of the Python model indicates that the environment has - # been setup correctly. 
- create_conda_env "3.11" "python-3-11" - # tensorflow needs to be installed before numpy so pip does not mess up conda - # environment - pip install tensorflow==2.12.0 - conda install -c conda-forge libstdcxx-ng=12 -y - conda install numpy=1.23.5 -y - PY311_VERSION_STRING="Python version is 3.11, NumPy version is 1.23.5, and Tensorflow version is 2.12.0" - - create_python_backend_stub - conda-pack -o python3.11.tar.gz - path_to_conda_pack=`pwd`/python3.11.tar.gz - path_to_conda_pack="$PWD/python-3-11" - mkdir -p $path_to_conda_pack - tar -xzf python3.11.tar.gz -C $path_to_conda_pack - mkdir -p models/python_3_11/1/ - cp ../../python_models/python_version/config.pbtxt ./models/python_3_11 - (cd models/python_3_11 && \ - sed -i "s/^name:.*/name: \"python_3_11\"/" config.pbtxt && \ - echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) - cp ../../python_models/python_version/model.py ./models/python_3_11/1/ - cp python_backend/builddir/triton_python_backend_stub ./models/python_3_11 - # copy the stub out to /opt/tritonserver/backends/python/3-11 - mkdir -p /opt/tritonserver/backends/python/3-11 - cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/3-11/triton_python_backend_stub - conda deactivate - - # copy the stub out to /opt/tritonserver/backends/python/3-10 - mkdir -p /opt/tritonserver/backends/python/3-10 - cp /opt/tritonserver/backends/python/triton_python_backend_stub /opt/tritonserver/backends/python/3-10/triton_python_backend_stub - - rm -rf ./miniconda - - run_server - if [ "$SERVER_PID" == "0" ]; then - echo -e "\n***\n*** Failed to start $SERVER\n***" - cat $SERVER_LOG - exit 1 - fi - - kill $SERVER_PID - wait $SERVER_PID - - set +e - for EXPECTED_VERSION_STRING in "$PY38_VERSION_STRING" "$PY39_VERSION_STRING" "$PY311_VERSION_STRING"; do - grep "$EXPECTED_VERSION_STRING" $SERVER_LOG - if [ $? 
-ne 0 ]; then - cat $SERVER_LOG - echo -e "\n***\n*** $EXPECTED_VERSION_STRING was not found in Triton logs. \n***" - RET=1 - fi - done - if [ $RET -eq 0 ]; then - echo -e "\n***\n*** Finished creating environments.\n***" - else - cat $SERVER_LOG - echo -e "\n***\n*** Failed to create python environments.\n***" - fi - - exit $RET -fi # Tensorflow 2.1.0 only works with Python 3.4 - 3.7. Successful execution of # the Python model indicates that the environment has been setup correctly. # Create a model with python 3.7 version @@ -176,8 +57,8 @@ path_to_conda_pack=`pwd`/python3.7.tar.gz mkdir -p models/python_3_7/1/ cp ../../python_models/python_version/config.pbtxt ./models/python_3_7 (cd models/python_3_7 && \ - sed -i "s/^name:.*/name: \"python_3_7\"/" config.pbtxt && \ - echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) + sed -i "s/^name:.*/name: \"python_3_7\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) cp ../../python_models/python_version/model.py ./models/python_3_7/1/ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_7 conda deactivate @@ -198,8 +79,8 @@ create_python_backend_stub mkdir -p models/python_3_7_1/1/ cp ../../python_models/python_version/config.pbtxt ./models/python_3_7_1 (cd models/python_3_7_1 && \ - sed -i "s/^name:.*/name: \"python_3_7_1\"/" config.pbtxt && \ - echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) + sed -i "s/^name:.*/name: \"python_3_7_1\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) cp ../../python_models/python_version/model.py ./models/python_3_7_1/1/ # Copy activate script to folder cp $path_to_conda_pack/lib/python3.7/site-packages/conda_pack/scripts/posix/activate $path_to_conda_pack/bin/. 
@@ -223,8 +104,8 @@ mkdir -p models/python_3_6/1/ cp ../../python_models/python_version/config.pbtxt ./models/python_3_6 cp python3.6.tar.gz models/python_3_6/python_3_6_environment.tar.gz (cd models/python_3_6 && \ - sed -i "s/^name:.*/name: \"python_3_6\"/" config.pbtxt && \ - echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}" >> config.pbtxt) + sed -i "s/^name:.*/name: \"python_3_6\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}" >> config.pbtxt) cp ../../python_models/python_version/model.py ./models/python_3_6/1/ cp python_backend/builddir/triton_python_backend_stub ./models/python_3_6 conda deactivate @@ -243,10 +124,11 @@ mkdir -p models/python_3_10/1/ cp ../../python_models/python_version/config.pbtxt ./models/python_3_10 cp python3.10.tar.gz models/python_3_10/python_3_10_environment.tar.gz (cd models/python_3_10 && \ - sed -i "s/^name:.*/name: \"python_3_10\"/" config.pbtxt && \ - echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}" >> config.pbtxt) + sed -i "s/^name:.*/name: \"python_3_10\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}" >> config.pbtxt) cp ../../python_models/python_version/model.py ./models/python_3_10/1/ conda deactivate +rm -rf ./miniconda run_server if [ "$SERVER_PID" == "0" ]; then diff --git a/qa/L0_backend_python/setup_python_enviroment.sh b/qa/L0_backend_python/setup_python_enviroment.sh new file mode 100755 index 0000000000..6e87564378 --- /dev/null +++ b/qa/L0_backend_python/setup_python_enviroment.sh @@ -0,0 +1,172 @@ +#!/bin/bash +# Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of NVIDIA CORPORATION nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+RET=0 +set -e +if [ ${PYTHON_ENV_VERSION} = "10" ]; then + echo No need to set up anything for default python3.${PYTHON_ENV_VERSION} + exit $RET +fi + +source common.sh +source ../common/util.sh + +SERVER=/opt/tritonserver/bin/tritonserver +BASE_SERVER_ARGS="--model-repository=`pwd`/models --log-verbose=1 --disable-auto-complete-config" +PYTHON_BACKEND_BRANCH=$PYTHON_BACKEND_REPO_TAG +SERVER_ARGS=$BASE_SERVER_ARGS +SERVER_LOG="./inference_server.log" +export PYTHON_ENV_VERSION=${PYTHON_ENV_VERSION:="10"} +RET=0 +EXPECTED_VERSION_STRINGS="" + +rm -fr ./models +rm -rf *.tar.gz +install_build_deps +install_conda + +# Test other python versions +conda update -n base -c defaults conda -y +# Create a model with python 3.8 version +# Successful execution of the Python model indicates that the environment has +# been setup correctly. +if [ ${PYTHON_ENV_VERSION} = "8" ]; then + create_conda_env "3.8" "python-3-8" + conda install -c conda-forge libstdcxx-ng=12 -y + conda install numpy=1.23.4 -y + conda install tensorflow=2.10.0 -y + EXPECTED_VERSION_STRING="Python version is 3.8, NumPy version is 1.23.4, and Tensorflow version is 2.10.0" + create_python_backend_stub + conda-pack -o python3.8.tar.gz + path_to_conda_pack="$PWD/python-3-8" + mkdir -p $path_to_conda_pack + tar -xzf python3.8.tar.gz -C $path_to_conda_pack + mkdir -p models/python_3_8/1/ + cp ../python_models/python_version/config.pbtxt ./models/python_3_8 + (cd models/python_3_8 && \ + sed -i "s/^name:.*/name: \"python_3_8\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) + cp ../python_models/python_version/model.py ./models/python_3_8/1/ + cp python_backend/builddir/triton_python_backend_stub ./models/python_3_8 +fi + +# Create a model with python 3.9 version +# Successful execution of the Python model indicates that the environment has +# been setup correctly. 
+if [ ${PYTHON_ENV_VERSION} = "9" ]; then + create_conda_env "3.9" "python-3-9" + conda install -c conda-forge libstdcxx-ng=12 -y + conda install numpy=1.23.4 -y + conda install tensorflow=2.10.0 -y + EXPECTED_VERSION_STRING="Python version is 3.9, NumPy version is 1.23.4, and Tensorflow version is 2.10.0" + create_python_backend_stub + conda-pack -o python3.9.tar.gz + path_to_conda_pack="$PWD/python-3-9" + mkdir -p $path_to_conda_pack + tar -xzf python3.9.tar.gz -C $path_to_conda_pack + mkdir -p models/python_3_9/1/ + cp ../python_models/python_version/config.pbtxt ./models/python_3_9 + (cd models/python_3_9 && \ + sed -i "s/^name:.*/name: \"python_3_9\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) + cp ../python_models/python_version/model.py ./models/python_3_9/1/ + cp python_backend/builddir/triton_python_backend_stub ./models/python_3_9 +fi + +# Create a model with python 3.11 version +# Successful execution of the Python model indicates that the environment has +# been setup correctly. 
+if [ ${PYTHON_ENV_VERSION} = "11" ]; then + create_conda_env "3.11" "python-3-11" + # tensorflow needs to be installed before numpy so pip does not mess up conda + # environment + pip install tensorflow==2.12.0 + conda install -c conda-forge libstdcxx-ng=12 -y + conda install numpy=1.23.5 -y + EXPECTED_VERSION_STRING="Python version is 3.11, NumPy version is 1.23.5, and Tensorflow version is 2.12.0" + create_python_backend_stub + conda-pack -o python3.11.tar.gz + path_to_conda_pack="$PWD/python-3-11" + mkdir -p $path_to_conda_pack + tar -xzf python3.11.tar.gz -C $path_to_conda_pack + mkdir -p models/python_3_11/1/ + cp ../python_models/python_version/config.pbtxt ./models/python_3_11 + (cd models/python_3_11 && \ + sed -i "s/^name:.*/name: \"python_3_11\"/" config.pbtxt && \ + echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt) + cp ../python_models/python_version/model.py ./models/python_3_11/1/ + cp python_backend/builddir/triton_python_backend_stub ./models/python_3_11 +fi +conda deactivate +rm -rf ./miniconda + +# test that +set +e +run_server +if [ "$SERVER_PID" == "0" ]; then + echo -e "\n***\n*** Failed to start $SERVER\n***" + cat $SERVER_LOG + exit 1 +fi + +kill $SERVER_PID +wait $SERVER_PID + +grep "$EXPECTED_VERSION_STRING" $SERVER_LOG +if [ $? -ne 0 ]; then + cat $SERVER_LOG + echo -e "\n***\n*** $EXPECTED_VERSION_STRING was not found in Triton logs. 
\n***"
+    RET=1
+fi
+set -e

+echo "python environment 3.${PYTHON_ENV_VERSION}"
+# copy the stub out to /opt/tritonserver/backends/python/triton_python_backend_stub
+cp python_backend/builddir/triton_python_backend_stub /opt/tritonserver/backends/python/triton_python_backend_stub
+# Set up environment and stub for each test
+add-apt-repository ppa:deadsnakes/ppa -y
+apt-get update && apt-get -y install \
+    "python3.${PYTHON_ENV_VERSION}-dev" \
+    "python3.${PYTHON_ENV_VERSION}-distutils" \
+    libboost-dev
+rm -f /usr/bin/python3 && \
+ln -s "/usr/bin/python3.${PYTHON_ENV_VERSION}" /usr/bin/python3 && \
+rm -r /usr/bin/python3.10
+pip3 install --upgrade requests numpy virtualenv
+find /opt/tritonserver/qa/pkgs/ -maxdepth 1 -type f -name \
+    "tritonclient-*linux*.whl" | xargs printf -- '%s[all]' | \
+    xargs pip3 install --upgrade

+# Build triton-shm-monitor for the test
+cd python_backend && rm -rf install build && mkdir build && cd build && \
+    cmake -DCMAKE_INSTALL_PREFIX:PATH=$PWD/install \
+    -DTRITON_COMMON_REPO_TAG:STRING=${TRITON_COMMON_REPO_TAG} \
+    -DTRITON_CORE_REPO_TAG:STRING=${TRITON_CORE_REPO_TAG} \
+    -DTRITON_BACKEND_REPO_TAG:STRING=${TRITON_BACKEND_REPO_TAG} .. && \
+    make -j16 triton-shm-monitor install
+cp $PWD/install/backends/python/triton_shm_monitor.cpython-* /opt/tritonserver/qa/common/.
+set +e +exit $RET diff --git a/qa/L0_backend_python/test.sh b/qa/L0_backend_python/test.sh index 36bcacc36a..ea38379005 100755 --- a/qa/L0_backend_python/test.sh +++ b/qa/L0_backend_python/test.sh @@ -62,6 +62,8 @@ source ./common.sh rm -fr *.log ./models +(bash -ex setup_python_enviroment.sh) + mkdir -p models/identity_fp32/1/ cp ../python_models/identity_fp32/model.py ./models/identity_fp32/1/model.py cp ../python_models/identity_fp32/config.pbtxt ./models/identity_fp32/config.pbtxt @@ -133,38 +135,12 @@ mkdir -p models/dlpack_identity/1/ cp ../python_models/dlpack_identity/model.py ./models/dlpack_identity/1/ cp ../python_models/dlpack_identity/config.pbtxt ./models/dlpack_identity -# Env test should be run first so we can get the stubs for each of the -# environments -# Disable env test for Jetson since cloud storage repos are not supported -if [ "$TEST_JETSON" == "0" ]; then - # In 'env' test we use miniconda for dependency management. No need to run - # the test in a virtual environment. - (cd env && bash -ex test.sh) - if [ $? -ne 0 ]; then - echo "Subtest env FAILED" - RET=1 - fi -fi - -echo "python environment 3.${PYTHON_ENV_VERSION}" -# Set up environment and stub for each test -add-apt-repository ppa:deadsnakes/ppa -y -apt-get update && apt-get -y install \ - "python3.${PYTHON_ENV_VERSION}-dev" \ - "python3.${PYTHON_ENV_VERSION}-distutils" \ - numpy -rm -f /usr/bin/python3 && \ -ln -s "/usr/bin/python3.${PYTHON_ENV_VERSION}" /usr/bin/python3 - -PYTHON_STUB_LOCATION=/opt/tritonserver/backends/python/3-${PYTHON_ENV_VERSION}/triton_python_backend_stub -cp ${PYTHON_STUB_LOCATION} /opt/tritonserver/backends/python/triton_python_backend_stub - # Skip torch install on Jetson since it is already installed. 
if [ "$TEST_JETSON" == "0" ]; then -pip3 install torch==1.13.0+cpu -f https://download.pytorch.org/whl/torch_stable.html + pip3 install torch==1.13.0+cpu -f https://download.pytorch.org/whl/torch_stable.html else -# GPU tensor tests are disabled on jetson -EXPECTED_NUM_TESTS=9 + # GPU tensor tests are disabled on jetson + EXPECTED_NUM_TESTS=9 fi prev_num_pages=`get_shm_pages` @@ -239,37 +215,37 @@ set -e # Test KIND_GPU # Disable env test for Jetson since GPU Tensors are not supported if [ "$TEST_JETSON" == "0" ]; then -rm -rf models/ -mkdir -p models/add_sub_gpu/1/ -cp ../python_models/add_sub/model.py ./models/add_sub_gpu/1/ -cp ../python_models/add_sub_gpu/config.pbtxt ./models/add_sub_gpu/ - -prev_num_pages=`get_shm_pages` -run_server -if [ "$SERVER_PID" == "0" ]; then - cat $SERVER_LOG - echo -e "\n***\n*** Failed to start $SERVER\n***" - exit 1 -fi - -if [ $? -ne 0 ]; then - cat $SERVER_LOG - echo -e "\n***\n*** KIND_GPU model test failed \n***" - RET=1 -fi - -kill $SERVER_PID -wait $SERVER_PID - -current_num_pages=`get_shm_pages` -if [ $current_num_pages -ne $prev_num_pages ]; then - cat $CLIENT_LOG - ls /dev/shm - echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. -Shared memory pages before starting triton equals to $prev_num_pages -and shared memory pages after starting triton equals to $current_num_pages \n***" - exit 1 -fi + rm -rf models/ + mkdir -p models/add_sub_gpu/1/ + cp ../python_models/add_sub/model.py ./models/add_sub_gpu/1/ + cp ../python_models/add_sub_gpu/config.pbtxt ./models/add_sub_gpu/ + + prev_num_pages=`get_shm_pages` + run_server + if [ "$SERVER_PID" == "0" ]; then + cat $SERVER_LOG + echo -e "\n***\n*** Failed to start $SERVER\n***" + exit 1 + fi + + if [ $? 
-ne 0 ]; then + cat $SERVER_LOG + echo -e "\n***\n*** KIND_GPU model test failed \n***" + RET=1 + fi + + kill $SERVER_PID + wait $SERVER_PID + + current_num_pages=`get_shm_pages` + if [ $current_num_pages -ne $prev_num_pages ]; then + cat $CLIENT_LOG + ls /dev/shm + echo -e "\n***\n*** Test Failed. Shared memory pages where not cleaned properly. + Shared memory pages before starting triton equals to $prev_num_pages + and shared memory pages after starting triton equals to $current_num_pages \n***" + exit 1 + fi fi # Test Multi file models @@ -278,7 +254,7 @@ mkdir -p models/multi_file/1/ cp ../python_models/multi_file/*.py ./models/multi_file/1/ cp ../python_models/identity_fp32/config.pbtxt ./models/multi_file/ (cd models/multi_file && \ - sed -i "s/^name:.*/name: \"multi_file\"/" config.pbtxt) + sed -i "s/^name:.*/name: \"multi_file\"/" config.pbtxt) prev_num_pages=`get_shm_pages` run_server @@ -402,6 +378,7 @@ and shared memory pages after starting triton equals to $current_num_pages \n*** exit 1 fi +# Disable env test for Jetson since cloud storage repos are not supported # Disable ensemble, io and bls tests for Jetson since GPU Tensors are not supported # Disable variants test for Jetson since already built without GPU Tensor support # Disable decoupled test because it uses GPU tensors @@ -422,6 +399,16 @@ if [ "$TEST_JETSON" == "0" ]; then deactivate rm -fr venv done + + if [ ${PYTHON_ENV_VERSION} = "10" ]; then + # In 'env' test we use miniconda for dependency management. No need to run + # the test in a virtual environment. + (cd env && bash -ex test.sh) + if [ $? 
-ne 0 ]; then + echo "Subtest env FAILED" + RET=1 + fi + fi fi SUBTESTS="lifecycle restart model_control examples argument_validation logging custom_metrics" @@ -442,7 +429,6 @@ for TEST in ${SUBTESTS}; do rm -fr venv done - if [ $RET -eq 0 ]; then echo -e "\n***\n*** Test Passed\n***" else From 13ca9847832e0278a05590a864bf0e266b21d51a Mon Sep 17 00:00:00 2001 From: Katherine Yang Date: Mon, 7 Aug 2023 11:00:26 -0700 Subject: [PATCH 11/12] update examples torch and torchvision versions --- qa/L0_backend_python/examples/test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qa/L0_backend_python/examples/test.sh b/qa/L0_backend_python/examples/test.sh index bbad8b5bfd..19acd4f127 100755 --- a/qa/L0_backend_python/examples/test.sh +++ b/qa/L0_backend_python/examples/test.sh @@ -41,7 +41,7 @@ rm -fr *.log python_backend/ # Skip torch and torchvision install on Jetson since it is already installed. if [ "$TEST_JETSON" == "0" ]; then pip3 uninstall -y torch - pip3 install torch==1.13.0+cu117 -f https://download.pytorch.org/whl/torch_stable.html torchvision==0.14.0+cu117 + pip3 install torch==2.0.0+cu117 -f https://download.pytorch.org/whl/torch_stable.html torchvision==0.15.0+cu117 fi # Install `validators` for Model Instance Kind example From d16bf7db17c1a7822d39457bf28c70474facef6e Mon Sep 17 00:00:00 2001 From: Katherine Yang Date: Mon, 7 Aug 2023 15:00:49 -0700 Subject: [PATCH 12/12] added check for default version --- qa/L0_backend_python/test.sh | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/qa/L0_backend_python/test.sh b/qa/L0_backend_python/test.sh index ea38379005..af8000b2aa 100755 --- a/qa/L0_backend_python/test.sh +++ b/qa/L0_backend_python/test.sh @@ -62,8 +62,20 @@ source ./common.sh rm -fr *.log ./models +python3 --version | grep "3.10" > /dev/null +if [ $? 
-ne 0 ]; then + echo -e "Expecting Python default version to be: Python 3.10 but actual version is $(python3 --version)" + exit 1 +fi + (bash -ex setup_python_enviroment.sh) +python3 --version | grep "3.${PYTHON_ENV_VERSION}" > /dev/null +if [ $? -ne 0 ]; then + echo -e "Expecting Python version to be: Python 3.${PYTHON_ENV_VERSION} but actual version is $(python3 --version)" + exit 1 +fi + mkdir -p models/identity_fp32/1/ cp ../python_models/identity_fp32/model.py ./models/identity_fp32/1/model.py cp ../python_models/identity_fp32/config.pbtxt ./models/identity_fp32/config.pbtxt