
Commit

Fix jetson
Tabrizian committed Jun 23, 2023
1 parent a629ff1 commit eccc3cf
Showing 1 changed file with 4 additions and 2 deletions.
6 changes: 4 additions & 2 deletions qa/L0_backend_python/python_test.py
@@ -38,7 +38,6 @@
 import os
 
 from tritonclient.utils import *
-import tritonclient.utils.cuda_shared_memory as cuda_shared_memory
 import tritonclient.http as httpclient
 
 TEST_JETSON = bool(int(os.environ.get('TEST_JETSON', 0)))
@@ -155,9 +154,11 @@ def test_growth_error(self):
         self._infer_help(model_name, shape, dtype)
 
     # GPU tensors are not supported on jetson
-    # CUDA Shared memory is not supported on jetson
     if not TEST_JETSON:
+        # CUDA Shared memory is not supported on jetson
+
         def test_gpu_tensor_error(self):
+            import tritonclient.utils.cuda_shared_memory as cuda_shared_memory
             model_name = 'identity_bool'
             with httpclient.InferenceServerClient("localhost:8000") as client:
                 input_data = np.array([[True] * 1000], dtype=bool)
@@ -184,6 +185,7 @@ def test_gpu_tensor_error(self):
             cuda_shared_memory.destroy_shared_memory_region(shm0_handle)
 
         def test_dlpack_tensor_error(self):
+            import tritonclient.utils.cuda_shared_memory as cuda_shared_memory
             model_name = 'dlpack_identity'
             with httpclient.InferenceServerClient("localhost:8000") as client:
                 input_data = np.array([[1] * 1000], dtype=np.float32)
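Context on the change: the cuda_shared_memory client utility is not available on Jetson, so importing it at module level made the whole test file fail to import there. The commit moves the import into the test bodies, which are themselves only defined when TEST_JETSON is unset. Below is a minimal sketch of the resulting pattern; the class name, shared-memory region name, and sizes are illustrative, not taken from the file.

# Sketch only: shows the guard-plus-deferred-import pattern this commit applies.
import os
import unittest

import numpy as np
import tritonclient.http as httpclient

TEST_JETSON = bool(int(os.environ.get('TEST_JETSON', 0)))


class PythonBackendTestSketch(unittest.TestCase):

    # CUDA shared memory is not supported on Jetson, so the tests that
    # need it are only defined when TEST_JETSON is unset.
    if not TEST_JETSON:

        def test_gpu_tensor_error(self):
            # Deferred import: merely importing this module no longer
            # pulls in cuda_shared_memory, so collection works on Jetson.
            import tritonclient.utils.cuda_shared_memory as cuda_shared_memory

            with httpclient.InferenceServerClient("localhost:8000") as client:
                input_data = np.array([[True] * 1000], dtype=bool)
                # Allocate a CUDA shared-memory region and clean it up
                # afterwards, as the real test does.
                shm0_handle = cuda_shared_memory.create_shared_memory_region(
                    "output0_data", input_data.nbytes, 0)
                # ... run the inference via client and assert on the
                # expected GPU-tensor error ...
                cuda_shared_memory.destroy_shared_memory_region(shm0_handle)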
