From c272b788d86b93d7c1c9e6253ae7c551df9637c7 Mon Sep 17 00:00:00 2001 From: Naren Dasan Date: Tue, 16 Apr 2024 18:05:34 -0700 Subject: [PATCH] chore: address review comments Signed-off-by: Naren Dasan Signed-off-by: Naren Dasan --- .../dynamo/torch_compile_stable_diffusion.py | 5 +-- py/torch_tensorrt/_compile.py | 6 ++- py/torch_tensorrt/_enums.py | 39 +++++++++++++++---- .../dynamo/conversion/converter_utils.py | 2 +- 4 files changed, 38 insertions(+), 14 deletions(-) diff --git a/examples/dynamo/torch_compile_stable_diffusion.py b/examples/dynamo/torch_compile_stable_diffusion.py index 0511e5a363..a0b725572b 100644 --- a/examples/dynamo/torch_compile_stable_diffusion.py +++ b/examples/dynamo/torch_compile_stable_diffusion.py @@ -18,9 +18,8 @@ # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ import torch -from diffusers import DiffusionPipeline - import torch_tensorrt +from diffusers import DiffusionPipeline model_id = "CompVis/stable-diffusion-v1-4" device = "cuda:0" @@ -39,7 +38,7 @@ backend=backend, options={ "truncate_long_and_double": True, - "precision": torch.float16, + "enabled_precisions": {torch.float32, torch.float16}, }, dynamic=False, ) diff --git a/py/torch_tensorrt/_compile.py b/py/torch_tensorrt/_compile.py index b4454ec8b1..1381971047 100644 --- a/py/torch_tensorrt/_compile.py +++ b/py/torch_tensorrt/_compile.py @@ -190,9 +190,11 @@ def compile( Returns: torch.nn.Module: Compiled Module, when run it will execute via TensorRT """ - input_list = inputs if inputs else [] + input_list = inputs if inputs is not None else [] enabled_precisions_set: Set[dtype | torch.dtype] = ( - enabled_precisions if enabled_precisions else _defaults.ENABLED_PRECISIONS + enabled_precisions + if enabled_precisions is not None + else _defaults.ENABLED_PRECISIONS ) module_type = _parse_module_type(module) diff --git a/py/torch_tensorrt/_enums.py b/py/torch_tensorrt/_enums.py index 6c11b82454..724a2a9346 100644 --- a/py/torch_tensorrt/_enums.py +++ b/py/torch_tensorrt/_enums.py @@ -185,7 
+185,9 @@ def try_from( casted_format = dtype._from(t, use_default=use_default) return casted_format except (ValueError, TypeError) as e: - logging.debug(e) + logging.debug( + f"Conversion from {t} to torch_tensorrt.dtype failed", exc_info=True + ) return None def to( @@ -301,7 +303,10 @@ def try_to( casted_format = self.to(t, use_default) return casted_format except (ValueError, TypeError) as e: - logging.debug(e) + logging.debug( + f"torch_tensorrt.dtype conversion to target type {t} failed", + exc_info=True, + ) return None def __eq__(self, other: Union[torch.dtype, trt.DataType, np.dtype, dtype]) -> bool: @@ -413,7 +418,10 @@ def try_from( casted_format = memory_format._from(f) return casted_format except (ValueError, TypeError) as e: - logging.debug(e) + logging.debug( + f"Conversion from {f} to torch_tensorrt.memory_format failed", + exc_info=True, + ) return None def to( @@ -492,7 +500,10 @@ def try_to( casted_format = self.to(t) return casted_format except (ValueError, TypeError) as e: - logging.debug(e) + logging.debug( + f"torch_tensorrt.memory_format conversion to target type {t} failed", + exc_info=True, + ) return None def __eq__( @@ -546,7 +557,10 @@ def try_from(cls, d: Union[trt.DeviceType, DeviceType]) -> Optional[DeviceType]: casted_format = DeviceType._from(d) return casted_format except (ValueError, TypeError) as e: - logging.debug(e) + logging.debug( + f"Conversion from {d} to torch_tensorrt.DeviceType failed", + exc_info=True, + ) return None def to( @@ -595,7 +609,10 @@ def try_to( casted_format = self.to(t, use_default=use_default) return casted_format except (ValueError, TypeError) as e: - logging.debug(e) + logging.debug( + f"torch_tensorrt.DeviceType conversion to target type {t} failed", + exc_info=True, + ) return None def __eq__(self, other: Union[trt.DeviceType, DeviceType]) -> bool: @@ -653,7 +670,10 @@ def try_from( casted_format = EngineCapability._from(c) return casted_format except (ValueError, TypeError) as e: - logging.debug(e) 
+ logging.debug( f"Conversion from {c} to torch_tensorrt.EngineCapability failed", exc_info=True, ) return None def to( @@ -696,7 +716,10 @@ def try_to( casted_format = self.to(t) return casted_format except (ValueError, TypeError) as e: - logging.debug(e) + logging.debug( f"torch_tensorrt.EngineCapability conversion to target type {t} failed", exc_info=True, ) return None def __eq__(self, other: Union[trt.EngineCapability, EngineCapability]) -> bool: diff --git a/py/torch_tensorrt/dynamo/conversion/converter_utils.py b/py/torch_tensorrt/dynamo/conversion/converter_utils.py index c2f4fe20eb..04f048c5f3 100644 --- a/py/torch_tensorrt/dynamo/conversion/converter_utils.py +++ b/py/torch_tensorrt/dynamo/conversion/converter_utils.py @@ -266,7 +266,7 @@ def create_constant( A TensorRT ITensor that represents the given value. """ numpy_value = to_numpy( - value, _enums.dtype._from(dtype).to(np.dtype) if dtype else None + value, _enums.dtype._from(dtype).to(np.dtype) if dtype is not None else None ) constant = ctx.net.add_constant( (1,) if isinstance(value, (int, float, bool)) else value.shape,