From 3e2a701a6833aac3d4dadd05308c9b2f5345ab09 Mon Sep 17 00:00:00 2001
From: Ofir Gordon
Date: Mon, 13 Jan 2025 21:50:30 +0200
Subject: [PATCH] fix QuantizationMethod import in tests

---
 .../model_tests/test_feature_models_runner.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/pytorch_tests/model_tests/test_feature_models_runner.py b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
index 0c9c1c563..208818613 100644
--- a/tests/pytorch_tests/model_tests/test_feature_models_runner.py
+++ b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
@@ -19,7 +19,6 @@
 import torch
 from torch import nn
 
-import model_compression_toolkit as mct
 from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting
 from model_compression_toolkit.core.common.network_editors import NodeTypeFilter, NodeNameFilter
@@ -695,15 +694,15 @@ def test_gptq(self):
         GPTQWeightsUpdateTest(self, rounding_type=RoundingType.SoftQuantizer).run_test()
         GPTQLearnRateZeroTest(self, rounding_type=RoundingType.SoftQuantizer).run_test()
         GPTQAccuracyTest(self, rounding_type=RoundingType.SoftQuantizer,
-                         weights_quant_method=mct.QuantizationMethod.UNIFORM).run_test()
+                         weights_quant_method=QuantizationMethod.UNIFORM).run_test()
         GPTQAccuracyTest(self, rounding_type=RoundingType.SoftQuantizer,
-                         weights_quant_method=mct.QuantizationMethod.UNIFORM, per_channel=False,
+                         weights_quant_method=QuantizationMethod.UNIFORM, per_channel=False,
                          params_learning=False).run_test()
         GPTQAccuracyTest(self, rounding_type=RoundingType.SoftQuantizer,
-                         weights_quant_method=mct.QuantizationMethod.UNIFORM,
+                         weights_quant_method=QuantizationMethod.UNIFORM,
                          per_channel=True, hessian_weights=True, log_norm_weights=True, scaled_log_norm=True).run_test()
         GPTQWeightsUpdateTest(self, rounding_type=RoundingType.SoftQuantizer,
-                             weights_quant_method=mct.QuantizationMethod.UNIFORM,
+                             weights_quant_method=QuantizationMethod.UNIFORM,
                              params_learning=False).run_test()  # TODO: When params learning is True, the uniform quantizer gets a min value > max value
 
     def test_gptq_with_gradual_activation(self):