Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Move MP target_kpi from facade to MixedPrecisionQuantizationConfig #990

Merged
merged 8 commits into from
Mar 11, 2024
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
adjust tests with new API
  • Loading branch information
Ofir Gordon authored and Ofir Gordon committed Mar 11, 2024
commit 59b23bf4394717b1fe6e47a30516c7d96d88ecfe
4 changes: 2 additions & 2 deletions tests/common_tests/base_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,13 +35,13 @@ def get_input_shapes(self):

def get_core_config(self):
return CoreConfig(quantization_config=self.get_quantization_config(),
mixed_precision_config=self.get_mixed_precision_v2_config(),
mixed_precision_config=self.get_mixed_precision_config(),
debug_config=self.get_debug_config())

def get_quantization_config(self):
return QuantizationConfig()

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return None

def get_debug_config(self):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -46,8 +46,9 @@ def get_tpc(self):
mp_bitwidth_candidates_list=self.mixed_precision_candidates_list,
name="mp_bopts_test")

def get_mixed_precision_v2_config(self):
return MixedPrecisionQuantizationConfig(num_of_images=1)
def get_mixed_precision_config(self):
return MixedPrecisionQuantizationConfig(num_of_images=1,
target_kpi=self.get_kpi())

def get_input_shapes(self):
return [[self.val_batch_size, 16, 16, 3]]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -295,11 +295,13 @@ def __init__(self, unit_test, kpi_weights=np.inf, kpi_activation=np.inf, expecte

def run_test(self, **kwargs):
model_float = self.create_networks()
config = mct.core.CoreConfig(mixed_precision_config=MixedPrecisionQuantizationConfig())
config = mct.core.CoreConfig(
mixed_precision_config=MixedPrecisionQuantizationConfig(target_kpi=
mct.core.KPI(weights_memory=self.kpi_weights,
activation_memory=self.kpi_activation)))
qat_ready_model, quantization_info, custom_objects = mct.qat.keras_quantization_aware_training_init_experimental(
model_float,
self.representative_data_gen_experimental,
mct.core.KPI(weights_memory=self.kpi_weights, activation_memory=self.kpi_activation),
core_config=config,
fw_info=self.get_fw_info(),
target_platform_capabilities=self.get_tpc())
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,8 @@ def get_quantization_config(self):
relu_bound_to_power_of_2=True, weights_bias_correction=True,
input_scaling=True, activation_channel_equalization=True)

def get_mixed_precision_v2_config(self):
return MixedPrecisionQuantizationConfig()
def get_mixed_precision_config(self):
return MixedPrecisionQuantizationConfig(target_kpi=self.get_kpi())

def create_networks(self):
layer = layers.Conv2D(3, 4)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ def get_quantization_config(self):
relu_bound_to_power_of_2=True, weights_bias_correction=True,
input_scaling=True, activation_channel_equalization=True)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig(num_of_images=1,
target_kpi=self.get_kpi())

Expand Down Expand Up @@ -81,7 +81,7 @@ def get_quantization_config(self):
relu_bound_to_power_of_2=True, weights_bias_correction=True,
input_scaling=True, activation_channel_equalization=True)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig(target_kpi=self.get_kpi())

def get_kpi(self):
Expand Down Expand Up @@ -225,7 +225,7 @@ class MixedPercisionCombinedNMSTest(MixedPercisionBaseTest):
def __init__(self, unit_test):
super().__init__(unit_test)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig(num_of_images=1,
target_kpi=self.get_kpi(),
use_hessian_based_scores=False)
Expand Down Expand Up @@ -367,7 +367,7 @@ def get_quantization_config(self):
relu_bound_to_power_of_2=False, weights_bias_correction=False,
input_scaling=False, activation_channel_equalization=False)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig(target_kpi=self.get_kpi())


Expand All @@ -383,7 +383,7 @@ def get_quantization_config(self):
input_scaling=False,
activation_channel_equalization=False)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig(num_of_images=1,
target_kpi=self.get_kpi())

Expand Down Expand Up @@ -416,7 +416,7 @@ class MixedPercisionSearchLastLayerDistanceTest(MixedPercisionBaseTest):
def __init__(self, unit_test):
super().__init__(unit_test, val_batch_size=2)

def get_mixed_precision_v2_config(self):
def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig(num_of_images=1,
target_kpi=self.get_kpi(),
distance_weighting_method=get_last_layer_weights,
Expand Down
6 changes: 3 additions & 3 deletions tests/pytorch_tests/function_tests/test_pytorch_tp_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,8 @@
from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import LayerFilterParams
from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import Greater, Smaller, Eq
from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import DEFAULT_MIXEDPRECISION_CONFIG
from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
DEFAULT_MIXEDPRECISION_CONFIG, MixedPrecisionQuantizationConfig
from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL, IMX500_TP_MODEL, \
TFLITE_TP_MODEL, QNNPACK_TP_MODEL, KERNEL_ATTR, WEIGHTS_N_BITS, PYTORCH_KERNEL, BIAS_ATTR, BIAS
from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
Expand Down Expand Up @@ -240,13 +241,12 @@ def rep_data():
rep_data,
target_platform_capabilities=tpc)

mp_qc = copy.deepcopy(DEFAULT_MIXEDPRECISION_CONFIG)
mp_qc = MixedPrecisionQuantizationConfig(target_kpi=mct.core.KPI(np.inf))
mp_qc.num_of_images = 1
core_config = mct.core.CoreConfig(quantization_config=mct.core.QuantizationConfig(),
mixed_precision_config=mp_qc)
quantized_model, _ = mct.ptq.pytorch_post_training_quantization(model,
rep_data,
target_kpi=mct.core.KPI(np.inf),
target_platform_capabilities=tpc,
core_config=core_config)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,8 @@ def get_core_configs(self):
qc = mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.MSE, mct.core.QuantizationErrorMethod.MSE,
relu_bound_to_power_of_2=False, weights_bias_correction=True,
input_scaling=False, activation_channel_equalization=False)
mpc = mct.core.MixedPrecisionQuantizationConfig(num_of_images=1)
mpc = mct.core.MixedPrecisionQuantizationConfig(num_of_images=1,
target_kpi=self.get_kpi())

return {"mixed_precision_activation_model": mct.core.CoreConfig(quantization_config=qc, mixed_precision_config=mpc)}

Expand Down Expand Up @@ -126,8 +127,9 @@ def __init__(self, unit_test):
def get_kpi(self):
return KPI(np.inf, np.inf)

def get_mixed_precision_v2_config(self):
return MixedPrecisionQuantizationConfig(num_of_images=4)
def get_mixed_precision_config(self):
return MixedPrecisionQuantizationConfig(num_of_images=4,
target_kpi=self.get_kpi())

def create_feature_network(self, input_shape):
return MixedPrecisionMultipleInputsNet(input_shape)
Expand Down