Fix tests TPCv4 after TPC refactor (#1318)
ofirgo authored Jan 8, 2025
1 parent ce318c0 commit 3a86e7b
Showing 1 changed file with 25 additions and 22 deletions.
tests/common_tests/helpers/tpcs_for_tests/v4/tp_model.py
@@ -20,8 +20,7 @@
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     IMX500_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, \
-    Signedness, \
-    AttributeQuantizationConfig, OpQuantizationConfig
+    Signedness, AttributeQuantizationConfig, OpQuantizationConfig

 tp = mct.target_platform

@@ -155,13 +154,15 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     # of possible configurations to consider when quantizing a set of operations (in mixed-precision, for example).
     # If the QuantizationConfigOptions contains only one configuration,
     # this configuration will be used for the operation quantization:
-    default_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([default_config]))
+    default_configuration_options = schema.QuantizationConfigOptions(
+        quantization_configurations=tuple([default_config]))
     default_config_input16 = default_config.clone_and_edit(supported_input_activation_n_bits=(8, 16))
-    default_config_options_16bit = schema.QuantizationConfigOptions(quantization_configurations=tuple([default_config_input16,
-                                                                                                       default_config_input16.clone_and_edit(
-                                                                                                           activation_n_bits=16,
-                                                                                                           signedness=Signedness.SIGNED)]),
-                                                                    base_config=default_config_input16)
+    default_config_options_16bit = schema.QuantizationConfigOptions(
+        quantization_configurations=tuple([default_config_input16,
+                                           default_config_input16.clone_and_edit(
+                                               activation_n_bits=16,
+                                               signedness=Signedness.SIGNED)]),
+        base_config=default_config_input16)

     # Create a QuantizationConfigOptions for quantizing constants in functional ops.
     # Constant configuration is similar to the default eight bit configuration except for PoT
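The comment block in the hunk above states the QuantizationConfigOptions contract: a single configuration is applied to an operation as-is, while several configurations define the candidates a mixed-precision search may pick from, with base_config as the fallback. A minimal sketch of both cases, not part of the diff, assuming QuantizationConfigOptions is exported by mct_current_schema alongside the names imported at the top of the file:

from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import \
    OpQuantizationConfig, QuantizationConfigOptions, Signedness


def build_options(default_config: OpQuantizationConfig):
    # One configuration: used as-is for every operation attached to it.
    single = QuantizationConfigOptions(quantization_configurations=tuple([default_config]))

    # Two candidates (8-bit and signed 16-bit activations): a mixed-precision
    # search picks per operation; base_config is the fallback otherwise.
    cfg16 = default_config.clone_and_edit(activation_n_bits=16, signedness=Signedness.SIGNED)
    multi = QuantizationConfigOptions(quantization_configurations=(default_config, cfg16),
                                      base_config=default_config)
    return single, multi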
@@ -180,9 +181,10 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         supported_input_activation_n_bits=(8, 16))
     const_config_input16_output16 = const_config_input16.clone_and_edit(
         activation_n_bits=16, signedness=Signedness.SIGNED)
-    const_configuration_options_inout16 = schema.QuantizationConfigOptions(quantization_configurations=tuple([const_config_input16_output16,
+    const_configuration_options_inout16 = (
+        schema.QuantizationConfigOptions(quantization_configurations=tuple([const_config_input16_output16,
                                                                             const_config_input16]),
-                                                                           base_config=const_config_input16)
+                                         base_config=const_config_input16))

     const_config_input16_per_tensor = const_config.clone_and_edit(
         supported_input_activation_n_bits=(8, 16),
@@ -201,7 +203,8 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         quantization_preserving=True,
         default_weight_attr_config=const_config.default_weight_attr_config.clone_and_edit(
             weights_per_channel_threshold=False))
-    qpreserving_const_config_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([qpreserving_const_config]))
+    qpreserving_const_config_options = schema.QuantizationConfigOptions(
+        quantization_configurations=tuple([qpreserving_const_config]))

     mp_cfg_list_16bit = [mp_cfg.clone_and_edit(activation_n_bits=16, signedness=Signedness.SIGNED)
                          for mp_cfg in mixed_precision_cfg_list]
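The two hunks above repeat one idiom: clone_and_edit derives new OpQuantizationConfig variants (wider supported inputs, signed 16-bit outputs) without mutating the original. A sketch of that idiom under the same assumptions as before; const_config and mixed_precision_cfg_list are parameters defined outside the visible hunks:

from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import \
    QuantizationConfigOptions, Signedness


def widen_to_16bit(const_config, mixed_precision_cfg_list):
    # Accept 8- or 16-bit inputs, then add a signed 16-bit-output variant.
    const_in16 = const_config.clone_and_edit(supported_input_activation_n_bits=(8, 16))
    const_inout16 = const_in16.clone_and_edit(activation_n_bits=16, signedness=Signedness.SIGNED)
    inout16_options = QuantizationConfigOptions(
        quantization_configurations=(const_inout16, const_in16),
        base_config=const_in16)

    # Give every mixed-precision candidate a signed 16-bit-activation twin.
    mp_16bit = [mp_cfg.clone_and_edit(activation_n_bits=16, signedness=Signedness.SIGNED)
                for mp_cfg in mixed_precision_cfg_list]
    return inout16_options, mp_16bit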
@@ -231,25 +234,25 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.FAKE_QUANT, qc_options=no_quantization_config))
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.SSD_POST_PROCESS, qc_options=no_quantization_config))

-    quant_preserving_config = (default_configuration_options.clone_and_edit(enable_activation_quantization=False,
-                                                                            quantization_preserving=True)
-                               .clone_and_edit_weight_attribute(enable_weights_quantization=False))
+    quant_preserving_config = (default_configuration_options.clone_and_edit(
+        enable_activation_quantization=False,
+        quantization_preserving=True).clone_and_edit_weight_attribute(enable_weights_quantization=False))

-    operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.UNSTACK, qc_options=quant_preserving_config))
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.DROPOUT, qc_options=quant_preserving_config))
-    operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.SPLIT_CHUNK, qc_options=quant_preserving_config))
-    operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.GET_ITEM, qc_options=quant_preserving_config))
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.MAXPOOL, qc_options=quant_preserving_config))
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.PAD, qc_options=quant_preserving_config))
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.ZERO_PADDING2D, qc_options=quant_preserving_config))
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.CAST, qc_options=quant_preserving_config))
-    operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.STRIDED_SLICE, qc_options=quant_preserving_config))

     dim_manipulation_config = (default_configuration_options.clone_and_edit(enable_activation_quantization=False,
                                                                             quantization_preserving=True,
                                                                             supported_input_activation_n_bits=(8, 16))
                                .clone_and_edit_weight_attribute(enable_weights_quantization=False))

+    operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.SPLIT_CHUNK, qc_options=dim_manipulation_config))
+    operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.GET_ITEM, qc_options=dim_manipulation_config))
+    operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.UNSTACK, qc_options=dim_manipulation_config))
+    operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.STRIDED_SLICE, qc_options=dim_manipulation_config))
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.FLATTEN, qc_options=dim_manipulation_config))
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.RESHAPE, qc_options=dim_manipulation_config))
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.UNSQUEEZE, qc_options=dim_manipulation_config))
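The operator moves above are the substantive fix: SPLIT_CHUNK, GET_ITEM, UNSTACK and STRIDED_SLICE now take dim_manipulation_config, which preserves the input quantization and also accepts 16-bit inputs. A sketch of how such a config is derived and attached to an operator set, assuming schema aliases mct_current_schema as the diff's qualified calls suggest:

from model_compression_toolkit.target_platform_capabilities.schema import mct_current_schema as schema


def make_dim_manipulation_set(default_configuration_options):
    # Activation quantization off, the op flagged as preserving its input
    # quantization, weights unquantized, and 8- or 16-bit inputs accepted.
    dim_cfg = (default_configuration_options
               .clone_and_edit(enable_activation_quantization=False,
                               quantization_preserving=True,
                               supported_input_activation_n_bits=(8, 16))
               .clone_and_edit_weight_attribute(enable_weights_quantization=False))
    # RESHAPE stands in for any of the dimension-manipulation ops above.
    return schema.OperatorsSet(name=schema.OperatorSetNames.RESHAPE, qc_options=dim_cfg)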
@@ -289,8 +292,8 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     hard_tanh = schema.OperatorsSet(name=schema.OperatorSetNames.HARD_TANH, qc_options=default_config_options_16bit)

     operator_set.extend(
-        [conv, conv_transpose, depthwise_conv, fc, relu, relu6, leaky_relu, add, sub, mul, div, prelu, swish, hardswish, sigmoid,
-         tanh, gelu, hardsigmoid, hard_tanh])
+        [conv, conv_transpose, depthwise_conv, fc, relu, relu6, leaky_relu, add, sub, mul, div, prelu, swish, hardswish,
+         sigmoid, tanh, gelu, hardsigmoid, hard_tanh])
     any_relu = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh])

     # Combine multiple operators into a single operator to avoid quantization between
@@ -299,8 +302,8 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     activations_after_conv_to_fuse = schema.OperatorSetConcat(
         operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, gelu, hardswish, hardsigmoid, prelu, sigmoid, tanh])
     conv_types = schema.OperatorSetConcat(operators_set=[conv, conv_transpose, depthwise_conv])
-    activations_after_fc_to_fuse = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, sigmoid, tanh, gelu,
-                                                                           hardswish, hardsigmoid])
+    activations_after_fc_to_fuse = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish,
+                                                                           sigmoid, tanh, gelu, hardswish, hardsigmoid])
     any_binary = schema.OperatorSetConcat(operators_set=[add, sub, mul, div])

     # ------------------- #
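The OperatorSetConcat groups above feed the fusing declarations hidden behind the fold. A sketch of that pattern, not part of the diff; schema.Fusing(operator_groups=...) is assumed from other versions of this tp_model file, outside the visible hunks:

from model_compression_toolkit.target_platform_capabilities.schema import mct_current_schema as schema


def declare_conv_act_fusing(conv, conv_transpose, depthwise_conv, relu, swish):
    # A concat acts as an "any of these" wildcard inside a fusing pattern.
    conv_types = schema.OperatorSetConcat(operators_set=[conv, conv_transpose, depthwise_conv])
    activations = schema.OperatorSetConcat(operators_set=[relu, swish])
    # Conv followed by one of these activations is treated as a single fused
    # block, so no quantization step is inserted between the two ops.
    return schema.Fusing(operator_groups=(conv_types, activations))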