Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add a test for TF mixed precision #9806

Merged
merged 1 commit into from
Jan 27, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions tests/test_modeling_tf_albert.py
Original file line number Diff line number Diff line change
Expand Up @@ -294,6 +294,10 @@ def test_model_common_attributes(self):
name = model.get_bias()
assert name is None

def test_mixed_precision(self):
# TODO JP: Make ALBERT float16 compliant
pass

@slow
def test_model_from_pretrained(self):
for model_name in TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
Expand Down
4 changes: 4 additions & 0 deletions tests/test_modeling_tf_bart.py
Original file line number Diff line number Diff line change
Expand Up @@ -278,6 +278,10 @@ def test_saved_model_creation(self):
# This test is too long (>30sec) and makes the CI fail
pass

def test_mixed_precision(self):
# TODO JP: Make BART float16 compliant
pass


def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
Expand Down
4 changes: 4 additions & 0 deletions tests/test_modeling_tf_blenderbot.py
Original file line number Diff line number Diff line change
Expand Up @@ -214,6 +214,10 @@ def test_saved_model_creation(self):
# This test is too long (>30sec) and makes the CI fail
pass

def test_mixed_precision(self):
# TODO JP: Make Blenderbot float16 compliant
pass

def test_resize_token_embeddings(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

Expand Down
4 changes: 4 additions & 0 deletions tests/test_modeling_tf_blenderbot_small.py
Original file line number Diff line number Diff line change
Expand Up @@ -279,6 +279,10 @@ def test_saved_model_creation(self):
# This test is too long (>30sec) and makes the CI fail
pass

def test_mixed_precision(self):
# TODO JP: Make Blenderbot Small float16 compliant
pass


def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
Expand Down
14 changes: 14 additions & 0 deletions tests/test_modeling_tf_common.py
Original file line number Diff line number Diff line change
Expand Up @@ -279,6 +279,20 @@ def test_saved_model_with_attentions_output(self):
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)

def test_mixed_precision(self):
tf.keras.mixed_precision.experimental.set_policy("mixed_float16")

config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
outputs = model(class_inputs_dict)

self.assertIsNotNone(outputs)

tf.keras.mixed_precision.experimental.set_policy("float32")

def test_keras_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

Expand Down
4 changes: 4 additions & 0 deletions tests/test_modeling_tf_ctrl.py
Original file line number Diff line number Diff line change
Expand Up @@ -221,6 +221,10 @@ def test_model_common_attributes(self):
name = model.get_bias()
assert name is None

def test_mixed_precision(self):
# TODO JP: Make CTRL float16 compliant
pass

@slow
def test_model_from_pretrained(self):
for model_name in TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
Expand Down
8 changes: 2 additions & 6 deletions tests/test_modeling_tf_flaubert.py
Original file line number Diff line number Diff line change
Expand Up @@ -330,12 +330,8 @@ def test_model_from_pretrained(self):
model = TFFlaubertModel.from_pretrained(model_name)
self.assertIsNotNone(model)

def test_saved_model_with_hidden_states_output(self):
# Should be uncommented during patrick TF refactor
pass

def test_saved_model_with_attentions_output(self):
# Should be uncommented during patrick TF refactor
def test_mixed_precision(self):
# TODO JP: Make Flaubert float16 compliant
pass


Expand Down
8 changes: 8 additions & 0 deletions tests/test_modeling_tf_funnel.py
Original file line number Diff line number Diff line change
Expand Up @@ -371,6 +371,10 @@ def test_saved_model_creation(self):
# This test is too long (>30sec) and makes the CI fail
pass

def test_mixed_precision(self):
# TODO JP: Make Funnel float16 compliant
pass


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
Expand Down Expand Up @@ -401,3 +405,7 @@ def test_for_multiple_choice(self):
def test_saved_model_creation(self):
# This test is too long (>30sec) and makes the CI fail
pass

def test_mixed_precision(self):
# TODO JP: Make Funnel float16 compliant
pass
4 changes: 4 additions & 0 deletions tests/test_modeling_tf_gpt2.py
Original file line number Diff line number Diff line change
Expand Up @@ -387,6 +387,10 @@ def test_gpt2_sequence_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_for_sequence_classification(*config_and_inputs)

def test_mixed_precision(self):
# TODO JP: Make GPT2 float16 compliant
pass

@slow
def test_model_from_pretrained(self):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
Expand Down
4 changes: 4 additions & 0 deletions tests/test_modeling_tf_led.py
Original file line number Diff line number Diff line change
Expand Up @@ -357,6 +357,10 @@ def test_saved_model_creation(self):
# This test is too long (>30sec) and makes the CI fail
pass

def test_mixed_precision(self):
# TODO JP: Make LED float16 compliant
pass

def test_saved_model_with_attentions_output(self):
# This test doesn't pass because of the error:
# condition [13,8,4,5], then [13,8,4,5], and else [13,8,4,6] must be broadcastable
Expand Down
15 changes: 13 additions & 2 deletions tests/test_modeling_tf_longformer.py
Original file line number Diff line number Diff line change
Expand Up @@ -340,14 +340,25 @@ def test_for_multiple_choice(self):

@slow
def test_saved_model_with_attentions_output(self):
# longformer has special attentions which are not
# compatible in graph mode
# This test doesn't pass because of the error:

[Inline review comment by a Contributor on this line: "nice!"]
# condition [13,8,4,5], then [13,8,4,5], and else [13,8,4,6] must be broadcastable
# This occurs line 323 in modeling_tf_led.py because the condition line 255
# returns a tensor of shape
# [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 2]
# if is_global_attn is True and a tensor of shape
# [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1]
# This is due to the tf.concat call line 703 that adds one dimension
# Need to check with PVP how to properly fix this
pass

def test_saved_model_creation(self):
# This test is too long (>30sec) and makes the CI fail
pass

def test_mixed_precision(self):
# TODO JP: Make Longformer float16 compliant
pass


@require_tf
@require_sentencepiece
Expand Down
4 changes: 4 additions & 0 deletions tests/test_modeling_tf_lxmert.py
Original file line number Diff line number Diff line change
Expand Up @@ -704,6 +704,10 @@ def test_saved_model_creation(self):
# This test is too long (>30sec) and makes the CI fail
pass

def test_mixed_precision(self):
# TODO JP: Make Lxmert float16 compliant
pass

@slow
def test_saved_model_with_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
Expand Down
4 changes: 4 additions & 0 deletions tests/test_modeling_tf_marian.py
Original file line number Diff line number Diff line change
Expand Up @@ -247,6 +247,10 @@ def test_saved_model_creation(self):
# This test is too long (>30sec) and makes the CI fail
pass

def test_mixed_precision(self):
# TODO JP: Make Marian float16 compliant
pass

def test_resize_token_embeddings(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

Expand Down
4 changes: 4 additions & 0 deletions tests/test_modeling_tf_mbart.py
Original file line number Diff line number Diff line change
Expand Up @@ -218,6 +218,10 @@ def test_saved_model_creation(self):
# This test is too long (>30sec) and makes the CI fail
pass

def test_mixed_precision(self):
# TODO JP: Make MBart float16 compliant
pass

def test_resize_token_embeddings(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

Expand Down
4 changes: 4 additions & 0 deletions tests/test_modeling_tf_mobilebert.py
Original file line number Diff line number Diff line change
Expand Up @@ -309,6 +309,10 @@ def test_saved_model_creation(self):
# This test is too long (>30sec) and makes the CI fail
pass

def test_mixed_precision(self):
# TODO JP: Make MobileBert float16 compliant
pass

@slow
def test_model_from_pretrained(self):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
Expand Down
4 changes: 4 additions & 0 deletions tests/test_modeling_tf_openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -245,6 +245,10 @@ def test_openai_gpt_sequence_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

def test_mixed_precision(self):
# TODO JP: Make OpenAIGPT float16 compliant
pass

@slow
def test_model_from_pretrained(self):
for model_name in TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
Expand Down
4 changes: 4 additions & 0 deletions tests/test_modeling_tf_pegasus.py
Original file line number Diff line number Diff line change
Expand Up @@ -245,6 +245,10 @@ def test_saved_model_creation(self):
# This test is too long (>30sec) and makes the CI fail
pass

def test_mixed_precision(self):
# TODO JP: Make Pegasus float16 compliant
pass

def test_resize_token_embeddings(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

Expand Down
8 changes: 8 additions & 0 deletions tests/test_modeling_tf_t5.py
Original file line number Diff line number Diff line change
Expand Up @@ -306,6 +306,10 @@ def test_saved_model_creation(self):
# This test is too long (>30sec) and makes the CI fail
pass

def test_mixed_precision(self):
# TODO JP: Make T5 float16 compliant
pass

@slow
def test_model_from_pretrained(self):
model = TFT5Model.from_pretrained("t5-small")
Expand Down Expand Up @@ -435,6 +439,10 @@ def test_model(self):
def test_train_pipeline_custom_model(self):
pass

def test_mixed_precision(self):
# TODO JP: Make T5 float16 compliant
pass


@require_tf
@require_sentencepiece
Expand Down
4 changes: 4 additions & 0 deletions tests/test_modeling_tf_transfo_xl.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,6 +204,10 @@ def test_model_common_attributes(self):
name = model.get_bias()
assert name is None

def test_mixed_precision(self):
# TODO JP: Make TransfoXL float16 compliant
pass

@slow
def test_model_from_pretrained(self):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
Expand Down
4 changes: 4 additions & 0 deletions tests/test_modeling_tf_xlm.py
Original file line number Diff line number Diff line change
Expand Up @@ -326,6 +326,10 @@ def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

def test_mixed_precision(self):
# TODO JP: Make XLM float16 compliant
pass

@slow
def test_model_from_pretrained(self):
for model_name in TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
Expand Down