Skip to content

Commit

Permalink
Skip all high mem test
Browse files Browse the repository at this point in the history
  • Loading branch information
adelavega committed Jan 18, 2021
1 parent 8be195e commit 02963d0
Show file tree
Hide file tree
Showing 4 changed files with 31 additions and 16 deletions.
3 changes: 2 additions & 1 deletion pliers/tests/extractors/test_audio_extractors.py
Original file line number Diff line number Diff line change
Expand Up @@ -375,7 +375,8 @@ def test_percussion_extractor():
@pytest.mark.parametrize('hop_size', [0.1, 1])
@pytest.mark.parametrize('top_n', [5, 10])
@pytest.mark.parametrize('target_sr', [22000, 14000])
@pytest.mark.forked
@pytest.mark.skipif(environ.get('CI', False) == 'true',
reason='high memory')
def test_audioset_extractor(hop_size, top_n, target_sr):
verify_dependencies(['tensorflow'])

Expand Down
3 changes: 2 additions & 1 deletion pliers/tests/extractors/test_misc_extractors.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,8 @@ def dummy_list(array):
assert r_lambda_df['custom_function'][0] == -4


@pytest.mark.forked
@pytest.mark.skipif(environ.get('CI', False) == 'true',
reason='high memory')
def test_metric_er_as_stim():
stim = ComplexTextStim(text = 'This is [MASK] test')
ext_bert = BertLMExtractor(return_softmax=True)
Expand Down
26 changes: 17 additions & 9 deletions pliers/tests/extractors/test_model_extractors.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,8 @@
SPEECH_URL = 'https://tfhub.dev/google/speech_embedding/1'


@pytest.mark.forked
@pytest.mark.skipif(environ.get('CI', False) == 'true',
reason='high memory')
def test_tensorflow_keras_application_extractor():
imgs = [join(IMAGE_DIR, f) for f in ['apple.jpg', 'obama.jpg']]
imgs = [ImageStim(im, onset=4.2, duration=1) for im in imgs]
Expand All @@ -51,7 +52,8 @@ def test_tensorflow_keras_application_extractor():
with pytest.raises(ValueError):
TensorFlowKerasApplicationExtractor(architecture='foo')


@pytest.mark.skipif(environ.get('CI', False) == 'true',
reason='high memory')
@pytest.mark.forked
def test_tfhub_image():
stim = ImageStim(join(IMAGE_DIR, 'apple.jpg'))
Expand All @@ -61,19 +63,21 @@ def test_tfhub_image():
for i in range(1000) ])
assert np.argmax(np.array([df['feature_' + str(i)][0] \
for i in range(1000)])) == 948

@pytest.mark.skipif(environ.get('CI', False) == 'true',
                    reason='high memory')
@pytest.mark.forked
def test_tfhub_image_reshape():
stim = ImageStim(join(IMAGE_DIR, 'apple.jpg'))
stim2 = ImageStim(join(IMAGE_DIR, 'obama.jpg'))
    ext = TFHubImageExtractor(MNET_URL,
reshape_input=(224,224,3),
features='feature_vector')
df = merge_results(ext.transform([stim, stim2]),
extractor_names=False)
assert df.shape[0] == 2
assert all([len(v) == 1280 for v in df['feature_vector']])
@pytest.mark.skipif(environ.get('CI', False) == 'true',
                    reason='high memory')


@pytest.mark.forked
Expand All @@ -82,7 +86,8 @@ def test_tfhub_text():
ext = TFHubTextExtractor(SENTENC_URL, output_key=None)
df = ext.transform(stim).to_df()
assert all([f'feature_{i}' in df.columns for i in range(512)])
true = hub.KerasLayer(SENTENC_URL)([stim.text])[0,10].numpy()
@pytest.mark.skipif(environ.get('CI', False) == 'true',
                    reason='high memory')
assert np.isclose(df['feature_10'][0], true)


Expand All @@ -96,7 +101,8 @@ def test_tfhub_text_one_feature():
assert df.shape[0] == len(cstim.elements)
true = hub.KerasLayer(GNEWS_URL)([cstim.elements[3].text])[0,2].numpy()
assert np.isclose(df['embedding'][3][2], true)
with pytest.raises(ValueError) as err:
@pytest.mark.skipif(environ.get('CI', False) == 'true',
                    reason='high memory')
TFHubTextExtractor(GNEWS_URL, output_key='key').transform(stim)
assert 'not a dictionary' in str(err.value)

Expand All @@ -116,7 +122,8 @@ def test_tfhub_text_transformer_sentence():
['pooled_output'][0,20].numpy()
assert np.isclose(df['sent_encoding'][5][20], true)
with pytest.raises(ValueError) as err:
TFHubTextExtractor(ELECTRA_URL,
@pytest.mark.skipif(environ.get('CI', False) == 'true',
                    reason='high memory')
preprocessor_url_or_path=TOKENIZER_URL,
output_key='key').transform(stim)
assert 'Check which keys' in str(err.value)
Expand All @@ -128,7 +135,8 @@ def test_tfhub_text_transformer_tokens():
tkn_ext = TFHubTextExtractor(ELECTRA_URL,
features='token_encodings',
output_key='sequence_output',
preprocessor_url_or_path=TOKENIZER_URL)
@pytest.mark.skipif(environ.get('CI', False) == 'true',
                    reason='high memory')
tkn_df = merge_results(tkn_ext.transform(cstim.elements[:3]),
extractor_names=False)
assert all([tkn_df['token_encodings'][i].shape == (128, 256) \
Expand Down
15 changes: 10 additions & 5 deletions pliers/tests/extractors/test_text_extractors.py
Original file line number Diff line number Diff line change
Expand Up @@ -273,7 +273,8 @@ def test_spacy_doc_extractor():
assert result['is_sentenced'][3]


@pytest.mark.forked
@pytest.mark.skipif(environ.get('CI', False) == 'true',
reason='high memory')
def test_bert_extractor():
stim = ComplexTextStim(text='This is not a tokenized sentence.')
stim_file = ComplexTextStim(join(TEXT_DIR, 'sentence_with_header.txt'))
Expand Down Expand Up @@ -320,7 +321,8 @@ def test_bert_extractor():
del res, res_token, res_file, ext_base, ext_base_token


@pytest.mark.forked
@pytest.mark.skipif(environ.get('CI', False) == 'true',
reason='high memory')
@pytest.mark.parametrize('model', ['bert-large-uncased',
'distilbert-base-uncased',
'roberta-base','camembert-base'])
Expand All @@ -342,7 +344,8 @@ def test_bert_other_models(model):
del res, stim


@pytest.mark.forked
@pytest.mark.skipif(environ.get('CI', False) == 'true',
reason='high memory')
def test_bert_sequence_extractor():
stim = ComplexTextStim(text='This is not a tokenized sentence.')
stim_file = ComplexTextStim(join(TEXT_DIR, 'sentence_with_header.txt'))
Expand Down Expand Up @@ -395,7 +398,8 @@ def test_bert_sequence_extractor():
del ext_pooler, res_cls, res_max, res_pooler, res_sequence, res_file, stim


@pytest.mark.forked
@pytest.mark.skipif(environ.get('CI', False) == 'true',
reason='high memory')
def test_bert_LM_extractor():
stim = ComplexTextStim(text='This is not a tokenized sentence.')
stim_masked = ComplexTextStim(text='This is MASK tokenized sentence.')
Expand Down Expand Up @@ -478,7 +482,8 @@ def test_bert_LM_extractor():
res_threshold, res_default, res_return_mask


@pytest.mark.forked
@pytest.mark.skipif(environ.get('CI', False) == 'true',
reason='high memory')
def test_bert_sentiment_extractor():
stim = ComplexTextStim(text='This is the best day of my life.')
stim_file = ComplexTextStim(join(TEXT_DIR, 'sentence_with_header.txt'))
Expand Down

0 comments on commit 02963d0

Please sign in to comment.