diff --git a/tests/conftest.py b/tests/conftest.py
index f1e96c3775..c777a8a4c9 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -15,20 +15,19 @@ def isolate_modules():
     yield isolate_modules
     sys.modules.update(CACHE_MODULES)
 
-'''
-This fixture is automatically run to clear out the events registered before and after a test function runs.
-'''
+
 @pytest.fixture(scope='function', autouse=True)
 def reset_events():
+    """
+    This fixture is automatically run to clear out the events registered before and after a test function runs.
+    """
     pyhf.events.__events.clear()
     pyhf.events.__disabled_events.clear()
     yield reset_events
     pyhf.events.__events.clear()
     pyhf.events.__disabled_events.clear()
 
-'''
-This fixture is automatically run to reset the backend before and after a test function runs.
-'''
+
 @pytest.fixture(scope='function', autouse=True)
 def reset_backend():
     """
@@ -38,20 +37,21 @@ def reset_backend():
     yield reset_backend
     pyhf.set_backend(pyhf.default_backend)
 
-@pytest.fixture(scope='function', params=[
-        (pyhf.tensor.numpy_backend(), None),
-        (pyhf.tensor.tensorflow_backend(session=tf.Session()), None),
-        (pyhf.tensor.pytorch_backend(), None),
-        (pyhf.tensor.mxnet_backend(), None),
-        (pyhf.tensor.numpy_backend(poisson_from_normal=True), pyhf.optimize.minuit_optimizer()),
-    ],
-    ids=[
-        'numpy',
-        'tensorflow',
-        'pytorch',
-        'mxnet',
-        'numpy_minuit',
-    ])
+
+@pytest.fixture(
+    scope='function',
+    params=[
+        (pyhf.tensor.numpy_backend(), None),
+        (pyhf.tensor.tensorflow_backend(session=tf.Session()), None),
+        (pyhf.tensor.pytorch_backend(), None),
+        (pyhf.tensor.mxnet_backend(), None),
+        (
+            pyhf.tensor.numpy_backend(poisson_from_normal=True),
+            pyhf.optimize.minuit_optimizer(),
+        ),
+    ],
+    ids=['numpy', 'tensorflow', 'pytorch', 'mxnet', 'numpy_minuit'],
+)
 def backend(request):
     param = request.param
     # a better way to get the id? all the backends we have so far for testing
diff --git a/tests/test_constraints.py b/tests/test_constraints.py
index add9e22ac9..edc25853c9 100644
--- a/tests/test_constraints.py
+++ b/tests/test_constraints.py
@@ -1,3 +1,4 @@
+import pytest
 import pyhf
 
 
@@ -91,10 +92,10 @@ def slow(self, auxdata, pars):
         return tensorlib.sum(summands) if summands is not None else 0
 
     def fast(self, auxdata, pars):
-        return self.constraint_logpdf(auxdata,pars)
+        return self.constraint_logpdf(auxdata, pars)
 
     auxd = pyhf.tensorlib.astensor(m.config.auxdata)
     pars = pyhf.tensorlib.astensor(m.config.suggested_init())
-    slow_result = pyhf.tensorlib.tolist(slow(m,auxd,pars))
-    fast_result = pyhf.tensorlib.tolist(fast(m,auxd,pars))
+    slow_result = pyhf.tensorlib.tolist(slow(m, auxd, pars))
+    fast_result = pyhf.tensorlib.tolist(fast(m, auxd, pars))
     assert pytest.approx(slow_result) == fast_result
diff --git a/tests/test_interpolate.py b/tests/test_interpolate.py
index 451155c60d..50e8f343e6 100644
--- a/tests/test_interpolate.py
+++ b/tests/test_interpolate.py
@@ -56,7 +56,9 @@ def filled_shapes(histogramssets, alphasets):
 def test_interpolator(backend, interpcode, random_histosets_alphasets_pair):
     histogramssets, alphasets = random_histosets_alphasets_pair
 
-    interpolator = getattr(pyhf.interpolate, '_hfinterpolator_code{}'.format(interpcode))(histogramssets.tolist())
+    interpolator = getattr(
+        pyhf.interpolate, '_hfinterpolator_code{}'.format(interpcode)
+    )(histogramssets.tolist())
     assert interpolator.alphasets_shape == (histogramssets.shape[0], 1)
     interpolator(pyhf.tensorlib.astensor(alphasets.tolist()))
     assert interpolator.alphasets_shape == alphasets.shape
diff --git a/tests/test_pdf.py b/tests/test_pdf.py
index f20ed2aea5..8ab9d2721e 100644
--- a/tests/test_pdf.py
+++ b/tests/test_pdf.py
@@ -5,6 +5,7 @@
 import numpy as np
 import json
 
+
 @pytest.mark.fail_mxnet
 def test_pdf_inputs(backend):
     source = {
@@ -18,12 +19,15 @@ def test_pdf_inputs(backend):
     pars = pdf.config.suggested_init()
     data = source['bindata']['data'] + pdf.config.auxdata
 
-
     tensorlib, _ = backend
     assert tensorlib.shape(tensorlib.astensor(data)) == (2,)
     assert tensorlib.shape(tensorlib.astensor(pars)) == (2,)
-    assert tensorlib.tolist(pdf.pdf(pars,data)) == pytest.approx([0.002417160663753748], abs=1e-4)
-    assert tensorlib.tolist(pdf.logpdf(pars,data)) == pytest.approx([-6.025179228209936], abs=1e-4)
+    assert tensorlib.tolist(pdf.pdf(pars, data)) == pytest.approx(
+        [0.002417160663753748], abs=1e-4
+    )
+    assert tensorlib.tolist(pdf.logpdf(pars, data)) == pytest.approx(
+        [-6.025179228209936], abs=1e-4
+    )
 
 
 @pytest.mark.only_numpy
@@ -52,6 +56,7 @@ def test_core_pdf_broadcasting(backend):
     assert broadcasted.shape == np.array(data).shape
     assert np.all(naive_python == broadcasted)
 
+
 def test_pdf_integration_staterror(backend):
     spec = {
         'channels': [
@@ -96,11 +101,13 @@ def test_pdf_integration_staterror(backend):
     par = pdf.config.par_slice('stat_firstchannel')
     par_set = pdf.config.param_set('stat_firstchannel')
     tensorlib, _ = backend
-    uncerts = tensorlib.astensor([[12.,12.],[5.,5.]])
-    nominal = tensorlib.astensor([[50.,70.],[30.,20.]])
+    uncerts = tensorlib.astensor([[12.0, 12.0], [5.0, 5.0]])
+    nominal = tensorlib.astensor([[50.0, 70.0], [30.0, 20.0]])
     quad = tensorlib.sqrt(tensorlib.sum(tensorlib.power(uncerts, 2), axis=0))
     totals = tensorlib.sum(nominal, axis=0)
-    assert pytest.approx(tensorlib.tolist(par_set.sigmas)) == tensorlib.tolist(tensorlib.divide(quad, totals))
+    assert pytest.approx(tensorlib.tolist(par_set.sigmas)) == tensorlib.tolist(
+        tensorlib.divide(quad, totals)
+    )
 
 
 @pytest.mark.only_numpy
diff --git a/tests/test_validation.py b/tests/test_validation.py
index 1534436f27..a851ea61dc 100644
--- a/tests/test_validation.py
+++ b/tests/test_validation.py
@@ -623,13 +623,12 @@ def validate_runOnePoint(pdf, data, mu_test, expected_result, tolerance=1e-6):
     par_bounds = pdf.config.suggested_bounds()
 
     CLs_obs, CLs_exp = pyhf.utils.runOnePoint(
-        mu_test, data, pdf, init_pars, par_bounds)[-2:]
+        mu_test, data, pdf, init_pars, par_bounds
+    )[-2:]
 
-    assert abs(CLs_obs - expected_result['obs']) / \
-        expected_result['obs'] < tolerance
+    assert abs(CLs_obs - expected_result['obs']) / expected_result['obs'] < tolerance
     for result, expected_result in zip(CLs_exp, expected_result['exp']):
-        assert abs(result - expected_result) / \
-            expected_result < tolerance
+        assert abs(result - expected_result) / expected_result < tolerance
 
 
 @pytest.mark.parametrize(
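
Reviewer note: the reworked `backend` fixture in tests/conftest.py above parametrizes every test over the available tensor backends and yields a `(tensorlib, optimizer)` pair, which is why the tests in this diff unpack it as `tensorlib, _ = backend`. A minimal sketch of a test consuming it, assuming only the behaviour visible in this diff (the test name and the values are illustrative, not part of the patch):

import pytest


def test_astensor_roundtrip(backend):
    # The parametrized fixture has already set the pyhf backend for this run;
    # it yields a (tensorlib, optimizer) tuple, as used throughout this diff.
    tensorlib, _ = backend
    values = [1.0, 2.0, 3.0]
    tensor = tensorlib.astensor(values)
    # shape/tolist are the same tensorlib calls exercised in test_pdf.py above
    assert tensorlib.shape(tensor) == (3,)
    assert tensorlib.tolist(tensor) == pytest.approx(values)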