Fix: bug in benchmark tests #280

Merged
10 changes: 5 additions & 5 deletions .travis.yml
@@ -7,11 +7,11 @@ python:
before_install:
- pip install --upgrade pip setuptools wheel
install:
- pip install --ignore-installed -U -q -e .[tensorflow,torch,mxnet,minuit,develop] # Ensure right version of NumPy installed
- pip install --ignore-installed -U -q -e .[tensorflow,torch,mxnet,minuit,develop]
- pip freeze
script:
- pyflakes pyhf
- pytest --ignore tests/benchmarks/ --ignore tests/test_notebooks.py
- pytest -r sx --ignore tests/benchmarks/ --ignore tests/test_notebooks.py
after_success: coveralls

# always test (on both 'push' and 'pr' builds in Travis)
@@ -45,17 +45,17 @@ jobs:
before_install:
- pip install --upgrade pip setuptools wheel
install:
- pip install --ignore-installed -U -q -e .[tensorflow,torch,mxnet,develop]
- pip install --ignore-installed -U -q -e .[tensorflow,torch,mxnet,minuit,develop]
- pip freeze
script: pytest --benchmark-sort=mean tests/benchmarks/
script: pytest -r sx --benchmark-sort=mean tests/benchmarks/
- stage: docs
python: '3.6'
before_install:
- sudo apt-get update
- sudo apt-get -qq install pandoc
- pip install --upgrade pip setuptools wheel
install:
- pip install --ignore-installed -U -q -e .[tensorflow,torch,mxnet,develop]
- pip install --ignore-installed -U -q -e .[tensorflow,torch,mxnet,minuit,develop]
- pip freeze
script:
- python -m doctest README.md
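For context on the two kinds of .travis.yml change: adding `minuit` to the pip extras installs iminuit in the benchmark and docs stages, matching the main test stage, and `pytest -r sx` prints a short end-of-run summary of skipped (`s`) and xfailed (`x`) tests, so silently skipped backends become visible in the CI log. A minimal sketch of the equivalent invocation through pytest's Python entry point, using the same paths as the script line above:

```python
# Programmatic equivalent of the Travis script line, via pytest's
# public entry point. '-r sx' adds a short summary of skipped (s)
# and xfailed (x) tests to the end of the run.
import sys
import pytest

if __name__ == "__main__":
    sys.exit(pytest.main([
        "-r", "sx",
        "--ignore", "tests/benchmarks/",  # benchmarks run in their own stage
        "--ignore", "tests/test_notebooks.py",
    ]))
```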
2 changes: 1 addition & 1 deletion pyhf/tensor/numpy_backend.py
@@ -1,6 +1,6 @@
import numpy as np
import logging
from scipy.special import gammaln, xlogy
from scipy.special import gammaln
from scipy.stats import norm
log = logging.getLogger(__name__)

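The `xlogy` import is dropped because the numpy backend's Poisson term no longer uses it. The practical difference shows up at `lam = 0`: `xlogy(0, 0)` is defined as `0`, while a plain `n * log(lam)` gives `0 * -inf = nan`. A sketch of the two formulations, assuming the backend builds the PMF from `gammaln` (an illustration, not a verbatim copy of `numpy_backend.py`):

```python
# Two Poisson PMF formulations built from gammaln; they differ only
# in their behavior at lam=0. Illustrative sketch, not pyhf's code.
import numpy as np
from scipy.special import gammaln, xlogy

def poisson_plain(n, lam):
    # 0 * log(0) evaluates to nan, so poisson_plain(0, 0) is nan
    return np.exp(n * np.log(lam) - lam - gammaln(n + 1.0))

def poisson_xlogy(n, lam):
    # xlogy(0, 0) == 0 by convention, so poisson_xlogy(0, 0) == 1.0
    return np.exp(xlogy(n, lam) - lam - gammaln(n + 1.0))
```

The new assertion in tests/test_tensor.py at the bottom of this diff pins down the resulting NaN behavior.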
20 changes: 1 addition & 19 deletions tests/benchmarks/test_benchmark.py
@@ -1,6 +1,5 @@
import pyhf
from pyhf.simplemodels import hepdata_like
import tensorflow as tf
import numpy as np
import pytest

@@ -64,11 +63,6 @@ def generate_source_poisson(n_bins):


def runOnePoint(pdf, data):
if isinstance(pyhf.tensorlib, pyhf.tensor.tensorflow_backend):
# Reset the TensorFlow graph and session for each run
tf.reset_default_graph()
pyhf.tensorlib.session = tf.Session()

return pyhf.utils.runOnePoint(1.0, data, pdf,
pdf.config.suggested_init(),
pdf.config.suggested_bounds())
@@ -80,19 +74,7 @@ def runOnePoint(pdf, data):


@pytest.mark.parametrize('n_bins', bins, ids=bin_ids)
@pytest.mark.parametrize('backend',
[
pyhf.tensor.numpy_backend(),
pyhf.tensor.tensorflow_backend(session=tf.Session()),
pyhf.tensor.pytorch_backend(),
# pyhf.tensor.mxnet_backend(),
],
ids=[
'numpy',
'tensorflow',
'pytorch',
# 'mxnet',
])
@pytest.mark.skip_mxnet
def test_runOnePoint(benchmark, backend, n_bins):
"""
Benchmark the performance of pyhf.runOnePoint()
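The removed `@pytest.mark.parametrize('backend', ...)` block and the per-test TensorFlow graph reset are replaced by a single `@pytest.mark.skip_mxnet` marker, which implies the backend parametrization (including any session handling) now lives in a shared fixture. A hypothetical sketch of how such a marker-aware `backend` fixture could be wired in `conftest.py`; the fixture and marker names come from the diff, but the body is an assumption, not pyhf's actual conftest:

```python
# Hypothetical conftest.py sketch: parametrize every test over the
# backends and honor a skip_mxnet marker on individual tests.
import pytest
import pyhf

@pytest.fixture(params=['numpy', 'tensorflow', 'pytorch', 'mxnet'])
def backend(request):
    if request.param == 'mxnet' and request.node.get_closest_marker('skip_mxnet'):
        pytest.skip('test marked skip_mxnet')
    # Backend construction (including any TensorFlow session setup)
    # would happen here, once per parametrized run.
    pyhf.set_backend(getattr(pyhf.tensor, request.param + '_backend')())
    yield pyhf.tensorlib
```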
37 changes: 0 additions & 37 deletions tests/test_pdf.py
@@ -57,13 +57,8 @@ def test_core_pdf_broadcasting(backend):
assert broadcasted.shape == np.array(data).shape
assert np.all(naive_python == broadcasted)

<<<<<<< dd198c062e2415192815c27d224d2897b7b9c2bd
@pytest.mark.only_numpy
def test_pdf_integration_staterror(backend):
=======

def test_pdf_integration_staterror():
>>>>>>> Wrap json.load in with clause to safely load and close
spec = {
'channels': [
{
@@ -111,16 +106,9 @@ def test_pdf_integration_staterror():
for c,e in zip(computed,expected):
assert c==e

<<<<<<< dd198c062e2415192815c27d224d2897b7b9c2bd
@pytest.mark.only_numpy
def test_pdf_integration_histosys(backend):
source = json.load(open('validation/data/2bin_histosys_example2.json'))
=======

def test_pdf_integration_histosys():
with open('validation/data/2bin_histosys_example2.json') as read_json:
source = json.load(read_json)
>>>>>>> Wrap json.load in with clause to safely load and close
spec = {
'channels': [
{
@@ -171,17 +159,7 @@ def test_pdf_integration_histosys():

@pytest.mark.skip_mxnet
def test_pdf_integration_normsys(backend):
<<<<<<< dd198c062e2415192815c27d224d2897b7b9c2bd
source = json.load(open('validation/data/2bin_histosys_example2.json'))
=======
pyhf.set_backend(backend)
if isinstance(pyhf.tensorlib, pyhf.tensor.tensorflow_backend):
tf.reset_default_graph()
pyhf.tensorlib.session = tf.Session()

with open('validation/data/2bin_histosys_example2.json') as read_json:
source = json.load(read_json)
>>>>>>> Wrap json.load in with clause to safely load and close
spec = {
'channels': [
{
@@ -217,16 +195,9 @@ def test_pdf_integration_normsys(backend):
pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [[0.0], [-1.0]]
assert np.allclose(pyhf.tensorlib.tolist(pdf.expected_data(pars, include_auxdata = False)),[100*0.9,150*0.9])

<<<<<<< dd198c062e2415192815c27d224d2897b7b9c2bd
@pytest.mark.only_numpy
def test_pdf_integration_shapesys(backend):
source = json.load(open('validation/data/2bin_histosys_example2.json'))
=======

def test_pdf_integration_shapesys():
with open('validation/data/2bin_histosys_example2.json') as read_json:
source = json.load(read_json)
>>>>>>> Wrap json.load in with clause to safely load and close
spec = {
'channels': [
{
@@ -321,12 +292,4 @@ def test_invalid_modifier_name_resuse():
with pytest.raises(pyhf.exceptions.InvalidNameReuse):
pdf = pyhf.Model(spec, poiname = 'reused_name')

<<<<<<< dd198c062e2415192815c27d224d2897b7b9c2bd
pdf = pyhf.Model(spec, poiname = 'reused_name', qualify_names = True)
<<<<<<< 01c6b81aeb46c5f30df8f698666fae1a2595f5a4

=======
>>>>>>> Remove use of poisson_from_normal=True from everywhere
=======
pdf = pyhf.Model(spec, poiname = 'reused_name', qualify_names = True)
>>>>>>> Wrap json.load in with clause to safely load and close
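This entire set of test_pdf.py hunks is pure cleanup (0 additions, 37 deletions): it removes merge-conflict markers that had been committed by accident, settling on one version of each test. The conflict being resolved contrasts two ways of reading the validation JSON; the `with` form is the safe one, since it closes the file even if `json.load` raises:

```python
# The two file-reading styles the conflict markers were fighting over.
import json

# Fragile: the open() handle is only closed whenever the garbage
# collector finalizes it.
source = json.load(open('validation/data/2bin_histosys_example2.json'))

# Safe: the with clause closes read_json deterministically, even on error.
with open('validation/data/2bin_histosys_example2.json') as read_json:
    source = json.load(read_json)
```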
2 changes: 2 additions & 0 deletions tests/test_tensor.py
@@ -52,6 +52,8 @@ def test_common_tensor_backends(backend):
with pytest.raises(Exception):
tb.simple_broadcast([1], [2, 3], [5, 6, 7])

# poisson(lambda=0) is not defined, should return NaN
assert tb.tolist(pyhf.tensorlib.poisson([0, 0, 1, 1], [0, 1, 0, 1])) == pytest.approx([np.nan, 0.3678794503211975, 0.0, 0.3678794503211975], nan_ok=True)

def test_einsum(backend):
tb = pyhf.tensorlib
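The new assertion documents the edge-case behavior of the backend Poisson: `poisson(n=0, lam=0)` is left as `nan` (0 · log 0 is undefined in the plain formulation), `poisson(n=1, lam=0)` is exactly `0.0`, and `poisson(n, lam=1)` is `exp(-1) ≈ 0.3679` for `n` in {0, 1}. `pytest.approx(..., nan_ok=True)` treats an actual `nan` as matching an expected `nan`, and its default relative tolerance of about 1e-6 absorbs the slightly off literal `0.3678794503211975` (consistent with a float32 computation of `exp(-1)`). A standalone re-check under the gammaln formulation sketched earlier (an assumption about the backend, as before):

```python
# Standalone check of the expected values, assuming the gammaln-based
# Poisson sketched in the numpy_backend discussion above.
import numpy as np
from scipy.special import gammaln
import pytest

def poisson(n, lam):
    n = np.asarray(n, dtype=float)
    lam = np.asarray(lam, dtype=float)
    return np.exp(n * np.log(lam) - lam - gammaln(n + 1.0))

values = poisson([0, 0, 1, 1], [0, 1, 0, 1])
# n=0, lam=0 -> nan; n=1, lam=0 -> exp(-inf) == 0.0
assert values.tolist() == pytest.approx(
    [np.nan, np.exp(-1), 0.0, np.exp(-1)], nan_ok=True)
```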