diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000..b9a75affdb
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,6 @@
+repos:
+- repo: https://github.com/ambv/black
+ rev: stable
+ hooks:
+ - id: black
+ language_version: python3.6
diff --git a/.travis.yml b/.travis.yml
index c12ae1db72..730f7fd5ee 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -14,6 +14,7 @@ install:
script:
- pyflakes pyhf
- pytest -r sx --ignore tests/benchmarks/ --ignore tests/test_notebooks.py
+ - if [[ $TRAVIS_PYTHON_VERSION == '3.6' ]]; then black --check --diff --verbose .; fi
after_success: coveralls
# always test (on both 'push' and 'pr' builds in Travis)
diff --git a/README.md b/README.md
index 5bcc36249f..0e86ba51f3 100644
--- a/README.md
+++ b/README.md
@@ -5,6 +5,7 @@
[![Build Status](https://travis-ci.org/diana-hep/pyhf.svg?branch=master)](https://travis-ci.org/diana-hep/pyhf)
[![Docker Automated](https://img.shields.io/docker/automated/pyhf/pyhf.svg)](https://hub.docker.com/r/pyhf/pyhf/)
[![Coverage Status](https://coveralls.io/repos/github/diana-hep/pyhf/badge.svg?branch=master)](https://coveralls.io/github/diana-hep/pyhf?branch=master) [![Code Health](https://landscape.io/github/diana-hep/pyhf/master/landscape.svg?style=flat)](https://landscape.io/github/diana-hep/pyhf/master)
+[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black)
[![Docs](https://img.shields.io/badge/docs-master-blue.svg)](https://diana-hep.github.io/pyhf)
[![Binder](https://mybinder.org/badge.svg)](https://mybinder.org/v2/gh/diana-hep/pyhf/master?filepath=docs%2Fexamples%2Fnotebooks%2Fbinderexample%2FStatisticalAnalysis.ipynb)
diff --git a/binder/trigger_binder.py b/binder/trigger_binder.py
index dae665ead2..33267664bb 100644
--- a/binder/trigger_binder.py
+++ b/binder/trigger_binder.py
@@ -8,14 +8,15 @@
from selenium.webdriver.support.expected_conditions import staleness_of
-class SeleniumSession():
+class SeleniumSession:
def __init__(self, args):
self.options = Options()
self.options.set_headless()
self.options.add_argument('--no-sandbox')
if args.chromedriver_path is not None:
self.browser = webdriver.Chrome(
- args.chromedriver_path, chrome_options=self.options)
+ args.chromedriver_path, chrome_options=self.options
+ )
else:
self.browser = webdriver.Chrome(chrome_options=self.options)
@@ -39,13 +40,23 @@ def main(args):
if __name__ == '__main__':
parser = argparse.ArgumentParser()
- parser.add_argument('-v', '--verbose', dest='is_verbose',
- action='store_true',
- help='Print out more information')
- parser.add_argument('--chromedriver-path', dest='chromedriver_path',
- type=str, default=None, help='System path to ChromeDriver')
- parser.add_argument('--url', dest='url',
- type=str, default=None, help='URL for Selinium to open')
+ parser.add_argument(
+ '-v',
+ '--verbose',
+ dest='is_verbose',
+ action='store_true',
+ help='Print out more information',
+ )
+ parser.add_argument(
+ '--chromedriver-path',
+ dest='chromedriver_path',
+ type=str,
+ default=None,
+ help='System path to ChromeDriver',
+ )
+ parser.add_argument(
+ '--url', dest='url', type=str, default=None, help='URL for Selenium to open'
+ )
args = parser.parse_args()
main(args)
diff --git a/docs/conf.py b/docs/conf.py
index 2e31cf8d84..5ad094b425 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -18,10 +18,15 @@
#
import os
import sys
+
sys.path.insert(0, os.path.abspath('..'))
+
def setup(app):
- app.add_stylesheet('https://cdnjs.cloudflare.com/ajax/libs/github-fork-ribbon-css/0.2.2/gh-fork-ribbon.min.css')
+ app.add_stylesheet(
+ 'https://cdnjs.cloudflare.com/ajax/libs/github-fork-ribbon-css/0.2.2/gh-fork-ribbon.min.css'
+ )
+
# -- General configuration ------------------------------------------------
@@ -257,29 +262,31 @@ def setup(app):
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
- # The paper size ('letterpaper' or 'a4paper').
- #
- # 'papersize': 'letterpaper',
-
- # The font size ('10pt', '11pt' or '12pt').
- #
- # 'pointsize': '10pt',
-
- # Additional stuff for the LaTeX preamble.
- #
- # 'preamble': '',
-
- # Latex figure (float) alignment
- #
- # 'figure_align': 'htbp',
+ # The paper size ('letterpaper' or 'a4paper').
+ #
+ # 'papersize': 'letterpaper',
+ # The font size ('10pt', '11pt' or '12pt').
+ #
+ # 'pointsize': '10pt',
+ # Additional stuff for the LaTeX preamble.
+ #
+ # 'preamble': '',
+ # Latex figure (float) alignment
+ #
+ # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, 'pyhf.tex', u'pyhf Documentation',
- u'Lukas Heinrich, Matthew Feickert', 'manual'),
+ (
+ master_doc,
+ 'pyhf.tex',
+ u'pyhf Documentation',
+ u'Lukas Heinrich, Matthew Feickert',
+ 'manual',
+ )
]
# The name of an image file (relative to this directory) to place at the top of
@@ -319,10 +326,7 @@ def setup(app):
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
-man_pages = [
- (master_doc, 'pyhf', u'pyhf Documentation',
- [author], 1)
-]
+man_pages = [(master_doc, 'pyhf', u'pyhf Documentation', [author], 1)]
# If true, show URL addresses after external links.
#
@@ -335,9 +339,15 @@ def setup(app):
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- (master_doc, 'pyhf', u'pyhf Documentation',
- author, 'pyhf', 'One line description of project.',
- 'Miscellaneous'),
+ (
+ master_doc,
+ 'pyhf',
+ u'pyhf Documentation',
+ author,
+ 'pyhf',
+ 'One line description of project.',
+ 'Miscellaneous',
+ )
]
# Documents to append as an appendix to all manuals.
diff --git a/docs/development.rst b/docs/development.rst
index 5b56337ebc..42cb9926b8 100644
--- a/docs/development.rst
+++ b/docs/development.rst
@@ -4,3 +4,7 @@ Developing
To develop, we suggest using `virtual environments `__ together with ``pip`` or using `pipenv `__. To get all necessary packages for development::
pip install --ignore-installed -U -e .[complete]
+
+Then set up the Git pre-commit hook for `Black <https://github.com/ambv/black>`__ by running::
+
+ pre-commit install
diff --git a/pyhf/__init__.py b/pyhf/__init__.py
index 8c47e48be6..5bd16e5e80 100644
--- a/pyhf/__init__.py
+++ b/pyhf/__init__.py
@@ -1,6 +1,7 @@
from . import tensor, optimize
from .version import __version__
from . import events
+
tensorlib = tensor.numpy_backend()
default_backend = tensorlib
optimizer = optimize.scipy_optimizer()
@@ -49,24 +50,36 @@ def set_backend(backend, custom_optimizer=None):
optimizer_changed = False
if backend.name == 'tensorflow':
- new_optimizer = custom_optimizer if custom_optimizer else optimize.tflow_optimizer(backend)
+ new_optimizer = (
+ custom_optimizer if custom_optimizer else optimize.tflow_optimizer(backend)
+ )
if tensorlib.name == 'tensorflow':
tensorlib_changed |= bool(backend.session != tensorlib.session)
elif backend.name == 'pytorch':
- new_optimizer = custom_optimizer if custom_optimizer else optimize.pytorch_optimizer(tensorlib=backend)
+ new_optimizer = (
+ custom_optimizer
+ if custom_optimizer
+ else optimize.pytorch_optimizer(tensorlib=backend)
+ )
# TODO: Add support for mxnet_optimizer()
# elif tensorlib.name == 'mxnet':
# new_optimizer = custom_optimizer if custom_optimizer else mxnet_optimizer()
else:
- new_optimizer = custom_optimizer if custom_optimizer else optimize.scipy_optimizer()
+ new_optimizer = (
+ custom_optimizer if custom_optimizer else optimize.scipy_optimizer()
+ )
optimizer_changed = bool(optimizer != new_optimizer)
# set new backend
tensorlib = backend
optimizer = new_optimizer
# trigger events
- if tensorlib_changed: events.trigger("tensorlib_changed")()
- if optimizer_changed: events.trigger("optimizer_changed")()
+ if tensorlib_changed:
+ events.trigger("tensorlib_changed")()
+ if optimizer_changed:
+ events.trigger("optimizer_changed")()
+
from .pdf import Model
+
__all__ = ['Model', 'utils', 'modifiers', '__version__']
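
Note: as a usage sketch of the backend/optimizer pairing reformatted above (assuming the pyhf API at this revision; ``numpy_backend`` and ``scipy_optimizer`` are the defaults named in the hunk)::

    import pyhf

    # Each backend is paired with a matching default optimizer unless a
    # custom one is supplied; the "tensorlib_changed"/"optimizer_changed"
    # events fire only for the pieces that actually changed.
    pyhf.set_backend(pyhf.tensor.numpy_backend())
    print(pyhf.tensorlib.name)  # 'numpy'
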
diff --git a/pyhf/commandline.py b/pyhf/commandline.py
index b47c1fb4b2..419a814413 100644
--- a/pyhf/commandline.py
+++ b/pyhf/commandline.py
@@ -1,6 +1,4 @@
import logging
-logging.basicConfig()
-log = logging.getLogger(__name__)
import click
import json
@@ -14,16 +12,29 @@
from .pdf import Model
from .version import __version__
+logging.basicConfig()
+log = logging.getLogger(__name__)
+
@click.group(context_settings=dict(help_option_names=['-h', '--help']))
@click.version_option(version=__version__)
def pyhf():
pass
+
@pyhf.command()
@click.argument('entrypoint-xml', type=click.Path(exists=True))
-@click.option('--basedir', help='The base directory for the XML files to point relative to.', type=click.Path(exists=True), default=os.getcwd())
-@click.option('--output-file', help='The location of the output json file. If not specified, prints to screen.', default=None)
+@click.option(
+ '--basedir',
+ help='The base directory for the XML files to point relative to.',
+ type=click.Path(exists=True),
+ default=os.getcwd(),
+)
+@click.option(
+ '--output-file',
+ help='The location of the output json file. If not specified, prints to screen.',
+ default=None,
+)
@click.option('--track-progress/--hide-progress', default=True)
def xml2json(entrypoint_xml, basedir, output_file, track_progress):
""" Entrypoint XML: The top-level XML file for the PDF definition. """
@@ -36,6 +47,7 @@ def xml2json(entrypoint_xml, basedir, output_file, track_progress):
log.debug("Written to {0:s}".format(output_file))
sys.exit(0)
+
@pyhf.command()
@click.argument('workspace', default='-')
@click.argument('xmlfile', default='-')
@@ -45,12 +57,19 @@ def json2xml(workspace, xmlfile, specroot, dataroot):
with click.open_file(workspace, 'r') as specstream:
d = json.load(specstream)
with click.open_file(xmlfile, 'w') as outstream:
- outstream.write(writexml.writexml(d, specroot, dataroot,'').decode('utf-8'))
+ outstream.write(
+ writexml.writexml(d, specroot, dataroot, '').decode('utf-8')
+ )
sys.exit(0)
+
@pyhf.command()
@click.argument('workspace', default='-')
-@click.option('--output-file', help='The location of the output json file. If not specified, prints to screen.', default=None)
+@click.option(
+ '--output-file',
+ help='The location of the output json file. If not specified, prints to screen.',
+ default=None,
+)
@click.option('--measurement', default=None)
@click.option('-p', '--patch', multiple=True)
@click.option('--qualify-names/--no-qualify-names', default=False)
@@ -60,10 +79,14 @@ def cls(workspace, output_file, measurement, qualify_names, patch):
measurements = d['toplvl']['measurements']
measurement_names = [m['name'] for m in measurements]
measurement_index = 0
-
+
log.debug('measurements defined:\n\t{0:s}'.format('\n\t'.join(measurement_names)))
if measurement and measurement not in measurement_names:
- log.error('no measurement by name \'{0:s}\' exists, pick from one of the valid ones above'.format(measurement))
+ log.error(
+ 'no measurement by name \'{0:s}\' exists, pick from one of the valid ones above'.format(
+ measurement
+ )
+ )
sys.exit(1)
else:
if not measurement and len(measurements) > 1:
@@ -72,16 +95,27 @@ def cls(workspace, output_file, measurement, qualify_names, patch):
elif measurement:
measurement_index = measurement_names.index(measurement)
- log.debug('calculating CLs for measurement {0:s}'.format(measurements[measurement_index]['name']))
- spec = {'channels':d['channels']}
+ log.debug(
+ 'calculating CLs for measurement {0:s}'.format(
+ measurements[measurement_index]['name']
+ )
+ )
+ spec = {'channels': d['channels']}
for p in patch:
with click.open_file(p, 'r') as read_file:
p = jsonpatch.JsonPatch(json.loads(read_file.read()))
spec = p.apply(spec)
- p = Model(spec, poiname=measurements[measurement_index]['config']['poi'], qualify_names=qualify_names)
- observed = sum((d['data'][c] for c in p.config.channels),[]) + p.config.auxdata
+ p = Model(
+ spec,
+ poiname=measurements[measurement_index]['config']['poi'],
+ qualify_names=qualify_names,
+ )
+ observed = sum((d['data'][c] for c in p.config.channels), []) + p.config.auxdata
result = runOnePoint(1.0, observed, p)
- result = {'CLs_obs': result[-2].tolist()[0], 'CLs_exp': result[-1].ravel().tolist()}
+ result = {
+ 'CLs_obs': result[-2].tolist()[0],
+ 'CLs_exp': result[-1].ravel().tolist(),
+ }
if output_file is None:
print(json.dumps(result, indent=4, sort_keys=True))
else:
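
Note: a quick in-process smoke test of the Click group reformatted above (a sketch; ``click.testing.CliRunner`` is standard Click, and the import path assumes this revision's module layout)::

    from click.testing import CliRunner
    from pyhf.commandline import pyhf

    # Drives the CLI without a shell, exercising the multi-line
    # @click.option declarations introduced by the reformat.
    result = CliRunner().invoke(pyhf, ['xml2json', '--help'])
    print(result.output)
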
diff --git a/pyhf/constraints.py b/pyhf/constraints.py
index d7708717c1..eab46f8abd 100644
--- a/pyhf/constraints.py
+++ b/pyhf/constraints.py
@@ -1,14 +1,15 @@
from . import get_backend, default_backend
from . import events
+
class gaussian_constraint_combined(object):
- def __init__(self,pdfconfig):
+ def __init__(self, pdfconfig):
# iterate over all constraints order doesn't matter....
self.par_indices = list(range(len(pdfconfig.suggested_init())))
self.data_indices = list(range(len(pdfconfig.auxdata)))
self.parset_and_slice = [
- (pdfconfig.param_set(cname),pdfconfig.par_slice(cname))
+ (pdfconfig.param_set(cname), pdfconfig.par_slice(cname))
for cname in pdfconfig.auxdata_order
]
self._precompute()
@@ -20,11 +21,12 @@ def _precompute(self):
normal_constraint_data = []
normal_constraint_mean_indices = []
normal_constraint_sigmas = []
- for parset,parslice in self.parset_and_slice:
+ for parset, parslice in self.parset_and_slice:
end_index = start_index + parset.n_parameters
thisauxdata = self.data_indices[start_index:end_index]
start_index = end_index
- if not parset.pdf_type == 'normal': continue
+ if not parset.pdf_type == 'normal':
+ continue
# many constraints are defined on a unit gaussian
# but we reserved the possibility that a paramset
@@ -34,39 +36,66 @@ def _precompute(self):
try:
normal_constraint_sigmas.append(parset.sigmas)
except AttributeError:
- normal_constraint_sigmas.append([1.]*len(thisauxdata))
+ normal_constraint_sigmas.append([1.0] * len(thisauxdata))
normal_constraint_data.append(thisauxdata)
normal_constraint_mean_indices.append(self.par_indices[parslice])
if normal_constraint_mean_indices:
- normal_mean_idc = default_backend.concatenate(list(map(lambda x: default_backend.astensor(x,dtype = 'int'),normal_constraint_mean_indices)))
- normal_sigmas = default_backend.concatenate(list(map(default_backend.astensor,normal_constraint_sigmas)))
- normal_data = default_backend.concatenate(list(map(lambda x: default_backend.astensor(x,dtype = 'int'),normal_constraint_data)))
-
- self.normal_data = tensorlib.astensor(default_backend.tolist(normal_data),dtype = 'int')
- self.normal_sigmas = tensorlib.astensor(default_backend.tolist(normal_sigmas))
- self.normal_mean_idc = tensorlib.astensor(default_backend.tolist(normal_mean_idc),dtype = 'int')
+ normal_mean_idc = default_backend.concatenate(
+ list(
+ map(
+ lambda x: default_backend.astensor(x, dtype='int'),
+ normal_constraint_mean_indices,
+ )
+ )
+ )
+ normal_sigmas = default_backend.concatenate(
+ list(map(default_backend.astensor, normal_constraint_sigmas))
+ )
+ normal_data = default_backend.concatenate(
+ list(
+ map(
+ lambda x: default_backend.astensor(x, dtype='int'),
+ normal_constraint_data,
+ )
+ )
+ )
+
+ self.normal_data = tensorlib.astensor(
+ default_backend.tolist(normal_data), dtype='int'
+ )
+ self.normal_sigmas = tensorlib.astensor(
+ default_backend.tolist(normal_sigmas)
+ )
+ self.normal_mean_idc = tensorlib.astensor(
+ default_backend.tolist(normal_mean_idc), dtype='int'
+ )
else:
- self.normal_data, self.normal_sigmas, self.normal_mean_idc = None, None, None
+ self.normal_data, self.normal_sigmas, self.normal_mean_idc = (
+ None,
+ None,
+ None,
+ )
- def logpdf(self,auxdata,pars):
+ def logpdf(self, auxdata, pars):
if self.normal_data is None:
return 0
tensorlib, _ = get_backend()
- normal_data = tensorlib.gather(auxdata,self.normal_data)
- normal_means = tensorlib.gather(pars,self.normal_mean_idc)
- normal = tensorlib.normal_logpdf(normal_data,normal_means,self.normal_sigmas)
+ normal_data = tensorlib.gather(auxdata, self.normal_data)
+ normal_means = tensorlib.gather(pars, self.normal_mean_idc)
+ normal = tensorlib.normal_logpdf(normal_data, normal_means, self.normal_sigmas)
return tensorlib.sum(normal)
+
class poisson_constraint_combined(object):
- def __init__(self,pdfconfig):
+ def __init__(self, pdfconfig):
# iterate over all constraints order doesn't matter....
self.par_indices = list(range(len(pdfconfig.suggested_init())))
self.data_indices = list(range(len(pdfconfig.auxdata)))
self.mod_and_slice = [
- (pdfconfig.param_set(cname),pdfconfig.par_slice(cname))
+ (pdfconfig.param_set(cname), pdfconfig.par_slice(cname))
for cname in pdfconfig.auxdata_order
]
self._precompute()
@@ -79,11 +108,12 @@ def _precompute(self):
poisson_constraint_data = []
poisson_constraint_rate_indices = []
poisson_constraint_rate_factors = []
- for parset,parslice in self.mod_and_slice:
+ for parset, parslice in self.mod_and_slice:
end_index = start_index + parset.n_parameters
thisauxdata = self.data_indices[start_index:end_index]
start_index = end_index
- if not parset.pdf_type == 'poisson': continue
+ if not parset.pdf_type == 'poisson':
+ continue
poisson_constraint_data.append(thisauxdata)
poisson_constraint_rate_indices.append(self.par_indices[parslice])
@@ -95,29 +125,62 @@ def _precompute(self):
try:
poisson_constraint_rate_factors.append(parset.factors)
except AttributeError:
- poisson_constraint_rate_factors.append(default_backend.shape(self.par_indices[parslice]))
-
+ poisson_constraint_rate_factors.append(
+ default_backend.shape(self.par_indices[parslice])
+ )
if poisson_constraint_rate_indices:
- poisson_rate_idc = default_backend.concatenate(list(map(lambda x: default_backend.astensor(x,dtype = 'int'), poisson_constraint_rate_indices)))
- poisson_rate_fac = default_backend.concatenate(list(map(lambda x: default_backend.astensor(x,dtype = 'float'), poisson_constraint_rate_factors)))
- poisson_data = default_backend.concatenate(list(map(lambda x: default_backend.astensor(x,dtype = 'int'), poisson_constraint_data)))
-
- self.poisson_data = tensorlib.astensor(default_backend.tolist(poisson_data),dtype = 'int')
- self.poisson_rate_idc = tensorlib.astensor(default_backend.tolist(poisson_rate_idc),dtype = 'int')
- self.poisson_rate_fac = tensorlib.astensor(default_backend.tolist(poisson_rate_fac),dtype = 'float')
+ poisson_rate_idc = default_backend.concatenate(
+ list(
+ map(
+ lambda x: default_backend.astensor(x, dtype='int'),
+ poisson_constraint_rate_indices,
+ )
+ )
+ )
+ poisson_rate_fac = default_backend.concatenate(
+ list(
+ map(
+ lambda x: default_backend.astensor(x, dtype='float'),
+ poisson_constraint_rate_factors,
+ )
+ )
+ )
+ poisson_data = default_backend.concatenate(
+ list(
+ map(
+ lambda x: default_backend.astensor(x, dtype='int'),
+ poisson_constraint_data,
+ )
+ )
+ )
+
+ self.poisson_data = tensorlib.astensor(
+ default_backend.tolist(poisson_data), dtype='int'
+ )
+ self.poisson_rate_idc = tensorlib.astensor(
+ default_backend.tolist(poisson_rate_idc), dtype='int'
+ )
+ self.poisson_rate_fac = tensorlib.astensor(
+ default_backend.tolist(poisson_rate_fac), dtype='float'
+ )
else:
- self.poisson_rate_idc, self.poisson_data, self.poisson_rate_fac = None, None, None
+ self.poisson_rate_idc, self.poisson_data, self.poisson_rate_fac = (
+ None,
+ None,
+ None,
+ )
- def logpdf(self,auxdata,pars):
+ def logpdf(self, auxdata, pars):
if self.poisson_data is None:
return 0
tensorlib, _ = get_backend()
- poisson_data = tensorlib.gather(auxdata,self.poisson_data)
- poisson_rate_base = tensorlib.gather(pars,self.poisson_rate_idc)
- poisson_factors = self.poisson_rate_fac
+ poisson_data = tensorlib.gather(auxdata, self.poisson_data)
+ poisson_rate_base = tensorlib.gather(pars, self.poisson_rate_idc)
+ poisson_factors = self.poisson_rate_fac
poisson_rate = tensorlib.product(
- tensorlib.stack([poisson_rate_base, poisson_factors]), axis=0)
- poisson = tensorlib.poisson_logpdf(poisson_data,poisson_rate)
+ tensorlib.stack([poisson_rate_base, poisson_factors]), axis=0
+ )
+ poisson = tensorlib.poisson_logpdf(poisson_data, poisson_rate)
return tensorlib.sum(poisson)
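
Note: the rewrite above keeps the same vectorization pattern: per-constraint loops are replaced by one gather over concatenated index tensors. A numpy-only sketch of the idea, with made-up toy values::

    import numpy as np
    from scipy.stats import norm

    pars = np.array([1.0, 0.9, 1.1])   # parameter vector
    auxdata = np.array([0.0, 0.0])     # auxiliary data
    mean_idc = np.array([1, 2])        # constrained-parameter indices
    data_idc = np.array([0, 1])        # matching auxdata indices
    sigmas = np.array([1.0, 1.0])

    # one vectorized logpdf over the gathered slices, then a sum
    logpdf = norm.logpdf(auxdata[data_idc], loc=pars[mean_idc], scale=sigmas).sum()
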
diff --git a/pyhf/events.py b/pyhf/events.py
index a7c2be6793..6190c08d74 100644
--- a/pyhf/events.py
+++ b/pyhf/events.py
@@ -1,7 +1,10 @@
__events = {}
__disabled_events = set([])
-def noop(*args, **kwargs): pass
+
+def noop(*args, **kwargs):
+ pass
+
class Callables(list):
def __call__(self, *args, **kwargs):
@@ -11,77 +14,86 @@ def __call__(self, *args, **kwargs):
def __repr__(self):
return "Callables(%s)" % list.__repr__(self)
-"""
-
- This is meant to be used as a decorator.
- >>> @pyhf.events.subscribe('myevent')
- ... def test(a,b):
- ... print a+b
- ...
- >>> pyhf.events.trigger_myevent(1,2)
- 3
-"""
def subscribe(event):
+ """
+ This is meant to be used as a decorator.
+ """
+ # Example:
+ #
+ # >>> @pyhf.events.subscribe('myevent')
+ # ... def test(a,b):
+ # ... print a+b
+ # ...
+ # >>> pyhf.events.trigger_myevent(1,2)
+ # 3
global __events
+
def __decorator(func):
__events.setdefault(event, Callables()).append(func)
return func
+
return __decorator
-"""
+def register(event):
+ """
This is meant to be used as a decorator to register a function for triggering events.
This creates two events: "::before" and "::after"
+ """
+ # Examples:
+ #
+ # >>> @pyhf.events.register('test_func')
+ # ... def test(a,b):
+ # ... print a+b
+ # ...
+ # >>> @pyhf.events.subscribe('test_func::before')
+ # ... def precall():
+ # ... print 'before call'
+ # ...
+ # >>> @pyhf.events.subscribe('test_func::after')
+ # ... def postcall():
+ # ... print 'after call'
+ # ...
+ # >>> test(1,2)
+ # "before call"
+ # 3
+ # "after call"
+ # >>>
- >>> @pyhf.events.register('test_func')
- ... def test(a,b):
- ... print a+b
- ...
- >>> @pyhf.events.subscribe('test_func::before')
- ... def precall():
- ... print 'before call'
- ...
- >>> @pyhf.events.subscribe('test_func::after')
- ... def postcall():
- ... print 'after call'
- ...
- >>> test(1,2)
- "before call"
- 3
- "after call"
- >>>
-
-"""
-def register(event):
def _register(func):
def register_wrapper(*args, **kwargs):
trigger("{0:s}::before".format(event))()
result = func(*args, **kwargs)
trigger("{0:s}::after".format(event))()
return result
+
return register_wrapper
+
return _register
-"""
-Trigger an event if not disabled.
-"""
+
def trigger(event):
+ """
+ Trigger an event if not disabled.
+ """
global __events, __disabled_events, noop
is_noop = bool(event in __disabled_events or event not in __events)
return noop if is_noop else __events.get(event)
-"""
-Disable an event from firing.
-"""
+
def disable(event):
+ """
+ Disable an event from firing.
+ """
global __disabled_events
__disabled_events.add(event)
-"""
-Enable an event to be fired if disabled.
-"""
+
def enable(event):
+ """
+ Enable an event to be fired if disabled.
+ """
global __disabled_events
__disabled_events.remove(event)
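
Note: a runnable Python 3 version of the docstring examples kept as comments above (a sketch against this revision's ``pyhf.events``)::

    import pyhf.events

    @pyhf.events.register('test_func')
    def test(a, b):
        print(a + b)

    @pyhf.events.subscribe('test_func::before')
    def precall():
        print('before call')

    @pyhf.events.subscribe('test_func::after')
    def postcall():
        print('after call')

    test(1, 2)  # prints 'before call', then 3, then 'after call'
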
diff --git a/pyhf/exceptions/__init__.py b/pyhf/exceptions/__init__.py
index e42dbddaea..ddb4ae2907 100644
--- a/pyhf/exceptions/__init__.py
+++ b/pyhf/exceptions/__init__.py
@@ -1,17 +1,21 @@
import sys
+
class InvalidMeasurement(Exception):
"""
InvalidMeasurement is raised when a specified measurement is invalid given the specification.
"""
+
class InvalidNameReuse(Exception):
pass
+
class InvalidSpecification(Exception):
"""
InvalidSpecification is raised when a specification does not validate against the given schema.
"""
+
def __init__(self, ValidationError):
self.exc_info = sys.exc_info()
self.parent = ValidationError
@@ -23,18 +27,23 @@ def __init__(self, ValidationError):
self.path += '.{}'.format(item)
self.path = self.path.lstrip('.')
self.instance = ValidationError.instance
- message = '{0}.\n\tPath: {1}\n\tInstance: {2}'.format(ValidationError.message, self.path, self.instance)
+ message = '{0}.\n\tPath: {1}\n\tInstance: {2}'.format(
+ ValidationError.message, self.path, self.instance
+ )
# Call the base class constructor with the parameters it needs
super(InvalidSpecification, self).__init__(message)
+
class InvalidModel(Exception):
"""
InvalidModel is raised when a given model does not have the right configuration, even though it validates correctly against the schema.
This can occur, for example, when the provided parameter of interest to fit against does not get declared in the specification provided.
"""
+
pass
+
class InvalidModifier(Exception):
"""
InvalidModifier is raised when an invalid modifier is requested. This includes:
@@ -43,10 +52,13 @@ class InvalidModifier(Exception):
- initializing a modifier that does not exist, or has not been loaded
"""
+
pass
+
class InvalidInterpCode(Exception):
"""
InvalidInterpCode is raised when an invalid/unimplemented interpolation code is requested.
"""
+
pass
diff --git a/pyhf/interpolate.py b/pyhf/interpolate.py
index 84eab19a34..324eda2825 100644
--- a/pyhf/interpolate.py
+++ b/pyhf/interpolate.py
@@ -1,10 +1,12 @@
import logging
-log = logging.getLogger(__name__)
from . import get_backend, default_backend
from . import exceptions
from . import events
+log = logging.getLogger(__name__)
+
+
def _slow_hfinterp_looper(histogramssets, alphasets, func):
all_results = []
for histoset, alphaset in zip(histogramssets, alphasets):
@@ -15,12 +17,13 @@ def _slow_hfinterp_looper(histogramssets, alphasets, func):
histo_result = set_result[-1]
for alpha in alphaset:
alpha_result = []
- for down,nom,up in zip(histo[0],histo[1],histo[2]):
+ for down, nom, up in zip(histo[0], histo[1], histo[2]):
v = func(down, nom, up, alpha)
alpha_result.append(v)
histo_result.append(alpha_result)
return all_results
+
class _hfinterpolator_code0(object):
def __init__(self, histogramssets):
# nb: this should never be a tensor, store in default backend (e.g. numpy)
@@ -32,57 +35,74 @@ def __init__(self, histogramssets):
def _precompute(self):
tensorlib, _ = get_backend()
- self.deltas_up = tensorlib.astensor(self._histogramssets[:,:,2] - self._histogramssets[:,:,1])
- self.deltas_dn = tensorlib.astensor(self._histogramssets[:,:,1] - self._histogramssets[:,:,0])
+ self.deltas_up = tensorlib.astensor(
+ self._histogramssets[:, :, 2] - self._histogramssets[:, :, 1]
+ )
+ self.deltas_dn = tensorlib.astensor(
+ self._histogramssets[:, :, 1] - self._histogramssets[:, :, 0]
+ )
self.broadcast_helper = tensorlib.ones(self.deltas_up.shape)
- self.mask_on = tensorlib.ones(self.alphasets_shape)
+ self.mask_on = tensorlib.ones(self.alphasets_shape)
self.mask_off = tensorlib.zeros(self.alphasets_shape)
def _precompute_alphasets(self, alphasets_shape):
- if alphasets_shape == self.alphasets_shape: return
+ if alphasets_shape == self.alphasets_shape:
+ return
tensorlib, _ = get_backend()
- self.mask_on = tensorlib.ones(alphasets_shape)
+ self.mask_on = tensorlib.ones(alphasets_shape)
self.mask_off = tensorlib.zeros(alphasets_shape)
self.alphasets_shape = alphasets_shape
def __call__(self, alphasets):
tensorlib, _ = get_backend()
self._precompute_alphasets(tensorlib.shape(alphasets))
- where_alphasets_positive = tensorlib.where(alphasets > 0, self.mask_on, self.mask_off)
+ where_alphasets_positive = tensorlib.where(
+ alphasets > 0, self.mask_on, self.mask_off
+ )
# s: set under consideration (i.e. the modifier)
# a: alpha variation
# h: histogram affected by modifier
# b: bin of histogram
- alphas_times_deltas_up = tensorlib.einsum('sa,shb->shab',alphasets, self.deltas_up)
- alphas_times_deltas_dn = tensorlib.einsum('sa,shb->shab',alphasets, self.deltas_dn)
+ alphas_times_deltas_up = tensorlib.einsum(
+ 'sa,shb->shab', alphasets, self.deltas_up
+ )
+ alphas_times_deltas_dn = tensorlib.einsum(
+ 'sa,shb->shab', alphasets, self.deltas_dn
+ )
- masks = tensorlib.einsum('sa,shb->shab', where_alphasets_positive, self.broadcast_helper)
+ masks = tensorlib.einsum(
+ 'sa,shb->shab', where_alphasets_positive, self.broadcast_helper
+ )
return tensorlib.where(masks, alphas_times_deltas_up, alphas_times_deltas_dn)
+
def _hfinterp_code0(histogramssets, alphasets):
tensorlib, _ = get_backend()
interpolator = _hfinterpolator_code0(tensorlib.tolist(histogramssets))
return interpolator(alphasets)
+
def _slow_hfinterp_code0(histogramssets, alphasets):
def summand(down, nom, up, alpha):
delta_up = up - nom
delta_down = nom - down
if alpha > 0:
- delta = delta_up*alpha
+ delta = delta_up * alpha
else:
- delta = delta_down*alpha
+ delta = delta_down * alpha
return delta
return _slow_hfinterp_looper(histogramssets, alphasets, summand)
+
def _hfinterp_code1(histogramssets, alphasets):
tensorlib, _ = get_backend()
interpolator = _hfinterpolator_code1(tensorlib.tolist(histogramssets))
return interpolator(alphasets)
+
class _hfinterpolator_code1(object):
def __init__(self, histogramssets):
# nb: this should never be a tensor, store in default backend (e.g. numpy)
@@ -94,20 +114,37 @@ def __init__(self, histogramssets):
def _precompute(self):
tensorlib, _ = get_backend()
- self.deltas_up = tensorlib.astensor(default_backend.divide(self._histogramssets[:,:,2], self._histogramssets[:,:,1]))
- self.deltas_dn = tensorlib.astensor(default_backend.divide(self._histogramssets[:,:,0], self._histogramssets[:,:,1]))
+ self.deltas_up = tensorlib.astensor(
+ default_backend.divide(
+ self._histogramssets[:, :, 2], self._histogramssets[:, :, 1]
+ )
+ )
+ self.deltas_dn = tensorlib.astensor(
+ default_backend.divide(
+ self._histogramssets[:, :, 0], self._histogramssets[:, :, 1]
+ )
+ )
self.broadcast_helper = tensorlib.ones(self.deltas_up.shape)
- self.bases_up = tensorlib.einsum('sa,shb->shab', tensorlib.ones(self.alphasets_shape), self.deltas_up)
- self.bases_dn = tensorlib.einsum('sa,shb->shab', tensorlib.ones(self.alphasets_shape), self.deltas_dn)
- self.mask_on = tensorlib.ones(self.alphasets_shape)
+ self.bases_up = tensorlib.einsum(
+ 'sa,shb->shab', tensorlib.ones(self.alphasets_shape), self.deltas_up
+ )
+ self.bases_dn = tensorlib.einsum(
+ 'sa,shb->shab', tensorlib.ones(self.alphasets_shape), self.deltas_dn
+ )
+ self.mask_on = tensorlib.ones(self.alphasets_shape)
self.mask_off = tensorlib.zeros(self.alphasets_shape)
def _precompute_alphasets(self, alphasets_shape):
- if alphasets_shape == self.alphasets_shape: return
+ if alphasets_shape == self.alphasets_shape:
+ return
tensorlib, _ = get_backend()
- self.bases_up = tensorlib.einsum('sa,shb->shab', tensorlib.ones(alphasets_shape), self.deltas_up)
- self.bases_dn = tensorlib.einsum('sa,shb->shab', tensorlib.ones(alphasets_shape), self.deltas_dn)
- self.mask_on = tensorlib.ones(alphasets_shape)
+ self.bases_up = tensorlib.einsum(
+ 'sa,shb->shab', tensorlib.ones(alphasets_shape), self.deltas_up
+ )
+ self.bases_dn = tensorlib.einsum(
+ 'sa,shb->shab', tensorlib.ones(alphasets_shape), self.deltas_dn
+ )
+ self.mask_on = tensorlib.ones(alphasets_shape)
self.mask_off = tensorlib.zeros(alphasets_shape)
self.alphasets_shape = alphasets_shape
return
@@ -115,29 +152,33 @@ def _precompute_alphasets(self, alphasets_shape):
def __call__(self, alphasets):
tensorlib, _ = get_backend()
self._precompute_alphasets(tensorlib.shape(alphasets))
- where_alphasets_positive = tensorlib.where(alphasets > 0, self.mask_on, self.mask_off)
+ where_alphasets_positive = tensorlib.where(
+ alphasets > 0, self.mask_on, self.mask_off
+ )
# s: set under consideration (i.e. the modifier)
# a: alpha variation
# h: histogram affected by modifier
# b: bin of histogram
- exponents = tensorlib.einsum('sa,shb->shab', tensorlib.abs(alphasets), self.broadcast_helper)
- masks = tensorlib.einsum('sa,shb->shab', where_alphasets_positive, self.broadcast_helper)
+ exponents = tensorlib.einsum(
+ 'sa,shb->shab', tensorlib.abs(alphasets), self.broadcast_helper
+ )
+ masks = tensorlib.einsum(
+ 'sa,shb->shab', where_alphasets_positive, self.broadcast_helper
+ )
bases = tensorlib.where(masks, self.bases_up, self.bases_dn)
return tensorlib.power(bases, exponents)
-
-
def _slow_hfinterp_code1(histogramssets, alphasets):
def product(down, nom, up, alpha):
- delta_up = up/nom
- delta_down = down/nom
+ delta_up = up / nom
+ delta_down = down / nom
if alpha > 0:
- delta = delta_up**alpha
+ delta = delta_up ** alpha
else:
- delta = delta_down**(-alpha)
+ delta = delta_down ** (-alpha)
return delta
return _slow_hfinterp_looper(histogramssets, alphasets, product)
@@ -145,8 +186,10 @@ def product(down, nom, up, alpha):
# interpolation codes come from https://cds.cern.ch/record/1456844/files/CERN-OPEN-2012-016.pdf
def interpolator(interpcode, do_tensorized_calc=True):
- interpcodes = {0: _hfinterp_code0 if do_tensorized_calc else _slow_hfinterp_code0,
- 1: _hfinterp_code1 if do_tensorized_calc else _slow_hfinterp_code1}
+ interpcodes = {
+ 0: _hfinterp_code0 if do_tensorized_calc else _slow_hfinterp_code0,
+ 1: _hfinterp_code1 if do_tensorized_calc else _slow_hfinterp_code1,
+ }
try:
return interpcodes[interpcode]
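
Note: per bin, the two interpolation codes above (cf. CERN-OPEN-2012-016) reduce to the scalar rules below, matching ``_slow_hfinterp_code0``/``_slow_hfinterp_code1``; the numbers are an illustrative example::

    def code0(down, nom, up, alpha):
        # piecewise-linear: an additive delta
        return alpha * (up - nom) if alpha > 0 else alpha * (nom - down)

    def code1(down, nom, up, alpha):
        # piecewise-exponential: a multiplicative factor
        return (up / nom) ** alpha if alpha > 0 else (down / nom) ** (-alpha)

    print(code0(9.0, 10.0, 12.0, 0.5))  # 1.0
    print(code1(9.0, 10.0, 12.0, 0.5))  # ~1.095
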
diff --git a/pyhf/modifiers/__init__.py b/pyhf/modifiers/__init__.py
index e4f9b76488..4f53e05c19 100644
--- a/pyhf/modifiers/__init__.py
+++ b/pyhf/modifiers/__init__.py
@@ -1,33 +1,43 @@
from six import string_types
import logging
-log = logging.getLogger(__name__)
from .. import exceptions
from .. import get_backend
+log = logging.getLogger(__name__)
+
registry = {}
-'''
-Check if given object contains the right structure for modifiers
-'''
+
def validate_modifier_structure(modifier):
+ """
+ Check if given object contains the right structure for modifiers
+ """
required_methods = ['required_parset']
for method in required_methods:
if not hasattr(modifier, method):
- raise exceptions.InvalidModifier('Expected {0:s} method on modifier {1:s}'.format(method, modifier.__name__))
+ raise exceptions.InvalidModifier(
+ 'Expected {0:s} method on modifier {1:s}'.format(
+ method, modifier.__name__
+ )
+ )
return True
-'''
-Consistent add_to_registry() function that handles actually adding thing to the registry.
-Raises an error if the name to register for the modifier already exists in the registry,
-or if the modifier does not have the right structure.
-'''
-def add_to_registry(cls, cls_name=None, constrained=False, pdf_type='normal', op_code='addition'):
+def add_to_registry(
+ cls, cls_name=None, constrained=False, pdf_type='normal', op_code='addition'
+):
+ """
+ Consistent add_to_registry() function that handles actually adding things to the registry.
+
+ Raises an error if the name to register for the modifier already exists in the registry,
+ or if the modifier does not have the right structure.
+ """
global registry
cls_name = cls_name or cls.__name__
- if cls_name in registry: raise KeyError('The modifier name "{0:s}" is already taken.'.format(cls_name))
+ if cls_name in registry:
+ raise KeyError('The modifier name "{0:s}" is already taken.'.format(cls_name))
# validate the structure
validate_modifier_structure(cls)
# set is_constrained
@@ -35,78 +45,87 @@ def add_to_registry(cls, cls_name=None, constrained=False, pdf_type='normal', op
if constrained:
tensorlib, _ = get_backend()
if not hasattr(tensorlib, pdf_type):
- raise exceptions.InvalidModifier('The specified pdf_type "{0:s}" is not valid for {1:s}({2:s}). See pyhf.tensor documentation for available pdfs.'.format(pdf_type, cls_name, cls.__name__))
+ raise exceptions.InvalidModifier(
+ 'The specified pdf_type "{0:s}" is not valid for {1:s}({2:s}). See pyhf.tensor documentation for available pdfs.'.format(
+ pdf_type, cls_name, cls.__name__
+ )
+ )
cls.pdf_type = pdf_type
else:
cls.pdf_type = None
if op_code not in ['addition', 'multiplication']:
- raise exceptions.InvalidModifier('The specified op_code "{0:s}" is not valid for {1:s}({2:s}). See pyhf.modifier documentation for available operation codes.'.format(op_code, cls_name, cls.__name__))
+ raise exceptions.InvalidModifier(
+ 'The specified op_code "{0:s}" is not valid for {1:s}({2:s}). See pyhf.modifier documentation for available operation codes.'.format(
+ op_code, cls_name, cls.__name__
+ )
+ )
cls.op_code = op_code
registry[cls_name] = cls
-'''
-Decorator for registering modifiers. To flag the modifier as a constrained modifier, add `constrained=True`.
-
-
-Args:
- name: the name of the modifier to use. Use the class name by default. (default: None)
- constrained: whether the modifier is constrained or not. (default: False)
- pdf_type: the name of the pdf to use from tensorlib if constrained. (default: normal)
- op_code: the name of the operation the modifier performs on the data (e.g. addition, multiplication)
-
-Returns:
- modifier
-
-Raises:
- ValueError: too many keyword arguments, or too many arguments, or wrong arguments
- TypeError: provided name is not a string
- pyhf.exceptions.InvalidModifier: object does not have necessary modifier structure
-
-Examples:
-
- >>> @modifiers.modifier
- >>> ... class myCustomModifier(object):
- >>> ... @classmethod
- >>> ... def required_parset(cls, npars): pass
-
- >>> @modifiers.modifier(name='myCustomNamer')
- >>> ... class myCustomModifier(object):
- >>> ... @classmethod
- >>> ... def required_parset(cls, npars): pass
-
- >>> @modifiers.modifier(constrained=False)
- >>> ... class myUnconstrainedModifier(object):
- >>> ... @classmethod
- >>> ... def required_parset(cls, npars): pass
- >>> ...
- >>> myUnconstrainedModifier.pdf_type
- None
-
- >>> @modifiers.modifier(constrained=True, pdf_type='poisson')
- >>> ... class myConstrainedCustomPoissonModifier(object):
- >>> ... @classmethod
- >>> ... def required_parset(cls, npars): pass
- >>> ...
- >>> myConstrainedCustomGaussianModifier.pdf_type
- 'poisson'
-
- >>> @modifiers.modifier(constrained=True)
- >>> ... class myCustomModifier(object):
- >>> ... @classmethod
- >>> ... def required_parset(cls, npars): pass
-
- >>> @modifiers.modifier(op_code='multiplication')
- >>> ... class myMultiplierModifier(object):
- >>> ... @classmethod
- >>> ... def required_parset(cls, npars): pass
- >>> ...
- >>> myMultiplierModifier.op_code
- 'multiplication'
-
-'''
+
def modifier(*args, **kwargs):
+ """
+ Decorator for registering modifiers. To flag the modifier as a constrained modifier, add `constrained=True`.
+
+
+ Args:
+ name: the name of the modifier to use. Use the class name by default. (default: None)
+ constrained: whether the modifier is constrained or not. (default: False)
+ pdf_type: the name of the pdf to use from tensorlib if constrained. (default: normal)
+ op_code: the name of the operation the modifier performs on the data (e.g. addition, multiplication)
+
+ Returns:
+ modifier
+
+ Raises:
+ ValueError: too many keyword arguments, or too many arguments, or wrong arguments
+ TypeError: provided name is not a string
+ pyhf.exceptions.InvalidModifier: object does not have necessary modifier structure
+ """
+ #
+ # Examples:
+ #
+ # >>> @modifiers.modifier
+ # >>> ... class myCustomModifier(object):
+ # >>> ... @classmethod
+ # >>> ... def required_parset(cls, npars): pass
+ #
+ # >>> @modifiers.modifier(name='myCustomNamer')
+ # >>> ... class myCustomModifier(object):
+ # >>> ... @classmethod
+ # >>> ... def required_parset(cls, npars): pass
+ #
+ # >>> @modifiers.modifier(constrained=False)
+ # >>> ... class myUnconstrainedModifier(object):
+ # >>> ... @classmethod
+ # >>> ... def required_parset(cls, npars): pass
+ # >>> ...
+ # >>> myUnconstrainedModifier.pdf_type
+ # None
+ #
+ # >>> @modifiers.modifier(constrained=True, pdf_type='poisson')
+ # >>> ... class myConstrainedCustomPoissonModifier(object):
+ # >>> ... @classmethod
+ # >>> ... def required_parset(cls, npars): pass
+ # >>> ...
+ # >>> myConstrainedCustomPoissonModifier.pdf_type
+ # 'poisson'
+ #
+ # >>> @modifiers.modifier(constrained=True)
+ # >>> ... class myCustomModifier(object):
+ # >>> ... @classmethod
+ # >>> ... def required_parset(cls, npars): pass
+ #
+ # >>> @modifiers.modifier(op_code='multiplication')
+ # >>> ... class myMultiplierModifier(object):
+ # >>> ... @classmethod
+ # >>> ... def required_parset(cls, npars): pass
+ # >>> ...
+ # >>> myMultiplierModifier.op_code
+ # 'multiplication'
+
name = kwargs.pop('name', None)
constrained = bool(kwargs.pop('constrained', False))
pdf_type = str(kwargs.pop('pdf_type', 'normal'))
@@ -116,12 +135,21 @@ def modifier(*args, **kwargs):
raise ValueError('Unparsed keyword arguments {}'.format(kwargs.keys()))
# check to make sure the given name is a string, if passed in one
if not isinstance(name, string_types) and name is not None:
- raise TypeError('@modifier must be given a string. You gave it {}'.format(type(name)))
+ raise TypeError(
+ '@modifier must be given a string. You gave it {}'.format(type(name))
+ )
def _modifier(name, constrained, pdf_type, op_code):
def wrapper(cls):
- add_to_registry(cls, cls_name=name, constrained=constrained, pdf_type=pdf_type, op_code=op_code)
+ add_to_registry(
+ cls,
+ cls_name=name,
+ constrained=constrained,
+ pdf_type=pdf_type,
+ op_code=op_code,
+ )
return cls
+
return wrapper
if len(args) == 0:
@@ -131,10 +159,21 @@ def wrapper(cls):
# called like @modifier
if not callable(args[0]):
raise ValueError('You must decorate a callable python object')
- add_to_registry(args[0], cls_name=name, constrained=constrained, pdf_type=pdf_type, op_code=op_code)
+ add_to_registry(
+ args[0],
+ cls_name=name,
+ constrained=constrained,
+ pdf_type=pdf_type,
+ op_code=op_code,
+ )
return args[0]
else:
- raise ValueError('@modifier must be called with only keyword arguments, @modifier(name=\'foo\'), or no arguments, @modifier; ({0:d} given)'.format(len(args)))
+ raise ValueError(
+ '@modifier must be called with only keyword arguments, @modifier(name=\'foo\'), or no arguments, @modifier; ({0:d} given)'.format(
+ len(args)
+ )
+ )
+
from .histosys import histosys, histosys_combined
from .normfactor import normfactor, normfactor_combined
@@ -142,20 +181,27 @@ def wrapper(cls):
from .shapefactor import shapefactor, shapefactor_combined
from .shapesys import shapesys, shapesys_combined
from .staterror import staterror, staterror_combined
+
combined = {
'normsys': normsys_combined,
'histosys': histosys_combined,
'normfactor': normfactor_combined,
'staterror': staterror_combined,
'shapefactor': shapefactor_combined,
- 'shapesys': shapesys_combined
+ 'shapesys': shapesys_combined,
}
__all__ = [
- 'histosys','histosys_combined',
- 'normfactor','normfactor_combined',
- 'normsys','normsys_combined',
- 'shapefactor','shapefactor_combined',
- 'shapesys','shapesys_combined',
- 'staterror','staterror_combined',
- 'combined'
+ 'histosys',
+ 'histosys_combined',
+ 'normfactor',
+ 'normfactor_combined',
+ 'normsys',
+ 'normsys_combined',
+ 'shapefactor',
+ 'shapefactor_combined',
+ 'shapesys',
+ 'shapesys_combined',
+ 'staterror',
+ 'staterror_combined',
+ 'combined',
]
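
Note: a runnable version of the decorator examples preserved as comments above (a sketch; the class name is hypothetical)::

    from pyhf import modifiers

    @modifiers.modifier(name='myCustomNamer')
    class myCustomModifier(object):
        @classmethod
        def required_parset(cls, npars):
            pass

    print('myCustomNamer' in modifiers.registry)  # True
    print(myCustomModifier.op_code)               # 'addition' (the default)
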
diff --git a/pyhf/modifiers/histosys.py b/pyhf/modifiers/histosys.py
index c98fd48cc7..5cf51c316d 100644
--- a/pyhf/modifiers/histosys.py
+++ b/pyhf/modifiers/histosys.py
@@ -1,12 +1,14 @@
import logging
-log = logging.getLogger(__name__)
from . import modifier
from ..paramsets import constrained_by_normal
from .. import get_backend, events
from ..interpolate import _hfinterpolator_code0
-@modifier(name='histosys', constrained=True, op_code = 'addition')
+log = logging.getLogger(__name__)
+
+
+@modifier(name='histosys', constrained=True, op_code='addition')
class histosys(object):
@classmethod
def required_parset(cls, n_parameters):
@@ -18,14 +20,17 @@ def required_parset(cls, n_parameters):
'is_shared': True,
'op_code': cls.op_code,
'inits': (0.0,),
- 'bounds': ((-5., 5.),),
- 'auxdata': (0.,)
+ 'bounds': ((-5.0, 5.0),),
+ 'auxdata': (0.0,),
}
+
class histosys_combined(object):
- def __init__(self,histosys_mods,pdfconfig,mega_mods):
+ def __init__(self, histosys_mods, pdfconfig, mega_mods):
self._parindices = list(range(len(pdfconfig.suggested_init())))
- self._histo_indices = [self._parindices[pdfconfig.par_slice(m)] for m in histosys_mods]
+ self._histo_indices = [
+ self._parindices[pdfconfig.par_slice(m)] for m in histosys_mods
+ ]
self._histosys_histoset = [
[
[
@@ -34,15 +39,12 @@ def __init__(self,histosys_mods,pdfconfig,mega_mods):
mega_mods[s][m]['data']['hi_data'],
]
for s in pdfconfig.samples
- ] for m in histosys_mods
+ ]
+ for m in histosys_mods
]
self._histosys_mask = [
- [
- [
- mega_mods[s][m]['data']['mask'],
- ]
- for s in pdfconfig.samples
- ] for m in histosys_mods
+ [[mega_mods[s][m]['data']['mask']] for s in pdfconfig.samples]
+ for m in histosys_mods
]
if len(histosys_mods):
@@ -57,12 +59,14 @@ def _precompute(self):
self.histosys_default = tensorlib.zeros(self.histosys_mask.shape)
self.histo_indices = tensorlib.astensor(self._histo_indices, dtype='int')
- def apply(self,pars):
+ def apply(self, pars):
tensorlib, _ = get_backend()
if not tensorlib.shape(self.histo_indices)[0]:
return
- histosys_alphaset = tensorlib.gather(pars,self.histo_indices)
- results_histo = self.interpolator(histosys_alphaset)
+ histosys_alphaset = tensorlib.gather(pars, self.histo_indices)
+ results_histo = self.interpolator(histosys_alphaset)
# either rely on numerical no-op or force with line below
- results_histo = tensorlib.where(self.histosys_mask,results_histo,self.histosys_default)
+ results_histo = tensorlib.where(
+ self.histosys_mask, results_histo, self.histosys_default
+ )
return results_histo
diff --git a/pyhf/modifiers/normfactor.py b/pyhf/modifiers/normfactor.py
index ed5cb3b2aa..0f5d9bdffd 100644
--- a/pyhf/modifiers/normfactor.py
+++ b/pyhf/modifiers/normfactor.py
@@ -1,11 +1,13 @@
import logging
-log = logging.getLogger(__name__)
from . import modifier
from ..paramsets import unconstrained
from .. import get_backend, default_backend, events
-@modifier(name='normfactor', op_code = 'multiplication')
+log = logging.getLogger(__name__)
+
+
+@modifier(name='normfactor', op_code='multiplication')
class normfactor(object):
@classmethod
def required_parset(cls, n_parameters):
@@ -17,20 +19,19 @@ def required_parset(cls, n_parameters):
'is_shared': True,
'op_code': cls.op_code,
'inits': (1.0,),
- 'bounds': ((0, 10),)
+ 'bounds': ((0, 10),),
}
+
class normfactor_combined(object):
- def __init__(self,normfactor_mods,pdfconfig,mega_mods):
+ def __init__(self, normfactor_mods, pdfconfig, mega_mods):
self._parindices = list(range(len(pdfconfig.suggested_init())))
- self._normfactor_indices = [self._parindices[pdfconfig.par_slice(m)] for m in normfactor_mods]
+ self._normfactor_indices = [
+ self._parindices[pdfconfig.par_slice(m)] for m in normfactor_mods
+ ]
self._normfactor_mask = [
- [
- [
- mega_mods[s][m]['data']['mask'],
- ]
- for s in pdfconfig.samples
- ] for m in normfactor_mods
+ [[mega_mods[s][m]['data']['mask']] for s in pdfconfig.samples]
+ for m in normfactor_mods
]
self._precompute()
events.subscribe('tensorlib_changed')(self._precompute)
@@ -39,15 +40,23 @@ def _precompute(self):
tensorlib, _ = get_backend()
self.normfactor_mask = default_backend.astensor(self._normfactor_mask)
self.normfactor_default = default_backend.ones(self.normfactor_mask.shape)
- self.normfactor_indices = default_backend.astensor(self._normfactor_indices, dtype='int')
+ self.normfactor_indices = default_backend.astensor(
+ self._normfactor_indices, dtype='int'
+ )
- def apply(self,pars):
+ def apply(self, pars):
tensorlib, _ = get_backend()
normfactor_indices = tensorlib.astensor(self.normfactor_indices, dtype='int')
normfactor_mask = tensorlib.astensor(self.normfactor_mask)
if not tensorlib.shape(normfactor_indices)[0]:
return
- normfactors = tensorlib.gather(pars,normfactor_indices)
- results_normfactor = normfactor_mask * tensorlib.reshape(normfactors,tensorlib.shape(normfactors) + (1,1))
- results_normfactor = tensorlib.where(normfactor_mask,results_normfactor,tensorlib.astensor(self.normfactor_default))
+ normfactors = tensorlib.gather(pars, normfactor_indices)
+ results_normfactor = normfactor_mask * tensorlib.reshape(
+ normfactors, tensorlib.shape(normfactors) + (1, 1)
+ )
+ results_normfactor = tensorlib.where(
+ normfactor_mask,
+ results_normfactor,
+ tensorlib.astensor(self.normfactor_default),
+ )
return results_normfactor
diff --git a/pyhf/modifiers/normsys.py b/pyhf/modifiers/normsys.py
index e1670f21c1..8591a13b85 100644
--- a/pyhf/modifiers/normsys.py
+++ b/pyhf/modifiers/normsys.py
@@ -1,12 +1,14 @@
import logging
-log = logging.getLogger(__name__)
from . import modifier
from ..paramsets import constrained_by_normal
from .. import get_backend, events
from ..interpolate import _hfinterpolator_code1
-@modifier(name='normsys', constrained=True, op_code = 'multiplication')
+log = logging.getLogger(__name__)
+
+
+@modifier(name='normsys', constrained=True, op_code='multiplication')
class normsys(object):
@classmethod
def required_parset(cls, n_parameters):
@@ -18,14 +20,17 @@ def required_parset(cls, n_parameters):
'is_shared': True,
'op_code': cls.op_code,
'inits': (0.0,),
- 'bounds': ((-5., 5.),),
- 'auxdata': (0.,)
+ 'bounds': ((-5.0, 5.0),),
+ 'auxdata': (0.0,),
}
+
class normsys_combined(object):
def __init__(self, normsys_mods, pdfconfig, mega_mods):
self._parindices = list(range(len(pdfconfig.suggested_init())))
- self._normsys_indices = [self._parindices[pdfconfig.par_slice(m)] for m in normsys_mods]
+ self._normsys_indices = [
+ self._parindices[pdfconfig.par_slice(m)] for m in normsys_mods
+ ]
self._normsys_histoset = [
[
[
@@ -34,15 +39,12 @@ def __init__(self, normsys_mods, pdfconfig, mega_mods):
mega_mods[s][m]['data']['hi'],
]
for s in pdfconfig.samples
- ] for m in normsys_mods
+ ]
+ for m in normsys_mods
]
self._normsys_mask = [
- [
- [
- mega_mods[s][m]['data']['mask'],
- ]
- for s in pdfconfig.samples
- ] for m in normsys_mods
+ [[mega_mods[s][m]['data']['mask']] for s in pdfconfig.samples]
+ for m in normsys_mods
]
if len(normsys_mods):
@@ -57,13 +59,15 @@ def _precompute(self):
self.normsys_default = tensorlib.ones(self.normsys_mask.shape)
self.normsys_indices = tensorlib.astensor(self._normsys_indices, dtype='int')
- def apply(self,pars):
+ def apply(self, pars):
tensorlib, _ = get_backend()
if not tensorlib.shape(self.normsys_indices)[0]:
return
- normsys_alphaset = tensorlib.gather(pars,self.normsys_indices)
- results_norm = self.interpolator(normsys_alphaset)
+ normsys_alphaset = tensorlib.gather(pars, self.normsys_indices)
+ results_norm = self.interpolator(normsys_alphaset)
- #either rely on numerical no-op or force with line below
- results_norm = tensorlib.where(self.normsys_mask,results_norm,self.normsys_default)
+ # either rely on numerical no-op or force with line below
+ results_norm = tensorlib.where(
+ self.normsys_mask, results_norm, self.normsys_default
+ )
return results_norm
diff --git a/pyhf/modifiers/shapefactor.py b/pyhf/modifiers/shapefactor.py
index ffa6fc69fd..4e04cd7d05 100644
--- a/pyhf/modifiers/shapefactor.py
+++ b/pyhf/modifiers/shapefactor.py
@@ -1,11 +1,13 @@
import logging
-log = logging.getLogger(__name__)
from . import modifier
from ..paramsets import unconstrained
from .. import get_backend, default_backend, events
-@modifier(name='shapefactor', op_code = 'multiplication')
+log = logging.getLogger(__name__)
+
+
+@modifier(name='shapefactor', op_code='multiplication')
class shapefactor(object):
@classmethod
def required_parset(cls, n_parameters):
@@ -17,11 +19,12 @@ def required_parset(cls, n_parameters):
'is_shared': True,
'op_code': cls.op_code,
'inits': (1.0,) * n_parameters,
- 'bounds': ((0., 10.),) * n_parameters
+ 'bounds': ((0.0, 10.0),) * n_parameters,
}
+
class shapefactor_combined(object):
- def __init__(self,shapefactor_mods,pdfconfig,mega_mods):
+ def __init__(self, shapefactor_mods, pdfconfig, mega_mods):
"""
Imagine a situation where we have 2 channels (SR, CR), 3 samples (sig1,
bkg1, bkg2), and 2 shapefactor modifiers (coupled_shapefactor,
@@ -60,49 +63,58 @@ def __init__(self,shapefactor_mods,pdfconfig,mega_mods):
and at that point can be used to compute the effect of shapefactor.
"""
self._parindices = list(range(len(pdfconfig.suggested_init())))
- self._shapefactor_indices = [self._parindices[pdfconfig.par_slice(m)] for m in shapefactor_mods]
+ self._shapefactor_indices = [
+ self._parindices[pdfconfig.par_slice(m)] for m in shapefactor_mods
+ ]
self._shapefactor_mask = [
- [
- [
- mega_mods[s][m]['data']['mask'],
- ]
- for s in pdfconfig.samples
- ] for m in shapefactor_mods
+ [[mega_mods[s][m]['data']['mask']] for s in pdfconfig.samples]
+ for m in shapefactor_mods
+ ]
+ global_concatenated_bin_indices = [
+ j for c in pdfconfig.channels for j in range(pdfconfig.channel_nbins[c])
]
- global_concatenated_bin_indices = [j for c in pdfconfig.channels for j in range(pdfconfig.channel_nbins[c])]
# compute the max so that we can pad with 0s for the right shape
# for gather. The 0s will get masked by self._shapefactor_mask anyway
# For example: [[1,2,3],[4,5],[6,7,8]] -> [[1,2,3],[4,5,0],[6,7,8]]
- max_nbins = max(global_concatenated_bin_indices)+1
+ max_nbins = max(global_concatenated_bin_indices) + 1
self._shapefactor_indices = [
default_backend.tolist(
default_backend.gather(
- default_backend.astensor(indices + [0]*(max_nbins - len(indices)), dtype='int'),
- global_concatenated_bin_indices
+ default_backend.astensor(
+ indices + [0] * (max_nbins - len(indices)), dtype='int'
+ ),
+ global_concatenated_bin_indices,
)
- ) for indices in self._shapefactor_indices
+ )
+ for indices in self._shapefactor_indices
]
self._precompute()
events.subscribe('tensorlib_changed')(self._precompute)
def _precompute(self):
- if not self._shapefactor_indices: return
+ if not self._shapefactor_indices:
+ return
tensorlib, _ = get_backend()
self.shapefactor_mask = tensorlib.astensor(self._shapefactor_mask)
- self.shapefactor_default = tensorlib.ones(tensorlib.shape(self.shapefactor_mask))
- self.shapefactor_indices = tensorlib.astensor(self._shapefactor_indices, dtype='int')
+ self.shapefactor_default = tensorlib.ones(
+ tensorlib.shape(self.shapefactor_mask)
+ )
+ self.shapefactor_indices = tensorlib.astensor(
+ self._shapefactor_indices, dtype='int'
+ )
self.sample_ones = tensorlib.ones(tensorlib.shape(self.shapefactor_mask)[1])
self.alpha_ones = tensorlib.ones([1])
- def apply(self,pars):
- if not self._shapefactor_indices: return
+ def apply(self, pars):
+ if not self._shapefactor_indices:
+ return
tensorlib, _ = get_backend()
shapefactors = tensorlib.gather(pars, self.shapefactor_indices)
- results_shapefactor = tensorlib.einsum('s,a,mb->msab',
- self.sample_ones,
- self.alpha_ones,
- shapefactors
+ results_shapefactor = tensorlib.einsum(
+ 's,a,mb->msab', self.sample_ones, self.alpha_ones, shapefactors
+ )
+ results_shapefactor = tensorlib.where(
+ self.shapefactor_mask, results_shapefactor, self.shapefactor_default
)
- results_shapefactor = tensorlib.where(self.shapefactor_mask, results_shapefactor, self.shapefactor_default)
return results_shapefactor
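
Note: the ragged-to-rectangular padding described in the comments above ("[[1,2,3],[4,5],[6,7,8]] -> [[1,2,3],[4,5,0],[6,7,8]]") in plain Python; in the real code the target width comes from the global bin count and the padded zeros are masked off by ``self._shapefactor_mask``::

    indices = [[1, 2, 3], [4, 5], [6, 7, 8]]
    width = max(len(row) for row in indices)
    padded = [row + [0] * (width - len(row)) for row in indices]
    print(padded)  # [[1, 2, 3], [4, 5, 0], [6, 7, 8]]
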
diff --git a/pyhf/modifiers/shapesys.py b/pyhf/modifiers/shapesys.py
index 67f42e55af..c5e3986084 100644
--- a/pyhf/modifiers/shapesys.py
+++ b/pyhf/modifiers/shapesys.py
@@ -1,11 +1,15 @@
import logging
-log = logging.getLogger(__name__)
from . import modifier
from ..paramsets import constrained_by_poisson
from .. import get_backend, default_backend, events
-@modifier(name='shapesys', constrained=True, pdf_type='poisson', op_code = 'multiplication')
+log = logging.getLogger(__name__)
+
+
+@modifier(
+ name='shapesys', constrained=True, pdf_type='poisson', op_code='multiplication'
+)
class shapesys(object):
@classmethod
def required_parset(cls, n_parameters):
@@ -17,42 +21,46 @@ def required_parset(cls, n_parameters):
'is_shared': False,
'op_code': cls.op_code,
'inits': (1.0,) * n_parameters,
- 'bounds': ((1e-10, 10.),) * n_parameters,
+ 'bounds': ((1e-10, 10.0),) * n_parameters,
# nb: auxdata/factors set by finalize. Set to non-numeric to crash
# if we fail to set auxdata/factors correctly
'auxdata': (None,) * n_parameters,
- 'factors': (None,) * n_parameters
+ 'factors': (None,) * n_parameters,
}
+
class shapesys_combined(object):
- def __init__(self,shapesys_mods,pdfconfig,mega_mods):
+ def __init__(self, shapesys_mods, pdfconfig, mega_mods):
self._shapesys_mods = shapesys_mods
self._parindices = list(range(len(pdfconfig.suggested_init())))
- self._shapesys_indices = [self._parindices[pdfconfig.par_slice(m)] for m in shapesys_mods]
+ self._shapesys_indices = [
+ self._parindices[pdfconfig.par_slice(m)] for m in shapesys_mods
+ ]
self._shapesys_mask = [
- [
- [
- mega_mods[s][m]['data']['mask'],
- ]
- for s in pdfconfig.samples
- ] for m in shapesys_mods
+ [[mega_mods[s][m]['data']['mask']] for s in pdfconfig.samples]
+ for m in shapesys_mods
]
- self.__shapesys_uncrt = default_backend.astensor([
+ self.__shapesys_uncrt = default_backend.astensor(
[
[
- mega_mods[s][m]['data']['uncrt'],
- mega_mods[s][m]['data']['nom_data'],
+ [
+ mega_mods[s][m]['data']['uncrt'],
+ mega_mods[s][m]['data']['nom_data'],
+ ]
+ for s in pdfconfig.samples
]
- for s in pdfconfig.samples
- ] for m in shapesys_mods
- ])
+ for m in shapesys_mods
+ ]
+ )
if self._shapesys_indices:
access_rows = []
shapesys_mask = default_backend.astensor(self._shapesys_mask)
- for mask,inds in zip(shapesys_mask, self._shapesys_indices):
- summed_mask = default_backend.sum(mask[:,0,:],axis=0)
- assert default_backend.shape(summed_mask[summed_mask > 0]) == default_backend.shape(default_backend.astensor(inds))
+ for mask, inds in zip(shapesys_mask, self._shapesys_indices):
+ summed_mask = default_backend.sum(mask[:, 0, :], axis=0)
+ assert default_backend.shape(
+ summed_mask[summed_mask > 0]
+ ) == default_backend.shape(default_backend.astensor(inds))
# make masks of > 0 and == 0
positive_mask = summed_mask > 0
zero_mask = summed_mask == 0
@@ -60,7 +68,9 @@ def __init__(self,shapesys_mods,pdfconfig,mega_mods):
summed_mask[positive_mask] = inds
summed_mask[zero_mask] = len(self._parindices) - 1
access_rows.append(summed_mask.tolist())
- self._factor_access_indices = default_backend.tolist(default_backend.stack(access_rows))
+ self._factor_access_indices = default_backend.tolist(
+ default_backend.stack(access_rows)
+ )
self.finalize(pdfconfig)
else:
self._factor_access_indices = None
@@ -74,53 +84,60 @@ def _precompute(self):
self.shapesys_default = tensorlib.ones(tensorlib.shape(self.shapesys_mask))
if self._shapesys_indices:
- self.factor_access_indices = tensorlib.astensor(self._factor_access_indices, dtype='int')
+ self.factor_access_indices = tensorlib.astensor(
+ self._factor_access_indices, dtype='int'
+ )
self.default_value = tensorlib.astensor([1.0])
- self.sample_ones = tensorlib.ones(tensorlib.shape(self.shapesys_mask)[1])
- self.alpha_ones = tensorlib.astensor([1])
+ self.sample_ones = tensorlib.ones(tensorlib.shape(self.shapesys_mask)[1])
+ self.alpha_ones = tensorlib.astensor([1])
else:
self.factor_access_indices = None
- def finalize(self,pdfconfig):
- for uncert_this_mod,mod in zip(self.__shapesys_uncrt,self._shapesys_mods):
- unc_nom = default_backend.astensor([x for x in uncert_this_mod[:,:,:] if any(x[0][x[0]>0])])
- unc = unc_nom[0,0]
- nom = unc_nom[0,1]
- unc_sq = default_backend.power(unc,2)
- nom_sq = default_backend.power(nom,2)
-
- #the below tries to filter cases in which
- #this modifier is not used by checking non
- #zeroness.. shoudl probably use mask
- numerator = default_backend.where(
- unc_sq > 0,
- nom_sq,
- default_backend.zeros(unc_sq.shape)
+ def finalize(self, pdfconfig):
+ for uncert_this_mod, mod in zip(self.__shapesys_uncrt, self._shapesys_mods):
+ unc_nom = default_backend.astensor(
+ [x for x in uncert_this_mod[:, :, :] if any(x[0][x[0] > 0])]
+ )
+ unc = unc_nom[0, 0]
+ nom = unc_nom[0, 1]
+ unc_sq = default_backend.power(unc, 2)
+ nom_sq = default_backend.power(nom, 2)
+
+ # the below tries to filter cases in which
+            # this modifier is not used by checking
+            # non-zeroness; should probably use a mask
+ numerator = default_backend.where(
+ unc_sq > 0, nom_sq, default_backend.zeros(unc_sq.shape)
)
denominator = default_backend.where(
- unc_sq > 0,
- unc_sq,
- default_backend.ones(unc_sq.shape)
+ unc_sq > 0, unc_sq, default_backend.ones(unc_sq.shape)
)
- factors = numerator/denominator
- factors = factors[factors>0]
+ factors = numerator / denominator
+ factors = factors[factors > 0]
assert len(factors) == pdfconfig.param_set(mod).n_parameters
pdfconfig.param_set(mod).factors = default_backend.tolist(factors)
pdfconfig.param_set(mod).auxdata = default_backend.tolist(factors)
- def apply(self,pars):
+ def apply(self, pars):
tensorlib, _ = get_backend()
if self.factor_access_indices is None:
return
tensorlib, _ = get_backend()
- factor_row = tensorlib.gather(tensorlib.concatenate([tensorlib.astensor(pars), self.default_value]), self.factor_access_indices)
+ factor_row = tensorlib.gather(
+ tensorlib.concatenate([tensorlib.astensor(pars), self.default_value]),
+ self.factor_access_indices,
+ )
- results_shapesys = tensorlib.einsum('s,a,mb->msab',
- tensorlib.astensor(self.sample_ones),
- tensorlib.astensor(self.alpha_ones),
- factor_row)
+ results_shapesys = tensorlib.einsum(
+ 's,a,mb->msab',
+ tensorlib.astensor(self.sample_ones),
+ tensorlib.astensor(self.alpha_ones),
+ factor_row,
+ )
- results_shapesys = tensorlib.where(self.shapesys_mask,results_shapesys,self.shapesys_default)
+ results_shapesys = tensorlib.where(
+ self.shapesys_mask, results_shapesys, self.shapesys_default
+ )
return results_shapesys
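
`finalize` above computes, per bin, the effective count `nom**2 / unc**2` that sets the strength of the Poisson constraint, zeroing bins the modifier does not touch. The arithmetic in isolation, with hypothetical numbers and NumPy standing in for `default_backend`:

```python
import numpy as np

nom = np.array([100.0, 50.0, 0.0])   # nominal yields per bin
unc = np.array([10.0, 5.0, 0.0])     # absolute uncertainties per bin

unc_sq, nom_sq = unc ** 2, nom ** 2
numerator = np.where(unc_sq > 0, nom_sq, np.zeros_like(unc_sq))
denominator = np.where(unc_sq > 0, unc_sq, np.ones_like(unc_sq))

factors = numerator / denominator
factors = factors[factors > 0]
print(factors)  # [100. 100.] -> auxdata/factors for the Poisson constraint
```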
diff --git a/pyhf/modifiers/staterror.py b/pyhf/modifiers/staterror.py
index 7e9c9751c0..26ec502d42 100644
--- a/pyhf/modifiers/staterror.py
+++ b/pyhf/modifiers/staterror.py
@@ -1,11 +1,13 @@
import logging
-log = logging.getLogger(__name__)
from . import modifier
from ..paramsets import constrained_by_normal
from .. import get_backend, default_backend, events
-@modifier(name='staterror', constrained=True, op_code = 'multiplication')
+log = logging.getLogger(__name__)
+
+
+@modifier(name='staterror', constrained=True, op_code='multiplication')
class staterror(object):
@classmethod
def required_parset(cls, n_parameters):
@@ -16,40 +18,44 @@ def required_parset(cls, n_parameters):
'is_constrained': cls.is_constrained,
'is_shared': True,
'op_code': cls.op_code,
- 'inits': (1.,) * n_parameters,
- 'bounds': ((1e-10, 10.),) * n_parameters,
- 'auxdata': (1.,) * n_parameters
+ 'inits': (1.0,) * n_parameters,
+ 'bounds': ((1e-10, 10.0),) * n_parameters,
+ 'auxdata': (1.0,) * n_parameters,
}
+
class staterror_combined(object):
- def __init__(self,staterr_mods,pdfconfig,mega_mods):
+ def __init__(self, staterr_mods, pdfconfig, mega_mods):
self._parindices = list(range(len(pdfconfig.suggested_init())))
- self._staterror_indices = [self._parindices[pdfconfig.par_slice(m)] for m in staterr_mods]
+ self._staterror_indices = [
+ self._parindices[pdfconfig.par_slice(m)] for m in staterr_mods
+ ]
self._staterr_mods = staterr_mods
self._staterror_mask = [
- [
- [
- mega_mods[s][m]['data']['mask'],
- ]
- for s in pdfconfig.samples
- ] for m in staterr_mods
+ [[mega_mods[s][m]['data']['mask']] for s in pdfconfig.samples]
+ for m in staterr_mods
]
- self.__staterror_uncrt = default_backend.astensor([
+ self.__staterror_uncrt = default_backend.astensor(
[
[
- mega_mods[s][m]['data']['uncrt'],
- mega_mods[s][m]['data']['nom_data'],
+ [
+ mega_mods[s][m]['data']['uncrt'],
+ mega_mods[s][m]['data']['nom_data'],
+ ]
+ for s in pdfconfig.samples
]
- for s in pdfconfig.samples
- ] for m in staterr_mods
- ])
+ for m in staterr_mods
+ ]
+ )
if self._staterror_indices:
access_rows = []
staterror_mask = default_backend.astensor(self._staterror_mask)
- for mask,inds in zip(staterror_mask, self._staterror_indices):
- summed_mask = default_backend.sum(mask[:,0,:],axis=0)
- assert default_backend.shape(summed_mask[summed_mask > 0]) == default_backend.shape(default_backend.astensor(inds))
+ for mask, inds in zip(staterror_mask, self._staterror_indices):
+ summed_mask = default_backend.sum(mask[:, 0, :], axis=0)
+ assert default_backend.shape(
+ summed_mask[summed_mask > 0]
+ ) == default_backend.shape(default_backend.astensor(inds))
# make masks of > 0 and == 0
positive_mask = summed_mask > 0
zero_mask = summed_mask == 0
@@ -57,7 +63,9 @@ def __init__(self,staterr_mods,pdfconfig,mega_mods):
summed_mask[positive_mask] = inds
summed_mask[zero_mask] = len(self._parindices) - 1
access_rows.append(summed_mask.tolist())
- self._factor_access_indices = default_backend.tolist(default_backend.stack(access_rows))
+ self._factor_access_indices = default_backend.tolist(
+ default_backend.stack(access_rows)
+ )
self.finalize(pdfconfig)
else:
self._factor_access_indices = None
@@ -71,59 +79,61 @@ def _precompute(self):
self.staterror_default = tensorlib.ones(tensorlib.shape(self.staterror_mask))
if self._staterror_indices:
- self.factor_access_indices = tensorlib.astensor(self._factor_access_indices, dtype='int')
+ self.factor_access_indices = tensorlib.astensor(
+ self._factor_access_indices, dtype='int'
+ )
self.default_value = tensorlib.astensor([1.0])
- self.sample_ones = tensorlib.ones(tensorlib.shape(self.staterror_mask)[1])
- self.alpha_ones = tensorlib.astensor([1])
+ self.sample_ones = tensorlib.ones(tensorlib.shape(self.staterror_mask)[1])
+ self.alpha_ones = tensorlib.astensor([1])
else:
self.factor_access_indices = None
- def finalize(self,pdfconfig):
+ def finalize(self, pdfconfig):
staterror_mask = default_backend.astensor(self._staterror_mask)
- for this_mask, uncert_this_mod,mod in zip(staterror_mask, self.__staterror_uncrt, self._staterr_mods):
+ for this_mask, uncert_this_mod, mod in zip(
+ staterror_mask, self.__staterror_uncrt, self._staterr_mods
+ ):
active_nominals = default_backend.where(
- this_mask[:,0,:], uncert_this_mod[:,1,:],
- default_backend.zeros(uncert_this_mod[:,1,:].shape)
+ this_mask[:, 0, :],
+ uncert_this_mod[:, 1, :],
+ default_backend.zeros(uncert_this_mod[:, 1, :].shape),
)
- summed_nominals = default_backend.sum(active_nominals, axis = 0)
+ summed_nominals = default_backend.sum(active_nominals, axis=0)
# the below tries to filter cases in which this modifier is not
# used by checking non zeroness.. should probably use mask
- numerator = default_backend.where(
- uncert_this_mod[:,1,:] > 0,
- uncert_this_mod[:,0,:],
- default_backend.zeros(uncert_this_mod[:,1,:].shape)
+ numerator = default_backend.where(
+ uncert_this_mod[:, 1, :] > 0,
+ uncert_this_mod[:, 0, :],
+ default_backend.zeros(uncert_this_mod[:, 1, :].shape),
)
denominator = default_backend.where(
summed_nominals > 0,
summed_nominals,
- default_backend.ones(uncert_this_mod[:,1,:].shape)
+ default_backend.ones(uncert_this_mod[:, 1, :].shape),
)
- relerrs = numerator/denominator
+ relerrs = numerator / denominator
sigmas = default_backend.sqrt(
- default_backend.sum(
- default_backend.power(relerrs,2),axis=0
- )
+ default_backend.sum(default_backend.power(relerrs, 2), axis=0)
)
- assert len(sigmas[sigmas>0]) == pdfconfig.param_set(mod).n_parameters
- pdfconfig.param_set(mod).sigmas = default_backend.tolist(sigmas[sigmas>0])
+ assert len(sigmas[sigmas > 0]) == pdfconfig.param_set(mod).n_parameters
+ pdfconfig.param_set(mod).sigmas = default_backend.tolist(sigmas[sigmas > 0])
- def apply(self,pars):
+ def apply(self, pars):
tensorlib, _ = get_backend()
if self.factor_access_indices is None:
return
- select_from = tensorlib.concatenate([pars,self.default_value])
+ select_from = tensorlib.concatenate([pars, self.default_value])
factor_row = tensorlib.gather(select_from, self.factor_access_indices)
- results_staterr = tensorlib.einsum('s,a,mb->msab',
- tensorlib.astensor(self.sample_ones),
- tensorlib.astensor(self.alpha_ones),
- factor_row
+ results_staterr = tensorlib.einsum(
+ 's,a,mb->msab',
+ tensorlib.astensor(self.sample_ones),
+ tensorlib.astensor(self.alpha_ones),
+ factor_row,
)
results_staterr = tensorlib.where(
- self.staterror_mask,
- results_staterr,
- self.staterror_default
+ self.staterror_mask, results_staterr, self.staterror_default
)
return results_staterr
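
The staterror `finalize` combines per-sample relative errors in quadrature to get one Gaussian constraint width per bin. A simplified sketch of that reduction (hypothetical numbers; the masking is condensed relative to the code above):

```python
import numpy as np

# Hypothetical: 2 samples sharing one staterror over 3 bins.
uncrt = np.array([[3.0, 2.0, 0.0],      # per-sample absolute stat. uncertainties
                  [4.0, 1.0, 0.0]])
nominal = np.array([[30.0, 20.0, 0.0],
                    [40.0, 10.0, 0.0]])

summed = nominal.sum(axis=0)                      # total yield per bin
numerator = np.where(nominal > 0, uncrt, 0.0)     # drop bins a sample doesn't fill
denominator = np.where(summed > 0, summed, 1.0)   # avoid division by zero
relerrs = numerator / denominator
sigmas = np.sqrt((relerrs ** 2).sum(axis=0))      # combined in quadrature
print(sigmas[sigmas > 0])  # one Gaussian width per active bin
```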
diff --git a/pyhf/optimize/__init__.py b/pyhf/optimize/__init__.py
index f2819f8dbf..2b4588d025 100644
--- a/pyhf/optimize/__init__.py
+++ b/pyhf/optimize/__init__.py
@@ -1,20 +1,24 @@
from .opt_scipy import scipy_optimizer
+
assert scipy_optimizer
try:
from .opt_pytorch import pytorch_optimizer
+
assert pytorch_optimizer
except ImportError:
pass
try:
from .opt_tflow import tflow_optimizer
+
assert tflow_optimizer
except ImportError:
pass
try:
from .opt_minuit import minuit_optimizer
+
assert minuit_optimizer
except ImportError:
- pass
\ No newline at end of file
+ pass
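
The added blank-line-plus-`assert` pattern here (and in `pyhf/tensor/__init__.py` below) probes optional backends while keeping pyflakes quiet about seemingly unused imports. The same idiom in a self-contained form, using `torch` as the hypothetical optional dependency:

```python
try:
    import torch  # optional dependency

    assert torch  # reference it so pyflakes does not flag an unused import
    HAS_TORCH = True
except ImportError:
    HAS_TORCH = False  # torch absent; the PyTorch backend is simply unavailable
```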
diff --git a/pyhf/optimize/opt_minuit.py b/pyhf/optimize/opt_minuit.py
index 8f4cbe4f04..68f59bad36 100644
--- a/pyhf/optimize/opt_minuit.py
+++ b/pyhf/optimize/opt_minuit.py
@@ -4,18 +4,22 @@
log = logging.getLogger(__name__)
+
class minuit_optimizer(object):
- def __init__(self, verbose = False):
+ def __init__(self, verbose=False):
self.verbose = 0
- def _make_minuit(self, objective, data, pdf, init_pars, init_bounds, constrained_mu = None):
+ def _make_minuit(
+ self, objective, data, pdf, init_pars, init_bounds, constrained_mu=None
+ ):
def f(pars):
- result = objective(pars,data,pdf)
+ result = objective(pars, data, pdf)
logpdf = result[0]
return logpdf
- parnames = ['p{}'.format(i) for i in range(len(init_pars))]
- kw = {'limit_p{}'.format(i): b for i,b in enumerate(init_bounds)}
- initvals = {'p{}'.format(i): v for i,v in enumerate(init_pars)}
+
+ parnames = ['p{}'.format(i) for i in range(len(init_pars))]
+ kw = {'limit_p{}'.format(i): b for i, b in enumerate(init_bounds)}
+ initvals = {'p{}'.format(i): v for i, v in enumerate(init_pars)}
if constrained_mu is not None:
constraints = {'fix_p{}'.format(pdf.config.poi_index): True}
initvals['p{}'.format(pdf.config.poi_index)] = constrained_mu
@@ -24,17 +28,27 @@ def f(pars):
kwargs = {}
for d in [kw, constraints, initvals]:
kwargs.update(**d)
- mm = iminuit.Minuit(f, print_level = 1 if self.verbose else 0, use_array_call=True, forced_parameters = parnames, **kwargs)
+ mm = iminuit.Minuit(
+ f,
+ print_level=1 if self.verbose else 0,
+ use_array_call=True,
+ forced_parameters=parnames,
+ **kwargs
+ )
return mm
def unconstrained_bestfit(self, objective, data, pdf, init_pars, par_bounds):
# The Global Fit
- mm = self._make_minuit(objective,data,pdf,init_pars,par_bounds)
+ mm = self._make_minuit(objective, data, pdf, init_pars, par_bounds)
mm.migrad()
return np.asarray([x[1] for x in mm.values.items()])
- def constrained_bestfit(self, objective, constrained_mu, data, pdf, init_pars, par_bounds):
+ def constrained_bestfit(
+ self, objective, constrained_mu, data, pdf, init_pars, par_bounds
+ ):
# The Fit Conditions on a specific POI value
- mm = self._make_minuit(objective,data,pdf,init_pars,par_bounds, constrained_mu = constrained_mu)
+ mm = self._make_minuit(
+ objective, data, pdf, init_pars, par_bounds, constrained_mu=constrained_mu
+ )
mm.migrad()
return np.asarray([x[1] for x in mm.values.items()])
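
`_make_minuit` drives iminuit's older string-keyword interface: one `p{i}` initial value and `limit_p{i}` bound per parameter, plus `fix_p{i}` to pin the POI for a constrained fit. Roughly how those kwargs assemble, with hypothetical values (just the dict, no iminuit call):

```python
init_pars = [1.0, 1.0]
init_bounds = [(0.0, 10.0), (1e-10, 10.0)]
poi_index, constrained_mu = 0, 2.5

parnames = ['p{}'.format(i) for i in range(len(init_pars))]
kwargs = {'limit_p{}'.format(i): b for i, b in enumerate(init_bounds)}
kwargs.update({'p{}'.format(i): v for i, v in enumerate(init_pars)})
if constrained_mu is not None:
    kwargs['fix_p{}'.format(poi_index)] = True        # freeze the POI ...
    kwargs['p{}'.format(poi_index)] = constrained_mu  # ... at the requested value
print(sorted(kwargs))
# ['fix_p0', 'limit_p0', 'limit_p1', 'p0', 'p1']
```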
diff --git a/pyhf/optimize/opt_pytorch.py b/pyhf/optimize/opt_pytorch.py
index 1e9c251cea..ba956747c2 100644
--- a/pyhf/optimize/opt_pytorch.py
+++ b/pyhf/optimize/opt_pytorch.py
@@ -1,9 +1,11 @@
import torch.optim
+
+
class pytorch_optimizer(object):
def __init__(self, **kwargs):
self.tensorlib = kwargs['tensorlib']
- self.maxdelta = kwargs.get('maxdelta',1e-5)
- self.maxiter = kwargs.get('maxiter',100000)
+ self.maxdelta = kwargs.get('maxdelta', 1e-5)
+ self.maxiter = kwargs.get('maxiter', 100000)
def unconstrained_bestfit(self, objective, data, pdf, init_pars, par_bounds):
init_pars = self.tensorlib.astensor(init_pars)
@@ -21,15 +23,23 @@ def unconstrained_bestfit(self, objective, data, pdf, init_pars, par_bounds):
break
return init_pars
- def constrained_bestfit(self, objective, constrained_mu, data, pdf, init_pars, par_bounds):
- allvars = [self.tensorlib.astensor([v] if i!= pdf.config.poi_index else [constrained_mu]) for i,v in enumerate(init_pars)]
- nuis_pars = [v for i,v in enumerate(allvars) if i != pdf.config.poi_index]
- for np in nuis_pars: np.requires_grad = True
- poi_par = [v for i,v in enumerate(allvars) if i == pdf.config.poi_index][0]
+ def constrained_bestfit(
+ self, objective, constrained_mu, data, pdf, init_pars, par_bounds
+ ):
+ allvars = [
+ self.tensorlib.astensor(
+ [v] if i != pdf.config.poi_index else [constrained_mu]
+ )
+ for i, v in enumerate(init_pars)
+ ]
+ nuis_pars = [v for i, v in enumerate(allvars) if i != pdf.config.poi_index]
+ for np in nuis_pars:
+ np.requires_grad = True
+ poi_par = [v for i, v in enumerate(allvars) if i == pdf.config.poi_index][0]
def assemble(poi_par, nuis_pars):
pars = [x for x in nuis_pars]
- pars.insert(pdf.config.poi_index,poi_par)
+ pars.insert(pdf.config.poi_index, poi_par)
pars = self.tensorlib.concatenate(pars)
return pars
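
The constrained PyTorch fit optimizes only the nuisance tensors (those get `requires_grad = True`) and splices the fixed POI back in via `assemble`. The splice itself, in plain Python with placeholder values:

```python
poi_index = 1
nuis_pars = ['n0', 'n1', 'n2']  # stand-ins for the per-parameter tensors
poi_par = 'mu'

pars = [x for x in nuis_pars]
pars.insert(poi_index, poi_par)
print(pars)  # ['n0', 'mu', 'n1', 'n2'] -- full parameter vector, POI fixed
```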
diff --git a/pyhf/optimize/opt_scipy.py b/pyhf/optimize/opt_scipy.py
index 6e2516f2a1..6248600ae5 100644
--- a/pyhf/optimize/opt_scipy.py
+++ b/pyhf/optimize/opt_scipy.py
@@ -1,15 +1,18 @@
from scipy.optimize import minimize
import logging
+
log = logging.getLogger(__name__)
+
class scipy_optimizer(object):
def __init__(self):
pass
def unconstrained_bestfit(self, objective, data, pdf, init_pars, par_bounds):
# The Global Fit
- result = minimize(objective, init_pars, method='SLSQP',
- args=(data, pdf), bounds=par_bounds)
+ result = minimize(
+ objective, init_pars, method='SLSQP', args=(data, pdf), bounds=par_bounds
+ )
try:
assert result.success
except AssertionError:
@@ -17,13 +20,19 @@ def unconstrained_bestfit(self, objective, data, pdf, init_pars, par_bounds):
raise
return result.x
-
- def constrained_bestfit(self, objective, constrained_mu, data, pdf, init_pars, par_bounds):
+ def constrained_bestfit(
+ self, objective, constrained_mu, data, pdf, init_pars, par_bounds
+ ):
# The Fit Conditions on a specific POI value
- cons = {'type': 'eq', 'fun': lambda v: v[
- pdf.config.poi_index] - constrained_mu}
- result = minimize(objective, init_pars, constraints=cons,
- method='SLSQP', args=(data, pdf), bounds=par_bounds)
+ cons = {'type': 'eq', 'fun': lambda v: v[pdf.config.poi_index] - constrained_mu}
+ result = minimize(
+ objective,
+ init_pars,
+ constraints=cons,
+ method='SLSQP',
+ args=(data, pdf),
+ bounds=par_bounds,
+ )
try:
assert result.success
except AssertionError:
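
For the SciPy path, the constrained fit is an SLSQP equality constraint pinning the POI. A runnable toy with a hypothetical objective:

```python
from scipy.optimize import minimize

poi_index, mu = 0, 2.0
objective = lambda v: (v[0] - 1.0) ** 2 + (v[1] - 3.0) ** 2
cons = {'type': 'eq', 'fun': lambda v: v[poi_index] - mu}
result = minimize(objective, [1.0, 1.0], constraints=cons, method='SLSQP',
                  bounds=[(0.0, 10.0), (0.0, 10.0)])
print(result.x)  # ~[2.0, 3.0]: v[0] pinned to mu, v[1] free to minimize
```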
diff --git a/pyhf/optimize/opt_tflow.py b/pyhf/optimize/opt_tflow.py
index 36423b6d38..48ac0f6fb5 100644
--- a/pyhf/optimize/opt_tflow.py
+++ b/pyhf/optimize/opt_tflow.py
@@ -4,6 +4,7 @@
log = logging.getLogger(__name__)
+
class tflow_optimizer(object):
def __init__(self, tensorlib):
self.tb = tensorlib
@@ -12,50 +13,64 @@ def __init__(self, tensorlib):
self.eps = 1e-4
def unconstrained_bestfit(self, objective, data, pdf, init_pars, par_bounds):
- #the graph
- data = self.tb.astensor(data)
- parlist = [self.tb.astensor([p]) for p in init_pars]
-
- pars = self.tb.concatenate(parlist)
- objective = objective(pars,data,pdf)
- hessian = tf.hessians(objective, pars)[0]
- gradient = tf.gradients(objective, pars)[0]
- invhess = tf.linalg.inv(hessian)
- update = tf.transpose(tf.matmul(invhess, tf.transpose(tf.stack([gradient]))))[0]
-
- #run newton's method
+ # the graph
+ data = self.tb.astensor(data)
+ parlist = [self.tb.astensor([p]) for p in init_pars]
+
+ pars = self.tb.concatenate(parlist)
+ objective = objective(pars, data, pdf)
+ hessian = tf.hessians(objective, pars)[0]
+ gradient = tf.gradients(objective, pars)[0]
+ invhess = tf.linalg.inv(hessian)
+ update = tf.transpose(tf.matmul(invhess, tf.transpose(tf.stack([gradient]))))[0]
+
+ # run newton's method
best_fit = init_pars
for i in range(self.maxit):
up = self.tb.session.run(update, feed_dict={pars: best_fit})
- best_fit = best_fit-self.relax*up
+ best_fit = best_fit - self.relax * up
if np.abs(np.max(up)) < self.eps:
break
return best_fit.tolist()
- def constrained_bestfit(self, objective, constrained_mu, data, pdf, init_pars, par_bounds):
- #the graph
- data = self.tb.astensor(data)
+ def constrained_bestfit(
+ self, objective, constrained_mu, data, pdf, init_pars, par_bounds
+ ):
+ # the graph
+ data = self.tb.astensor(data)
- nuis_pars = [self.tb.astensor([p]) for i,p in enumerate(init_pars) if i!=pdf.config.poi_index]
- poi_par = self.tb.astensor([constrained_mu])
+ nuis_pars = [
+ self.tb.astensor([p])
+ for i, p in enumerate(init_pars)
+ if i != pdf.config.poi_index
+ ]
+ poi_par = self.tb.astensor([constrained_mu])
nuis_cat = self.tb.concatenate(nuis_pars)
- pars = self.tb.concatenate([nuis_cat[:pdf.config.poi_index],poi_par,nuis_cat[pdf.config.poi_index:]])
- objective = objective(pars,data,pdf)
- hessian = tf.hessians(objective, nuis_cat)[0]
- gradient = tf.gradients(objective, nuis_cat)[0]
- invhess = tf.linalg.inv(hessian)
- update = tf.transpose(tf.matmul(invhess, tf.transpose(tf.stack([gradient]))))[0]
-
- #run newton's method
- best_fit_nuis = [x for i,x in enumerate(init_pars) if i!= pdf.config.poi_index]
+ pars = self.tb.concatenate(
+ [
+ nuis_cat[: pdf.config.poi_index],
+ poi_par,
+ nuis_cat[pdf.config.poi_index :],
+ ]
+ )
+ objective = objective(pars, data, pdf)
+ hessian = tf.hessians(objective, nuis_cat)[0]
+ gradient = tf.gradients(objective, nuis_cat)[0]
+ invhess = tf.linalg.inv(hessian)
+ update = tf.transpose(tf.matmul(invhess, tf.transpose(tf.stack([gradient]))))[0]
+
+ # run newton's method
+ best_fit_nuis = [
+ x for i, x in enumerate(init_pars) if i != pdf.config.poi_index
+ ]
for i in range(self.maxit):
up = self.tb.session.run(update, feed_dict={nuis_cat: best_fit_nuis})
- best_fit_nuis = best_fit_nuis-self.relax*up
+ best_fit_nuis = best_fit_nuis - self.relax * up
if np.abs(np.max(up)) < self.eps:
break
best_fit = best_fit_nuis.tolist()
- best_fit.insert(pdf.config.poi_index,constrained_mu)
+ best_fit.insert(pdf.config.poi_index, constrained_mu)
return best_fit
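
Both TensorFlow fits above run the same damped Newton iteration, x <- x - relax * H^(-1) g, through `tf.Session`. The scheme written out in NumPy on a toy quadratic:

```python
import numpy as np

def newton(grad, hess, x, relax=0.1, eps=1e-4, maxit=1000):
    for _ in range(maxit):
        up = np.linalg.inv(hess(x)) @ grad(x)  # Newton direction
        x = x - relax * up                     # damped step
        if np.abs(np.max(up)) < eps:           # stop once the update is tiny
            break
    return x

# Toy objective f(x) = (x0 - 1)^2 + 2 * (x1 + 2)^2
grad = lambda x: np.array([2 * (x[0] - 1), 4 * (x[1] + 2)])
hess = lambda x: np.diag([2.0, 4.0])
print(newton(grad, hess, np.array([0.0, 0.0])))  # ~[1, -2]
```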
diff --git a/pyhf/paramsets.py b/pyhf/paramsets.py
index ddcc59c6c2..0bd81ea331 100644
--- a/pyhf/paramsets.py
+++ b/pyhf/paramsets.py
@@ -1,27 +1,31 @@
from . import get_backend
from . import exceptions
+
class paramset(object):
def __init__(self, **kwargs):
self.n_parameters = kwargs.pop('n_parameters')
self.suggested_init = kwargs.pop('inits')
self.suggested_bounds = kwargs.pop('bounds')
+
class unconstrained(paramset):
pass
+
class constrained_by_normal(paramset):
def __init__(self, **kwargs):
- super(constrained_by_normal,self).__init__(**kwargs)
+ super(constrained_by_normal, self).__init__(**kwargs)
self.pdf_type = 'normal'
self.auxdata = kwargs.pop('auxdata')
def expected_data(self, pars):
return pars
+
class constrained_by_poisson(paramset):
def __init__(self, **kwargs):
- super(constrained_by_poisson,self).__init__(**kwargs)
+ super(constrained_by_poisson, self).__init__(**kwargs)
self.pdf_type = 'poisson'
self.auxdata = kwargs.pop('auxdata')
self.factors = kwargs.pop('factors')
@@ -29,22 +33,23 @@ def __init__(self, **kwargs):
def expected_data(self, pars):
tensorlib, _ = get_backend()
return tensorlib.product(
- tensorlib.stack([pars, tensorlib.astensor(self.factors)]
- ),
- axis=0
+ tensorlib.stack([pars, tensorlib.astensor(self.factors)]), axis=0
)
+
def reduce_paramset_requirements(paramset_requirements):
reduced_paramset_requirements = {}
# nb: normsys and histosys have different op_codes so can't currently be shared
- param_keys = ['paramset_type',
- 'n_parameters',
- 'op_code',
- 'inits',
- 'bounds',
- 'auxdata',
- 'factors']
+ param_keys = [
+ 'paramset_type',
+ 'n_parameters',
+ 'op_code',
+ 'inits',
+ 'bounds',
+ 'auxdata',
+ 'factors',
+ ]
for param_name in list(paramset_requirements.keys()):
params = paramset_requirements[param_name]
@@ -56,10 +61,15 @@ def reduce_paramset_requirements(paramset_requirements):
for k in param_keys:
if len(combined_param[k]) != 1:
- raise exceptions.InvalidNameReuse("Multiple values for '{}' ({}) were found for {}. Use unique modifier names or use qualify_names=True when constructing the pdf.".format(k, list(combined_param[k]), param_name))
+ raise exceptions.InvalidNameReuse(
+ "Multiple values for '{}' ({}) were found for {}. Use unique modifier names or use qualify_names=True when constructing the pdf.".format(
+ k, list(combined_param[k]), param_name
+ )
+ )
else:
v = combined_param[k].pop()
- if isinstance(v, tuple): v = list(v)
+ if isinstance(v, tuple):
+ v = list(v)
combined_param[k] = v
reduced_paramset_requirements[param_name] = combined_param
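
`reduce_paramset_requirements` collapses the per-modifier requirement dicts sharing a name into one, and the new multi-line `InvalidNameReuse` message fires when any key disagrees. A loose sketch of that reduction (hypothetical helper; the real function also merges `paramset_type`, `op_code`, `auxdata`, and `factors`, and raises `exceptions.InvalidNameReuse`):

```python
def reduce_one(param_name, requirements, keys=('n_parameters', 'inits', 'bounds')):
    combined = {}
    for k in keys:
        # hash tuples so duplicate specs collapse in the set
        values = {tuple(r[k]) if isinstance(r[k], (list, tuple)) else r[k]
                  for r in requirements}
        if len(values) != 1:
            raise ValueError(
                "Multiple values for '{}' ({}) were found for {}.".format(
                    k, list(values), param_name))
        v = values.pop()
        combined[k] = list(v) if isinstance(v, tuple) else v
    return combined

reqs = [{'n_parameters': 2, 'inits': (1.0, 1.0), 'bounds': ((0, 10), (0, 10))}] * 2
print(reduce_one('mu', reqs))
```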
diff --git a/pyhf/pdf.py b/pyhf/pdf.py
index 5aa2350d03..a9b6b576c4 100644
--- a/pyhf/pdf.py
+++ b/pyhf/pdf.py
@@ -1,6 +1,5 @@
import copy
import logging
-log = logging.getLogger(__name__)
from . import get_backend, default_backend
from . import exceptions
@@ -9,10 +8,11 @@
from .constraints import gaussian_constraint_combined, poisson_constraint_combined
from .paramsets import reduce_paramset_requirements
+log = logging.getLogger(__name__)
class _ModelConfig(object):
- def __init__(self, spec, poiname = 'mu', qualify_names = False):
+ def __init__(self, spec, poiname='mu', qualify_names=False):
self.poi_index = None
self.par_map = {}
self.par_order = []
@@ -38,7 +38,9 @@ def __init__(self, spec, poiname = 'mu', qualify_names = False):
for modifier_def in sample['modifiers']:
self.parameters.append(modifier_def['name'])
if qualify_names:
- fullname = '{}/{}'.format(modifier_def['type'],modifier_def['name'])
+ fullname = '{}/{}'.format(
+ modifier_def['type'], modifier_def['name']
+ )
if modifier_def['name'] == poiname:
poiname = fullname
modifier_def['name'] = fullname
@@ -46,19 +48,38 @@ def __init__(self, spec, poiname = 'mu', qualify_names = False):
# get the paramset requirements for the given modifier. If
# modifier does not exist, we'll have a KeyError
try:
- paramset_requirements = modifiers.registry[modifier_def['type']].required_parset(len(sample['data']))
+ paramset_requirements = modifiers.registry[
+ modifier_def['type']
+ ].required_parset(len(sample['data']))
except KeyError:
- log.exception('Modifier not implemented yet (processing {0:s}). Available modifiers: {1}'.format(modifier_def['type'], modifiers.registry.keys()))
+ log.exception(
+ 'Modifier not implemented yet (processing {0:s}). Available modifiers: {1}'.format(
+ modifier_def['type'], modifiers.registry.keys()
+ )
+ )
raise exceptions.InvalidModifier()
- self.modifiers.append((modifier_def['name'],modifier_def['type']))
+ self.modifiers.append((modifier_def['name'], modifier_def['type']))
# check the shareability (e.g. for shapesys for example)
is_shared = paramset_requirements['is_shared']
- if not(is_shared) and modifier_def['name'] in _paramsets_requirements:
- raise ValueError("Trying to add unshared-paramset but other paramsets exist with the same name.")
- if is_shared and not(_paramsets_requirements.get(modifier_def['name'], [{'is_shared': True}])[0]['is_shared']):
- raise ValueError("Trying to add shared-paramset but other paramset of same name is indicated to be unshared.")
- _paramsets_requirements.setdefault(modifier_def['name'],[]).append(paramset_requirements)
+ if (
+ not (is_shared)
+ and modifier_def['name'] in _paramsets_requirements
+ ):
+ raise ValueError(
+ "Trying to add unshared-paramset but other paramsets exist with the same name."
+ )
+ if is_shared and not (
+ _paramsets_requirements.get(
+ modifier_def['name'], [{'is_shared': True}]
+ )[0]['is_shared']
+ ):
+ raise ValueError(
+ "Trying to add shared-paramset but other paramset of same name is indicated to be unshared."
+ )
+ _paramsets_requirements.setdefault(modifier_def['name'], []).append(
+ paramset_requirements
+ )
self.channels = list(set(self.channels))
self.samples = list(set(self.samples))
@@ -86,34 +107,42 @@ def par_slice(self, name):
def param_set(self, name):
return self.par_map[name]['paramset']
- def set_poi(self,name):
- if name not in [x for x,_ in self.modifiers]:
- raise exceptions.InvalidModel("The paramter of interest '{0:s}' cannot be fit as it is not declared in the model specification.".format(name))
+ def set_poi(self, name):
+ if name not in [x for x, _ in self.modifiers]:
+ raise exceptions.InvalidModel(
+ "The paramter of interest '{0:s}' cannot be fit as it is not declared in the model specification.".format(
+ name
+ )
+ )
s = self.par_slice(name)
- assert s.stop-s.start == 1
+ assert s.stop - s.start == 1
self.poi_index = s.start
def _register_paramset(self, param_name, paramset):
'''allocates n nuisance parameters and stores paramset > modifier map'''
- log.info('adding modifier %s (%s new nuisance parameters)', param_name, paramset.n_parameters)
+ log.info(
+ 'adding modifier %s (%s new nuisance parameters)',
+ param_name,
+ paramset.n_parameters,
+ )
sl = slice(self.next_index, self.next_index + paramset.n_parameters)
self.next_index = self.next_index + paramset.n_parameters
self.par_order.append(param_name)
- self.par_map[param_name] = {
- 'slice': sl,
- 'paramset': paramset,
- }
+ self.par_map[param_name] = {'slice': sl, 'paramset': paramset}
def _create_and_register_paramsets(self, paramsets_requirements):
- for param_name, paramset_requirements in reduce_paramset_requirements(paramsets_requirements).items():
+ for param_name, paramset_requirements in reduce_paramset_requirements(
+ paramsets_requirements
+ ).items():
paramset_type = paramset_requirements.get('paramset_type')
paramset = paramset_type(**paramset_requirements)
self._register_paramset(param_name, paramset)
+
class Model(object):
def __init__(self, spec, **config_kwargs):
- self.spec = copy.deepcopy(spec) #may get modified by config
+ self.spec = copy.deepcopy(spec) # may get modified by config
self.schema = config_kwargs.pop('schema', utils.get_default_schema())
# run jsonschema validation of input specification against the (provided) schema
log.info("Validating spec against schema: {0:s}".format(self.schema))
@@ -123,28 +152,31 @@ def __init__(self, spec, **config_kwargs):
self._create_nominal_and_modifiers()
- #this is tricky, must happen before constraint
- #terms try to access auxdata but after
- #combined mods have been created that
- #set the aux data
+ # this is tricky, must happen before constraint
+ # terms try to access auxdata but after
+ # combined mods have been created that
+ # set the aux data
for k in sorted(self.config.par_map.keys()):
parset = self.config.param_set(k)
- if hasattr(parset,'pdf_type'): #is constrained
+ if hasattr(parset, 'pdf_type'): # is constrained
self.config.auxdata += parset.auxdata
self.config.auxdata_order.append(k)
-
self.constraints_gaussian = gaussian_constraint_combined(self.config)
self.constraints_poisson = poisson_constraint_combined(self.config)
-
def _create_nominal_and_modifiers(self):
default_data_makers = {
- 'histosys': lambda: {'hi_data': [], 'lo_data': [], 'nom_data': [],'mask': []},
+ 'histosys': lambda: {
+ 'hi_data': [],
+ 'lo_data': [],
+ 'nom_data': [],
+ 'mask': [],
+ },
'normsys': lambda: {'hi': [], 'lo': [], 'nom_data': [], 'mask': []},
'normfactor': lambda: {'mask': []},
'shapefactor': lambda: {'mask': []},
- 'shapesys': lambda: {'mask': [], 'uncrt': [], 'nom_data' :[]},
+ 'shapesys': lambda: {'mask': [], 'uncrt': [], 'nom_data': []},
'staterror': lambda: {'mask': [], 'uncrt': [], 'nom_data': []},
}
@@ -159,31 +191,39 @@ def _create_nominal_and_modifiers(self):
# We don't actually set up the modifier data here for no-ops, but we do
# set up the entire structure
mega_mods = {}
- for m,mtype in self.config.modifiers:
+ for m, mtype in self.config.modifiers:
for s in self.config.samples:
- mega_mods.setdefault(s,{})[m] = {
+ mega_mods.setdefault(s, {})[m] = {
'type': mtype,
'name': m,
- 'data': default_data_makers[mtype]()
+ 'data': default_data_makers[mtype](),
}
# helper maps channel-name/sample-name to pairs of channel-sample structs
helper = {}
for c in self.spec['channels']:
for s in c['samples']:
- helper.setdefault(c['name'],{})[s['name']] = (c,s)
+ helper.setdefault(c['name'], {})[s['name']] = (c, s)
mega_samples = {}
for s in self.config.samples:
mega_nom = []
for c in self.config.channels:
- defined_samp = helper.get(c,{}).get(s)
+ defined_samp = helper.get(c, {}).get(s)
defined_samp = None if not defined_samp else defined_samp[1]
# set nominal to 0 for channel/sample if the pair doesn't exist
- nom = defined_samp['data'] if defined_samp else [0.0]*self.config.channel_nbins[c]
+ nom = (
+ defined_samp['data']
+ if defined_samp
+ else [0.0] * self.config.channel_nbins[c]
+ )
mega_nom += nom
- defined_mods = {x['name']:x for x in defined_samp['modifiers']} if defined_samp else {}
- for m,mtype in self.config.modifiers:
+ defined_mods = (
+ {x['name']: x for x in defined_samp['modifiers']}
+ if defined_samp
+ else {}
+ )
+ for m, mtype in self.config.modifiers:
# this is None if modifier doesn't affect channel/sample.
thismod = defined_mods.get(m)
if mtype == 'histosys':
@@ -193,55 +233,68 @@ def _create_nominal_and_modifiers(self):
mega_mods[s][m]['data']['lo_data'] += lo_data
mega_mods[s][m]['data']['hi_data'] += hi_data
mega_mods[s][m]['data']['nom_data'] += nom
- mega_mods[s][m]['data']['mask'] += [maskval]*len(nom) #broadcasting
+ mega_mods[s][m]['data']['mask'] += [maskval] * len(
+ nom
+ ) # broadcasting
pass
elif mtype == 'normsys':
maskval = True if thismod else False
lo_factor = thismod['data']['lo'] if thismod else 1.0
hi_factor = thismod['data']['hi'] if thismod else 1.0
- mega_mods[s][m]['data']['nom_data'] += [1.0]*len(nom)
- mega_mods[s][m]['data']['lo'] += [lo_factor]*len(nom) #broadcasting
- mega_mods[s][m]['data']['hi'] += [hi_factor]*len(nom)
- mega_mods[s][m]['data']['mask'] += [maskval] *len(nom) #broadcasting
+ mega_mods[s][m]['data']['nom_data'] += [1.0] * len(nom)
+ mega_mods[s][m]['data']['lo'] += [lo_factor] * len(
+ nom
+ ) # broadcasting
+ mega_mods[s][m]['data']['hi'] += [hi_factor] * len(nom)
+ mega_mods[s][m]['data']['mask'] += [maskval] * len(
+ nom
+ ) # broadcasting
elif mtype in ['normfactor', 'shapefactor']:
maskval = True if thismod else False
- mega_mods[s][m]['data']['mask'] += [maskval]*len(nom) #broadcasting
+ mega_mods[s][m]['data']['mask'] += [maskval] * len(
+ nom
+ ) # broadcasting
elif mtype in ['shapesys', 'staterror']:
- uncrt = thismod['data'] if thismod else [0.0]*len(nom)
- maskval = [True if thismod else False]*len(nom)
- mega_mods[s][m]['data']['mask'] += maskval
+ uncrt = thismod['data'] if thismod else [0.0] * len(nom)
+ maskval = [True if thismod else False] * len(nom)
+ mega_mods[s][m]['data']['mask'] += maskval
mega_mods[s][m]['data']['uncrt'] += uncrt
mega_mods[s][m]['data']['nom_data'] += nom
else:
- raise RuntimeError('not sure how to combine {mtype} into the mega-channel'.format(mtype = mtype))
+ raise RuntimeError(
+ 'not sure how to combine {mtype} into the mega-channel'.format(
+ mtype=mtype
+ )
+ )
sample_dict = {
'name': 'mega_{}'.format(s),
'nom': mega_nom,
- 'modifiers': list(mega_mods[s].values())
+ 'modifiers': list(mega_mods[s].values()),
}
mega_samples[s] = sample_dict
self.mega_mods = mega_mods
-
- tensorlib,_ = get_backend()
+ tensorlib, _ = get_backend()
thenom = default_backend.astensor(
[mega_samples[s]['nom'] for s in self.config.samples]
)
- self.thenom = default_backend.reshape(thenom,(
- 1,
- len(self.config.samples),
- 1,
- sum(list(self.config.channel_nbins.values()))
- )
+ self.thenom = default_backend.reshape(
+ thenom,
+ (
+ 1,
+ len(self.config.samples),
+ 1,
+ sum(list(self.config.channel_nbins.values())),
+ ),
)
self.modifiers_appliers = {
- k:c(
- [m for m,mtype in self.config.modifiers if mtype == k],
+ k: c(
+ [m for m, mtype in self.config.modifiers if mtype == k],
self.config,
- mega_mods
+ mega_mods,
)
- for k,c in modifiers.combined.items()
+ for k, c in modifiers.combined.items()
}
def expected_auxdata(self, pars):
@@ -250,27 +303,32 @@ def expected_auxdata(self, pars):
for parname in self.config.auxdata_order:
# order matters! because we generated auxdata in a certain order
thisaux = self.config.param_set(parname).expected_data(
- pars[self.config.par_slice(parname)])
+ pars[self.config.par_slice(parname)]
+ )
tocat = [thisaux] if auxdata is None else [auxdata, thisaux]
auxdata = tensorlib.concatenate(tocat)
return auxdata
- def _modifications(self,pars):
- factor_mods = ['normsys','staterror','shapesys','normfactor', 'shapefactor']
- delta_mods = ['histosys']
+ def _modifications(self, pars):
+ factor_mods = ['normsys', 'staterror', 'shapesys', 'normfactor', 'shapefactor']
+ delta_mods = ['histosys']
- deltas = list(filter(lambda x: x is not None,[
- self.modifiers_appliers[k].apply(pars)
- for k in delta_mods
- ]))
- factors = list(filter(lambda x: x is not None,[
- self.modifiers_appliers[k].apply(pars)
- for k in factor_mods
- ]))
+ deltas = list(
+ filter(
+ lambda x: x is not None,
+ [self.modifiers_appliers[k].apply(pars) for k in delta_mods],
+ )
+ )
+ factors = list(
+ filter(
+ lambda x: x is not None,
+ [self.modifiers_appliers[k].apply(pars) for k in factor_mods],
+ )
+ )
return deltas, factors
- def expected_actualdata(self,pars):
+ def expected_actualdata(self, pars):
"""
For a single channel single sample, we compute
@@ -301,14 +359,16 @@ def expected_actualdata(self,pars):
allsum = tensorlib.concatenate(deltas + [tensorlib.astensor(self.thenom)])
- nom_plus_delta = tensorlib.sum(allsum,axis=0)
- nom_plus_delta = tensorlib.reshape(nom_plus_delta,(1,)+tensorlib.shape(nom_plus_delta))
+ nom_plus_delta = tensorlib.sum(allsum, axis=0)
+ nom_plus_delta = tensorlib.reshape(
+ nom_plus_delta, (1,) + tensorlib.shape(nom_plus_delta)
+ )
allfac = tensorlib.concatenate(factors + [nom_plus_delta])
- newbysample = tensorlib.product(allfac,axis=0)
- newresults = tensorlib.sum(newbysample,axis=0)
- return newresults[0] #only one alphas
+ newbysample = tensorlib.product(allfac, axis=0)
+ newresults = tensorlib.sum(newbysample, axis=0)
+ return newresults[0] # only one alphas
def expected_data(self, pars, include_auxdata=True):
tensorlib, _ = get_backend()
@@ -318,20 +378,24 @@ def expected_data(self, pars, include_auxdata=True):
if not include_auxdata:
return expected_actual
expected_constraints = self.expected_auxdata(pars)
- tocat = [expected_actual] if expected_constraints is None else [expected_actual,expected_constraints]
+ tocat = (
+ [expected_actual]
+ if expected_constraints is None
+ else [expected_actual, expected_constraints]
+ )
return tensorlib.concatenate(tocat)
def constraint_logpdf(self, auxdata, pars):
- normal = self.constraints_gaussian.logpdf(auxdata,pars)
- poisson = self.constraints_poisson.logpdf(auxdata,pars)
+ normal = self.constraints_gaussian.logpdf(auxdata, pars)
+ poisson = self.constraints_poisson.logpdf(auxdata, pars)
return normal + poisson
def mainlogpdf(self, maindata, pars):
tensorlib, _ = get_backend()
lambdas_data = self.expected_actualdata(pars)
- summands = tensorlib.poisson_logpdf(maindata, lambdas_data)
- tosum = tensorlib.boolean_mask(summands,tensorlib.isfinite(summands))
- mainpdf = tensorlib.sum(tosum)
+ summands = tensorlib.poisson_logpdf(maindata, lambdas_data)
+ tosum = tensorlib.boolean_mask(summands, tensorlib.isfinite(summands))
+ mainpdf = tensorlib.sum(tosum)
return mainpdf
def logpdf(self, pars, data):
@@ -341,16 +405,19 @@ def logpdf(self, pars, data):
cut = tensorlib.shape(data)[0] - len(self.config.auxdata)
actual_data, aux_data = data[:cut], data[cut:]
- mainpdf = self.mainlogpdf(actual_data,pars)
+ mainpdf = self.mainlogpdf(actual_data, pars)
constraint = self.constraint_logpdf(aux_data, pars)
result = mainpdf + constraint
- return result * tensorlib.ones((1)) #ensure (1,) array shape also for numpy
+ return result * tensorlib.ones(
+ (1)
+ ) # ensure (1,) array shape also for numpy
except:
- log.error('eval failed for data {} pars: {}'.format(
- tensorlib.tolist(data),
- tensorlib.tolist(pars)
- ))
+ log.error(
+ 'eval failed for data {} pars: {}'.format(
+ tensorlib.tolist(data), tensorlib.tolist(pars)
+ )
+ )
raise
def pdf(self, pars, data):
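
The reformatted `expected_actualdata` is easier to read as arithmetic: per (sample, alpha, bin), expected = prod(factors) * (nominal + sum(deltas)), then a sum over samples. The same tensor algebra on toy numbers:

```python
import numpy as np

nom = np.array([[[[50.0, 52.0]], [[20.0, 18.0]]]])   # (1, sample=2, alpha=1, bin=2)
delta = np.array([[[[2.0, -2.0]], [[0.0, 0.0]]]])    # histosys-style shift
factor = np.array([[[[1.0, 1.0]], [[1.1, 1.1]]]])    # normfactor-style scale

nom_plus_delta = np.concatenate([delta, nom]).sum(axis=0, keepdims=True)
by_sample = np.concatenate([factor, nom_plus_delta]).prod(axis=0)
expected = by_sample.sum(axis=0)                     # sum over samples
print(expected[0])  # [74.  69.8] -> one expected yield per bin (single alpha)
```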
diff --git a/pyhf/readxml.py b/pyhf/readxml.py
index 64ad5fa225..05993fd578 100644
--- a/pyhf/readxml.py
+++ b/pyhf/readxml.py
@@ -1,11 +1,12 @@
import logging
-log = logging.getLogger(__name__)
import os
import xml.etree.ElementTree as ET
import numpy as np
import tqdm
+log = logging.getLogger(__name__)
+
def extract_error(h):
"""
@@ -24,8 +25,10 @@ def extract_error(h):
err = h.variances if h.variances else h.numpy()[0]
return np.sqrt(err).tolist()
+
def import_root_histogram(rootdir, filename, path, name):
import uproot
+
# strip leading slashes as uproot doesn't use "/" for top-level
path = path or ''
path = path.strip('/')
@@ -36,98 +39,120 @@ def import_root_histogram(rootdir, filename, path, name):
try:
h = f[os.path.join(path, name)]
except KeyError:
- raise KeyError('Both {0:s} and {1:s} were tried and not found in {2:s}'.format(name, os.path.join(path, name), os.path.join(rootdir, filename)))
+ raise KeyError(
+ 'Both {0:s} and {1:s} were tried and not found in {2:s}'.format(
+ name, os.path.join(path, name), os.path.join(rootdir, filename)
+ )
+ )
return h.numpy()[0].tolist(), extract_error(h)
-def process_sample(sample,rootdir,inputfile, histopath, channelname, track_progress=False):
+
+def process_sample(
+ sample, rootdir, inputfile, histopath, channelname, track_progress=False
+):
if 'InputFile' in sample.attrib:
- inputfile = sample.attrib.get('InputFile')
+ inputfile = sample.attrib.get('InputFile')
if 'HistoPath' in sample.attrib:
histopath = sample.attrib.get('HistoPath')
histoname = sample.attrib['HistoName']
- data,err = import_root_histogram(rootdir, inputfile, histopath, histoname)
+ data, err = import_root_histogram(rootdir, inputfile, histopath, histoname)
modifiers = []
- modtags = tqdm.tqdm(sample.iter(), unit='modifier', disable=not(track_progress), total=len(sample))
+ modtags = tqdm.tqdm(
+ sample.iter(), unit='modifier', disable=not (track_progress), total=len(sample)
+ )
for modtag in modtags:
- modtags.set_description(' - modifier {0:s}({1:s})'.format(modtag.attrib.get('Name', 'n/a'), modtag.tag))
+ modtags.set_description(
+ ' - modifier {0:s}({1:s})'.format(
+ modtag.attrib.get('Name', 'n/a'), modtag.tag
+ )
+ )
if modtag == sample:
continue
if modtag.tag == 'OverallSys':
- modifiers.append({
- 'name': modtag.attrib['Name'],
- 'type': 'normsys',
- 'data': {'lo': float(modtag.attrib['Low']), 'hi': float(modtag.attrib['High'])}
- })
+ modifiers.append(
+ {
+ 'name': modtag.attrib['Name'],
+ 'type': 'normsys',
+ 'data': {
+ 'lo': float(modtag.attrib['Low']),
+ 'hi': float(modtag.attrib['High']),
+ },
+ }
+ )
elif modtag.tag == 'NormFactor':
- modifiers.append({
- 'name': modtag.attrib['Name'],
- 'type': 'normfactor',
- 'data': None
- })
+ modifiers.append(
+ {'name': modtag.attrib['Name'], 'type': 'normfactor', 'data': None}
+ )
elif modtag.tag == 'HistoSys':
- lo,_ = import_root_histogram(rootdir,
- modtag.attrib.get('HistoFileLow',inputfile),
- modtag.attrib.get('HistoPathLow',''),
- modtag.attrib['HistoNameLow']
- )
- hi,_ = import_root_histogram(rootdir,
- modtag.attrib.get('HistoFileHigh',inputfile),
- modtag.attrib.get('HistoPathHigh',''),
- modtag.attrib['HistoNameHigh']
- )
- modifiers.append({
- 'name': modtag.attrib['Name'],
- 'type': 'histosys',
- 'data': {'lo_data': lo, 'hi_data': hi}
- })
+ lo, _ = import_root_histogram(
+ rootdir,
+ modtag.attrib.get('HistoFileLow', inputfile),
+ modtag.attrib.get('HistoPathLow', ''),
+ modtag.attrib['HistoNameLow'],
+ )
+ hi, _ = import_root_histogram(
+ rootdir,
+ modtag.attrib.get('HistoFileHigh', inputfile),
+ modtag.attrib.get('HistoPathHigh', ''),
+ modtag.attrib['HistoNameHigh'],
+ )
+ modifiers.append(
+ {
+ 'name': modtag.attrib['Name'],
+ 'type': 'histosys',
+ 'data': {'lo_data': lo, 'hi_data': hi},
+ }
+ )
elif modtag.tag == 'StatError' and modtag.attrib['Activate'] == 'True':
- if modtag.attrib.get('HistoName','') == '':
+ if modtag.attrib.get('HistoName', '') == '':
staterr = err
else:
- extstat,_ = import_root_histogram(rootdir,
- modtag.attrib.get('HistoFile',inputfile),
- modtag.attrib.get('HistoPath',''),
- modtag.attrib['HistoName']
+ extstat, _ = import_root_histogram(
+ rootdir,
+ modtag.attrib.get('HistoFile', inputfile),
+ modtag.attrib.get('HistoPath', ''),
+ modtag.attrib['HistoName'],
)
- staterr = np.multiply(extstat,data).tolist()
+ staterr = np.multiply(extstat, data).tolist()
if not staterr:
raise RuntimeError('cannot determine stat error.')
- modifiers.append({
- 'name': 'staterror_{}'.format(channelname),
- 'type': 'staterror',
- 'data': staterr
- })
+ modifiers.append(
+ {
+ 'name': 'staterror_{}'.format(channelname),
+ 'type': 'staterror',
+ 'data': staterr,
+ }
+ )
else:
log.warning('not considering modifier tag %s', modtag)
+ return {'name': sample.attrib['Name'], 'data': data, 'modifiers': modifiers}
- return {
- 'name': sample.attrib['Name'],
- 'data': data,
- 'modifiers': modifiers
- }
-def process_data(sample,rootdir,inputfile, histopath):
+def process_data(sample, rootdir, inputfile, histopath):
if 'InputFile' in sample.attrib:
- inputfile = sample.attrib.get('InputFile')
+ inputfile = sample.attrib.get('InputFile')
if 'HistoPath' in sample.attrib:
histopath = sample.attrib.get('HistoPath')
histoname = sample.attrib['HistoName']
- data,_ = import_root_histogram(rootdir, inputfile, histopath, histoname)
+ data, _ = import_root_histogram(rootdir, inputfile, histopath, histoname)
return data
+
def process_channel(channelxml, rootdir, track_progress=False):
channel = channelxml.getroot()
inputfile = channel.attrib.get('InputFile')
histopath = channel.attrib.get('HistoPath')
- samples = tqdm.tqdm(channel.findall('Sample'), unit='sample', disable=not(track_progress))
+ samples = tqdm.tqdm(
+ channel.findall('Sample'), unit='sample', disable=not (track_progress)
+ )
data = channel.findall('Data')
if data:
@@ -138,27 +163,39 @@ def process_channel(channelxml, rootdir, track_progress=False):
results = []
for sample in samples:
- samples.set_description(' - sample {}'.format(sample.attrib.get('Name')))
- result = process_sample(sample, rootdir, inputfile, histopath, channelname, track_progress)
- results.append(result)
+ samples.set_description(' - sample {}'.format(sample.attrib.get('Name')))
+ result = process_sample(
+ sample, rootdir, inputfile, histopath, channelname, track_progress
+ )
+ results.append(result)
return channelname, parsed_data, results
+
def parse(configfile, rootdir, track_progress=False):
toplvl = ET.parse(configfile)
- inputs = tqdm.tqdm([x.text for x in toplvl.findall('Input')], unit='channel', disable=not(track_progress))
+ inputs = tqdm.tqdm(
+ [x.text for x in toplvl.findall('Input')],
+ unit='channel',
+ disable=not (track_progress),
+ )
channels = {}
for inp in inputs:
inputs.set_description('Processing {}'.format(inp))
- channel, data, samples = process_channel(ET.parse(os.path.join(rootdir,inp)), rootdir, track_progress)
+ channel, data, samples = process_channel(
+ ET.parse(os.path.join(rootdir, inp)), rootdir, track_progress
+ )
channels[channel] = {'data': data, 'samples': samples}
return {
- 'toplvl':{
- 'resultprefix':toplvl.getroot().attrib['OutputFilePrefix'],
- 'measurements': [{'name': x.attrib['Name'], 'config': {'poi': x.findall('POI')[0].text}} for x in toplvl.findall('Measurement')]
+ 'toplvl': {
+ 'resultprefix': toplvl.getroot().attrib['OutputFilePrefix'],
+ 'measurements': [
+ {'name': x.attrib['Name'], 'config': {'poi': x.findall('POI')[0].text}}
+ for x in toplvl.findall('Measurement')
+ ],
},
- 'channels': [{'name': k, 'samples': v['samples']} for k,v in channels.items()],
- 'data': {k:v['data'] for k,v in channels.items()}
+ 'channels': [{'name': k, 'samples': v['samples']} for k, v in channels.items()],
+ 'data': {k: v['data'] for k, v in channels.items()},
}
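
`process_sample` maps each HistFactory XML tag onto a pyhf modifier dict; for example an `OverallSys` becomes a `normsys` entry. The mapping in isolation on a toy tag:

```python
import xml.etree.ElementTree as ET

modtag = ET.fromstring('<OverallSys Name="lumi" Low="0.95" High="1.05"/>')
modifier = {
    'name': modtag.attrib['Name'],
    'type': 'normsys',
    'data': {'lo': float(modtag.attrib['Low']),
             'hi': float(modtag.attrib['High'])},
}
print(modifier)
# {'name': 'lumi', 'type': 'normsys', 'data': {'lo': 0.95, 'hi': 1.05}}
```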
diff --git a/pyhf/simplemodels.py b/pyhf/simplemodels.py
index 7cea8560fc..0d0eceabd4 100644
--- a/pyhf/simplemodels.py
+++ b/pyhf/simplemodels.py
@@ -1,5 +1,6 @@
from . import Model
+
def hepdata_like(signal_data, bkg_data, bkg_uncerts):
spec = {
'channels': [
@@ -11,16 +12,20 @@ def hepdata_like(signal_data, bkg_data, bkg_uncerts):
'data': signal_data,
'modifiers': [
{'name': 'mu', 'type': 'normfactor', 'data': None}
- ]
+ ],
},
{
'name': 'background',
'data': bkg_data,
'modifiers': [
- {'name': 'uncorr_bkguncrt', 'type': 'shapesys', 'data': bkg_uncerts}
- ]
- }
- ]
+ {
+ 'name': 'uncorr_bkguncrt',
+ 'type': 'shapesys',
+ 'data': bkg_uncerts,
+ }
+ ],
+ },
+ ],
}
]
}
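
Assuming pyhf is installed at this revision, the reformatted helper is used like this; `hepdata_like` yields a one-channel signal-plus-background model with an uncorrelated `shapesys` background uncertainty:

```python
from pyhf.simplemodels import hepdata_like

pdf = hepdata_like(signal_data=[12.0, 11.0],
                   bkg_data=[50.0, 52.0],
                   bkg_uncerts=[3.0, 7.0])
print(pdf.config.suggested_init())  # mu plus one shapesys parameter per bin
```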
diff --git a/pyhf/tensor/__init__.py b/pyhf/tensor/__init__.py
index a2d481b117..3925f19424 100644
--- a/pyhf/tensor/__init__.py
+++ b/pyhf/tensor/__init__.py
@@ -1,20 +1,24 @@
from .numpy_backend import numpy_backend
+
assert numpy_backend
try:
from .pytorch_backend import pytorch_backend
+
assert pytorch_backend
except ImportError:
pass
try:
from .tensorflow_backend import tensorflow_backend
+
assert tensorflow_backend
except ImportError:
pass
try:
from .mxnet_backend import mxnet_backend
+
assert mxnet_backend
except ImportError:
pass
diff --git a/pyhf/tensor/mxnet_backend.py b/pyhf/tensor/mxnet_backend.py
index 5a491c989d..c966737a8a 100644
--- a/pyhf/tensor/mxnet_backend.py
+++ b/pyhf/tensor/mxnet_backend.py
@@ -4,8 +4,10 @@
import math # Required for normal()
from numbers import Number # Required for normal()
from scipy.stats import norm # Required for normal_cdf()
+
log = logging.getLogger(__name__)
+
class mxnet_backend(object):
"""MXNet backend for pyhf"""
@@ -50,7 +52,8 @@ def tolist(self, tensor_in):
try:
return tensor_in.asnumpy().tolist()
except AttributeError:
- if isinstance(tensor_in, list): return tensor_in
+ if isinstance(tensor_in, list):
+ return tensor_in
raise
def outer(self, tensor_in_1, tensor_in_2):
@@ -76,18 +79,22 @@ def outer(self, tensor_in_1, tensor_in_2):
rows1, cols1 = tensor_1_shape
rows2, cols2 = tensor_2_shape
- return nd.reshape(nd.broadcast_mul(tensor_in_1.reshape((rows1, 1, cols1, 1)),
- tensor_in_2.reshape((1, rows2, 1, cols2))),
- (rows1 * cols1, rows2 * cols2))
-
- def gather(self,tensor,indices):
+ return nd.reshape(
+ nd.broadcast_mul(
+ tensor_in_1.reshape((rows1, 1, cols1, 1)),
+ tensor_in_2.reshape((1, rows2, 1, cols2)),
+ ),
+ (rows1 * cols1, rows2 * cols2),
+ )
+
+ def gather(self, tensor, indices):
return tensor[indices]
def boolean_mask(self, tensor, mask):
raise NotImplementedError("mxnet::boolean_mask is not implemented.")
return
- def astensor(self, tensor_in, dtype = 'float'):
+ def astensor(self, tensor_in, dtype='float'):
"""
Convert to a MXNet NDArray.
@@ -100,9 +107,9 @@ def astensor(self, tensor_in, dtype = 'float'):
dtypemap = {'float': 'float32', 'int': 'int32', 'bool': 'uint8'}
dtype = dtypemap[dtype]
try:
- tensor = nd.array(tensor_in, dtype = dtype)
+ tensor = nd.array(tensor_in, dtype=dtype)
except ValueError:
- tensor = nd.array([tensor_in], dtype = dtype)
+ tensor = nd.array([tensor_in], dtype=dtype)
return tensor
def sum(self, tensor_in, axis=None):
@@ -282,8 +289,10 @@ def where(self, mask, tensor_in_1, tensor_in_2):
mask = self.astensor(mask)
tensor_in_1 = self.astensor(tensor_in_1)
tensor_in_2 = self.astensor(tensor_in_2)
- return nd.add(nd.multiply(mask, tensor_in_1),
- nd.multiply(nd.subtract(1, mask), tensor_in_2))
+ return nd.add(
+ nd.multiply(mask, tensor_in_1),
+ nd.multiply(nd.subtract(1, mask), tensor_in_2),
+ )
def concatenate(self, sequence, axis=0):
"""
@@ -328,12 +337,16 @@ def simple_broadcast(self, *args):
assert len([arg for arg in args if 1 < len(arg) < max_dim]) == 0
except AssertionError as error:
log.error(
- 'ERROR: The arguments must be of compatible size: 1 or %i', max_dim)
+ 'ERROR: The arguments must be of compatible size: 1 or %i', max_dim
+ )
raise error
- broadcast = [arg if len(arg) > 1
- else nd.broadcast_axis(arg[0], axis=len(arg.shape) - 1, size=max_dim)
- for arg in args]
+ broadcast = [
+ arg
+ if len(arg) > 1
+ else nd.broadcast_axis(arg[0], axis=len(arg.shape) - 1, size=max_dim)
+ for arg in args
+ ]
return nd.stack(*broadcast)
def shape(self, tensor):
@@ -343,7 +356,7 @@ def shape(self, tensor):
return tensor.shape
def reshape(self, tensor, newshape):
- return nd.reshape(tensor,newshape)
+ return nd.reshape(tensor, newshape)
def einsum(self, subscripts, *operands):
"""
@@ -364,7 +377,7 @@ def einsum(self, subscripts, *operands):
def poisson_logpdf(self, n, lam):
n = self.astensor(n)
lam = self.astensor(lam)
- return n * nd.log(lam) - lam - nd.gammaln(n + 1.)
+ return n * nd.log(lam) - lam - nd.gammaln(n + 1.0)
def poisson(self, n, lam):
r"""
@@ -396,7 +409,7 @@ def poisson(self, n, lam):
# This is currently copied directly from PyTorch's source until a better
# way can be found to do this in MXNet
# https://github.com/pytorch/pytorch/blob/39520ffec15ab7e97691fed048de1832e83785e8/torch/distributions/poisson.py#L59-L63
- return nd.exp((nd.log(lam) * n) - lam - nd.gammaln(n + 1.))
+ return nd.exp((nd.log(lam) * n) - lam - nd.gammaln(n + 1.0))
def normal_logpdf(self, x, mu, sigma):
# This is currently copied directly from PyTorch's source until a better
@@ -404,7 +417,11 @@ def normal_logpdf(self, x, mu, sigma):
# https://github.com/pytorch/pytorch/blob/39520ffec15ab7e97691fed048de1832e83785e8/torch/distributions/normal.py#L70-L76
variance = sigma ** 2
log_scale = math.log(sigma) if isinstance(sigma, Number) else sigma.log()
- return -((x - mu) ** 2) / (2 * variance) - log_scale - math.log(math.sqrt(2 * math.pi))
+ return (
+ -((x - mu) ** 2) / (2 * variance)
+ - log_scale
+ - math.log(math.sqrt(2 * math.pi))
+ )
def normal(self, x, mu, sigma):
r"""
@@ -460,7 +477,8 @@ def normal_cdf(self, x, mu=0, sigma=1):
MXNet NDArray: The CDF
"""
log.warning(
- 'normal_cdf currently uses SciPy stats until pure MXNet distribuiton support is available.')
+            'normal_cdf currently uses SciPy stats until pure MXNet distribution support is available.'
+ )
x = self.astensor(x).asnumpy()
mu = self.astensor(mu).asnumpy()
sigma = self.astensor(sigma).asnumpy()
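
With no native `where` in MXNet's NDArray API at the time, the backend blends arithmetically: `mask * a + (1 - mask) * b`. Checked against NumPy's native `where` on toy inputs:

```python
import numpy as np

mask = np.array([1.0, 0.0, 1.0])
a = np.array([10.0, 20.0, 30.0])
b = np.array([-1.0, -2.0, -3.0])

blended = mask * a + (1 - mask) * b
assert np.array_equal(blended, np.where(mask.astype(bool), a, b))
print(blended)  # [10. -2. 30.]
```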
diff --git a/pyhf/tensor/numpy_backend.py b/pyhf/tensor/numpy_backend.py
index 30b18aa8bc..055b32a7b0 100644
--- a/pyhf/tensor/numpy_backend.py
+++ b/pyhf/tensor/numpy_backend.py
@@ -2,8 +2,10 @@
import logging
from scipy.special import gammaln
from scipy.stats import norm
+
log = logging.getLogger(__name__)
+
class numpy_backend(object):
"""NumPy backend for pyhf"""
@@ -32,19 +34,20 @@ def clip(self, tensor_in, min, max):
"""
return np.clip(tensor_in, min, max)
- def tolist(self,tensor_in):
+ def tolist(self, tensor_in):
try:
return tensor_in.tolist()
except AttributeError:
- if isinstance(tensor_in, list): return tensor_in
+ if isinstance(tensor_in, list):
+ return tensor_in
raise
def outer(self, tensor_in_1, tensor_in_2):
tensor_in_1 = self.astensor(tensor_in_1)
tensor_in_2 = self.astensor(tensor_in_2)
- return np.outer(tensor_in_1,tensor_in_2)
+ return np.outer(tensor_in_1, tensor_in_2)
- def gather(self,tensor,indices):
+ def gather(self, tensor, indices):
return tensor[indices]
def boolean_mask(self, tensor, mask):
@@ -53,7 +56,7 @@ def boolean_mask(self, tensor, mask):
def isfinite(self, tensor):
return np.isfinite(tensor)
- def astensor(self, tensor_in, dtype = 'float'):
+ def astensor(self, tensor_in, dtype='float'):
"""
Convert to a NumPy array.
@@ -65,40 +68,40 @@ def astensor(self, tensor_in, dtype = 'float'):
"""
dtypemap = {'float': np.float64, 'int': np.int64, 'bool': np.bool_}
dtype = dtypemap[dtype]
- return np.asarray(tensor_in, dtype = dtype)
+ return np.asarray(tensor_in, dtype=dtype)
def sum(self, tensor_in, axis=None):
return np.sum(tensor_in, axis=axis)
def product(self, tensor_in, axis=None):
- return np.product(tensor_in, axis = axis)
+ return np.product(tensor_in, axis=axis)
def abs(self, tensor):
return np.abs(tensor)
- def ones(self,shape):
+ def ones(self, shape):
return np.ones(shape)
- def zeros(self,shape):
+ def zeros(self, shape):
return np.zeros(shape)
- def power(self,tensor_in_1, tensor_in_2):
+ def power(self, tensor_in_1, tensor_in_2):
return np.power(tensor_in_1, tensor_in_2)
- def sqrt(self,tensor_in):
+ def sqrt(self, tensor_in):
return np.sqrt(tensor_in)
- def divide(self,tensor_in_1, tensor_in_2):
+ def divide(self, tensor_in_1, tensor_in_2):
return np.divide(tensor_in_1, tensor_in_2)
- def log(self,tensor_in):
+ def log(self, tensor_in):
return np.log(tensor_in)
- def exp(self,tensor_in):
+ def exp(self, tensor_in):
return np.exp(tensor_in)
- def stack(self, sequence, axis = 0):
- return np.stack(sequence,axis = axis)
+ def stack(self, sequence, axis=0):
+ return np.stack(sequence, axis=axis)
def where(self, mask, tensor_in_1, tensor_in_2):
return np.where(mask, tensor_in_1, tensor_in_2)
@@ -143,7 +146,7 @@ def shape(self, tensor):
return tensor.shape
def reshape(self, tensor, newshape):
- return np.reshape(tensor,newshape)
+ return np.reshape(tensor, newshape)
def einsum(self, subscripts, *operands):
"""
@@ -167,7 +170,7 @@ def einsum(self, subscripts, *operands):
def poisson_logpdf(self, n, lam):
n = np.asarray(n)
lam = np.asarray(lam)
- return n * np.log(lam) - lam - gammaln(n + 1.)
+ return n * np.log(lam) - lam - gammaln(n + 1.0)
def poisson(self, n, lam):
r"""
@@ -193,17 +196,17 @@ def poisson(self, n, lam):
"""
n = np.asarray(n)
lam = np.asarray(lam)
- return np.exp(n * np.log(lam) - lam - gammaln(n + 1.))
+ return np.exp(n * np.log(lam) - lam - gammaln(n + 1.0))
def normal_logpdf(self, x, mu, sigma):
# this is much faster than
# norm.logpdf(x, loc=mu, scale=sigma)
# https://codereview.stackexchange.com/questions/69718/fastest-computation-of-n-likelihoods-on-normal-distributions
root2 = np.sqrt(2)
- root2pi = np.sqrt(2*np.pi)
+ root2pi = np.sqrt(2 * np.pi)
prefactor = -np.log(sigma * root2pi)
- summand = -np.square(np.divide((x - mu),(root2 * sigma)))
- return prefactor + summand
+ summand = -np.square(np.divide((x - mu), (root2 * sigma)))
+ return prefactor + summand
# def normal_logpdf(self, x, mu, sigma):
# return norm.logpdf(x, loc=mu, scale=sigma)
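
The hand-rolled `normal_logpdf` above trades `scipy.stats.norm.logpdf`'s per-call overhead for two vectorized lines. A quick equivalence check on hypothetical values:

```python
import numpy as np
from scipy.stats import norm

x, mu, sigma = np.array([0.5, 1.5]), 1.0, 2.0
prefactor = -np.log(sigma * np.sqrt(2 * np.pi))
summand = -np.square((x - mu) / (np.sqrt(2) * sigma))
assert np.allclose(prefactor + summand, norm.logpdf(x, loc=mu, scale=sigma))
```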
diff --git a/pyhf/tensor/pytorch_backend.py b/pyhf/tensor/pytorch_backend.py
index 80a3c7720b..aba2608211 100644
--- a/pyhf/tensor/pytorch_backend.py
+++ b/pyhf/tensor/pytorch_backend.py
@@ -1,8 +1,10 @@
import torch
import torch.autograd
import logging
+
log = logging.getLogger(__name__)
+
class pytorch_backend(object):
"""PyTorch backend for pyhf"""
@@ -36,15 +38,16 @@ def tolist(self, tensor_in):
try:
return tensor_in.data.numpy().tolist()
except AttributeError:
- if isinstance(tensor_in, list): return tensor_in
+ if isinstance(tensor_in, list):
+ return tensor_in
raise
def outer(self, tensor_in_1, tensor_in_2):
tensor_in_1 = self.astensor(tensor_in_1)
tensor_in_2 = self.astensor(tensor_in_2)
- return torch.ger(tensor_in_1,tensor_in_2)
+ return torch.ger(tensor_in_1, tensor_in_2)
- def astensor(self, tensor_in, dtype = 'float'):
+ def astensor(self, tensor_in, dtype='float'):
"""
Convert to a PyTorch Tensor.
@@ -56,24 +59,28 @@ def astensor(self, tensor_in, dtype = 'float'):
"""
dtypemap = {'float': torch.float, 'int': torch.int, 'bool': torch.uint8}
dtype = dtypemap[dtype]
- return torch.as_tensor(tensor_in, dtype = dtype)
+ return torch.as_tensor(tensor_in, dtype=dtype)
- def gather(self,tensor,indices):
- return torch.take(tensor,indices.type(torch.LongTensor))
+ def gather(self, tensor, indices):
+ return torch.take(tensor, indices.type(torch.LongTensor))
def boolean_mask(self, tensor, mask):
mask = self.astensor(mask).type(torch.ByteTensor)
- return torch.masked_select(tensor,mask)
+ return torch.masked_select(tensor, mask)
def reshape(self, tensor, newshape):
- return torch.reshape(tensor,newshape)
+ return torch.reshape(tensor, newshape)
def shape(self, tensor):
- return tuple(map(int,tensor.shape))
+ return tuple(map(int, tensor.shape))
def sum(self, tensor_in, axis=None):
tensor_in = self.astensor(tensor_in)
- return torch.sum(tensor_in) if (axis is None or tensor_in.shape == torch.Size([])) else torch.sum(tensor_in, axis)
+ return (
+ torch.sum(tensor_in)
+ if (axis is None or tensor_in.shape == torch.Size([]))
+ else torch.sum(tensor_in, axis)
+ )
def product(self, tensor_in, axis=None):
tensor_in = self.astensor(tensor_in)
@@ -94,31 +101,31 @@ def power(self, tensor_in_1, tensor_in_2):
tensor_in_2 = self.astensor(tensor_in_2)
return torch.pow(tensor_in_1, tensor_in_2)
- def sqrt(self,tensor_in):
+ def sqrt(self, tensor_in):
tensor_in = self.astensor(tensor_in)
return torch.sqrt(tensor_in)
- def divide(self,tensor_in_1, tensor_in_2):
+ def divide(self, tensor_in_1, tensor_in_2):
tensor_in_1 = self.astensor(tensor_in_1)
tensor_in_2 = self.astensor(tensor_in_2)
return torch.div(tensor_in_1, tensor_in_2)
- def log(self,tensor_in):
+ def log(self, tensor_in):
tensor_in = self.astensor(tensor_in)
return torch.log(tensor_in)
- def exp(self,tensor_in):
+ def exp(self, tensor_in):
tensor_in = self.astensor(tensor_in)
return torch.exp(tensor_in)
- def stack(self, sequence, axis = 0):
- return torch.stack(sequence,dim = axis)
+ def stack(self, sequence, axis=0):
+ return torch.stack(sequence, dim=axis)
def where(self, mask, tensor_in_1, tensor_in_2):
mask = self.astensor(mask).type(torch.FloatTensor)
tensor_in_1 = self.astensor(tensor_in_1)
tensor_in_2 = self.astensor(tensor_in_2)
- return mask * tensor_in_1 + (1-mask) * tensor_in_2
+ return mask * tensor_in_1 + (1 - mask) * tensor_in_2
def concatenate(self, sequence, axis=0):
"""
@@ -157,6 +164,7 @@ def simple_broadcast(self, *args):
Returns:
list of Tensors: The sequence broadcast together.
"""
+
def generic_len(a):
try:
return len(a)
@@ -171,11 +179,14 @@ def generic_len(a):
try:
assert len([arg for arg in args if 1 < generic_len(arg) < max_dim]) == 0
except AssertionError as error:
- log.error('ERROR: The arguments must be of compatible size: 1 or %i', max_dim)
+ log.error(
+ 'ERROR: The arguments must be of compatible size: 1 or %i', max_dim
+ )
raise error
- broadcast = [arg if generic_len(arg) > 1 else arg.expand(max_dim)
- for arg in args]
+ broadcast = [
+ arg if generic_len(arg) > 1 else arg.expand(max_dim) for arg in args
+ ]
return broadcast
def einsum(self, subscripts, *operands):
@@ -262,7 +273,7 @@ def normal(self, x, mu, sigma):
normal = torch.distributions.Normal(mu, sigma)
return self.exp(normal.log_prob(x))
- def normal_cdf(self, x, mu=[0.], sigma=[1.]):
+ def normal_cdf(self, x, mu=[0.0], sigma=[1.0]):
"""
The cumulative distribution function for the Normal distribution
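
The where implementation reformatted above selects elements by float-mask arithmetic, mask * a + (1 - mask) * b, rather than calling torch.where directly. A minimal sketch of the trick (an illustration, not part of this patch):

import torch

mask = torch.tensor([1.0, 0.0, 1.0])
a = torch.tensor([10.0, 20.0, 30.0])
b = torch.tensor([-1.0, -2.0, -3.0])
# picks a where mask is 1 and b where mask is 0 -> tensor([10., -2., 30.])
print(mask * a + (1 - mask) * b)
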
diff --git a/pyhf/tensor/tensorflow_backend.py b/pyhf/tensor/tensorflow_backend.py
index 5f3fc27653..56112712e1 100644
--- a/pyhf/tensor/tensorflow_backend.py
+++ b/pyhf/tensor/tensorflow_backend.py
@@ -52,7 +52,8 @@ def tolist(self, tensor_in):
return tensor_in
if "no attribute 'run'" in str(err):
raise RuntimeError(
- 'evaluation of tensor requested via .tolist() but no session defined')
+ 'evaluation of tensor requested via .tolist() but no session defined'
+ )
raise
except RuntimeError as err:
# if no tensor operations have been added to the graph, but we want
@@ -72,8 +73,16 @@ def tolist(self, tensor_in):
def outer(self, tensor_in_1, tensor_in_2):
tensor_in_1 = self.astensor(tensor_in_1)
tensor_in_2 = self.astensor(tensor_in_2)
- tensor_in_1 = tensor_in_1 if tensor_in_1.dtype is not tf.bool else tf.cast(tensor_in_1, tf.float32)
- tensor_in_1 = tensor_in_1 if tensor_in_2.dtype is not tf.bool else tf.cast(tensor_in_2, tf.float32)
+ tensor_in_1 = (
+ tensor_in_1
+ if tensor_in_1.dtype is not tf.bool
+ else tf.cast(tensor_in_1, tf.float32)
+ )
+ tensor_in_2 = (
+ tensor_in_2
+ if tensor_in_2.dtype is not tf.bool
+ else tf.cast(tensor_in_2, tf.float32)
+ )
return tf.einsum('i,j->ij', tensor_in_1, tensor_in_2)
def gather(self, tensor, indices):
@@ -110,11 +119,19 @@ def astensor(self, tensor_in, dtype='float'):
def sum(self, tensor_in, axis=None):
tensor_in = self.astensor(tensor_in)
- return tf.reduce_sum(tensor_in) if (axis is None or tensor_in.shape == tf.TensorShape([])) else tf.reduce_sum(tensor_in, axis)
+ return (
+ tf.reduce_sum(tensor_in)
+ if (axis is None or tensor_in.shape == tf.TensorShape([]))
+ else tf.reduce_sum(tensor_in, axis)
+ )
def product(self, tensor_in, axis=None):
tensor_in = self.astensor(tensor_in)
- return tf.reduce_prod(tensor_in) if axis is None else tf.reduce_prod(tensor_in, axis)
+ return (
+ tf.reduce_prod(tensor_in)
+ if axis is None
+ else tf.reduce_prod(tensor_in, axis)
+ )
def abs(self, tensor):
tensor = self.astensor(tensor)
@@ -198,6 +215,7 @@ def simple_broadcast(self, *args):
Returns:
list of Tensors: The sequence broadcast together.
"""
+
def generic_len(a):
try:
return len(a)
@@ -210,15 +228,19 @@ def generic_len(a):
args = [self.astensor(arg) for arg in args]
max_dim = max(map(generic_len, args))
try:
- assert len([arg for arg in args
- if 1 < generic_len(arg) < max_dim]) == 0
+ assert len([arg for arg in args if 1 < generic_len(arg) < max_dim]) == 0
except AssertionError as error:
log.error(
- 'ERROR: The arguments must be of compatible size: 1 or %i', max_dim)
+ 'ERROR: The arguments must be of compatible size: 1 or %i', max_dim
+ )
raise error
- broadcast = [arg if generic_len(arg) > 1 else
- tf.tile(tf.slice(arg, [0], [1]), tf.stack([max_dim])) for arg in args]
+ broadcast = [
+ arg
+ if generic_len(arg) > 1
+ else tf.tile(tf.slice(arg, [0], [1]), tf.stack([max_dim]))
+ for arg in args
+ ]
return broadcast
def einsum(self, subscripts, *operands):
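
The simple_broadcast rule reformatted above: every length-1 argument is tiled out to the longest argument's length, and any length strictly between 1 and the maximum is rejected. A backend-free sketch of the same rule (an illustration, not the library's code):

import numpy as np

def simple_broadcast_sketch(*args):
    max_dim = max(len(arg) for arg in args)
    # lengths strictly between 1 and max_dim are incompatible
    assert not [arg for arg in args if 1 < len(arg) < max_dim]
    return [
        np.tile(np.asarray(arg)[:1], max_dim) if len(arg) == 1 else np.asarray(arg)
        for arg in args
    ]

print(simple_broadcast_sketch([1.0], [2.0, 3.0, 4.0]))
# [array([1., 1., 1.]), array([2., 3., 4.])]
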
diff --git a/pyhf/utils.py b/pyhf/utils.py
index 150e3611f4..4e466ec8f8 100644
--- a/pyhf/utils.py
+++ b/pyhf/utils.py
@@ -76,9 +76,11 @@ def qmu(mu, data, pdf, init_pars, par_bounds):
"""
tensorlib, optimizer = get_backend()
mubhathat = optimizer.constrained_bestfit(
- loglambdav, mu, data, pdf, init_pars, par_bounds)
+ loglambdav, mu, data, pdf, init_pars, par_bounds
+ )
muhatbhat = optimizer.unconstrained_bestfit(
- loglambdav, data, pdf, init_pars, par_bounds)
+ loglambdav, data, pdf, init_pars, par_bounds
+ )
qmu = loglambdav(mubhathat, data, pdf) - loglambdav(muhatbhat, data, pdf)
qmu = tensorlib.where(muhatbhat[pdf.config.poi_index] > mu, [0], qmu)
return qmu
@@ -87,7 +89,8 @@ def qmu(mu, data, pdf, init_pars, par_bounds):
def generate_asimov_data(asimov_mu, data, pdf, init_pars, par_bounds):
_, optimizer = get_backend()
bestfit_nuisance_asimov = optimizer.constrained_bestfit(
- loglambdav, asimov_mu, data, pdf, init_pars, par_bounds)
+ loglambdav, asimov_mu, data, pdf, init_pars, par_bounds
+ )
return pdf.expected_data(bestfit_nuisance_asimov)
@@ -151,16 +154,15 @@ def runOnePoint(muTest, data, pdf, init_pars=None, par_bounds=None):
par_bounds = par_bounds or pdf.config.suggested_bounds()
tensorlib, _ = get_backend()
- asimov_mu = 0.
- asimov_data = generate_asimov_data(
- asimov_mu, data, pdf, init_pars, par_bounds)
+ asimov_mu = 0.0
+ asimov_data = generate_asimov_data(asimov_mu, data, pdf, init_pars, par_bounds)
- qmu_v = tensorlib.clip(
- qmu(muTest, data, pdf, init_pars, par_bounds), 0, max=None)
+ qmu_v = tensorlib.clip(qmu(muTest, data, pdf, init_pars, par_bounds), 0, max=None)
sqrtqmu_v = tensorlib.sqrt(qmu_v)
qmuA_v = tensorlib.clip(
- qmu(muTest, asimov_data, pdf, init_pars, par_bounds), 0, max=None)
+ qmu(muTest, asimov_data, pdf, init_pars, par_bounds), 0, max=None
+ )
sqrtqmuA_v = tensorlib.sqrt(qmuA_v)
CLsb, CLb, CLs = pvals_from_teststat(sqrtqmu_v, sqrtqmuA_v)
@@ -168,7 +170,6 @@ def runOnePoint(muTest, data, pdf, init_pars=None, par_bounds=None):
CLs_exp = []
for nsigma in [-2, -1, 0, 1, 2]:
sqrtqmu_v_sigma = sqrtqmuA_v - nsigma
- CLs_exp.append(
- pvals_from_teststat(sqrtqmu_v_sigma, sqrtqmuA_v)[-1])
+ CLs_exp.append(pvals_from_teststat(sqrtqmu_v_sigma, sqrtqmuA_v)[-1])
CLs_exp = tensorlib.astensor(CLs_exp)
return qmu_v, qmuA_v, CLsb, CLb, CLs, CLs_exp
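
pvals_from_teststat itself is not shown in this diff. For orientation, under the usual asymptotic approximation it amounts to roughly the following (a sketch under that assumption, not necessarily the exact implementation):

from scipy.stats import norm

def pvals_from_teststat_sketch(sqrtqmu_v, sqrtqmuA_v):
    CLsb = 1 - norm.cdf(sqrtqmu_v)              # p-value under signal + background
    CLb = 1 - norm.cdf(sqrtqmu_v - sqrtqmuA_v)  # p-value under background only
    return CLsb, CLb, CLsb / CLb                # CLs is the ratio
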
diff --git a/pyhf/writexml.py b/pyhf/writexml.py
index a93746b68c..c98d9eef8c 100644
--- a/pyhf/writexml.py
+++ b/pyhf/writexml.py
@@ -1,40 +1,45 @@
import os
import xml.etree.cElementTree as ET
-def measurement(lumi, lumierr, poi, param_settings = None, name = 'Meas1'):
+
+def measurement(lumi, lumierr, poi, param_settings=None, name='Meas1'):
param_settings = param_settings or []
- meas = ET.Element("Measurement", Name = name, Lumi = str(lumi), LumiRelErr = str(lumierr))
- poiel = ET.Element('POI')
+ meas = ET.Element("Measurement", Name=name, Lumi=str(lumi), LumiRelErr=str(lumierr))
+ poiel = ET.Element('POI')
poiel.text = poi
meas.append(poiel)
for s in param_settings:
- se = ET.Element('ParamSetting', **s['attrs'])
+ se = ET.Element('ParamSetting', **s['attrs'])
se.text = ' '.join(s['params'])
meas.append(se)
return meas
+
def write_channel(channelspec, filename, data_rootdir):
- #need to write channelfile here
- with open(filename,'w') as f:
- channel = ET.Element('Channel', Name = channelspec['name'])
- channel = ET.Element('Channel', Name = channelspec['name'])
- f.write(ET.tostring(channel, encoding = 'utf-8').decode('utf-8'))
+ # need to write channelfile here
+ with open(filename, 'w') as f:
+ channel = ET.Element('Channel', Name=channelspec['name'])
+ f.write(ET.tostring(channel, encoding='utf-8').decode('utf-8'))
pass
-def writexml(spec, specdir, data_rootdir , result_outputprefix):
- combination = ET.Element("Combination", OutputFilePrefix = result_outputprefix)
+def writexml(spec, specdir, data_rootdir, result_outputprefix):
+ combination = ET.Element("Combination", OutputFilePrefix=result_outputprefix)
for c in spec['channels']:
- channelfilename = os.path.join(specdir,'channel_{}.xml'.format(c['name']))
- write_channel(c,channelfilename,data_rootdir)
- inp = ET.Element("Input")
+ channelfilename = os.path.join(specdir, 'channel_{}.xml'.format(c['name']))
+ write_channel(c, channelfilename, data_rootdir)
+ inp = ET.Element("Input")
inp.text = channelfilename
combination.append(inp)
-
- m = measurement(1,0.1,'SigXsecOverSM',[{'attrs': {'Const': 'True'}, 'params': ['Lumi' 'alpha_syst1']}])
+ m = measurement(
+ 1,
+ 0.1,
+ 'SigXsecOverSM',
+ [{'attrs': {'Const': 'True'}, 'params': ['Lumi', 'alpha_syst1']}],
+ )
combination.append(m)
- return ET.tostring(combination, encoding = 'utf-8')
-
+ return ET.tostring(combination, encoding='utf-8')
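
The params list in the measurement call above needs the comma shown: Python silently concatenates adjacent string literals, so a list written as ['Lumi' 'alpha_syst1'] is the one-element list ['Lumialpha_syst1'], almost certainly not the intended pair of parameter names. The pitfall in two lines:

assert ['Lumi' 'alpha_syst1'] == ['Lumialpha_syst1']   # implicit concatenation
assert ['Lumi', 'alpha_syst1'] != ['Lumialpha_syst1']  # the comma keeps two entries
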
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000..d57533bb87
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,12 @@
+[tool.black]
+line-length = 88
+py36 = false # Python 2 is still supported
+skip-string-normalization = true
+skip-numeric-underscore-normalization = true # Python 2 is still supported
+include = '\.pyi?$'
+exclude = '''
+/(
+ \.git
+ | build
+)/
+'''
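
Black reads this [tool.black] section automatically when invoked from the repository root, so black . reformats in place and black --check --diff . reproduces the CI check without modifying files; with the hook installed via pre-commit install, the same check also runs on each commit.
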
diff --git a/setup.py b/setup.py
index 228f6be4b3..2b0ae3e678 100644
--- a/setup.py
+++ b/setup.py
@@ -9,9 +9,7 @@
'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
'setuptools<=39.1.0',
],
- 'torch': [
- 'torch>=0.4.0'
- ],
+ 'torch': ['torch>=0.4.0'],
'mxnet': [
'mxnet>=1.0.0',
'requests<2.19.0,>=2.18.4',
@@ -21,12 +19,8 @@
# 'dask': [
# 'dask[array]'
# ],
- 'xmlimport': [
- 'uproot',
- ],
- 'minuit': [
- 'iminuit'
- ],
+ 'xmlimport': ['uproot'],
+ 'minuit': ['iminuit'],
'develop': [
'pyflakes',
'pytest>=3.5.1',
@@ -49,7 +43,9 @@
'm2r',
'jsonpatch',
'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now
- ]
+ 'pre-commit',
+ 'black;python_version>="3.6"', # Black is Python3 only
+ ],
}
extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
@@ -77,11 +73,9 @@
'tqdm', # for readxml
'six', # for modifiers
'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
- 'jsonpatch'
+ 'jsonpatch',
],
extras_require=extras_require,
- entry_points={
- 'console_scripts': ['pyhf=pyhf.commandline:pyhf']
- },
- dependency_links=[]
+ entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},
+ dependency_links=[],
)
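
The 'black;python_version>="3.6"' requirement uses a PEP 508 environment marker: pip evaluates the condition at install time, so installing the develop extras under Python 2 simply skips Black instead of failing, matching the comment that Black is Python 3 only.
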
diff --git a/tests/benchmarks/test_benchmark.py b/tests/benchmarks/test_benchmark.py
index 8178b4d518..fd680d8b1f 100644
--- a/tests/benchmarks/test_benchmark.py
+++ b/tests/benchmarks/test_benchmark.py
@@ -22,12 +22,7 @@ def generate_source_static(n_bins):
source = {
'binning': binning,
- 'bindata': {
- 'data': data,
- 'bkg': bkg,
- 'bkgerr': bkgerr,
- 'sig': sig
- }
+ 'bindata': {'data': data, 'bkg': bkg, 'bkgerr': bkgerr, 'sig': sig},
}
return source
@@ -52,20 +47,15 @@ def generate_source_poisson(n_bins):
source = {
'binning': binning,
- 'bindata': {
- 'data': data,
- 'bkg': bkg,
- 'bkgerr': bkgerr,
- 'sig': sig
- }
+ 'bindata': {'data': data, 'bkg': bkg, 'bkgerr': bkgerr, 'sig': sig},
}
return source
def runOnePoint(pdf, data):
- return pyhf.utils.runOnePoint(1.0, data, pdf,
- pdf.config.suggested_init(),
- pdf.config.suggested_bounds())
+ return pyhf.utils.runOnePoint(
+ 1.0, data, pdf, pdf.config.suggested_init(), pdf.config.suggested_bounds()
+ )
# bins = [1, 10, 50, 100, 200, 500, 800, 1000]
@@ -89,8 +79,8 @@ def test_runOnePoint(benchmark, backend, n_bins):
None
"""
source = generate_source_static(n_bins)
- pdf = hepdata_like(source['bindata']['sig'],
- source['bindata']['bkg'],
- source['bindata']['bkgerr'])
+ pdf = hepdata_like(
+ source['bindata']['sig'], source['bindata']['bkg'], source['bindata']['bkgerr']
+ )
data = source['bindata']['data'] + pdf.config.auxdata
assert benchmark(runOnePoint, pdf, data)
diff --git a/tests/conftest.py b/tests/conftest.py
index 699b4b39c0..c777a8a4c9 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -3,51 +3,55 @@
import tensorflow as tf
import sys
-'''
-This fixture isolates the sys.modules imported in case you need to mess around with them and do not want to break other tests.
-This is not done automatically.
-'''
@pytest.fixture(scope='function')
def isolate_modules():
+ """
+ This fixture isolates the sys.modules imported in case you need to mess around with them and do not want to break other tests.
+
+ This is not done automatically.
+ """
CACHE_MODULES = sys.modules.copy()
yield isolate_modules
sys.modules.update(CACHE_MODULES)
-'''
-This fixture is automatically run to clear out the events registered before and after a test function runs.
-'''
+
@pytest.fixture(scope='function', autouse=True)
def reset_events():
+ """
+ This fixture is automatically run to clear out the events registered before and after a test function runs.
+ """
pyhf.events.__events.clear()
pyhf.events.__disabled_events.clear()
yield reset_events
pyhf.events.__events.clear()
pyhf.events.__disabled_events.clear()
-'''
-This fixture is automatically run to reset the backend before and after a test function runs.
-'''
+
@pytest.fixture(scope='function', autouse=True)
def reset_backend():
+ """
+ This fixture is automatically run to reset the backend before and after a test function runs.
+ """
pyhf.set_backend(pyhf.default_backend)
yield reset_backend
pyhf.set_backend(pyhf.default_backend)
-@pytest.fixture(scope='function', params=[
- (pyhf.tensor.numpy_backend(), None),
- (pyhf.tensor.tensorflow_backend(session=tf.Session()), None),
- (pyhf.tensor.pytorch_backend(), None),
- (pyhf.tensor.mxnet_backend(), None),
- (pyhf.tensor.numpy_backend(poisson_from_normal=True), pyhf.optimize.minuit_optimizer()),
- ],
- ids=[
- 'numpy',
- 'tensorflow',
- 'pytorch',
- 'mxnet',
- 'numpy_minuit',
- ])
+
+@pytest.fixture(
+ scope='function',
+ params=[
+ (pyhf.tensor.numpy_backend(), None),
+ (pyhf.tensor.tensorflow_backend(session=tf.Session()), None),
+ (pyhf.tensor.pytorch_backend(), None),
+ (pyhf.tensor.mxnet_backend(), None),
+ (
+ pyhf.tensor.numpy_backend(poisson_from_normal=True),
+ pyhf.optimize.minuit_optimizer(),
+ ),
+ ],
+ ids=['numpy', 'tensorflow', 'pytorch', 'mxnet', 'numpy_minuit'],
+)
def backend(request):
param = request.param
# a better way to get the id? all the backends we have so far for testing
@@ -62,15 +66,25 @@ def backend(request):
# allow the specific backend to fail if specified
fail_backend = request.node.get_marker('fail_{param}'.format(param=param_id))
# only look at the specific backends
- only_backends = [pid for pid in param_ids if request.node.get_marker('only_{param}'.format(param=pid))]
+ only_backends = [
+ pid
+ for pid in param_ids
+ if request.node.get_marker('only_{param}'.format(param=pid))
+ ]
- if(skip_backend and (param_id in only_backends)):
- raise ValueError("Must specify skip_{param} or only_{param} but not both!".format(param=pid))
+ if skip_backend and (param_id in only_backends):
+ raise ValueError(
+ "Must specify skip_{param} or only_{param} but not both!".format(param=pid)
+ )
if skip_backend:
pytest.skip("skipping {func} as specified".format(func=func_name))
elif only_backends and param_id not in only_backends:
- pytest.skip("skipping {func} as specified to only look at: {backends}".format(func=func_name, backends=', '.join(only_backends)))
+ pytest.skip(
+ "skipping {func} as specified to only look at: {backends}".format(
+ func=func_name, backends=', '.join(only_backends)
+ )
+ )
if fail_backend:
pytest.xfail("expect {func} to fail as specified".format(func=func_name))
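
The fixture above turns markers of the form skip_<backend>, only_<backend>, and fail_<backend> into per-backend skips and expected failures. A sketch of the scheme with markers that appear elsewhere in this patch (test names illustrative):

import pytest

@pytest.mark.skip_mxnet  # skip when parametrized with the MXNet backend
def test_example_a(backend):
    ...

@pytest.mark.only_numpy  # run only with the NumPy backend
def test_example_b(backend):
    ...
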
diff --git a/tests/test_backend_consistency.py b/tests/test_backend_consistency.py
index eb1d5ec08f..604db1bfdd 100644
--- a/tests/test_backend_consistency.py
+++ b/tests/test_backend_consistency.py
@@ -1,5 +1,4 @@
import pyhf
-from pyhf.simplemodels import hepdata_like
import tensorflow as tf
import numpy as np
import pytest
@@ -23,12 +22,7 @@ def generate_source_static(n_bins):
source = {
'binning': binning,
- 'bindata': {
- 'data': data,
- 'bkg': bkg,
- 'bkgerr': bkgerr,
- 'sig': sig
- }
+ 'bindata': {'data': data, 'bkg': bkg, 'bkgerr': bkgerr, 'sig': sig},
}
return source
@@ -53,12 +47,7 @@ def generate_source_poisson(n_bins):
source = {
'binning': binning,
- 'bindata': {
- 'data': data,
- 'bkg': bkg,
- 'bkgerr': bkgerr,
- 'sig': sig
- }
+ 'bindata': {'data': data, 'bkg': bkg, 'bkgerr': bkgerr, 'sig': sig},
}
return source
@@ -69,12 +58,10 @@ def generate_source_poisson(n_bins):
@pytest.mark.parametrize('n_bins', bins, ids=bin_ids)
-@pytest.mark.parametrize('invert_order', [False,True], ids=['normal','inverted'])
-def test_runOnePoint_q_mu(n_bins,invert_order,
- tolerance={
- 'numpy': 1e-02,
- 'tensors': 5e-03
- }):
+@pytest.mark.parametrize('invert_order', [False, True], ids=['normal', 'inverted'])
+def test_runOnePoint_q_mu(
+ n_bins, invert_order, tolerance={'numpy': 1e-02, 'tensors': 5e-03}
+):
"""
Check that the different backends all compute a test statistic
that is within a specific tolerance of each other.
@@ -90,33 +77,29 @@ def test_runOnePoint_q_mu(n_bins,invert_order,
source = generate_source_static(n_bins)
- signal_sample = {
+ signal_sample = {
'name': 'signal',
'data': source['bindata']['sig'],
- 'modifiers': [
- {'name': 'mu', 'type': 'normfactor', 'data': None}
- ]
+ 'modifiers': [{'name': 'mu', 'type': 'normfactor', 'data': None}],
}
background_sample = {
'name': 'background',
'data': source['bindata']['bkg'],
'modifiers': [
- {'name': 'uncorr_bkguncrt',
- 'type': 'shapesys',
- 'data': source['bindata']['bkgerr']
- }
- ]
- }
- samples = [background_sample,signal_sample] if invert_order else [signal_sample, background_sample]
- spec = {
- 'channels': [
{
- 'name': 'singlechannel',
- 'samples': samples
+ 'name': 'uncorr_bkguncrt',
+ 'type': 'shapesys',
+ 'data': source['bindata']['bkgerr'],
}
- ]
+ ],
}
+ samples = (
+ [background_sample, signal_sample]
+ if invert_order
+ else [signal_sample, background_sample]
+ )
+ spec = {'channels': [{'name': 'singlechannel', 'samples': samples}]}
pdf = pyhf.Model(spec)
data = source['bindata']['data'] + pdf.config.auxdata
@@ -135,9 +118,9 @@ def test_runOnePoint_q_mu(n_bins,invert_order,
backend.session = tf.Session()
pyhf.set_backend(backend)
- q_mu = pyhf.utils.runOnePoint(1.0, data, pdf,
- pdf.config.suggested_init(),
- pdf.config.suggested_bounds())[0]
+ q_mu = pyhf.utils.runOnePoint(
+ 1.0, data, pdf, pdf.config.suggested_init(), pdf.config.suggested_bounds()
+ )[0]
test_statistic.append(pyhf.tensorlib.tolist(q_mu))
# compare to NumPy/SciPy
@@ -152,12 +135,18 @@ def test_runOnePoint_q_mu(n_bins,invert_order,
try:
assert (numpy_ratio_delta_unity < tolerance['numpy']).all()
except AssertionError:
- print('Ratio to NumPy+SciPy exceeded tolerance of {}: {}'.format(
- tolerance['numpy'], numpy_ratio_delta_unity.tolist()))
+ print(
+ 'Ratio to NumPy+SciPy exceeded tolerance of {}: {}'.format(
+ tolerance['numpy'], numpy_ratio_delta_unity.tolist()
+ )
+ )
assert False
try:
assert (tensors_ratio_delta_unity < tolerance['tensors']).all()
except AssertionError:
- print('Ratio between tensor backends exceeded tolerance of {}: {}'.format(
- tolerance['tensors'], tensors_ratio_delta_unity.tolist()))
+ print(
+ 'Ratio between tensor backends exceeded tolerance of {}: {}'.format(
+ tolerance['tensors'], tensors_ratio_delta_unity.tolist()
+ )
+ )
assert False
diff --git a/tests/test_constraints.py b/tests/test_constraints.py
index 0b474c8410..edc25853c9 100644
--- a/tests/test_constraints.py
+++ b/tests/test_constraints.py
@@ -1,7 +1,7 @@
import pytest
import pyhf
-# @pytest.mark.skip_mxnet
+
def test_numpy_pdf_inputs(backend):
spec = {
'channels': [
@@ -10,69 +10,92 @@ def test_numpy_pdf_inputs(backend):
'samples': [
{
'name': 'mu',
- 'data': [10.,10.],
+ 'data': [10.0, 10.0],
'modifiers': [
{'name': 'mu', 'type': 'normfactor', 'data': None}
- ]
+ ],
},
{
'name': 'bkg1',
'data': [50.0, 70.0],
'modifiers': [
- {'name': 'stat_firstchannel', 'type': 'staterror', 'data': [12.,12.]}
- ]
+ {
+ 'name': 'stat_firstchannel',
+ 'type': 'staterror',
+ 'data': [12.0, 12.0],
+ }
+ ],
},
{
'name': 'bkg2',
- 'data': [30.0, 20.],
+ 'data': [30.0, 20.0],
'modifiers': [
- {'name': 'stat_firstchannel', 'type': 'staterror', 'data': [5.,5.]}
- ]
+ {
+ 'name': 'stat_firstchannel',
+ 'type': 'staterror',
+ 'data': [5.0, 5.0],
+ }
+ ],
},
{
'name': 'bkg3',
'data': [20.0, 15.0],
'modifiers': [
- {'name': 'bkg_norm', 'type': 'shapesys','data': [10, 10]}
- ]
- }
- ]
- },
+ {'name': 'bkg_norm', 'type': 'shapesys', 'data': [10, 10]}
+ ],
+ },
+ ],
+ }
]
}
m = pyhf.Model(spec)
+
def slow(self, auxdata, pars):
- tensorlib,_ = pyhf.get_backend()
+ tensorlib, _ = pyhf.get_backend()
# iterate over all constraints order doesn't matter....
start_index = 0
summands = None
for cname in self.config.auxdata_order:
- parset, parslice = self.config.param_set(cname), \
- self.config.par_slice(cname)
+ parset, parslice = (
+ self.config.param_set(cname),
+ self.config.par_slice(cname),
+ )
end_index = start_index + parset.n_parameters
thisauxdata = auxdata[start_index:end_index]
start_index = end_index
if parset.pdf_type == 'normal':
paralphas = pars[parslice]
- sigmas = parset.sigmas if hasattr(parset,'sigmas') else tensorlib.ones(paralphas.shape)
+ sigmas = (
+ parset.sigmas
+ if hasattr(parset, 'sigmas')
+ else tensorlib.ones(paralphas.shape)
+ )
sigmas = tensorlib.astensor(sigmas)
- constraint_term = tensorlib.normal_logpdf(thisauxdata, paralphas, sigmas)
+ constraint_term = tensorlib.normal_logpdf(
+ thisauxdata, paralphas, sigmas
+ )
elif parset.pdf_type == 'poisson':
paralphas = tensorlib.product(
- tensorlib.stack([pars[parslice], tensorlib.astensor(parset.factors)]
- ),
- axis=0
- )
+ tensorlib.stack(
+ [pars[parslice], tensorlib.astensor(parset.factors)]
+ ),
+ axis=0,
+ )
- constraint_term = tensorlib.poisson_logpdf(thisauxdata,paralphas)
- summands = constraint_term if summands is None else tensorlib.concatenate([summands,constraint_term])
+ constraint_term = tensorlib.poisson_logpdf(thisauxdata, paralphas)
+ summands = (
+ constraint_term
+ if summands is None
+ else tensorlib.concatenate([summands, constraint_term])
+ )
return tensorlib.sum(summands) if summands is not None else 0
+
def fast(self, auxdata, pars):
- return self.constraint_logpdf(auxdata,pars)
+ return self.constraint_logpdf(auxdata, pars)
auxd = pyhf.tensorlib.astensor(m.config.auxdata)
pars = pyhf.tensorlib.astensor(m.config.suggested_init())
- slow_result = pyhf.tensorlib.tolist(slow(m,auxd,pars))
- fast_result = pyhf.tensorlib.tolist(fast(m,auxd,pars))
+ slow_result = pyhf.tensorlib.tolist(slow(m, auxd, pars))
+ fast_result = pyhf.tensorlib.tolist(fast(m, auxd, pars))
assert pytest.approx(slow_result) == fast_result
diff --git a/tests/test_events.py b/tests/test_events.py
index 0fa3b2dbbe..888af43dd1 100644
--- a/tests/test_events.py
+++ b/tests/test_events.py
@@ -2,6 +2,7 @@
import pyhf.events as events
import mock
+
def test_subscribe_event():
ename = 'test'
@@ -12,6 +13,7 @@ def test_subscribe_event():
assert m in events.__events.get(ename)
del events.__events[ename]
+
def test_event():
ename = 'test'
@@ -22,6 +24,7 @@ def test_event():
m.assert_called_once()
del events.__events[ename]
+
def test_disable_event():
ename = 'test'
@@ -31,17 +34,18 @@ def test_disable_event():
events.subscribe(ename)(m)
events.disable(ename)
- assert m.called == False
+ assert m.called is False
assert ename in events.__disabled_events
assert events.trigger(ename) == events.noop
assert events.trigger(ename)() == events.noop()
- assert m.called == False
+ assert m.called is False
assert noop_m.is_called_once()
events.enable(ename)
assert ename not in events.__disabled_events
del events.__events[ename]
events.noop = noop
+
def test_trigger_noevent():
noop, noop_m = events.noop, mock.Mock()
diff --git a/tests/test_import.py b/tests/test_import.py
index 6f350f5be5..448e539c25 100644
--- a/tests/test_import.py
+++ b/tests/test_import.py
@@ -1,24 +1,28 @@
import pyhf
import pyhf.readxml
-import json
-import pytest
import numpy as np
def test_import_prepHistFactory():
- parsed_xml = pyhf.readxml.parse('validation/xmlimport_input/config/example.xml',
- 'validation/xmlimport_input/')
+ parsed_xml = pyhf.readxml.parse(
+ 'validation/xmlimport_input/config/example.xml', 'validation/xmlimport_input/'
+ )
# build the spec, strictly checks properties included
spec = {'channels': parsed_xml['channels']}
pdf = pyhf.Model(spec, poiname='SigXsecOverSM')
- data = [binvalue for k in pdf.spec['channels'] for binvalue
- in parsed_xml['data'][k['name']]] + pdf.config.auxdata
+ data = [
+ binvalue
+ for k in pdf.spec['channels']
+ for binvalue in parsed_xml['data'][k['name']]
+ ] + pdf.config.auxdata
channels = {channel['name'] for channel in pdf.spec['channels']}
- samples = {channel['name']: [sample['name']
- for sample in channel['samples']] for channel in pdf.spec['channels']}
+ samples = {
+ channel['name']: [sample['name'] for sample in channel['samples']]
+ for channel in pdf.spec['channels']
+ }
###
# signal overallsys
@@ -32,38 +36,50 @@ def test_import_prepHistFactory():
assert 'background2' in samples['channel1']
assert pdf.spec['channels'][0]['samples'][2]['modifiers'][0]['type'] == 'staterror'
- assert pdf.spec['channels'][0]['samples'][2]['modifiers'][0]['data'] == [0, 10.]
+ assert pdf.spec['channels'][0]['samples'][2]['modifiers'][0]['data'] == [0, 10.0]
assert pdf.spec['channels'][0]['samples'][1]['modifiers'][0]['type'] == 'staterror'
- assert all(np.isclose(
- pdf.spec['channels'][0]['samples'][1]['modifiers'][0]['data'], [5.0, 0.0]))
+ assert all(
+ np.isclose(
+ pdf.spec['channels'][0]['samples'][1]['modifiers'][0]['data'], [5.0, 0.0]
+ )
+ )
- assert pdf.expected_actualdata(
- pdf.config.suggested_init()).tolist() == [120.0, 110.0]
+ assert pdf.expected_actualdata(pdf.config.suggested_init()).tolist() == [
+ 120.0,
+ 110.0,
+ ]
- assert pdf.config.auxdata_order == sorted(['syst1', 'staterror_channel1', 'syst2', 'syst3'])
+ assert pdf.config.auxdata_order == sorted(
+ ['syst1', 'staterror_channel1', 'syst2', 'syst3']
+ )
- assert data == [122.0, 112.0, 1.0, 1.0, 0.0, 0.0, 0.0]
+ assert data == [122.0, 112.0, 1.0, 1.0, 0.0, 0.0, 0.0]
pars = pdf.config.suggested_init()
pars[pdf.config.par_slice('SigXsecOverSM')] = [2.0]
- assert pdf.expected_data(
- pars, include_auxdata=False).tolist() == [140, 120]
+ assert pdf.expected_data(pars, include_auxdata=False).tolist() == [140, 120]
def test_import_histosys():
- parsed_xml = pyhf.readxml.parse('validation/xmlimport_input2/config/example.xml',
- 'validation/xmlimport_input2')
+ parsed_xml = pyhf.readxml.parse(
+ 'validation/xmlimport_input2/config/example.xml', 'validation/xmlimport_input2'
+ )
# build the spec, strictly checks properties included
spec = {'channels': parsed_xml['channels']}
pdf = pyhf.Model(spec, poiname='SigXsecOverSM')
- data = [binvalue for k in pdf.spec['channels'] for binvalue
- in parsed_xml['data'][k['name']]] + pdf.config.auxdata
+ data = [
+ binvalue
+ for k in pdf.spec['channels']
+ for binvalue in parsed_xml['data'][k['name']]
+ ] + pdf.config.auxdata
channels = {channel['name']: channel for channel in pdf.spec['channels']}
- samples = {channel['name']: [sample['name']
- for sample in channel['samples']] for channel in pdf.spec['channels']}
+ samples = {
+ channel['name']: [sample['name'] for sample in channel['samples']]
+ for channel in pdf.spec['channels']
+ }
assert channels['channel2']['samples'][0]['modifiers'][0]['type'] == 'histosys'
diff --git a/tests/test_init.py b/tests/test_init.py
index 50df283925..298d10c5c2 100644
--- a/tests/test_init.py
+++ b/tests/test_init.py
@@ -1,13 +1,17 @@
import pytest
import sys
-@pytest.mark.parametrize("param", [
+
+@pytest.mark.parametrize(
+ "param",
+ [
["numpy", "numpy_backend", pytest.raises(ImportError)],
["torch", "pytorch_backend", pytest.raises(AttributeError)],
["tensorflow", "tensorflow_backend", pytest.raises(AttributeError)],
["mxnet", "mxnet_backend", pytest.raises(AttributeError)],
],
- ids=["numpy", "pytorch", "tensorflow", "mxnet"])
+ ids=["numpy", "pytorch", "tensorflow", "mxnet"],
+)
def test_missing_backends(isolate_modules, param):
backend_name, module_name, expectation = param
@@ -20,6 +24,7 @@ def test_missing_backends(isolate_modules, param):
with expectation:
import pyhf.tensor
+
getattr(pyhf.tensor, module_name)
# put back
diff --git a/tests/test_interpolate.py b/tests/test_interpolate.py
index 8a9b659609..50e8f343e6 100644
--- a/tests/test_interpolate.py
+++ b/tests/test_interpolate.py
@@ -1,129 +1,161 @@
import pyhf
import numpy as np
import pytest
-import tensorflow as tf
+
@pytest.fixture
def random_histosets_alphasets_pair():
- def generate_shapes(histogramssets,alphasets):
- h_shape = [len(histogramssets),0,0,0]
- a_shape = (len(alphasets),max(map(len,alphasets)))
+ def generate_shapes(histogramssets, alphasets):
+ h_shape = [len(histogramssets), 0, 0, 0]
+ a_shape = (len(alphasets), max(map(len, alphasets)))
for hs in histogramssets:
- h_shape[1] = max(h_shape[1],len(hs))
+ h_shape[1] = max(h_shape[1], len(hs))
for h in hs:
- h_shape[2] = max(h_shape[2],len(h))
+ h_shape[2] = max(h_shape[2], len(h))
for sh in h:
- h_shape[3] = max(h_shape[3],len(sh))
- return tuple(h_shape),a_shape
+ h_shape[3] = max(h_shape[3], len(sh))
+ return tuple(h_shape), a_shape
- def filled_shapes(histogramssets,alphasets):
+ def filled_shapes(histogramssets, alphasets):
# pad our shapes with NaNs
- histos, alphas = generate_shapes(histogramssets,alphasets)
+ histos, alphas = generate_shapes(histogramssets, alphasets)
histos, alphas = np.ones(histos) * np.nan, np.ones(alphas) * np.nan
- for i,syst in enumerate(histogramssets):
- for j,sample in enumerate(syst):
- for k,variation in enumerate(sample):
- histos[i,j,k,:len(variation)] = variation
- for i,alphaset in enumerate(alphasets):
- alphas[i,:len(alphaset)] = alphaset
- return histos,alphas
+ for i, syst in enumerate(histogramssets):
+ for j, sample in enumerate(syst):
+ for k, variation in enumerate(sample):
+ histos[i, j, k, : len(variation)] = variation
+ for i, alphaset in enumerate(alphasets):
+ alphas[i, : len(alphaset)] = alphaset
+ return histos, alphas
nsysts = 150
nhistos_per_syst_upto = 300
nalphas = 1
nbins_upto = 1
- nsyst_histos = np.random.randint(1, 1+nhistos_per_syst_upto, size=nsysts)
- nhistograms = [np.random.randint(1, nbins_upto+1, size=n) for n in nsyst_histos]
- random_alphas = [np.random.uniform(-1, 1,size=nalphas) for n in nsyst_histos]
+ nsyst_histos = np.random.randint(1, 1 + nhistos_per_syst_upto, size=nsysts)
+ nhistograms = [np.random.randint(1, nbins_upto + 1, size=n) for n in nsyst_histos]
+ random_alphas = [np.random.uniform(-1, 1, size=nalphas) for n in nsyst_histos]
random_histogramssets = [
- [# all histos affected by systematic $nh
- [# sample $i, systematic $nh
- np.random.uniform(10*i+j,10*i+j+1, size = nbin).tolist() for j in range(3)
- ] for i,nbin in enumerate(nh)
- ] for nh in nhistograms
+ [ # all histos affected by systematic $nh
+ [ # sample $i, systematic $nh
+ np.random.uniform(10 * i + j, 10 * i + j + 1, size=nbin).tolist()
+ for j in range(3)
+ ]
+ for i, nbin in enumerate(nh)
+ ]
+ for nh in nhistograms
]
- h,a = filled_shapes(random_histogramssets,random_alphas)
- return h,a
+ h, a = filled_shapes(random_histogramssets, random_alphas)
+ return h, a
+
@pytest.mark.skip_mxnet
@pytest.mark.parametrize("interpcode", [0, 1])
def test_interpolator(backend, interpcode, random_histosets_alphasets_pair):
histogramssets, alphasets = random_histosets_alphasets_pair
- interpolator = getattr(pyhf.interpolate, '_hfinterpolator_code{}'.format(interpcode))(histogramssets.tolist())
+ interpolator = getattr(
+ pyhf.interpolate, '_hfinterpolator_code{}'.format(interpcode)
+ )(histogramssets.tolist())
assert interpolator.alphasets_shape == (histogramssets.shape[0], 1)
interpolator(pyhf.tensorlib.astensor(alphasets.tolist()))
assert interpolator.alphasets_shape == alphasets.shape
+
@pytest.mark.skip_mxnet
@pytest.mark.parametrize("interpcode", [0, 1])
def test_interpcode(backend, interpcode, random_histosets_alphasets_pair):
histogramssets, alphasets = random_histosets_alphasets_pair
# single-float precision backends, calculate using single-floats
- if isinstance(pyhf.tensorlib, pyhf.tensor.tensorflow_backend) or isinstance(pyhf.tensorlib, pyhf.tensor.pytorch_backend):
+ if isinstance(pyhf.tensorlib, pyhf.tensor.tensorflow_backend) or isinstance(
+ pyhf.tensorlib, pyhf.tensor.pytorch_backend
+ ):
histogramssets = np.asarray(histogramssets, dtype=np.float32)
alphasets = np.asarray(alphasets, dtype=np.float32)
- slow_result = np.asarray(pyhf.interpolate.interpolator(interpcode, do_tensorized_calc=False)(histogramssets=histogramssets, alphasets=alphasets))
- fast_result = np.asarray(pyhf.tensorlib.tolist(pyhf.interpolate.interpolator(interpcode, do_tensorized_calc=True)(histogramssets=pyhf.tensorlib.astensor(histogramssets.tolist()), alphasets=pyhf.tensorlib.astensor(alphasets.tolist()))))
+ slow_result = np.asarray(
+ pyhf.interpolate.interpolator(interpcode, do_tensorized_calc=False)(
+ histogramssets=histogramssets, alphasets=alphasets
+ )
+ )
+ fast_result = np.asarray(
+ pyhf.tensorlib.tolist(
+ pyhf.interpolate.interpolator(interpcode, do_tensorized_calc=True)(
+ histogramssets=pyhf.tensorlib.astensor(histogramssets.tolist()),
+ alphasets=pyhf.tensorlib.astensor(alphasets.tolist()),
+ )
+ )
+ )
+
+ assert (
+ pytest.approx(slow_result[~np.isnan(slow_result)].ravel().tolist())
+ == fast_result[~np.isnan(fast_result)].ravel().tolist()
+ )
- assert pytest.approx(slow_result[~np.isnan(slow_result)].ravel().tolist()) == fast_result[~np.isnan(fast_result)].ravel().tolist()
@pytest.mark.skip_mxnet
-@pytest.mark.parametrize("do_tensorized_calc", [False, True], ids=['slow','fast'])
+@pytest.mark.parametrize("do_tensorized_calc", [False, True], ids=['slow', 'fast'])
def test_interpcode_0(backend, do_tensorized_calc):
- histogramssets = pyhf.tensorlib.astensor([
- [
- [
- [0.5],
- [1.0],
- [2.0]
- ]
- ]
- ])
- alphasets = pyhf.tensorlib.astensor([[-2,-1,0,1,2]])
- expected = pyhf.tensorlib.astensor([[[[0],[0.5],[1.0],[2.0],[3.0]]]])
+ histogramssets = pyhf.tensorlib.astensor([[[[0.5], [1.0], [2.0]]]])
+ alphasets = pyhf.tensorlib.astensor([[-2, -1, 0, 1, 2]])
+ expected = pyhf.tensorlib.astensor([[[[0], [0.5], [1.0], [2.0], [3.0]]]])
if do_tensorized_calc:
- result_deltas = pyhf.interpolate.interpolator(0, do_tensorized_calc=do_tensorized_calc)(histogramssets, alphasets)
+ result_deltas = pyhf.interpolate.interpolator(
+ 0, do_tensorized_calc=do_tensorized_calc
+ )(histogramssets, alphasets)
else:
- result_deltas = pyhf.tensorlib.astensor(pyhf.interpolate.interpolator(0, do_tensorized_calc=do_tensorized_calc)(pyhf.tensorlib.tolist(histogramssets), pyhf.tensorlib.tolist(alphasets)))
-
+ result_deltas = pyhf.tensorlib.astensor(
+ pyhf.interpolate.interpolator(0, do_tensorized_calc=do_tensorized_calc)(
+ pyhf.tensorlib.tolist(histogramssets), pyhf.tensorlib.tolist(alphasets)
+ )
+ )
# calculate the actual change
- allsets_allhistos_noms_repeated = pyhf.tensorlib.einsum('sa,shb->shab', pyhf.tensorlib.ones(alphasets.shape), histogramssets[:,:,1])
+ allsets_allhistos_noms_repeated = pyhf.tensorlib.einsum(
+ 'sa,shb->shab', pyhf.tensorlib.ones(alphasets.shape), histogramssets[:, :, 1]
+ )
results = allsets_allhistos_noms_repeated + result_deltas
- assert pytest.approx(np.asarray(pyhf.tensorlib.tolist(results)).ravel().tolist()) == np.asarray(pyhf.tensorlib.tolist(expected)).ravel().tolist()
+ assert (
+ pytest.approx(np.asarray(pyhf.tensorlib.tolist(results)).ravel().tolist())
+ == np.asarray(pyhf.tensorlib.tolist(expected)).ravel().tolist()
+ )
+
@pytest.mark.skip_mxnet
-@pytest.mark.parametrize("do_tensorized_calc", [False, True], ids=['slow','fast'])
+@pytest.mark.parametrize("do_tensorized_calc", [False, True], ids=['slow', 'fast'])
def test_interpcode_1(backend, do_tensorized_calc):
- histogramssets = pyhf.tensorlib.astensor([
- [
- [
- [0.9],
- [1.0],
- [1.1]
- ]
- ]
- ])
- alphasets = pyhf.tensorlib.astensor([[-2,-1,0,1,2]])
- expected = pyhf.tensorlib.astensor([[[[0.9**2], [0.9], [1.0], [1.1], [1.1**2]]]])
+ histogramssets = pyhf.tensorlib.astensor([[[[0.9], [1.0], [1.1]]]])
+ alphasets = pyhf.tensorlib.astensor([[-2, -1, 0, 1, 2]])
+ expected = pyhf.tensorlib.astensor(
+ [[[[0.9 ** 2], [0.9], [1.0], [1.1], [1.1 ** 2]]]]
+ )
if do_tensorized_calc:
- result_deltas = pyhf.interpolate.interpolator(1, do_tensorized_calc=do_tensorized_calc)(histogramssets, alphasets)
+ result_deltas = pyhf.interpolate.interpolator(
+ 1, do_tensorized_calc=do_tensorized_calc
+ )(histogramssets, alphasets)
else:
- result_deltas = pyhf.tensorlib.astensor(pyhf.interpolate.interpolator(1, do_tensorized_calc=do_tensorized_calc)(pyhf.tensorlib.tolist(histogramssets), pyhf.tensorlib.tolist(alphasets)))
+ result_deltas = pyhf.tensorlib.astensor(
+ pyhf.interpolate.interpolator(1, do_tensorized_calc=do_tensorized_calc)(
+ pyhf.tensorlib.tolist(histogramssets), pyhf.tensorlib.tolist(alphasets)
+ )
+ )
# calculate the actual change
- allsets_allhistos_noms_repeated = pyhf.tensorlib.einsum('sa,shb->shab', pyhf.tensorlib.ones(alphasets.shape), histogramssets[:,:,1])
+ allsets_allhistos_noms_repeated = pyhf.tensorlib.einsum(
+ 'sa,shb->shab', pyhf.tensorlib.ones(alphasets.shape), histogramssets[:, :, 1]
+ )
results = allsets_allhistos_noms_repeated * result_deltas
- assert pytest.approx(np.asarray(pyhf.tensorlib.tolist(results)).ravel().tolist()) == np.asarray(pyhf.tensorlib.tolist(expected)).ravel().tolist()
+ assert (
+ pytest.approx(np.asarray(pyhf.tensorlib.tolist(results)).ravel().tolist())
+ == np.asarray(pyhf.tensorlib.tolist(expected)).ravel().tolist()
+ )
def test_invalid_interpcode():
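
The expected values in the two interpolation tests above follow the standard HistFactory interpolation codes: code 0 is piecewise linear in alpha and code 1 is exponential. A worked check reproducing the tests' expectations (a sketch, not the library's implementation):

# code 0 with (dn, nom, up) = (0.5, 1.0, 2.0): expect 0.0, 0.5, 1.0, 2.0, 3.0
nom, dn, up = 1.0, 0.5, 2.0
for alpha in [-2, -1, 0, 1, 2]:
    linear = nom + alpha * (up - nom) if alpha >= 0 else nom + alpha * (nom - dn)
    print(alpha, linear)

# code 1 with (dn, nom, up) = (0.9, 1.0, 1.1): expect 0.81, 0.9, 1.0, 1.1, 1.21
nom, dn, up = 1.0, 0.9, 1.1
for alpha in [-2, -1, 0, 1, 2]:
    expo = nom * (up / nom) ** alpha if alpha >= 0 else nom * (dn / nom) ** -alpha
    print(alpha, expo)
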
diff --git a/tests/test_modifiers.py b/tests/test_modifiers.py
index 66cc827500..9ef50ad4b1 100644
--- a/tests/test_modifiers.py
+++ b/tests/test_modifiers.py
@@ -1,15 +1,23 @@
import pytest
-import sys
import inspect
from six import with_metaclass
import pyhf
-modifiers_to_test = ["histosys", "normfactor", "normsys", "shapefactor", "shapesys", "staterror"]
+modifiers_to_test = [
+ "histosys",
+ "normfactor",
+ "normsys",
+ "shapefactor",
+ "shapesys",
+ "staterror",
+]
modifier_pdf_types = ["normal", None, "normal", None, "poisson", "normal"]
# we make sure we can import all of our pre-defined modifiers correctly
-@pytest.mark.parametrize("test_modifierPair", zip(modifiers_to_test, modifier_pdf_types))
+@pytest.mark.parametrize(
+ "test_modifierPair", zip(modifiers_to_test, modifier_pdf_types)
+)
def test_import_default_modifiers(test_modifierPair):
test_modifier, test_mod_type = test_modifierPair
modifier = pyhf.modifiers.registry.get(test_modifier, None)
@@ -19,7 +27,8 @@ def test_import_default_modifiers(test_modifierPair):
assert hasattr(modifier, 'is_constrained')
assert hasattr(modifier, 'pdf_type')
assert hasattr(modifier, 'op_code')
- assert modifier.op_code in ['addition','multiplication']
+ assert modifier.op_code in ['addition', 'multiplication']
+
# we make sure modifiers have right structure
def test_modifiers_structure():
@@ -28,7 +37,8 @@ def test_modifiers_structure():
@modifier(name='myUnconstrainedModifier')
class myCustomModifier(object):
@classmethod
- def required_parset(cls, n_parameters): pass
+ def required_parset(cls, n_parameters):
+ pass
assert inspect.isclass(myCustomModifier)
assert 'myUnconstrainedModifier' in pyhf.modifiers.registry
@@ -39,7 +49,8 @@ def required_parset(cls, n_parameters): pass
@modifier(name='myConstrainedModifier', constrained=True)
class myCustomModifier(object):
@classmethod
- def required_parset(cls, n_parameters): pass
+ def required_parset(cls, n_parameters):
+ pass
assert inspect.isclass(myCustomModifier)
assert 'myConstrainedModifier' in pyhf.modifiers.registry
@@ -47,6 +58,7 @@ def required_parset(cls, n_parameters): pass
assert pyhf.modifiers.registry['myConstrainedModifier'].is_constrained == True
del pyhf.modifiers.registry['myConstrainedModifier']
+
# we make sure decorate can use auto-naming
def test_modifier_name_auto():
from pyhf.modifiers import modifier
@@ -54,7 +66,8 @@ def test_modifier_name_auto():
@modifier
class myCustomModifier(object):
@classmethod
- def required_parset(cls, n_parameters): pass
+ def required_parset(cls, n_parameters):
+ pass
assert inspect.isclass(myCustomModifier)
assert 'myCustomModifier' in pyhf.modifiers.registry
@@ -69,7 +82,8 @@ def test_modifier_name_auto_withkwargs():
@modifier(name=None, constrained=False)
class myCustomModifier(object):
@classmethod
- def required_parset(cls, n_parameters): pass
+ def required_parset(cls, n_parameters):
+ pass
assert inspect.isclass(myCustomModifier)
assert 'myCustomModifier' in pyhf.modifiers.registry
@@ -84,7 +98,8 @@ def test_modifier_name_custom():
@modifier(name='myCustomName')
class myCustomModifier(object):
@classmethod
- def required_parset(cls, n_parameters): pass
+ def required_parset(cls, n_parameters):
+ pass
assert inspect.isclass(myCustomModifier)
assert 'myCustomModifier' not in pyhf.modifiers.registry
@@ -98,16 +113,19 @@ def test_decorate_with_wrong_values():
from pyhf.modifiers import modifier
with pytest.raises(ValueError):
- @modifier('too','many','args')
+
+ @modifier('too', 'many', 'args')
class myCustomModifier(object):
pass
with pytest.raises(TypeError):
+
@modifier(name=1.5)
class myCustomModifier(object):
pass
with pytest.raises(ValueError):
+
@modifier(unused='arg')
class myCustomModifier(object):
pass
@@ -118,13 +136,16 @@ def test_registry_name_clash():
from pyhf.modifiers import modifier
with pytest.raises(KeyError):
+
@modifier(name='histosys')
class myCustomModifier(object):
pass
with pytest.raises(KeyError):
+
class myCustomModifier(object):
@classmethod
- def required_parset(cls, n_parameters): pass
+ def required_parset(cls, n_parameters):
+ pass
pyhf.modifiers.add_to_registry(myCustomModifier, 'histosys')
diff --git a/tests/test_notebooks.py b/tests/test_notebooks.py
index 6ebe197b14..d342679f7a 100644
--- a/tests/test_notebooks.py
+++ b/tests/test_notebooks.py
@@ -7,11 +7,10 @@ def test_notebooks(tmpdir):
outputnb = tmpdir.join('output.ipynb')
common_kwargs = {
'output': str(outputnb),
- 'kernel_name': 'python{}'.format(sys.version_info.major)
+ 'kernel_name': 'python{}'.format(sys.version_info.major),
}
- pm.execute_notebook(
- 'docs/examples/notebooks/hello-world.ipynb', **common_kwargs)
+ pm.execute_notebook('docs/examples/notebooks/hello-world.ipynb', **common_kwargs)
if sys.version_info.major > 2:
# The Binder example uses specific relative paths
@@ -21,16 +20,20 @@ def test_notebooks(tmpdir):
os.chdir(cwd)
pm.execute_notebook(
- 'docs/examples/notebooks/learn/InterpolationCodes.ipynb', **common_kwargs)
+ 'docs/examples/notebooks/learn/InterpolationCodes.ipynb', **common_kwargs
+ )
+ pm.execute_notebook('docs/examples/notebooks/ShapeFactor.ipynb', **common_kwargs)
+ pm.execute_notebook(
+ 'docs/examples/notebooks/multichannel-coupled-histo.ipynb',
+ parameters={'validation_datadir': 'validation/data'},
+ **common_kwargs
+ )
pm.execute_notebook(
- 'docs/examples/notebooks/ShapeFactor.ipynb', **common_kwargs)
- pm.execute_notebook('docs/examples/notebooks/multichannel-coupled-histo.ipynb',
- parameters={'validation_datadir': 'validation/data'},
- **common_kwargs)
- pm.execute_notebook('docs/examples/notebooks/multiBinPois.ipynb',
- parameters={'validation_datadir': 'validation/data'},
- **common_kwargs)
+ 'docs/examples/notebooks/multiBinPois.ipynb',
+ parameters={'validation_datadir': 'validation/data'},
+ **common_kwargs
+ )
nb = pm.read_notebook(str(outputnb))
assert nb.data['number_2d_successpoints'] > 200
diff --git a/tests/test_optim.py b/tests/test_optim.py
index 7856cf586b..671c5c4718 100644
--- a/tests/test_optim.py
+++ b/tests/test_optim.py
@@ -1,5 +1,4 @@
import pyhf
-import tensorflow as tf
import pytest
@@ -12,8 +11,8 @@ def source():
'bkg': [100.0, 150.0],
'bkgsys_up': [102, 190],
'bkgsys_dn': [98, 100],
- 'sig': [30.0, 95.0]
- }
+ 'sig': [30.0, 95.0],
+ },
}
return source
@@ -29,12 +28,8 @@ def spec(source):
'name': 'signal',
'data': source['bindata']['sig'],
'modifiers': [
- {
- 'name': 'mu',
- 'type': 'normfactor',
- 'data': None
- }
- ]
+ {'name': 'mu', 'type': 'normfactor', 'data': None}
+ ],
},
{
'name': 'background',
@@ -45,25 +40,19 @@ def spec(source):
'type': 'histosys',
'data': {
'lo_data': source['bindata']['bkgsys_dn'],
- 'hi_data': source['bindata']['bkgsys_up']
- }
+ 'hi_data': source['bindata']['bkgsys_up'],
+ },
}
- ]
- }
- ]
+ ],
+ },
+ ],
}
]
}
return spec
-@pytest.mark.parametrize('mu',
- [
- 1.,
- ],
- ids=[
- 'mu=1',
- ])
+@pytest.mark.parametrize('mu', [1.0], ids=['mu=1'])
@pytest.mark.skip_mxnet
def test_optim(backend, source, spec, mu):
pdf = pyhf.Model(spec)
@@ -75,9 +64,11 @@ def test_optim(backend, source, spec, mu):
optim = pyhf.optimizer
result = optim.unconstrained_bestfit(
- pyhf.utils.loglambdav, data, pdf, init_pars, par_bounds)
+ pyhf.utils.loglambdav, data, pdf, init_pars, par_bounds
+ )
assert pyhf.tensorlib.tolist(result)
result = optim.constrained_bestfit(
- pyhf.utils.loglambdav, mu, data, pdf, init_pars, par_bounds)
+ pyhf.utils.loglambdav, mu, data, pdf, init_pars, par_bounds
+ )
assert pyhf.tensorlib.tolist(result)
diff --git a/tests/test_pdf.py b/tests/test_pdf.py
index fc55c15486..8ab9d2721e 100644
--- a/tests/test_pdf.py
+++ b/tests/test_pdf.py
@@ -5,54 +5,57 @@
import numpy as np
import json
+
@pytest.mark.fail_mxnet
def test_pdf_inputs(backend):
source = {
- "binning": [2,-0.5,1.5],
- "bindata": {
- "data": [55.0],
- "bkg": [50.0],
- "bkgerr": [7.0],
- "sig": [10.0]
- }
+ "binning": [2, -0.5, 1.5],
+ "bindata": {"data": [55.0], "bkg": [50.0], "bkgerr": [7.0], "sig": [10.0]},
}
- pdf = pyhf.simplemodels.hepdata_like(source['bindata']['sig'], source['bindata']['bkg'], source['bindata']['bkgerr'])
+ pdf = pyhf.simplemodels.hepdata_like(
+ source['bindata']['sig'], source['bindata']['bkg'], source['bindata']['bkgerr']
+ )
pars = pdf.config.suggested_init()
data = source['bindata']['data'] + pdf.config.auxdata
-
tensorlib, _ = backend
assert tensorlib.shape(tensorlib.astensor(data)) == (2,)
assert tensorlib.shape(tensorlib.astensor(pars)) == (2,)
- assert tensorlib.tolist(pdf.pdf(pars,data)) == pytest.approx([0.002417160663753748], abs=1e-4)
- assert tensorlib.tolist(pdf.logpdf(pars,data)) == pytest.approx([-6.025179228209936], abs=1e-4)
+ assert tensorlib.tolist(pdf.pdf(pars, data)) == pytest.approx(
+ [0.002417160663753748], abs=1e-4
+ )
+ assert tensorlib.tolist(pdf.logpdf(pars, data)) == pytest.approx(
+ [-6.025179228209936], abs=1e-4
+ )
@pytest.mark.only_numpy
def test_core_pdf_broadcasting(backend):
- data = [10,11,12,13,14,15]
- lambdas = [15,14,13,12,11,10]
- naive_python = [pyhf.tensorlib.poisson(d, lam) for d,lam in zip(data, lambdas)]
+ data = [10, 11, 12, 13, 14, 15]
+ lambdas = [15, 14, 13, 12, 11, 10]
+ naive_python = [pyhf.tensorlib.poisson(d, lam) for d, lam in zip(data, lambdas)]
- broadcasted = pyhf.tensorlib.poisson(data, lambdas)
+ broadcasted = pyhf.tensorlib.poisson(data, lambdas)
assert np.array(data).shape == np.array(lambdas).shape
- assert broadcasted.shape == np.array(data).shape
- assert np.all(naive_python == broadcasted)
-
+ assert broadcasted.shape == np.array(data).shape
+ assert np.all(naive_python == broadcasted)
- data = [10,11,12,13,14,15]
- mus = [15,14,13,12,11,10]
- sigmas = [1,2,3,4,5,6]
- naive_python = [pyhf.tensorlib.normal(d, mu,sig) for d,mu,sig in zip(data, mus, sigmas)]
+ data = [10, 11, 12, 13, 14, 15]
+ mus = [15, 14, 13, 12, 11, 10]
+ sigmas = [1, 2, 3, 4, 5, 6]
+ naive_python = [
+ pyhf.tensorlib.normal(d, mu, sig) for d, mu, sig in zip(data, mus, sigmas)
+ ]
- broadcasted = pyhf.tensorlib.normal(data, mus, sigmas)
+ broadcasted = pyhf.tensorlib.normal(data, mus, sigmas)
assert np.array(data).shape == np.array(mus).shape
assert np.array(data).shape == np.array(sigmas).shape
- assert broadcasted.shape == np.array(data).shape
- assert np.all(naive_python == broadcasted)
+ assert broadcasted.shape == np.array(data).shape
+ assert np.all(naive_python == broadcasted)
+
def test_pdf_integration_staterror(backend):
spec = {
@@ -62,44 +65,50 @@ def test_pdf_integration_staterror(backend):
'samples': [
{
'name': 'mu',
- 'data': [10.,10.],
+ 'data': [10.0, 10.0],
'modifiers': [
{'name': 'mu', 'type': 'normfactor', 'data': None}
- ]
+ ],
},
{
'name': 'bkg1',
'data': [50.0, 70.0],
'modifiers': [
- {'name': 'stat_firstchannel', 'type': 'staterror', 'data': [12.,12.]}
- ]
+ {
+ 'name': 'stat_firstchannel',
+ 'type': 'staterror',
+ 'data': [12.0, 12.0],
+ }
+ ],
},
{
'name': 'bkg2',
- 'data': [30.0, 20.],
+ 'data': [30.0, 20.0],
'modifiers': [
- {'name': 'stat_firstchannel', 'type': 'staterror', 'data': [5.,5.]}
- ]
+ {
+ 'name': 'stat_firstchannel',
+ 'type': 'staterror',
+ 'data': [5.0, 5.0],
+ }
+ ],
},
- {
- 'name': 'bkg3',
- 'data': [20.0, 15.0],
- 'modifiers': [
- ]
- }
- ]
- },
+ {'name': 'bkg3', 'data': [20.0, 15.0], 'modifiers': []},
+ ],
+ }
]
}
pdf = pyhf.Model(spec)
par = pdf.config.par_slice('stat_firstchannel')
par_set = pdf.config.param_set('stat_firstchannel')
tensorlib, _ = backend
- uncerts = tensorlib.astensor([[12.,12.],[5.,5.]])
- nominal = tensorlib.astensor([[50.,70.],[30.,20.]])
+ uncerts = tensorlib.astensor([[12.0, 12.0], [5.0, 5.0]])
+ nominal = tensorlib.astensor([[50.0, 70.0], [30.0, 20.0]])
quad = tensorlib.sqrt(tensorlib.sum(tensorlib.power(uncerts, 2), axis=0))
totals = tensorlib.sum(nominal, axis=0)
- assert pytest.approx(tensorlib.tolist(par_set.sigmas)) == tensorlib.tolist(tensorlib.divide(quad, totals))
+ assert pytest.approx(tensorlib.tolist(par_set.sigmas)) == tensorlib.tolist(
+ tensorlib.divide(quad, totals)
+ )
+
@pytest.mark.only_numpy
def test_pdf_integration_histosys(backend):
@@ -114,42 +123,71 @@ def test_pdf_integration_histosys(backend):
'data': source['bindata']['sig'],
'modifiers': [
{'name': 'mu', 'type': 'normfactor', 'data': None}
- ]
+ ],
},
{
'name': 'background',
'data': source['bindata']['bkg'],
'modifiers': [
- {'name': 'bkg_norm', 'type': 'histosys', 'data': {'lo_data': source['bindata']['bkgsys_dn'], 'hi_data': source['bindata']['bkgsys_up']}}
- ]
- }
- ]
+ {
+ 'name': 'bkg_norm',
+ 'type': 'histosys',
+ 'data': {
+ 'lo_data': source['bindata']['bkgsys_dn'],
+ 'hi_data': source['bindata']['bkgsys_up'],
+ },
+ }
+ ],
+ },
+ ],
}
]
}
- pdf = pyhf.Model(spec)
-
-
- pars = [None,None]
-
- pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [[0.0], [1.0]]
- assert pdf.expected_data(pars, include_auxdata = False).tolist() == [102,190]
-
-
- pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [[0.0], [2.0]]
- assert pdf.expected_data(pars, include_auxdata = False).tolist() == [104,230]
-
- pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [[0.0], [-1.0]]
- assert pdf.expected_data(pars, include_auxdata = False).tolist() == [ 98,100]
-
- pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [[0.0], [-2.0]]
- assert pdf.expected_data(pars, include_auxdata = False).tolist() == [ 96, 50]
-
- pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [[1.0], [1.0]]
- assert pdf.expected_data(pars, include_auxdata = False).tolist() == [102+30,190+95]
+ pdf = pyhf.Model(spec)
- pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [[1.0], [-1.0]]
- assert pdf.expected_data(pars, include_auxdata = False).tolist() == [ 98+30,100+95]
+ pars = [None, None]
+
+ pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [
+ [0.0],
+ [1.0],
+ ]
+ assert pdf.expected_data(pars, include_auxdata=False).tolist() == [102, 190]
+
+ pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [
+ [0.0],
+ [2.0],
+ ]
+ assert pdf.expected_data(pars, include_auxdata=False).tolist() == [104, 230]
+
+ pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [
+ [0.0],
+ [-1.0],
+ ]
+ assert pdf.expected_data(pars, include_auxdata=False).tolist() == [98, 100]
+
+ pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [
+ [0.0],
+ [-2.0],
+ ]
+ assert pdf.expected_data(pars, include_auxdata=False).tolist() == [96, 50]
+
+ pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [
+ [1.0],
+ [1.0],
+ ]
+ assert pdf.expected_data(pars, include_auxdata=False).tolist() == [
+ 102 + 30,
+ 190 + 95,
+ ]
+
+ pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [
+ [1.0],
+ [-1.0],
+ ]
+ assert pdf.expected_data(pars, include_auxdata=False).tolist() == [
+ 98 + 30,
+ 100 + 95,
+ ]
@pytest.mark.skip_mxnet
@@ -165,30 +203,53 @@ def test_pdf_integration_normsys(backend):
'data': source['bindata']['sig'],
'modifiers': [
{'name': 'mu', 'type': 'normfactor', 'data': None}
- ]
+ ],
},
{
'name': 'background',
'data': source['bindata']['bkg'],
'modifiers': [
- {'name': 'bkg_norm', 'type': 'normsys','data': {'lo': 0.9, 'hi': 1.1}}
- ]
- }
- ]
+ {
+ 'name': 'bkg_norm',
+ 'type': 'normsys',
+ 'data': {'lo': 0.9, 'hi': 1.1},
+ }
+ ],
+ },
+ ],
}
]
}
- pdf = pyhf.Model(spec)
-
- pars = [None,None]
- pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [[0.0], [0.0]]
- assert np.allclose(pyhf.tensorlib.tolist(pdf.expected_data(pars, include_auxdata = False)),[100,150])
- pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [[0.0], [1.0]]
- assert np.allclose(pyhf.tensorlib.tolist(pdf.expected_data(pars, include_auxdata = False)),[100*1.1,150*1.1])
- pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [[0.0], [-1.0]]
- assert np.allclose(pyhf.tensorlib.tolist(pdf.expected_data(pars, include_auxdata = False)),[100*0.9,150*0.9])
+ pdf = pyhf.Model(spec)
+ pars = [None, None]
+ pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [
+ [0.0],
+ [0.0],
+ ]
+ assert np.allclose(
+ pyhf.tensorlib.tolist(pdf.expected_data(pars, include_auxdata=False)),
+ [100, 150],
+ )
+
+ pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [
+ [0.0],
+ [1.0],
+ ]
+ assert np.allclose(
+ pyhf.tensorlib.tolist(pdf.expected_data(pars, include_auxdata=False)),
+ [100 * 1.1, 150 * 1.1],
+ )
+
+ pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [
+ [0.0],
+ [-1.0],
+ ]
+ assert np.allclose(
+ pyhf.tensorlib.tolist(pdf.expected_data(pars, include_auxdata=False)),
+ [100 * 0.9, 150 * 0.9],
+ )
@pytest.mark.only_numpy
def test_pdf_integration_shapesys(backend):
@@ -202,41 +263,59 @@ def test_pdf_integration_shapesys(backend):
'name': 'signal',
'data': source['bindata']['sig'],
'modifiers': [
- {'name': 'mu', 'type': 'normfactor', 'data': None}
- ]
+ {'name': 'mu', 'type': 'normfactor', 'data': None}
+ ],
},
{
'name': 'background',
'data': source['bindata']['bkg'],
'modifiers': [
- {'name': 'bkg_norm', 'type': 'shapesys','data': [10, 10]}
- ]
- }
- ]
+ {'name': 'bkg_norm', 'type': 'shapesys', 'data': [10, 10]}
+ ],
+ },
+ ],
}
]
}
- pdf = pyhf.Model(spec)
-
-
-
- pars = [None,None]
-
-
- pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [[0.0], [1.0,1.0]]
- assert pdf.expected_data(pars, include_auxdata = False).tolist() == [100,150]
-
- pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [[0.0], [1.1,1.0]]
- assert pdf.expected_data(pars, include_auxdata = False).tolist() == [100*1.1,150]
-
- pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [[0.0], [1.0,1.1]]
- assert pdf.expected_data(pars, include_auxdata = False).tolist() == [100,150*1.1]
-
- pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [[0.0], [1.1, 0.9]]
- assert pdf.expected_data(pars, include_auxdata = False).tolist() == [100*1.1,150*0.9]
- pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [[0.0], [0.9,1.1]]
- assert pdf.expected_data(pars, include_auxdata = False).tolist() == [100*0.9,150*1.1]
+ pdf = pyhf.Model(spec)
+ pars = [None, None]
+
+ pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [
+ [0.0],
+ [1.0, 1.0],
+ ]
+ assert pdf.expected_data(pars, include_auxdata=False).tolist() == [100, 150]
+
+ pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [
+ [0.0],
+ [1.1, 1.0],
+ ]
+ assert pdf.expected_data(pars, include_auxdata=False).tolist() == [100 * 1.1, 150]
+
+ pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [
+ [0.0],
+ [1.0, 1.1],
+ ]
+ assert pdf.expected_data(pars, include_auxdata=False).tolist() == [100, 150 * 1.1]
+
+ pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [
+ [0.0],
+ [1.1, 0.9],
+ ]
+ assert pdf.expected_data(pars, include_auxdata=False).tolist() == [
+ 100 * 1.1,
+ 150 * 0.9,
+ ]
+
+ pars[pdf.config.par_slice('mu')], pars[pdf.config.par_slice('bkg_norm')] = [
+ [0.0],
+ [0.9, 1.1],
+ ]
+ assert pdf.expected_data(pars, include_auxdata=False).tolist() == [
+ 100 * 0.9,
+ 150 * 1.1,
+ ]
def test_invalid_modifier():
@@ -249,10 +328,14 @@ def test_invalid_modifier():
'name': 'ttbar',
'data': [1],
'modifiers': [
- {'name': 'a_name', 'type': 'this_should_not_exist', 'data': [1]}
- ]
- },
- ]
+ {
+ 'name': 'a_name',
+ 'type': 'this_should_not_exist',
+ 'data': [1],
+ }
+ ],
+ }
+ ],
}
]
}
@@ -268,23 +351,27 @@ def test_invalid_modifier_name_resuse():
'samples': [
{
'name': 'signal',
- 'data': [5.],
+ 'data': [5.0],
'modifiers': [
{'name': 'reused_name', 'type': 'normfactor', 'data': None}
- ]
+ ],
},
{
'name': 'background',
- 'data': [50.],
+ 'data': [50.0],
'modifiers': [
- {'name': 'reused_name', 'type': 'normsys','data': {'lo': 0.9, 'hi': 1.1}}
- ]
- }
- ]
+ {
+ 'name': 'reused_name',
+ 'type': 'normsys',
+ 'data': {'lo': 0.9, 'hi': 1.1},
+ }
+ ],
+ },
+ ],
}
]
}
with pytest.raises(pyhf.exceptions.InvalidNameReuse):
- pdf = pyhf.Model(spec, poiname = 'reused_name')
+ pdf = pyhf.Model(spec, poiname='reused_name')
- pdf = pyhf.Model(spec, poiname = 'reused_name', qualify_names = True)
+ pdf = pyhf.Model(spec, poiname='reused_name', qualify_names=True)
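Aside: the par_slice assignments that dominate the rewritten test bodies above rely on ordinary Python slice assignment, which splices a list into place. A minimal sketch, assuming a hypothetical two-parameter layout (the slice bounds are stand-ins, not pyhf's actual parameter ordering):

    # Stand-ins for pdf.config.par_slice('mu') / par_slice('bkg_norm'),
    # assuming 'mu' sits at index 0 and 'bkg_norm' at index 1.
    mu_slice = slice(0, 1)
    bkg_slice = slice(1, 2)

    pars = [None, None]
    # Tuple assignment splices both parameter values in one statement.
    pars[mu_slice], pars[bkg_slice] = [[0.0], [1.0]]
    print(pars)  # -> [0.0, 1.0]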
diff --git a/tests/test_schema.py b/tests/test_schema.py
index eec6bfff9d..aa10c854cf 100644
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -1,89 +1,63 @@
import pyhf
import pytest
+
def test_no_samples():
- spec = {
- 'channels': [
- {
- 'name': 'channel',
- 'samples': []
- },
- ]
- }
+ spec = {'channels': [{'name': 'channel', 'samples': []}]}
with pytest.raises(pyhf.exceptions.InvalidSpecification):
pyhf.Model(spec)
+
def test_sample_missing_data():
spec = {
'channels': [
{
'name': 'channel',
- 'samples': [
- {
- 'name': 'sample',
- 'data': [],
- 'modifiers': []
- }
- ]
- },
+ 'samples': [{'name': 'sample', 'data': [], 'modifiers': []}],
+ }
]
}
with pytest.raises(pyhf.exceptions.InvalidSpecification):
pyhf.Model(spec)
+
def test_sample_missing_name():
spec = {
- 'channels': [
- {
- 'name': 'channel',
- 'samples': [
- {
- 'data': [1],
- 'modifiers': []
- },
- ]
- }
- ]
+ 'channels': [{'name': 'channel', 'samples': [{'data': [1], 'modifiers': []}]}]
}
with pytest.raises(pyhf.exceptions.InvalidSpecification):
pyhf.Model(spec)
+
def test_sample_missing_all_modifiers():
spec = {
'channels': [
{
'name': 'channel',
- 'samples': [
- {
- 'name': 'sample',
- 'data': [10.],
- 'modifiers': []
- }
- ]
- },
+ 'samples': [{'name': 'sample', 'data': [10.0], 'modifiers': []}],
+ }
]
}
with pytest.raises(pyhf.exceptions.InvalidModel):
pyhf.Model(spec)
+
def test_one_sample_missing_modifiers():
spec = {
'channels': [
{
'name': 'channel',
'samples': [
- {
- 'name': 'sample',
- 'data': [10.],
- 'modifiers': []
- },
+ {'name': 'sample', 'data': [10.0], 'modifiers': []},
{
'name': 'another_sample',
- 'data': [5.],
- 'modifiers': [{'name': 'mypoi', 'type': 'normfactor', 'data': None}]
- }
- ]
- },
+ 'data': [5.0],
+ 'modifiers': [
+ {'name': 'mypoi', 'type': 'normfactor', 'data': None}
+ ],
+ },
+ ],
+ }
]
}
pyhf.Model(spec, poiname='mypoi')
@@ -99,16 +73,21 @@ def test_add_unknown_modifier():
'name': 'ttbar',
'data': [1],
'modifiers': [
- {'name': 'a_name', 'type': 'this_should_not_exist', 'data': [1]}
- ]
- },
- ]
+ {
+ 'name': 'a_name',
+ 'type': 'this_should_not_exist',
+ 'data': [1],
+ }
+ ],
+ }
+ ],
}
]
}
with pytest.raises(pyhf.exceptions.InvalidSpecification):
pyhf.Model(spec)
+
def test_empty_staterror():
spec = {
'channels': [
@@ -117,18 +96,23 @@ def test_empty_staterror():
'samples': [
{
'name': 'sample',
- 'data': [10.],
+ 'data': [10.0],
'modifiers': [
- {'name': 'staterror_channel', 'type': 'staterror', 'data': []}
- ]
+ {
+ 'name': 'staterror_channel',
+ 'type': 'staterror',
+ 'data': [],
+ }
+ ],
}
- ]
- },
+ ],
+ }
]
}
with pytest.raises(pyhf.exceptions.InvalidSpecification):
pyhf.Model(spec)
+
def test_empty_shapesys():
spec = {
'channels': [
@@ -137,18 +121,19 @@ def test_empty_shapesys():
'samples': [
{
'name': 'sample',
- 'data': [10.],
+ 'data': [10.0],
'modifiers': [
- {'name': 'sample_norm', 'type': 'shapesys','data': []}
- ]
+ {'name': 'sample_norm', 'type': 'shapesys', 'data': []}
+ ],
}
- ]
- },
+ ],
+ }
]
}
with pytest.raises(pyhf.exceptions.InvalidSpecification):
pyhf.Model(spec)
+
def test_empty_histosys():
spec = {
'channels': [
@@ -157,13 +142,17 @@ def test_empty_histosys():
'samples': [
{
'name': 'sample',
- 'data': [10.],
+ 'data': [10.0],
'modifiers': [
- {'name': 'modifier', 'type': 'histosys', 'data': {'lo_data': [], 'hi_data': []}}
- ]
+ {
+ 'name': 'modifier',
+ 'type': 'histosys',
+ 'data': {'lo_data': [], 'hi_data': []},
+ }
+ ],
}
- ]
- },
+ ],
+ }
]
}
with pytest.raises(pyhf.exceptions.InvalidSpecification):
pyhf.Model(spec)
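For orientation, every test in this file follows the same pattern, condensed here into a self-contained sketch (the spec is the one from test_no_samples above): constructing a Model validates the spec against the JSON schema, so a structurally invalid spec raises at construction time.

    import pytest
    import pyhf

    # An empty sample list violates the schema, so Model() raises
    # pyhf.exceptions.InvalidSpecification during construction.
    spec = {'channels': [{'name': 'channel', 'samples': []}]}
    with pytest.raises(pyhf.exceptions.InvalidSpecification):
        pyhf.Model(spec)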
diff --git a/tests/test_scripts.py b/tests/test_scripts.py
index 105bca3ed5..f2eb997cd9 100644
--- a/tests/test_scripts.py
+++ b/tests/test_scripts.py
@@ -1,13 +1,13 @@
-import pytest
import json
import shlex
-
import pyhf
# see test_import.py for the same (detailed) test
def test_import_prepHistFactory(tmpdir, script_runner):
temp = tmpdir.join("parsed_output.json")
- command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s} --hide-progress'.format(temp.strpath)
+ command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s} --hide-progress'.format(
+ temp.strpath
+ )
ret = script_runner.run(*shlex.split(command))
assert ret.success
assert ret.stdout == ''
@@ -17,17 +17,23 @@ def test_import_prepHistFactory(tmpdir, script_runner):
spec = {'channels': parsed_xml['channels']}
pyhf.utils.validate(spec, pyhf.utils.get_default_schema())
+
def test_import_prepHistFactory_withProgress(tmpdir, script_runner):
temp = tmpdir.join("parsed_output.json")
- command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(temp.strpath)
+ command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(
+ temp.strpath
+ )
ret = script_runner.run(*shlex.split(command))
assert ret.success
assert ret.stdout == ''
assert ret.stderr != ''
+
def test_import_prepHistFactory_stdout(tmpdir, script_runner):
temp = tmpdir.join("parsed_output.json")
- command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/'.format(temp.strpath)
+ command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/'
ret = script_runner.run(*shlex.split(command))
assert ret.success
assert ret.stdout != ''
@@ -36,9 +42,12 @@ def test_import_prepHistFactory_stdout(tmpdir, script_runner):
assert d
assert 'channels' in d
+
def test_import_prepHistFactory_and_cls(tmpdir, script_runner):
temp = tmpdir.join("parsed_output.json")
- command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(temp.strpath)
+ command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(
+ temp.strpath
+ )
ret = script_runner.run(*shlex.split(command))
command = 'pyhf cls {0:s}'.format(temp.strpath)
@@ -50,7 +59,12 @@ def test_import_prepHistFactory_and_cls(tmpdir, script_runner):
assert 'CLs_obs' in d
assert 'CLs_exp' in d
- for measurement in ['GaussExample','GammaExample','LogNormExample','ConstExample']:
+ for measurement in [
+ 'GaussExample',
+ 'GammaExample',
+ 'LogNormExample',
+ 'ConstExample',
+ ]:
command = 'pyhf cls {0:s} --measurement {1:s}'.format(temp.strpath, measurement)
ret = script_runner.run(*shlex.split(command))
@@ -69,15 +83,21 @@ def test_import_prepHistFactory_and_cls(tmpdir, script_runner):
assert 'CLs_obs' in d
assert 'CLs_exp' in d
+
def test_import_and_export(tmpdir, script_runner):
temp = tmpdir.join("parsed_output.json")
- command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(temp.strpath)
+ command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(
+ temp.strpath
+ )
ret = script_runner.run(*shlex.split(command))
- command = 'pyhf json2xml {0:s} --specroot {1:s} --dataroot {1:s}'.format(temp.strpath,str(tmpdir))
+ command = 'pyhf json2xml {0:s} --specroot {1:s} --dataroot {1:s}'.format(
+ temp.strpath, str(tmpdir)
+ )
ret = script_runner.run(*shlex.split(command))
assert ret.success
+
def test_patch(tmpdir, script_runner):
patch = tmpdir.join('patch.json')
@@ -87,17 +107,22 @@ def test_patch(tmpdir, script_runner):
patch.write(patchcontent)
temp = tmpdir.join("parsed_output.json")
- command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(temp.strpath)
+ command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(
+ temp.strpath
+ )
ret = script_runner.run(*shlex.split(command))
- command = 'pyhf cls {0:s} --patch {1:s}'.format(temp.strpath,patch.strpath)
+ command = 'pyhf cls {0:s} --patch {1:s}'.format(temp.strpath, patch.strpath)
ret = script_runner.run(*shlex.split(command))
assert ret.success
import io
- command = 'pyhf cls {0:s} --patch -'.format(temp.strpath,patch.strpath)
- pipefile = io.StringIO(patchcontent) # python 2.7 pytest-files are not file-like enough
- ret = script_runner.run(*shlex.split(command), stdin = pipefile)
+ command = 'pyhf cls {0:s} --patch -'.format(temp.strpath)
+
+ pipefile = io.StringIO(
+ patchcontent
+ ) # python 2.7 pytest-files are not file-like enough
+ ret = script_runner.run(*shlex.split(command), stdin=pipefile)
print(ret.stderr)
assert ret.success
@@ -108,19 +133,26 @@ def test_patch_fail(tmpdir, script_runner):
patch.write('''not,json''')
temp = tmpdir.join("parsed_output.json")
- command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(temp.strpath)
+ command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(
+ temp.strpath
+ )
ret = script_runner.run(*shlex.split(command))
- command = 'pyhf cls {0:s} --patch {1:s}'.format(temp.strpath,patch.strpath)
+ command = 'pyhf cls {0:s} --patch {1:s}'.format(temp.strpath, patch.strpath)
ret = script_runner.run(*shlex.split(command))
assert not ret.success
+
def test_bad_measurement_name(tmpdir, script_runner):
temp = tmpdir.join("parsed_output.json")
- command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(temp.strpath)
+ command = 'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {0:s}'.format(
+ temp.strpath
+ )
ret = script_runner.run(*shlex.split(command))
- command = 'pyhf cls {0:s} --measurement "a-fake-measurement-name"'.format(temp.strpath)
+ command = 'pyhf cls {0:s} --measurement "a-fake-measurement-name"'.format(
+ temp.strpath
+ )
ret = script_runner.run(*shlex.split(command))
assert not ret.success
- #assert 'no measurement by name' in ret.stderr # numpy swallows the log.error() here, dunno why
+ # assert 'no measurement by name' in ret.stderr # numpy swallows the log.error() here, dunno why
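The command-building idiom used throughout this file, sketched standalone (the path is a stand-in for temp.strpath): shlex.split turns the formatted command string into an argv list, honouring shell quoting, before handing it to script_runner.

    import shlex

    temp_path = '/tmp/parsed_output.json'  # stand-in for temp.strpath
    command = 'pyhf cls {0:s} --measurement "a-fake-measurement-name"'.format(temp_path)
    # The quoted measurement name survives as a single token:
    # ['pyhf', 'cls', '/tmp/parsed_output.json', '--measurement', 'a-fake-measurement-name']
    print(shlex.split(command))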
diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index c6496ceb86..3a48276ef0 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -1,15 +1,8 @@
+import pytest
+import numpy as np
import pyhf
-
-from pyhf.tensor.numpy_backend import numpy_backend
-from pyhf.tensor.tensorflow_backend import tensorflow_backend
-from pyhf.tensor.pytorch_backend import pytorch_backend
-from pyhf.tensor.mxnet_backend import mxnet_backend
from pyhf.simplemodels import hepdata_like
-import numpy as np
-import tensorflow as tf
-
-import pytest
def test_simple_tensor_ops(backend):
tb = pyhf.tensorlib
@@ -21,106 +14,195 @@ def test_simple_tensor_ops(backend):
assert tb.tolist(tb.log(tb.exp([2, 3, 4]))) == [2, 3, 4]
assert tb.tolist(tb.abs([-1, -2])) == [1, 2]
+
def test_complex_tensor_ops(backend):
tb = pyhf.tensorlib
+ assert tb.tolist(tb.outer([1, 2, 3], [4, 5, 6])) == [
+ [4, 5, 6],
+ [8, 10, 12],
+ [12, 15, 18],
+ ]
+ assert tb.tolist(tb.stack([tb.astensor([1, 2, 3]), tb.astensor([4, 5, 6])])) == [
+ [1, 2, 3],
+ [4, 5, 6],
+ ]
assert tb.tolist(
- tb.outer([1, 2, 3], [4, 5, 6])) == [[4, 5, 6], [8, 10, 12], [12, 15, 18]]
- assert tb.tolist(tb.stack(
- [tb.astensor([1, 2, 3]), tb.astensor([4, 5, 6])])) == [[1, 2, 3], [4, 5, 6]]
- assert tb.tolist(tb.concatenate(
- [tb.astensor([1, 2, 3]), tb.astensor([4, 5, 6])])) == [1, 2, 3, 4, 5, 6]
+ tb.concatenate([tb.astensor([1, 2, 3]), tb.astensor([4, 5, 6])])
+ ) == [1, 2, 3, 4, 5, 6]
+ assert tb.tolist(tb.clip(tb.astensor([-2, -1, 0, 1, 2]), -1, 1)) == [
+ -1,
+ -1,
+ 0,
+ 1,
+ 1,
+ ]
assert tb.tolist(
- tb.clip(tb.astensor([-2, -1, 0, 1, 2]), -1, 1)) == [-1, -1, 0, 1, 1]
- assert tb.tolist(tb.where(
- tb.astensor([1, 0, 1]),
- tb.astensor([1, 1, 1]),
- tb.astensor([2, 2, 2]))) == [1, 2, 1]
+ tb.where(tb.astensor([1, 0, 1]), tb.astensor([1, 1, 1]), tb.astensor([2, 2, 2]))
+ ) == [1, 2, 1]
+
def test_ones(backend):
tb = pyhf.tensorlib
assert tb.tolist(tb.ones((2, 3))) == [[1, 1, 1], [1, 1, 1]]
- assert tb.tolist(tb.ones((4, 5))) == [[1.] * 5] * 4
+ assert tb.tolist(tb.ones((4, 5))) == [[1.0] * 5] * 4
+
def test_zeros(backend):
tb = pyhf.tensorlib
- assert tb.tolist(tb.zeros((4, 5))) == [[0.] * 5] * 4
+ assert tb.tolist(tb.zeros((4, 5))) == [[0.0] * 5] * 4
+
def test_broadcasting(backend):
tb = pyhf.tensorlib
- assert list(map(tb.tolist, tb.simple_broadcast(
- tb.astensor([1, 1, 1]),
- tb.astensor([2]),
- tb.astensor([3, 3, 3])))) == [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
- assert list(map(tb.tolist, tb.simple_broadcast(1, [2, 3, 4], [5, 6, 7]))) \
- == [[1, 1, 1], [2, 3, 4], [5, 6, 7]]
- assert list(map(tb.tolist, tb.simple_broadcast([1], [2, 3, 4], [5, 6, 7]))) \
- == [[1, 1, 1], [2, 3, 4], [5, 6, 7]]
+ assert list(
+ map(
+ tb.tolist,
+ tb.simple_broadcast(
+ tb.astensor([1, 1, 1]), tb.astensor([2]), tb.astensor([3, 3, 3])
+ ),
+ )
+ ) == [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
+ assert list(map(tb.tolist, tb.simple_broadcast(1, [2, 3, 4], [5, 6, 7]))) == [
+ [1, 1, 1],
+ [2, 3, 4],
+ [5, 6, 7],
+ ]
+ assert list(map(tb.tolist, tb.simple_broadcast([1], [2, 3, 4], [5, 6, 7]))) == [
+ [1, 1, 1],
+ [2, 3, 4],
+ [5, 6, 7],
+ ]
with pytest.raises(Exception):
tb.simple_broadcast([1], [2, 3], [5, 6, 7])
+
def test_reshape(backend):
tb = pyhf.tensorlib
- assert tb.tolist(tb.reshape(tb.ones((1,2,3)), (-1,))) == [1, 1, 1, 1, 1, 1]
+ assert tb.tolist(tb.reshape(tb.ones((1, 2, 3)), (-1,))) == [1, 1, 1, 1, 1, 1]
+
def test_shape(backend):
tb = pyhf.tensorlib
- assert tb.shape(tb.ones((1,2,3,4,5))) == (1,2,3,4,5)
+ assert tb.shape(tb.ones((1, 2, 3, 4, 5))) == (1, 2, 3, 4, 5)
+
def test_pdf_calculations(backend):
tb = pyhf.tensorlib
+ assert tb.tolist(tb.normal_cdf(tb.astensor([0.8]))) == pytest.approx(
+ [0.7881446014166034], 1e-07
+ )
assert tb.tolist(
- tb.normal_cdf(tb.astensor([0.8]))) == pytest.approx([0.7881446014166034], 1e-07)
- assert tb.tolist(
- tb.normal_logpdf(tb.astensor([0, 0, 1, 1, 0, 0, 1, 1]), tb.astensor([0, 1, 0, 1, 0, 1, 0, 1]), tb.astensor([0, 0, 0, 0, 1, 1, 1, 1]))) == pytest.approx([np.nan, np.nan, np.nan, np.nan, -0.91893853, -1.41893853, -1.41893853, -0.91893853], nan_ok=True)
+ tb.normal_logpdf(
+ tb.astensor([0, 0, 1, 1, 0, 0, 1, 1]),
+ tb.astensor([0, 1, 0, 1, 0, 1, 0, 1]),
+ tb.astensor([0, 0, 0, 0, 1, 1, 1, 1]),
+ )
+ ) == pytest.approx(
+ [
+ np.nan,
+ np.nan,
+ np.nan,
+ np.nan,
+ -0.91893853,
+ -1.41893853,
+ -1.41893853,
+ -0.91893853,
+ ],
+ nan_ok=True,
+ )
# poisson(lambda=0) is not defined, should return NaN
+ assert tb.tolist(tb.poisson([0, 0, 1, 1], [0, 1, 0, 1])) == pytest.approx(
+ [np.nan, 0.3678794503211975, 0.0, 0.3678794503211975], nan_ok=True
+ )
assert tb.tolist(
- tb.poisson([0, 0, 1, 1], [0, 1, 0, 1])) == pytest.approx([np.nan, 0.3678794503211975, 0.0, 0.3678794503211975], nan_ok=True)
- assert tb.tolist(
- tb.poisson_logpdf(tb.astensor([0, 0, 1, 1]), tb.astensor([0, 1, 0, 1]))) == pytest.approx(np.log([np.nan, 0.3678794503211975, 0.0, 0.3678794503211975]).tolist(), nan_ok=True)
+ tb.poisson_logpdf(tb.astensor([0, 0, 1, 1]), tb.astensor([0, 1, 0, 1]))
+ ) == pytest.approx(
+ np.log([np.nan, 0.3678794503211975, 0.0, 0.3678794503211975]).tolist(),
+ nan_ok=True,
+ )
+
@pytest.mark.skip_mxnet
def test_boolean_mask(backend):
tb = pyhf.tensorlib
- assert tb.tolist(tb.boolean_mask(tb.astensor([1,2,3,4,5,6]), tb.astensor([True, True, False, True, False, False], dtype='bool'))) == [1, 2, 4]
- assert tb.tolist(tb.boolean_mask(tb.astensor([[1,2],[3,4],[5,6]]), tb.astensor([[True, True], [False, True], [False, False]], dtype='bool'))) == [1, 2, 4]
+ assert tb.tolist(
+ tb.boolean_mask(
+ tb.astensor([1, 2, 3, 4, 5, 6]),
+ tb.astensor([True, True, False, True, False, False], dtype='bool'),
+ )
+ ) == [1, 2, 4]
+ assert tb.tolist(
+ tb.boolean_mask(
+ tb.astensor([[1, 2], [3, 4], [5, 6]]),
+ tb.astensor([[True, True], [False, True], [False, False]], dtype='bool'),
+ )
+ ) == [1, 2, 4]
+
def test_1D_gather(backend):
tb = pyhf.tensorlib
- assert tb.tolist(tb.gather(tb.astensor([1,2,3,4,5,6]), tb.astensor([4,0,3,2], dtype='int'))) == [5, 1, 4, 3]
- assert tb.tolist(tb.gather(tb.astensor([1,2,3,4,5,6]), tb.astensor([[4,0],[3,2]], dtype='int'))) == [[5, 1], [4, 3]]
+ assert tb.tolist(
+ tb.gather(
+ tb.astensor([1, 2, 3, 4, 5, 6]), tb.astensor([4, 0, 3, 2], dtype='int')
+ )
+ ) == [5, 1, 4, 3]
+ assert tb.tolist(
+ tb.gather(
+ tb.astensor([1, 2, 3, 4, 5, 6]), tb.astensor([[4, 0], [3, 2]], dtype='int')
+ )
+ ) == [[5, 1], [4, 3]]
+
@pytest.mark.fail_pytorch
def test_ND_gather(backend):
tb = pyhf.tensorlib
- assert tb.tolist(tb.gather(tb.astensor([[1,2],[3,4],[5,6]]), tb.astensor([1,0], dtype='int'))) == [[3, 4], [1, 2]]
+ assert tb.tolist(
+ tb.gather(
+ tb.astensor([[1, 2], [3, 4], [5, 6]]), tb.astensor([1, 0], dtype='int')
+ )
+ ) == [[3, 4], [1, 2]]
+
@pytest.mark.fail_mxnet
def test_isfinite(backend):
tb = pyhf.tensorlib
- assert tb.tolist(tb.isfinite(tb.astensor([1.0, float("nan"), float("inf")]))) == [True, False, False]
+ assert tb.tolist(tb.isfinite(tb.astensor([1.0, float("nan"), float("inf")]))) == [
+ True,
+ False,
+ False,
+ ]
+
def test_einsum(backend):
tb = pyhf.tensorlib
- x = np.arange(20).reshape(5,4).tolist()
+ x = np.arange(20).reshape(5, 4).tolist()
if isinstance(pyhf.tensorlib, pyhf.tensor.mxnet_backend):
with pytest.raises(NotImplementedError):
- assert tb.einsum('ij->ji',[1,2,3])
+ assert tb.einsum('ij->ji', [1, 2, 3])
else:
- assert np.all(tb.tolist(tb.einsum('ij->ji',x)) == np.asarray(x).T.tolist())
- assert tb.tolist(tb.einsum('i,j->ij',tb.astensor([1,1,1]),tb.astensor([1,2,3]))) == [[1,2,3]]*3
+ assert np.all(tb.tolist(tb.einsum('ij->ji', x)) == np.asarray(x).T.tolist())
+ assert (
+ tb.tolist(
+ tb.einsum('i,j->ij', tb.astensor([1, 1, 1]), tb.astensor([1, 2, 3]))
+ )
+ == [[1, 2, 3]] * 3
+ )
+
def test_list_to_list(backend):
tb = pyhf.tensorlib
# test when no other tensor operations are done
- assert tb.tolist([1,2,3,4]) == [1,2,3,4]
- assert tb.tolist([[1],[2],[3],[4]]) == [[1],[2],[3],[4]]
- assert tb.tolist([[1,2], 3, [4]]) == [[1,2], 3, [4]]
+ assert tb.tolist([1, 2, 3, 4]) == [1, 2, 3, 4]
+ assert tb.tolist([[1], [2], [3], [4]]) == [[1], [2], [3], [4]]
+ assert tb.tolist([[1, 2], 3, [4]]) == [[1, 2], 3, [4]]
+
def test_tensor_to_list(backend):
tb = pyhf.tensorlib
- assert tb.tolist(tb.astensor([1,2,3,4])) == [1,2,3,4]
- assert tb.tolist(tb.astensor([[1],[2],[3],[4]])) == [[1],[2],[3],[4]]
+ assert tb.tolist(tb.astensor([1, 2, 3, 4])) == [1, 2, 3, 4]
+ assert tb.tolist(tb.astensor([[1], [2], [3], [4]])) == [[1], [2], [3], [4]]
+
@pytest.mark.only_tensorflow
def test_tensor_list_conversion(backend):
@@ -137,22 +219,23 @@ def test_tensorflow_tolist_nosession():
# this isn't covered by test_list_to_list since we need to check if it's ok
# without a session explicitly
- assert tb.tolist([1,2,3,4]) == [1,2,3,4]
+ assert tb.tolist([1, 2, 3, 4]) == [1, 2, 3, 4]
with pytest.raises(RuntimeError):
# but a tensor shouldn't
- assert tb.tolist(tb.astensor([1,2,3,4])) == [1,2,3,4]
+ assert tb.tolist(tb.astensor([1, 2, 3, 4])) == [1, 2, 3, 4]
+
@pytest.mark.skip_mxnet
def test_pdf_eval(backend):
source = {
"binning": [2, -0.5, 1.5],
"bindata": {
- "data": [120.0, 180.0],
- "bkg": [100.0, 150.0],
- "bkgsys_up": [102, 190],
- "bkgsys_dn": [98, 100],
- "sig": [30.0, 95.0]
- }
+ "data": [120.0, 180.0],
+ "bkg": [100.0, 150.0],
+ "bkgsys_up": [102, 190],
+ "bkgsys_dn": [98, 100],
+ "sig": [30.0, 95.0],
+ },
}
spec = {
'channels': [
@@ -162,22 +245,33 @@ def test_pdf_eval(backend):
{
'name': 'signal',
'data': source['bindata']['sig'],
- 'modifiers': [{'name': 'mu', 'type': 'normfactor', 'data': None}]
+ 'modifiers': [
+ {'name': 'mu', 'type': 'normfactor', 'data': None}
+ ],
},
{
'name': 'background',
'data': source['bindata']['bkg'],
'modifiers': [
- {'name': 'bkg_norm', 'type': 'histosys', 'data': {'lo_data': source['bindata']['bkgsys_dn'], 'hi_data': source['bindata']['bkgsys_up']}}
- ]
- }
- ]
+ {
+ 'name': 'bkg_norm',
+ 'type': 'histosys',
+ 'data': {
+ 'lo_data': source['bindata']['bkgsys_dn'],
+ 'hi_data': source['bindata']['bkgsys_up'],
+ },
+ }
+ ],
+ },
+ ],
}
]
}
pdf = pyhf.Model(spec)
data = source['bindata']['data'] + pdf.config.auxdata
- assert pytest.approx([-17.648827643136507], rel=5e-5) == pyhf.tensorlib.tolist(pdf.logpdf(pdf.config.suggested_init(), data))
+ assert pytest.approx([-17.648827643136507], rel=5e-5) == pyhf.tensorlib.tolist(
+ pdf.logpdf(pdf.config.suggested_init(), data)
+ )
@pytest.mark.fail_mxnet
@@ -185,15 +279,18 @@ def test_pdf_eval_2(backend):
source = {
"binning": [2, -0.5, 1.5],
"bindata": {
- "data": [120.0, 180.0],
- "bkg": [100.0, 150.0],
- "bkgerr": [10.0, 10.0],
- "sig": [30.0, 95.0]
- }
+ "data": [120.0, 180.0],
+ "bkg": [100.0, 150.0],
+ "bkgerr": [10.0, 10.0],
+ "sig": [30.0, 95.0],
+ },
}
- pdf = hepdata_like(source['bindata']['sig'], source['bindata'][
- 'bkg'], source['bindata']['bkgerr'])
+ pdf = hepdata_like(
+ source['bindata']['sig'], source['bindata']['bkg'], source['bindata']['bkgerr']
+ )
data = source['bindata']['data'] + pdf.config.auxdata
- assert pytest.approx([-23.579605171119738], rel=5e-5) == pyhf.tensorlib.tolist(pdf.logpdf(pdf.config.suggested_init(), data))
+ assert pytest.approx([-23.579605171119738], rel=5e-5) == pyhf.tensorlib.tolist(
+ pdf.logpdf(pdf.config.suggested_init(), data)
+ )
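A note on the nan_ok=True comparisons above: pytest.approx treats NaN as unequal to everything by default, so the flag is what lets the poisson(lambda=0) cases pass. A self-contained sketch with the same expected values:

    import numpy as np
    import pytest

    expected = [np.nan, 0.3678794503211975, 0.0, 0.3678794503211975]
    actual = [np.nan, np.exp(-1), 0.0, np.exp(-1)]  # Poisson pmf values, e**-1

    # Without nan_ok=True this would fail, since NaN != NaN.
    assert actual == pytest.approx(expected, nan_ok=True)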
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 1098c572ed..ad32ce55d1 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -2,16 +2,20 @@
import pytest
import os
+
def test_get_default_schema():
assert os.path.isfile(pyhf.utils.get_default_schema())
+
def test_load_default_schema():
assert pyhf.utils.load_schema(pyhf.utils.get_default_schema())
+
def test_load_missing_schema():
with pytest.raises(IOError):
pyhf.utils.load_schema('a/fake/path/that/should/not/work.json')
+
def test_load_custom_schema(tmpdir):
temp = tmpdir.join("custom_schema.json")
temp.write('{"foo": "bar"}')
diff --git a/tests/test_validation.py b/tests/test_validation.py
index b3ac6c44be..946912d764 100644
--- a/tests/test_validation.py
+++ b/tests/test_validation.py
@@ -20,12 +20,8 @@ def spec_1bin_shapesys(source=source_1bin_example1()):
'name': 'signal',
'data': source['bindata']['sig'],
'modifiers': [
- {
- 'name': 'mu',
- 'type': 'normfactor',
- 'data': None
- }
- ]
+ {'name': 'mu', 'type': 'normfactor', 'data': None}
+ ],
},
{
'name': 'background',
@@ -34,11 +30,11 @@ def spec_1bin_shapesys(source=source_1bin_example1()):
{
'name': 'uncorr_bkguncrt',
'type': 'shapesys',
- 'data': source['bindata']['bkgerr']
+ 'data': source['bindata']['bkgerr'],
}
- ]
- }
- ]
+ ],
+ },
+ ],
}
]
}
@@ -46,34 +42,34 @@ def spec_1bin_shapesys(source=source_1bin_example1()):
@pytest.fixture(scope='module')
-def expected_result_1bin_shapesys(mu=1.):
+def expected_result_1bin_shapesys(mu=1.0):
if mu == 1:
- expected_result = {"exp": [
- 0.06371799398864626,
- 0.15096503398048894,
- 0.3279606950533305,
- 0.6046087303039118,
- 0.8662627605298466
+ expected_result = {
+ "exp": [
+ 0.06371799398864626,
+ 0.15096503398048894,
+ 0.3279606950533305,
+ 0.6046087303039118,
+ 0.8662627605298466,
],
- "obs": 0.4541865416107029
+ "obs": 0.4541865416107029,
}
return expected_result
@pytest.fixture(scope='module')
-def setup_1bin_shapesys(source=source_1bin_example1(),
- spec=spec_1bin_shapesys(source_1bin_example1()),
- mu=1,
- expected_result=expected_result_1bin_shapesys(1.),
- config={'init_pars': 2, 'par_bounds': 2}):
+def setup_1bin_shapesys(
+ source=source_1bin_example1(),
+ spec=spec_1bin_shapesys(source_1bin_example1()),
+ mu=1,
+ expected_result=expected_result_1bin_shapesys(1.0),
+ config={'init_pars': 2, 'par_bounds': 2},
+):
return {
'source': source,
'spec': spec,
'mu': mu,
- 'expected': {
- 'result': expected_result,
- 'config': config
- }
+ 'expected': {'result': expected_result, 'config': config},
}
@@ -81,11 +77,7 @@ def setup_1bin_shapesys(source=source_1bin_example1(),
def source_1bin_normsys():
source = {
'binning': [2, -0.5, 1.5],
- 'bindata': {
- 'data': [120.0, 180.0],
- 'bkg': [100.0, 150.0],
- 'sig': [30.0, 95.0]
- }
+ 'bindata': {'data': [120.0, 180.0], 'bkg': [100.0, 150.0], 'sig': [30.0, 95.0]},
}
return source
@@ -101,12 +93,8 @@ def spec_1bin_normsys(source=source_1bin_normsys()):
'name': 'signal',
'data': source['bindata']['sig'],
'modifiers': [
- {
- 'name': 'mu',
- 'type': 'normfactor',
- 'data': None
- }
- ]
+ {'name': 'mu', 'type': 'normfactor', 'data': None}
+ ],
},
{
'name': 'background',
@@ -115,11 +103,11 @@ def spec_1bin_normsys(source=source_1bin_normsys()):
{
'name': 'bkg_norm',
'type': 'normsys',
- 'data': {'lo': 0.90, 'hi': 1.10}
+ 'data': {'lo': 0.90, 'hi': 1.10},
}
- ]
- }
- ]
+ ],
+ },
+ ],
}
]
}
@@ -127,34 +115,34 @@ def spec_1bin_normsys(source=source_1bin_normsys()):
@pytest.fixture(scope='module')
-def expected_result_1bin_normsys(mu=1.):
+def expected_result_1bin_normsys(mu=1.0):
if mu == 1:
- expected_result = {"exp": [
- 7.471684419037565e-10,
- 5.7411551509088054e-08,
- 3.6898088058290313e-06,
- 0.000169657315363677,
- 0.004392708998183163
+ expected_result = {
+ "exp": [
+ 7.471684419037565e-10,
+ 5.7411551509088054e-08,
+ 3.6898088058290313e-06,
+ 0.000169657315363677,
+ 0.004392708998183163,
],
- "obs": 0.0006735317023683173
+ "obs": 0.0006735317023683173,
}
return expected_result
@pytest.fixture(scope='module')
-def setup_1bin_normsys(source=source_1bin_normsys(),
- spec=spec_1bin_normsys(source_1bin_normsys()),
- mu=1,
- expected_result=expected_result_1bin_normsys(1.),
- config={'init_pars': 2, 'par_bounds': 2}):
+def setup_1bin_normsys(
+ source=source_1bin_normsys(),
+ spec=spec_1bin_normsys(source_1bin_normsys()),
+ mu=1,
+ expected_result=expected_result_1bin_normsys(1.0),
+ config={'init_pars': 2, 'par_bounds': 2},
+):
return {
'source': source,
'spec': spec,
'mu': mu,
- 'expected': {
- 'result': expected_result,
- 'config': config
- }
+ 'expected': {'result': expected_result, 'config': config},
}
@@ -175,12 +163,8 @@ def spec_2bin_histosys(source=source_2bin_histosys_example2()):
'name': 'signal',
'data': source['bindata']['sig'],
'modifiers': [
- {
- 'name': 'mu',
- 'type': 'normfactor',
- 'data': None
- }
- ]
+ {'name': 'mu', 'type': 'normfactor', 'data': None}
+ ],
},
{
'name': 'background',
@@ -191,12 +175,12 @@ def spec_2bin_histosys(source=source_2bin_histosys_example2()):
'type': 'histosys',
'data': {
'lo_data': source['bindata']['bkgsys_dn'],
- 'hi_data': source['bindata']['bkgsys_up']
- }
+ 'hi_data': source['bindata']['bkgsys_up'],
+ },
}
- ]
- }
- ]
+ ],
+ },
+ ],
}
]
}
@@ -206,33 +190,32 @@ def spec_2bin_histosys(source=source_2bin_histosys_example2()):
@pytest.fixture(scope='module')
def expected_result_2bin_histosys(mu=1):
if mu == 1:
- expected_result = {"exp": [
- 7.134513306138892e-06,
- 0.00012547100627138575,
- 0.001880010666437615,
- 0.02078964907605385,
- 0.13692494523572218
+ expected_result = {
+ "exp": [
+ 7.134513306138892e-06,
+ 0.00012547100627138575,
+ 0.001880010666437615,
+ 0.02078964907605385,
+ 0.13692494523572218,
],
- "obs": 0.1001463460725534
+ "obs": 0.1001463460725534,
}
return expected_result
@pytest.fixture(scope='module')
-def setup_2bin_histosys(source=source_2bin_histosys_example2(),
- spec=spec_2bin_histosys(
- source_2bin_histosys_example2()),
- mu=1,
- expected_result=expected_result_2bin_histosys(1.),
- config={'init_pars': 2, 'par_bounds': 2}):
+def setup_2bin_histosys(
+ source=source_2bin_histosys_example2(),
+ spec=spec_2bin_histosys(source_2bin_histosys_example2()),
+ mu=1,
+ expected_result=expected_result_2bin_histosys(1.0),
+ config={'init_pars': 2, 'par_bounds': 2},
+):
return {
'source': source,
'spec': spec,
'mu': mu,
- 'expected': {
- 'result': expected_result,
- 'config': config
- }
+ 'expected': {'result': expected_result, 'config': config},
}
@@ -253,12 +236,8 @@ def spec_2bin_2channel(source=source_2bin_2channel_example1()):
'name': 'signal',
'data': source['channels']['signal']['bindata']['sig'],
'modifiers': [
- {
- 'name': 'mu',
- 'type': 'normfactor',
- 'data': None
- }
- ]
+ {'name': 'mu', 'type': 'normfactor', 'data': None}
+ ],
},
{
'name': 'background',
@@ -267,11 +246,13 @@ def spec_2bin_2channel(source=source_2bin_2channel_example1()):
{
'name': 'uncorr_bkguncrt_signal',
'type': 'shapesys',
- 'data': source['channels']['signal']['bindata']['bkgerr']
+ 'data': source['channels']['signal']['bindata'][
+ 'bkgerr'
+ ],
}
- ]
- }
- ]
+ ],
+ },
+ ],
},
{
'name': 'control',
@@ -283,48 +264,49 @@ def spec_2bin_2channel(source=source_2bin_2channel_example1()):
{
'name': 'uncorr_bkguncrt_control',
'type': 'shapesys',
- 'data': source['channels']['control']['bindata']['bkgerr']
+ 'data': source['channels']['control']['bindata'][
+ 'bkgerr'
+ ],
}
- ]
+ ],
}
- ]
- }
+ ],
+ },
]
}
return spec
@pytest.fixture(scope='module')
-def expected_result_2bin_2channel(mu=1.):
+def expected_result_2bin_2channel(mu=1.0):
if mu == 1:
- expected_result = {"exp": [
- 0.00043491354821983556,
- 0.0034223000502860606,
- 0.02337423265831151,
- 0.1218654225510158,
- 0.40382074249477845
+ expected_result = {
+ "exp": [
+ 0.00043491354821983556,
+ 0.0034223000502860606,
+ 0.02337423265831151,
+ 0.1218654225510158,
+ 0.40382074249477845,
],
- "obs": 0.056332621064982304
+ "obs": 0.056332621064982304,
}
return expected_result
@pytest.fixture(scope='module')
-def setup_2bin_2channel(source=source_2bin_2channel_example1(),
- spec=spec_2bin_2channel(
- source_2bin_2channel_example1()),
- mu=1,
- expected_result=expected_result_2bin_2channel(1.),
- config={'init_pars': 5, 'par_bounds': 5}):
+def setup_2bin_2channel(
+ source=source_2bin_2channel_example1(),
+ spec=spec_2bin_2channel(source_2bin_2channel_example1()),
+ mu=1,
+ expected_result=expected_result_2bin_2channel(1.0),
+ config={'init_pars': 5, 'par_bounds': 5},
+):
# 1 mu + 2 gammas for 2 channels each
return {
'source': source,
'spec': spec,
'mu': mu,
- 'expected': {
- 'result': expected_result,
- 'config': config
- }
+ 'expected': {'result': expected_result, 'config': config},
}
@@ -345,12 +327,8 @@ def spec_2bin_2channel_couplednorm(source=source_2bin_2channel_couplednorm()):
'name': 'signal',
'data': source['channels']['signal']['bindata']['sig'],
'modifiers': [
- {
- 'name': 'mu',
- 'type': 'normfactor',
- 'data': None
- }
- ]
+ {'name': 'mu', 'type': 'normfactor', 'data': None}
+ ],
},
{
'name': 'bkg1',
@@ -359,9 +337,9 @@ def spec_2bin_2channel_couplednorm(source=source_2bin_2channel_couplednorm()):
{
'name': 'coupled_normsys',
'type': 'normsys',
- 'data': {'lo': 0.9, 'hi': 1.1}
+ 'data': {'lo': 0.9, 'hi': 1.1},
}
- ]
+ ],
},
{
'name': 'bkg2',
@@ -370,11 +348,11 @@ def spec_2bin_2channel_couplednorm(source=source_2bin_2channel_couplednorm()):
{
'name': 'coupled_normsys',
'type': 'normsys',
- 'data': {'lo': 0.5, 'hi': 1.5}
+ 'data': {'lo': 0.5, 'hi': 1.5},
}
- ]
- }
- ]
+ ],
+ },
+ ],
},
{
'name': 'control',
@@ -386,49 +364,47 @@ def spec_2bin_2channel_couplednorm(source=source_2bin_2channel_couplednorm()):
{
'name': 'coupled_normsys',
'type': 'normsys',
- 'data': {'lo': 0.9, 'hi': 1.1}
+ 'data': {'lo': 0.9, 'hi': 1.1},
}
- ]
+ ],
}
- ]
- }
+ ],
+ },
]
}
return spec
@pytest.fixture(scope='module')
-def expected_result_2bin_2channel_couplednorm(mu=1.):
+def expected_result_2bin_2channel_couplednorm(mu=1.0):
if mu == 1:
- expected_result = {"exp": [
- 0.055223914655538435,
- 0.13613239925395315,
- 0.3068720101493323,
- 0.5839470093910164,
- 0.8554725461337025
+ expected_result = {
+ "exp": [
+ 0.055223914655538435,
+ 0.13613239925395315,
+ 0.3068720101493323,
+ 0.5839470093910164,
+ 0.8554725461337025,
],
- "obs": 0.5906228034705155
+ "obs": 0.5906228034705155,
}
return expected_result
@pytest.fixture(scope='module')
def setup_2bin_2channel_couplednorm(
- source=source_2bin_2channel_couplednorm(),
- spec=spec_2bin_2channel_couplednorm(
- source_2bin_2channel_couplednorm()),
- mu=1,
- expected_result=expected_result_2bin_2channel_couplednorm(1.),
- config={'init_pars': 2, 'par_bounds': 2}):
+ source=source_2bin_2channel_couplednorm(),
+ spec=spec_2bin_2channel_couplednorm(source_2bin_2channel_couplednorm()),
+ mu=1,
+ expected_result=expected_result_2bin_2channel_couplednorm(1.0),
+ config={'init_pars': 2, 'par_bounds': 2},
+):
# 1 mu + 1 alpha
return {
'source': source,
'spec': spec,
'mu': mu,
- 'expected': {
- 'result': expected_result,
- 'config': config
- }
+ 'expected': {'result': expected_result, 'config': config},
}
@@ -449,12 +425,8 @@ def spec_2bin_2channel_coupledhistosys(source=source_2bin_2channel_coupledhisto(
'name': 'signal',
'data': source['channels']['signal']['bindata']['sig'],
'modifiers': [
- {
- 'name': 'mu',
- 'type': 'normfactor',
- 'data': None
- }
- ]
+ {'name': 'mu', 'type': 'normfactor', 'data': None}
+ ],
},
{
'name': 'bkg1',
@@ -464,11 +436,15 @@ def spec_2bin_2channel_coupledhistosys(source=source_2bin_2channel_coupledhisto(
'name': 'coupled_histosys',
'type': 'histosys',
'data': {
- 'lo_data': source['channels']['signal']['bindata']['bkg1_dn'],
- 'hi_data': source['channels']['signal']['bindata']['bkg1_up']
- }
+ 'lo_data': source['channels']['signal']['bindata'][
+ 'bkg1_dn'
+ ],
+ 'hi_data': source['channels']['signal']['bindata'][
+ 'bkg1_up'
+ ],
+ },
}
- ]
+ ],
},
{
'name': 'bkg2',
@@ -478,13 +454,17 @@ def spec_2bin_2channel_coupledhistosys(source=source_2bin_2channel_coupledhisto(
'name': 'coupled_histosys',
'type': 'histosys',
'data': {
- 'lo_data': source['channels']['signal']['bindata']['bkg2_dn'],
- 'hi_data': source['channels']['signal']['bindata']['bkg2_up']
- }
+ 'lo_data': source['channels']['signal']['bindata'][
+ 'bkg2_dn'
+ ],
+ 'hi_data': source['channels']['signal']['bindata'][
+ 'bkg2_up'
+ ],
+ },
}
- ]
- }
- ]
+ ],
+ },
+ ],
},
{
'name': 'control',
@@ -497,51 +477,53 @@ def spec_2bin_2channel_coupledhistosys(source=source_2bin_2channel_coupledhisto(
'name': 'coupled_histosys',
'type': 'histosys',
'data': {
- 'lo_data': source['channels']['control']['bindata']['bkg1_dn'],
- 'hi_data': source['channels']['control']['bindata']['bkg1_up']
- }
+ 'lo_data': source['channels']['control']['bindata'][
+ 'bkg1_dn'
+ ],
+ 'hi_data': source['channels']['control']['bindata'][
+ 'bkg1_up'
+ ],
+ },
}
- ]
+ ],
}
- ]
- }
+ ],
+ },
]
}
return spec
@pytest.fixture(scope='module')
-def expected_result_2bin_2channel_coupledhistosys(mu=1.):
+def expected_result_2bin_2channel_coupledhistosys(mu=1.0):
if mu == 1:
- expected_result = {"exp": [
- 1.7653746536962154e-05,
- 0.00026265644807799805,
- 0.00334003612780065,
- 0.031522353024659715,
- 0.17907742915143962
+ expected_result = {
+ "exp": [
+ 1.7653746536962154e-05,
+ 0.00026265644807799805,
+ 0.00334003612780065,
+ 0.031522353024659715,
+ 0.17907742915143962,
],
- "obs": 0.07967400132261188
+ "obs": 0.07967400132261188,
}
return expected_result
@pytest.fixture(scope='module')
def setup_2bin_2channel_coupledhistosys(
- source=source_2bin_2channel_coupledhisto(),
- spec=spec_2bin_2channel_coupledhistosys(
- source_2bin_2channel_coupledhisto()),
- mu=1,
- expected_result=expected_result_2bin_2channel_coupledhistosys(1.),
- config={'auxdata': 1, 'init_pars': 2, 'par_bounds': 2}):
+ source=source_2bin_2channel_coupledhisto(),
+ spec=spec_2bin_2channel_coupledhistosys(source_2bin_2channel_coupledhisto()),
+ mu=1,
+ expected_result=expected_result_2bin_2channel_coupledhistosys(1.0),
+ config={'auxdata': 1, 'init_pars': 2, 'par_bounds': 2},
+):
# 1 mu 1 shared histosys
return {
'source': source,
'spec': spec,
'mu': mu,
- 'expected': {
- 'result': expected_result,
- 'config': config
- }
+ 'expected': {'result': expected_result, 'config': config},
}
@@ -552,7 +534,9 @@ def source_2bin_2channel_coupledshapefactor():
@pytest.fixture(scope='module')
-def spec_2bin_2channel_coupledshapefactor(source=source_2bin_2channel_coupledshapefactor()):
+def spec_2bin_2channel_coupledshapefactor(
+ source=source_2bin_2channel_coupledshapefactor()
+):
spec = {
'channels': [
{
@@ -562,12 +546,8 @@ def spec_2bin_2channel_coupledshapefactor(source=source_2bin_2channel_coupledsha
'name': 'signal',
'data': source['channels']['signal']['bindata']['sig'],
'modifiers': [
- {
- 'name': 'mu',
- 'type': 'normfactor',
- 'data': None
- }
- ]
+ {'name': 'mu', 'type': 'normfactor', 'data': None}
+ ],
},
{
'name': 'bkg1',
@@ -576,11 +556,11 @@ def spec_2bin_2channel_coupledshapefactor(source=source_2bin_2channel_coupledsha
{
'name': 'coupled_shapefactor',
'type': 'shapefactor',
- 'data': None
+ 'data': None,
}
- ]
- }
- ]
+ ],
+ },
+ ],
},
{
'name': 'control',
@@ -592,19 +572,19 @@ def spec_2bin_2channel_coupledshapefactor(source=source_2bin_2channel_coupledsha
{
'name': 'coupled_shapefactor',
'type': 'shapefactor',
- 'data': None
+ 'data': None,
}
- ]
+ ],
}
- ]
- }
+ ],
+ },
]
}
return spec
@pytest.fixture(scope='module')
-def expected_result_2bin_2channel_coupledshapefactor(mu=1.):
+def expected_result_2bin_2channel_coupledshapefactor(mu=1.0):
if mu == 1:
expected_result = {
'obs': 0.5421679124909312,
@@ -613,29 +593,28 @@ def expected_result_2bin_2channel_coupledshapefactor(mu=1.):
0.048887400056355966,
0.15555296253957684,
0.4007561343326305,
- 0.7357169630955912
- ]
+ 0.7357169630955912,
+ ],
}
return expected_result
@pytest.fixture(scope='module')
def setup_2bin_2channel_coupledshapefactor(
- source=source_2bin_2channel_coupledshapefactor(),
- spec=spec_2bin_2channel_coupledshapefactor(
- source_2bin_2channel_coupledshapefactor()),
- mu=1,
- expected_result=expected_result_2bin_2channel_coupledshapefactor(1.),
- config={'auxdata': 0, 'init_pars': 3, 'par_bounds': 3}):
+ source=source_2bin_2channel_coupledshapefactor(),
+ spec=spec_2bin_2channel_coupledshapefactor(
+ source_2bin_2channel_coupledshapefactor()
+ ),
+ mu=1,
+ expected_result=expected_result_2bin_2channel_coupledshapefactor(1.0),
+ config={'auxdata': 0, 'init_pars': 3, 'par_bounds': 3},
+):
# 1 mu 2 shared shapefactors
return {
'source': source,
'spec': spec,
'mu': mu,
- 'expected': {
- 'result': expected_result,
- 'config': config
- }
+ 'expected': {'result': expected_result, 'config': config},
}
@@ -644,33 +623,35 @@ def validate_runOnePoint(pdf, data, mu_test, expected_result, tolerance=1e-6):
par_bounds = pdf.config.suggested_bounds()
CLs_obs, CLs_exp = pyhf.utils.runOnePoint(
- mu_test, data, pdf, init_pars, par_bounds)[-2:]
+ mu_test, data, pdf, init_pars, par_bounds
+ )[-2:]
- assert abs(CLs_obs - expected_result['obs']) / \
- expected_result['obs'] < tolerance
+ assert abs(CLs_obs - expected_result['obs']) / expected_result['obs'] < tolerance
for result, expected_result in zip(CLs_exp, expected_result['exp']):
- assert abs(result - expected_result) / \
- expected_result < tolerance
-
-
-@pytest.mark.parametrize('setup_and_tolerance', [
- (setup_1bin_shapesys(), 1e-6),
- (setup_1bin_normsys(), 1e-6),
- (setup_2bin_histosys(), 8e-5),
- (setup_2bin_2channel(), 1e-6),
- (setup_2bin_2channel_couplednorm(), 1e-6),
- (setup_2bin_2channel_coupledhistosys(), 1e-6),
- (setup_2bin_2channel_coupledshapefactor(), 2.5e-6)
-],
+ assert abs(result - expected_result) / expected_result < tolerance
+
+
+@pytest.mark.parametrize(
+ 'setup_and_tolerance',
+ [
+ (setup_1bin_shapesys(), 1e-6),
+ (setup_1bin_normsys(), 1e-6),
+ (setup_2bin_histosys(), 8e-5),
+ (setup_2bin_2channel(), 1e-6),
+ (setup_2bin_2channel_couplednorm(), 1e-6),
+ (setup_2bin_2channel_coupledhistosys(), 1e-6),
+ (setup_2bin_2channel_coupledshapefactor(), 2.5e-6),
+ ],
ids=[
- '1bin_shapesys_mu1',
- '1bin_normsys_mu1',
- '2bin_histosys_mu1',
- '2bin_2channel_mu1',
- '2bin_2channel_couplednorm_mu1',
- '2bin_2channel_coupledhistosys_mu1',
- '2bin_2channel_coupledshapefactor_mu1'
-])
+ '1bin_shapesys_mu1',
+ '1bin_normsys_mu1',
+ '2bin_histosys_mu1',
+ '2bin_2channel_mu1',
+ '2bin_2channel_couplednorm_mu1',
+ '2bin_2channel_coupledhistosys_mu1',
+ '2bin_2channel_coupledshapefactor_mu1',
+ ],
+)
def test_validation(setup_and_tolerance):
setup, tolerance = setup_and_tolerance
source = setup['source']
@@ -685,11 +666,12 @@ def test_validation(setup_and_tolerance):
data = source['bindata']['data'] + pdf.config.auxdata
if 'auxdata' in setup['expected']['config']:
- assert len(pdf.config.auxdata) == \
- setup['expected']['config']['auxdata']
- assert len(pdf.config.suggested_init()) == \
- setup['expected']['config']['init_pars']
- assert len(pdf.config.suggested_bounds()) == \
- setup['expected']['config']['par_bounds']
-
- validate_runOnePoint(pdf, data, setup['mu'], setup['expected']['result'], tolerance=tolerance)
+ assert len(pdf.config.auxdata) == setup['expected']['config']['auxdata']
+ assert len(pdf.config.suggested_init()) == setup['expected']['config']['init_pars']
+ assert (
+ len(pdf.config.suggested_bounds()) == setup['expected']['config']['par_bounds']
+ )
+
+ validate_runOnePoint(
+ pdf, data, setup['mu'], setup['expected']['result'], tolerance=tolerance
+ )
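The parametrize/ids pairing that closes this file, reduced to a minimal sketch (the setups are hypothetical stand-ins for the fixtures above; each id names one parameter set in the test report):

    import pytest

    @pytest.mark.parametrize(
        'setup_and_tolerance',
        [({'mu': 1}, 1e-6), ({'mu': 1}, 8e-5)],  # hypothetical stand-ins
        ids=['first_case', 'second_case'],
    )
    def test_sketch(setup_and_tolerance):
        setup, tolerance = setup_and_tolerance
        assert setup['mu'] == 1 and tolerance > 0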
diff --git a/validation/makedata.py b/validation/makedata.py
index f9f358f3cb..f2bf230969 100644
--- a/validation/makedata.py
+++ b/validation/makedata.py
@@ -2,35 +2,36 @@
import json
import sys
+
source_data = json.load(open(sys.argv[1]))
-root_file = sys.argv[2]
+root_file = sys.argv[2]
binning = source_data['binning']
bindata = source_data['bindata']
f = ROOT.TFile(root_file, 'RECREATE')
-data = ROOT.TH1F('data','data',*binning)
-for i,v in enumerate(bindata['data']):
- data.SetBinContent(i+1,v)
+data = ROOT.TH1F('data', 'data', *binning)
+for i, v in enumerate(bindata['data']):
+ data.SetBinContent(i + 1, v)
data.Sumw2()
-bkg = ROOT.TH1F('bkg','bkg',*binning)
-for i,v in enumerate(bindata['bkg']):
- bkg.SetBinContent(i+1,v)
+bkg = ROOT.TH1F('bkg', 'bkg', *binning)
+for i, v in enumerate(bindata['bkg']):
+ bkg.SetBinContent(i + 1, v)
bkg.Sumw2()
if 'bkgerr' in bindata:
- bkgerr = ROOT.TH1F('bkgerr','bkgerr',*binning)
- #shapesys must be as multiplicative factor
- for i,v in enumerate(bindata['bkgerr']):
- bkgerr.SetBinContent(i+1,v / bkg.GetBinContent(i+1))
+ bkgerr = ROOT.TH1F('bkgerr', 'bkgerr', *binning)
+ # shapesys must be as multiplicative factor
+ for i, v in enumerate(bindata['bkgerr']):
+ bkgerr.SetBinContent(i + 1, v / bkg.GetBinContent(i + 1))
bkgerr.Sumw2()
-sig = ROOT.TH1F('sig','sig',*binning)
-for i,v in enumerate(bindata['sig']):
- sig.SetBinContent(i+1,v)
+sig = ROOT.TH1F('sig', 'sig', *binning)
+for i, v in enumerate(bindata['sig']):
+ sig.SetBinContent(i + 1, v)
sig.Sumw2()
f.Write()
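The bkgerr loop above converts absolute errors to the relative (multiplicative) form that shapesys expects; without ROOT, using the bkg/bkgerr values that appear in the test sources earlier in this patch:

    bkg = [100.0, 150.0]       # nominal background counts
    bkgerr_abs = [10.0, 10.0]  # absolute uncertainties
    # Each absolute error is divided by the nominal count in the same bin.
    bkgerr_rel = [err / nom for err, nom in zip(bkgerr_abs, bkg)]
    print(bkgerr_rel)  # -> [0.1, 0.06666666666666667]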
diff --git a/validation/manualonoff_roofit/onoff.py b/validation/manualonoff_roofit/onoff.py
index 5a7cea397a..6b9255a18e 100644
--- a/validation/manualonoff_roofit/onoff.py
+++ b/validation/manualonoff_roofit/onoff.py
@@ -3,26 +3,26 @@
import ROOT
d = json.load(open('data/source.json'))
-nobs = d['bindata']['data'][0]
-b = d['bindata']['bkg'][0]
+nobs = d['bindata']['data'][0]
+b = d['bindata']['bkg'][0]
deltab = d['bindata']['bkgerr'][0]
-s = d['bindata']['sig'][0]
+s = d['bindata']['sig'][0]
# derived data
-tau = b/deltab/deltab
-mobs = round(tau*b)
+tau = b / deltab / deltab
+mobs = round(tau * b)
-print 'tau: ', tau, 'm: ', mobs
+print('tau: {}, m: {}'.format(tau, mobs))
-w = ROOT.RooWorkspace("w",True);
+w = ROOT.RooWorkspace("w", True)
-#-----------------
+# -----------------
-w.factory("prod:nsig(mu[1,0,10],s[1])");
-w.factory("sum:nexp_sr(nsig,b[1,40,300])");
-w.factory("Poisson:on_model(nobs_sr[0,1000],nexp_sr)");
+w.factory("prod:nsig(mu[1,0,10],s[1])")
+w.factory("sum:nexp_sr(nsig,b[1,40,300])")
+w.factory("Poisson:on_model(nobs_sr[0,1000],nexp_sr)")
-#-----------------
+# -----------------
w.var('s').setVal(s)
w.var('b').setVal(b)
@@ -31,29 +31,31 @@
w.var('nobs_sr').setVal(nobs)
-w.factory("prod:nexp_cr(tau[1],b)");
-w.factory("Poisson:off_model(nobs_cr[0,1000],nexp_cr)");
+w.factory("prod:nexp_cr(tau[1],b)")
+w.factory("Poisson:off_model(nobs_cr[0,1000],nexp_cr)")
w.var('nobs_cr').setVal(mobs)
w.var('nobs_cr').setConstant(True)
w.var('tau').setVal(tau)
w.var('tau').setConstant(True)
-w.factory("PROD:onoff(on_model,off_model)");
+w.factory("PROD:onoff(on_model,off_model)")
-data = ROOT.RooDataSet('data','data', ROOT.RooArgSet(w.var('nobs_sr'), w.var('nobs_cr')))
+data = ROOT.RooDataSet(
+ 'data', 'data', ROOT.RooArgSet(w.var('nobs_sr'), w.var('nobs_cr'))
+)
data.add(ROOT.RooArgSet(w.var('nobs_sr'), w.var('nobs_cr')))
-getattr(w,'import')(data)
+getattr(w, 'import')(data)
-modelConfig = ROOT.RooStats.ModelConfig(w);
-modelConfig.SetPdf(w.pdf('onoff'));
+modelConfig = ROOT.RooStats.ModelConfig(w)
+modelConfig.SetPdf(w.pdf('onoff'))
modelConfig.SetParametersOfInterest(ROOT.RooArgSet(w.var('mu')))
modelConfig.SetNuisanceParameters(ROOT.RooArgSet(w.var('b')))
modelConfig.SetObservables(ROOT.RooArgSet(w.var('nobs_sr'), w.var('nobs_cr')))
-modelConfig.SetGlobalObservables( ROOT.RooArgSet())
+modelConfig.SetGlobalObservables(ROOT.RooArgSet())
modelConfig.SetName("ModelConfig")
-getattr(w,'import')(modelConfig)
+getattr(w, 'import')(modelConfig)
w.Print()
@@ -79,19 +81,19 @@
ROOT.RooStats.AsymptoticCalculator.SetPrintLevel(10)
calc = ROOT.RooStats.HypoTestInverter(ac)
-calc.RunFixedScan(51,0,5)
+calc.RunFixedScan(51, 0, 5)
calc.SetConfidenceLevel(0.95)
calc.UseCLs(True)
result = calc.GetInterval()
-plot = ROOT.RooStats.HypoTestInverterPlot("plot","plot",result)
+plot = ROOT.RooStats.HypoTestInverterPlot("plot", "plot", result)
c = ROOT.TCanvas()
c.SetLogy(False)
plot.Draw("OBS EXP CLb 2CL")
c.Draw()
c.SaveAs('scan.pdf')
-print 'observed: ', result.UpperLimit()
-for i in [-2,-1,0,1,2]:
- print 'expected {}: '.format(i), result.GetExpectedUpperLimit(i)
+print('observed: {}'.format(result.UpperLimit()))
+for i in [-2, -1, 0, 1, 2]:
+ print('expected {}: {}'.format(i, result.GetExpectedUpperLimit(i)))
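The derived quantities near the top of onoff.py implement the standard on/off construction: a control region whose expected count is tau times the signal-region background, with tau fixed by the background uncertainty. A sketch with assumed source values (b and deltab are assumptions about data/source.json, not values shown in this patch):

    # deltab = sqrt(b / tau)  =>  tau = b / deltab**2
    b, deltab = 100.0, 10.0  # assumed contents of data/source.json
    tau = b / deltab / deltab
    mobs = round(tau * b)    # expected control-region count
    print(tau, mobs)         # -> 1.0 100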
diff --git a/validation/multichan_coupledhistosys_histfactory/makedata.py b/validation/multichan_coupledhistosys_histfactory/makedata.py
index 497c71a03c..465fa49b01 100644
--- a/validation/multichan_coupledhistosys_histfactory/makedata.py
+++ b/validation/multichan_coupledhistosys_histfactory/makedata.py
@@ -2,25 +2,27 @@
import json
import sys
+
source_data = json.load(open(sys.argv[1]))
-root_file = sys.argv[2]
+root_file = sys.argv[2]
f = ROOT.TFile(root_file, 'RECREATE')
hists = []
for cname, channel_def in source_data['channels'].iteritems():
- print ('CH',cname)
+ print('CH', cname)
binning = channel_def['binning']
bindata = channel_def['bindata']
-
for hist, data in bindata.iteritems():
- print ('{}_{}'.format(cname,hist))
- h = ROOT.TH1F('{}_{}'.format(cname,hist),'{}_{}'.format(cname,hist),*binning)
+ print('{}_{}'.format(cname, hist))
+ h = ROOT.TH1F(
+ '{}_{}'.format(cname, hist), '{}_{}'.format(cname, hist), *binning
+ )
hists += [h]
- for i,v in enumerate(data):
- h.SetBinContent(i+1,v)
+ for i, v in enumerate(data):
+ h.SetBinContent(i + 1, v)
h.Sumw2()
f.Write()
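Worth noting about this script: the prints become Python 3 calls, but dict.iteritems() exists only on Python 2, so the file is still not Python 3 clean. A both-ways version of the loop would use items(), roughly:

    # items() works on Python 2 and 3; the dict shape here is a
    # hypothetical stand-in for source_data['channels'].
    source_channels = {'signal': {'binning': [2, -0.5, 1.5]}}
    for cname, channel_def in source_channels.items():
        print('CH', cname)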
diff --git a/validation/multichan_coupledoverall_histfactory/makedata.py b/validation/multichan_coupledoverall_histfactory/makedata.py
index 8a6e6120c3..09bb42b2bc 100644
--- a/validation/multichan_coupledoverall_histfactory/makedata.py
+++ b/validation/multichan_coupledoverall_histfactory/makedata.py
@@ -2,35 +2,36 @@
import json
import sys
+
source_data = json.load(open(sys.argv[1]))
-root_file = sys.argv[2]
+root_file = sys.argv[2]
f = ROOT.TFile(root_file, 'RECREATE')
for cname, channel_def in source_data['channels'].iteritems():
- print 'CH',cname
+ print('CH', cname)
binning = channel_def['binning']
bindata = channel_def['bindata']
- data = ROOT.TH1F('{}_data'.format(cname),'{}_data'.format(cname),*binning)
- for i,v in enumerate(bindata['data']):
- data.SetBinContent(i+1,v)
+ data = ROOT.TH1F('{}_data'.format(cname), '{}_data'.format(cname), *binning)
+ for i, v in enumerate(bindata['data']):
+ data.SetBinContent(i + 1, v)
data.Sumw2()
- bkg1 = ROOT.TH1F('{}_bkg1'.format(cname),'{}_bkg1'.format(cname),*binning)
- for i,v in enumerate(bindata['bkg1']):
- bkg1.SetBinContent(i+1,v)
+ bkg1 = ROOT.TH1F('{}_bkg1'.format(cname), '{}_bkg1'.format(cname), *binning)
+ for i, v in enumerate(bindata['bkg1']):
+ bkg1.SetBinContent(i + 1, v)
bkg1.Sumw2()
if 'bkg2' in bindata:
- bkg2 = ROOT.TH1F('{}_bkg2'.format(cname),'{}_bkg2'.format(cname),*binning)
- for i,v in enumerate(bindata['bkg2']):
- bkg2.SetBinContent(i+1,v)
+ bkg2 = ROOT.TH1F('{}_bkg2'.format(cname), '{}_bkg2'.format(cname), *binning)
+ for i, v in enumerate(bindata['bkg2']):
+ bkg2.SetBinContent(i + 1, v)
bkg2.Sumw2()
if 'sig' in bindata:
- sig = ROOT.TH1F('{}_signal'.format(cname),'{}_signal'.format(cname),*binning)
- for i,v in enumerate(bindata['sig']):
- sig.SetBinContent(i+1,v)
+ sig = ROOT.TH1F('{}_signal'.format(cname), '{}_signal'.format(cname), *binning)
+ for i, v in enumerate(bindata['sig']):
+ sig.SetBinContent(i + 1, v)
sig.Sumw2()
f.Write()
diff --git a/validation/multichannel_histfactory/makedata.py b/validation/multichannel_histfactory/makedata.py
index 65fae6fc1b..961281f34b 100644
--- a/validation/multichannel_histfactory/makedata.py
+++ b/validation/multichannel_histfactory/makedata.py
@@ -2,41 +2,42 @@
import json
import sys
+
source_data = json.load(open(sys.argv[1]))
-root_file = sys.argv[2]
+root_file = sys.argv[2]
f = ROOT.TFile(root_file, 'RECREATE')
for cname, channel_def in source_data['channels'].iteritems():
- print 'CH',cname
+ print('CH', cname)
binning = channel_def['binning']
bindata = channel_def['bindata']
-
- data = ROOT.TH1F('{}_data'.format(cname),'{}_data'.format(cname),*binning)
- for i,v in enumerate(bindata['data']):
- data.SetBinContent(i+1,v)
+ data = ROOT.TH1F('{}_data'.format(cname), '{}_data'.format(cname), *binning)
+ for i, v in enumerate(bindata['data']):
+ data.SetBinContent(i + 1, v)
data.Sumw2()
- print data.GetName()
+ print(data.GetName())
- bkg = ROOT.TH1F('{}_bkg'.format(cname),'{}_bkg'.format(cname),*binning)
- for i,v in enumerate(bindata['bkg']):
- bkg.SetBinContent(i+1,v)
+ bkg = ROOT.TH1F('{}_bkg'.format(cname), '{}_bkg'.format(cname), *binning)
+ for i, v in enumerate(bindata['bkg']):
+ bkg.SetBinContent(i + 1, v)
bkg.Sumw2()
-
if 'bkgerr' in bindata:
- bkgerr = ROOT.TH1F('{}_bkgerr'.format(cname),'{}_bkgerr'.format(cname),*binning)
- #shapesys must be as multiplicative factor
- for i,v in enumerate(bindata['bkgerr']):
- bkgerr.SetBinContent(i+1,v / bkg.GetBinContent(i+1))
+ bkgerr = ROOT.TH1F(
+ '{}_bkgerr'.format(cname), '{}_bkgerr'.format(cname), *binning
+ )
+ # shapesys must be as multiplicative factor
+ for i, v in enumerate(bindata['bkgerr']):
+ bkgerr.SetBinContent(i + 1, v / bkg.GetBinContent(i + 1))
bkgerr.Sumw2()
if 'sig' in bindata:
- sig = ROOT.TH1F('{}_signal'.format(cname),'{}_signal'.format(cname),*binning)
- for i,v in enumerate(bindata['sig']):
- sig.SetBinContent(i+1,v)
+ sig = ROOT.TH1F('{}_signal'.format(cname), '{}_signal'.format(cname), *binning)
+ for i, v in enumerate(bindata['sig']):
+ sig.SetBinContent(i + 1, v)
sig.Sumw2()
f.Write()
diff --git a/validation/run_cls.py b/validation/run_cls.py
index a271aa2a3e..80339a2493 100644
--- a/validation/run_cls.py
+++ b/validation/run_cls.py
@@ -8,8 +8,6 @@
data = workspace.data("obsData")
-
-
sbModel = workspace.obj("ModelConfig")
poi = sbModel.GetParametersOfInterest().first()
@@ -26,14 +24,14 @@
ac.SetQTilde(True)
calc = ROOT.RooStats.HypoTestInverter(ac)
-calc.RunFixedScan(51,0,5)
+calc.RunFixedScan(51, 0, 5)
calc.SetConfidenceLevel(0.95)
calc.UseCLs(True)
result = calc.GetInterval()
-plot = ROOT.RooStats.HypoTestInverterPlot("plot","plot",result)
+plot = ROOT.RooStats.HypoTestInverterPlot("plot", "plot", result)
c = ROOT.TCanvas()
c.SetLogy(False)
plot.Draw("OBS EXP CLb 2CL")
@@ -41,7 +39,7 @@
c.SaveAs('scan.pdf')
-print 'observed: ', result.UpperLimit()
+print('observed: {}'.format(result.UpperLimit()))
-for i in [-2,-1,0,1,2]:
- print 'expected {}: '.format(i), result.GetExpectedUpperLimit(i)
+for i in [-2, -1, 0, 1, 2]:
+ print('expected {}: {}'.format(i, result.GetExpectedUpperLimit(i)))
diff --git a/validation/run_single.py b/validation/run_single.py
index 1482b8dbe2..2c7df23816 100644
--- a/validation/run_single.py
+++ b/validation/run_single.py
@@ -26,7 +26,7 @@
calc = ROOT.RooStats.HypoTestInverter(ac)
calc.SetConfidenceLevel(0.95)
calc.UseCLs(True)
-calc.RunFixedScan(1,1,1)
+calc.RunFixedScan(1, 1, 1)
result = calc.GetInterval()
@@ -39,4 +39,5 @@
CLs_exp = list(v)[3:-3]
import json
+
print(json.dumps({'CLs_obs': CLs_obs, 'CLs_exp': CLs_exp}, sort_keys=True, indent=4))
diff --git a/validation/shared_nuispar_across_types/make_data.py b/validation/shared_nuispar_across_types/make_data.py
index 18a17e9682..728835df8f 100644
--- a/validation/shared_nuispar_across_types/make_data.py
+++ b/validation/shared_nuispar_across_types/make_data.py
@@ -1,22 +1,23 @@
import ROOT
-sig = 'sig',[3,1]
-nom = 'nom',[12,13]
+sig = 'sig', [3, 1]
+nom = 'nom', [12, 13]
-histo_up = 'hup',[14,15]
-histo_dn = 'hdn',[10,11]
+histo_up = 'hup', [14, 15]
+histo_dn = 'hdn', [10, 11]
-data = 'data',[15,16]
+data = 'data', [15, 16]
import ROOT
-f = ROOT.TFile.Open('data.root','recreate')
+f = ROOT.TFile.Open('data.root', 'recreate')
-for n,h in [sig,nom,histo_up,histo_dn,data]:
- rh = ROOT.TH1F(n,n,2,-0.5,1.5)
- for i,c in enumerate(h):
- rh.SetBinContent(1+i,c)
- rh.Sumw2()
- rh.Write()
+
+for n, h in [sig, nom, histo_up, histo_dn, data]:
+ rh = ROOT.TH1F(n, n, 2, -0.5, 1.5)
+ for i, c in enumerate(h):
+ rh.SetBinContent(1 + i, c)
+ rh.Sumw2()
+ rh.Write()
f.Close()
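The 1 + i offset in the fill loop above reflects ROOT's bin convention, sketched without ROOT:

    # ROOT histogram bins are 1-based: bin 0 is underflow and bin
    # nbins + 1 is overflow, so enumerate's 0-based index is shifted.
    for i, content in enumerate([3, 1]):  # the 'sig' histogram above
        print('bin', 1 + i, '->', content)
    # -> bin 1 -> 3, bin 2 -> 1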
diff --git a/validation/xmlimport_input2/makedata.py b/validation/xmlimport_input2/makedata.py
index 497c71a03c..465fa49b01 100644
--- a/validation/xmlimport_input2/makedata.py
+++ b/validation/xmlimport_input2/makedata.py
@@ -2,25 +2,27 @@
import json
import sys
+
source_data = json.load(open(sys.argv[1]))
-root_file = sys.argv[2]
+root_file = sys.argv[2]
f = ROOT.TFile(root_file, 'RECREATE')
hists = []
for cname, channel_def in source_data['channels'].iteritems():
- print ('CH',cname)
+ print('CH', cname)
binning = channel_def['binning']
bindata = channel_def['bindata']
-
for hist, data in bindata.iteritems():
- print ('{}_{}'.format(cname,hist))
- h = ROOT.TH1F('{}_{}'.format(cname,hist),'{}_{}'.format(cname,hist),*binning)
+ print('{}_{}'.format(cname, hist))
+ h = ROOT.TH1F(
+ '{}_{}'.format(cname, hist), '{}_{}'.format(cname, hist), *binning
+ )
hists += [h]
- for i,v in enumerate(data):
- h.SetBinContent(i+1,v)
+ for i, v in enumerate(data):
+ h.SetBinContent(i + 1, v)
h.Sumw2()
f.Write()