Black and flake8 current code #4

Merged: 6 commits, Feb 4, 2022
3 changes: 3 additions & 0 deletions .flake8
@@ -0,0 +1,3 @@
[flake8]
per-file-ignores =
*/__init__.py:F401,F403
2 changes: 0 additions & 2 deletions fiddy/__init__.py
@@ -1,5 +1,3 @@
# noqa: F401, F403

from .constants import *
from . import difference
from . import function
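The per-file-ignores entry in the new .flake8 config takes over from the inline "# noqa: F401, F403" comment removed from fiddy/__init__.py: F401 (module imported but unused) and F403 (star import used) are the two codes flake8 raises for re-export-style package __init__ files. A minimal sketch of the pattern being allowed, mirroring the imports shown in the diff above:

    # __init__.py of a package
    from .constants import *   # would trigger F403
    from . import difference   # would trigger F401 (imported only to re-export)

With the [flake8] section above, both codes are ignored in every __init__.py, so per-line noqa comments become unnecessary.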
20 changes: 12 additions & 8 deletions fiddy/constants.py
@@ -1,11 +1,15 @@
from enum import Enum
from typing import Callable, Iterable, Union
from typing import Callable, Union

import numpy as np

__all__ = [
'TYPE_DIMENSION', 'TYPE_OUTPUT', 'TYPE_POINT', 'TYPE_FUNCTION',
'Difference', 'GradientCheckMethod'
"TYPE_DIMENSION",
"TYPE_FUNCTION",
"TYPE_OUTPUT",
"TYPE_POINT",
"Difference",
"GradientCheckMethod",
]

# Currently only 1D arrays are supported.
@@ -15,18 +19,18 @@
# - `.gradient_check` classes
# ... or just flatten
TYPE_DIMENSION = int
TYPE_POINT = np.ndarray
TYPE_OUTPUT = Union[float, int]
TYPE_POINT = np.ndarray
TYPE_FUNCTION = Callable[[TYPE_POINT], TYPE_OUTPUT]


class Difference(str, Enum):
FORWARD = 'forward'
BACKWARD = 'backward'
CENTRAL = 'central'
BACKWARD = "backward"
CENTRAL = "central"
FORWARD = "forward"


class GradientCheckMethod(str, Enum):
FORWARD = Difference.FORWARD
BACKWARD = Difference.BACKWARD
CENTRAL = Difference.CENTRAL
FORWARD = Difference.FORWARD
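For reference, the aliases above describe scalar-valued functions of a one-dimensional point: TYPE_POINT is an np.ndarray, TYPE_OUTPUT a float or int, and TYPE_FUNCTION a callable mapping one to the other. A hypothetical function matching the alias (not part of this package) could look like:

    import numpy as np

    from fiddy.constants import TYPE_OUTPUT, TYPE_POINT

    def sum_of_squares(point: TYPE_POINT) -> TYPE_OUTPUT:
        # conforms to TYPE_FUNCTION: maps a 1D point to a scalar
        return float(np.sum(point ** 2))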
2 changes: 1 addition & 1 deletion fiddy/difference.py
@@ -58,4 +58,4 @@ def central(
Returns:
The central difference.
"""
return function(point + step/2) - function(point - step/2)
return function(point + step / 2) - function(point - step / 2)
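Note that central returns only the numerator of the difference quotient, f(x + s/2) - f(x - s/2); dividing by the step size gives the usual derivative approximation. A rough usage sketch, assuming central takes function, point and step keyword arguments, matching the names in its return statement (the sine test function is illustrative):

    import numpy as np

    from fiddy import difference

    def f(point):
        # scalar-valued test function
        return float(np.sin(point[0]))

    point = np.array([1.0])
    step = np.array([1e-3])
    slope = difference.central(function=f, point=point, step=step) / 1e-3
    # slope is approximately cos(1.0), i.e. about 0.5403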
3 changes: 3 additions & 0 deletions fiddy/extensions/__init__.py
@@ -0,0 +1,3 @@
"""Extensions, e.g. for compatibility with other tools."""

from . import amici
29 changes: 15 additions & 14 deletions fiddy/extensions/amici/amici.py
@@ -1,7 +1,6 @@
from functools import partial
from typing import Any, Callable, Dict, List, Tuple

import amici
import numpy as np
from amici.petab_objective import LLH, SLLH
import petab
@@ -27,9 +26,9 @@ def transform_gradient_log10(gradient_value, parameter_value):


transforms = {
'lin': transform_gradient_lin,
'log': transform_gradient_log,
'log10': transform_gradient_log10,
"lin": transform_gradient_lin,
"log": transform_gradient_log,
"log10": transform_gradient_log10,
}


@@ -72,8 +71,9 @@ def simulate_petab_to_cached_functions(
for parameter_id in parameter_ids
]

simulate_petab_partial = \
partial(simulate_petab, petab_problem=petab_problem, *args, **kwargs)
simulate_petab_partial = partial(
simulate_petab, petab_problem=petab_problem, *args, **kwargs
)

def simulate_petab_full(point: TYPE_POINT):
problem_parameters = dict(zip(parameter_ids, point))
@@ -90,14 +90,15 @@ def function(point: TYPE_POINT):

def gradient(point: TYPE_POINT) -> TYPE_POINT:
result = simulate_petab_full_cached(point)
sllh = np.array([
gradient_transformations[parameter_index](
gradient_value=result[SLLH][parameter_id],
parameter_value=point[parameter_index],
)
for parameter_index, parameter_id in enumerate(parameter_ids)
])
sllh = np.array(
[
gradient_transformations[parameter_index](
gradient_value=result[SLLH][parameter_id],
parameter_value=point[parameter_index],
)
for parameter_index, parameter_id in enumerate(parameter_ids)
]
)
return sllh

return function, gradient

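For context on the transforms dictionary above: when a PEtab parameter is estimated on a log or log10 scale, a gradient computed with respect to the linear-scale value has to be rescaled with the chain rule, e.g. df/dlog10(p) = p * ln(10) * df/dp. The bodies of the transform_gradient_* helpers are not part of this diff; a sketch of what the log10 case presumably does:

    import numpy as np

    def transform_gradient_log10(gradient_value, parameter_value):
        # chain rule: d f / d log10(p) = p * ln(10) * d f / d p
        # (sketch only; the real implementation lives outside this diff)
        return gradient_value * parameter_value * np.log(10)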
19 changes: 11 additions & 8 deletions fiddy/function.py
@@ -7,14 +7,15 @@


default_memory_kwargs = {
'location': 'cache_fiddy',
'verbose': 0,
"location": "cache_fiddy",
"verbose": 0,
}
ram_cache_parent_path = Path("/dev/shm")


class Function:
"""Wrapper for functions."""

def __init__(
self,
function: TYPE_FUNCTION,
@@ -60,10 +61,11 @@ def __init__(
ram_cache:
Whether to cache in RAM. If `False`, disk is used instead.
"""
self.cache_path = \
kwargs.get('location', default_memory_kwargs['location'])
self.cache_path = kwargs.get(
"location", default_memory_kwargs["location"]
)
if ram_cache:
if 'location' in kwargs:
if "location" in kwargs:
raise ValueError(
"Do not supply a location when using `ram_cache`."
)
@@ -72,10 +74,11 @@
"The standard Linux shared memory location '/dev/shm' "
"does not exist."
)
self.cache_path = \
ram_cache_parent_path / default_memory_kwargs['location']
self.cache_path = (
ram_cache_parent_path / default_memory_kwargs["location"]
)
self.cache_path = Path(self.cache_path).resolve()
kwargs['location'] = str(self.cache_path)
kwargs["location"] = str(self.cache_path)

memory = joblib.Memory(**{**default_memory_kwargs, **kwargs})
self.function = memory.cache(function)
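The caching in Function is plain joblib memoization: joblib.Memory(location=..., verbose=...) creates a cache directory and memory.cache(f) returns a wrapped f whose results are stored there; with ram_cache=True the directory is placed under /dev/shm, so the cache lives in shared memory on Linux. A stand-alone sketch of the same mechanism, independent of this class (the function is a placeholder):

    import joblib
    import numpy as np

    memory = joblib.Memory(location="cache_fiddy", verbose=0)

    @memory.cache
    def expensive_simulation(x: float) -> float:
        # placeholder for a costly model evaluation
        return float(np.sin(x) ** 2)

    expensive_simulation(0.5)  # computed and written to the cache
    expensive_simulation(0.5)  # returned from the cache on the second call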
82 changes: 40 additions & 42 deletions fiddy/gradient_check.py
@@ -1,4 +1,3 @@
import abc
from dataclasses import dataclass
from functools import partial
from typing import Callable, Iterable, List, Tuple
@@ -10,11 +9,8 @@
TYPE_DIMENSION,
TYPE_FUNCTION,
TYPE_POINT,
Difference,
GradientCheckMethod,
)
from .misc import numpy_array_to_tuple
#from .quotient import forward, backward, central
from .step import dstep


@@ -27,8 +23,8 @@ def gradient_check(
stop_at_success: bool = True,
# TODO or custom Callable
fd_gradient_method: GradientCheckMethod = None,
#atol: float = 1e-2,
#rtol: float = 1e-2,
# atol: float = 1e-2, # TODO
# rtol: float = 1e-2, # TODO
check_protocol: List[Callable[[pd.DataFrame], None]] = None,
postprocessor_protocol: List[Callable[[pd.DataFrame], None]] = None,
) -> Tuple[bool, pd.DataFrame]:
@@ -60,8 +56,8 @@
check_protocol:
These methods are applied to the results, to perform the checks.
Defaults to `default_check_protocol`.
Methods in this protocol should set the `"success"` column to `True`
if the check passes, and put the reason for success in the
Methods in this protocol should set the `"success"` column to
`True` if the check passes, and put the reason for success in the
`"success_reason"` column.
postprocessor_protocol:
Similar to `check_protocol`, but applied after `check_protocol`.
@@ -77,7 +73,7 @@
if sizes is None:
sizes = [1e-1, 1e-3, 1e-5, 1e-7, 1e-9]
if fd_gradient_method is None:
fd_gradient_method = 'central'
fd_gradient_method = "central"
if check_protocol is None:
check_protocol = default_check_protocol
if postprocessor_protocol is None:
@@ -90,25 +86,27 @@
# Create a method to approximate the gradient. Should only require a
# step as its only argument (use as kwarg).
# `fd_gradient_callable` should only require a step to run.
if fd_gradient_method == 'forward':
fd_gradient_callable = \
partial(quotient.forward, function=function, point=point)
elif fd_gradient_method == 'backward':
fd_gradient_callable = \
partial(quotient.backward, function=function, point=point)
elif fd_gradient_method == 'central':
fd_gradient_callable = \
partial(quotient.central, function=function, point=point)
if fd_gradient_method == "forward":
fd_gradient_callable = partial(
quotient.forward, function=function, point=point
)
elif fd_gradient_method == "backward":
fd_gradient_callable = partial(
quotient.backward, function=function, point=point
)
elif fd_gradient_method == "central":
fd_gradient_callable = partial(
quotient.central, function=function, point=point
)
else:
raise NotImplementedError(f'Method: {fd_gradient_method}')
raise NotImplementedError(f"Method: {fd_gradient_method}")

for size in sizes:
for dimension in dimensions:
step = dstep(point=point, dimension=dimension, size=size)
test_gradient = fd_gradient_callable(step=step)
results.append(
Result(
#point=numpy_array_to_tuple(point),
dimension=dimension,
size=size,
test_gradient=test_gradient,
@@ -118,8 +116,8 @@
)

results_df = pd.DataFrame(results)
results_df['success'] = False
results_df['success_reason'] = None
results_df["success"] = False
results_df["success_reason"] = None
for check in check_protocol:
check(results_df)
if postprocessor_protocol is not None:
@@ -134,7 +132,8 @@
return success, results_df


# FIXME refactor to some `gradient.py` where these FD methods can be used to compute gradients.
# FIXME refactor to some `gradient.py` where these FD methods can be used to
# compute gradients.
# would result in or require a similar method
def simplify_results_df(results_df: pd.DataFrame) -> pd.DataFrame:
"""Keep only one row per successful dimension, in the dataframe.
@@ -150,7 +149,7 @@ def simplify_results_df(results_df: pd.DataFrame) -> pd.DataFrame:
The simplified results.
"""
dimension_result_dfs = []
for dimension, df in results_df.groupby('dimension'):
for dimension, df in results_df.groupby("dimension"):
# If any checks were successful for this dimension, only include the
# first successful check.
if df["success"].any():
@@ -167,8 +166,7 @@
@dataclass
class Result:
"""Information about a single finite difference gradient computation."""
# """The point at which the gradient was computed."""
# point: TYPE_POINT

size: float
"""The size of the step taken."""
dimension: TYPE_DIMENSION
@@ -183,54 +181,54 @@ class Result:

# FIXME string literals
def add_absolute_error(results_df):
results_df['|aerr|'] = abs(
results_df['test_gradient'] - results_df['expected_gradient']
results_df["|aerr|"] = abs(
results_df["test_gradient"] - results_df["expected_gradient"]
)


def check_absolute_error(results_df, tolerance: float = 1e-2):
success = results_df['|aerr|'] < tolerance
set_success(results_df, success, reason='|aerr|')
success = results_df["|aerr|"] < tolerance
set_success(results_df, success, reason="|aerr|")


def add_relative_error(results_df):
if '|aerr|' not in results_df.columns:
if "|aerr|" not in results_df.columns:
add_absolute_error(results_df)
epsilon = results_df['|aerr|'].min() * 1e-10
epsilon = results_df["|aerr|"].min() * 1e-10

results_df['|rerr|'] = abs(
results_df['|aerr|'] / (results_df['expected_gradient'] + epsilon)
results_df["|rerr|"] = abs(
results_df["|aerr|"] / (results_df["expected_gradient"] + epsilon)
)


def check_relative_error(results_df, tolerance: float = 1e-2):
success = results_df['|rerr|'] < tolerance
set_success(results_df, success, reason='|rerr|')
success = results_df["|rerr|"] < tolerance
set_success(results_df, success, reason="|rerr|")


def set_success(results_df: pd.DataFrame, success: pd.Series, reason: str):
new_success = success & ~results_df['success']
results_df['success'] = results_df['success'] | new_success
results_df['success_reason'].mask(
new_success = success & ~results_df["success"]
results_df["success"] = results_df["success"] | new_success
results_df["success_reason"].mask(
new_success,
reason,
inplace=True,
)


default_check_protocol = [
#set_all_failed,
# set_all_failed,
add_absolute_error,
add_relative_error,
check_absolute_error,
check_relative_error,
]


def keep_lowest_error(results_df, error='|rerr|'):
def keep_lowest_error(results_df, error="|rerr|"):
keep_indices = []

for dimension, df in results_df.groupby('dimension'):
for dimension, df in results_df.groupby("dimension"):
# Keep best success from each dimension.
if df["success"].any():
keep_index = df.loc[df["success"]][error].idxmin()
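The check functions above share one contract: each receives the results DataFrame, computes a boolean Series, and hands it to set_success together with a reason string; add_absolute_error and add_relative_error populate the "|aerr|" and "|rerr|" columns they rely on. A hypothetical extra check written in the same style and appended to the default protocol (the name and the combination rule are illustrative):

    def check_either_error(results_df, tolerance: float = 1e-2):
        # hypothetical: accept a row if either error measure is below tolerance
        success = (results_df["|aerr|"] < tolerance) | (results_df["|rerr|"] < tolerance)
        set_success(results_df, success, reason="|aerr| or |rerr|")

    custom_protocol = default_check_protocol + [check_either_error]
    # could then be passed to gradient_check via its check_protocol argument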
5 changes: 1 addition & 4 deletions fiddy/misc.py
@@ -18,9 +18,6 @@ def numpy_array_to_tuple(array: np.ndarray) -> Tuple:
# Iterate over the array, turning iterable
# elements also into tuples, recursively.
try:
return tuple(
numpy_array_to_tuple(element)
for element in array
)
return tuple(numpy_array_to_tuple(element) for element in array)
except TypeError:
return array
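numpy_array_to_tuple recursively converts iterable elements into tuples and falls back to returning the element itself once iteration raises TypeError, so a NumPy array becomes a nested tuple of its scalar elements. A small usage sketch:

    import numpy as np

    from fiddy.misc import numpy_array_to_tuple

    numpy_array_to_tuple(np.array([[1.0, 2.0], [3.0, 4.0]]))
    # -> ((1.0, 2.0), (3.0, 4.0)), with NumPy scalars as elements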
2 changes: 0 additions & 2 deletions fiddy/quotient.py
@@ -1,5 +1,3 @@
from typing import Optional

import numpy as np

from .constants import (