From 51fe47a3ba88371542bdc06c557a602bdc9cc8e4 Mon Sep 17 00:00:00 2001 From: aaarrti Date: Thu, 22 Jun 2023 13:57:06 +0200 Subject: [PATCH 1/5] update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index bc8c1de0b..af55be2fd 100644 --- a/README.md +++ b/README.md @@ -235,7 +235,7 @@ else: model.load_state_dict(torch.load("tests/assets/mnist")) # Load datasets and make loaders. -test_set = torchvision.datasets.MNIST(root='./sample_data', download=True, transforms=transforms.Compose([transforms.ToTensor()])) +test_set = torchvision.datasets.MNIST(root='./sample_data', download=True, transform=transforms.Compose([transforms.ToTensor()])) test_loader = torch.utils.data.DataLoader(test_set, batch_size=24) # Load a batch of inputs and outputs to use for XAI evaluation. @@ -266,7 +266,7 @@ and `a_batch_intgrad`: ```python a_batch_saliency = load("path/to/precomputed/saliency/explanations") -a_batch_saliency = load("path/to/precomputed/intgrad/explanations") +a_batch_intgrad = load("path/to/precomputed/intgrad/explanations") ``` Another option is to simply obtain the attributions using one of many XAI frameworks out there, From 07ddbe2f401d080654657b1616a3efc89dbd76d1 Mon Sep 17 00:00:00 2001 From: annahedstroem Date: Fri, 23 Jun 2023 12:41:20 +0200 Subject: [PATCH 2/5] fixed Issue 276 --- README.md | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index af55be2fd..47167b5cb 100644 --- a/README.md +++ b/README.md @@ -322,7 +322,9 @@ metric = quantus.MaxSensitivity(nr_samples=10, norm_numerator=quantus.fro_norm, norm_denominator=quantus.fro_norm, perturb_func=quantus.uniform_noise, - similarity_func=quantus.difference) + similarity_func=quantus.difference, + abs=True, + normalise=True) ``` and then applied to your model, data, and (pre-computed) explanations: @@ -333,26 +335,14 @@ scores = metric( x_batch=x_batch, y_batch=y_batch, 
a_batch=a_batch_saliency, - device=device -) -``` - -#### Use quantus.explain - -Alternatively, instead of providing pre-computed explanations, you can employ the `quantus.explain` function, -which can be specified through a dictionary passed to `explain_func_kwargs`. - -```python -scores = metric( - model=model, - x_batch=x_batch, - y_batch=y_batch, device=device, explain_func=quantus.explain, - explain_func_kwargs={"method": "Saliency"} + explain_func_kwargs={"method": "Saliency"}, ) ``` +In this example, we rely on the built-in `quantus.explain` function in order to recompute the explanations during the robustness evaluation procedure. Further hyperparameters for generating the explanations can be specified through a dictionary passed to `explain_func_kwargs`. Please find more details on how to use `quantus.explain` at [API documentation](https://quantus.readthedocs.io/en/latest/docs_api/quantus.functions.explanation_func.html). + #### Employ customised functions You can alternatively use your own customised explanation function From 8acbec67ed36122aa0245392dd436af3cf960b09 Mon Sep 17 00:00:00 2001 From: annahedstroem Date: Fri, 23 Jun 2023 12:44:18 +0200 Subject: [PATCH 3/5] fixed issue 273 --- .gitignore | 3 +++ quantus/helpers/asserts.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index d9a4d1488..8630f2338 100644 --- a/.gitignore +++ b/.gitignore @@ -40,3 +40,6 @@ tutorials/invited_talks/ # PyCharm project files .idea + +# Tox files. +.tox \ No newline at end of file diff --git a/quantus/helpers/asserts.py b/quantus/helpers/asserts.py index d895491bf..c6f7109ff 100644 --- a/quantus/helpers/asserts.py +++ b/quantus/helpers/asserts.py @@ -122,7 +122,7 @@ def assert_attributions_order(order: str) -> None: assert order in [ "random", "morf", - "lorf", + "lerf", -], "The order of sorting the attributions must be either random, morf, or lorf." +], "The order of sorting the attributions must be either random, morf, or lerf." 
From 7afc5fde04c76147d28c3dc7bff82fd4baa8dac2 Mon Sep 17 00:00:00 2001 From: annahedstroem Date: Fri, 23 Jun 2023 13:09:25 +0200 Subject: [PATCH 4/5] fixed explanation of quantus.explain --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 47167b5cb..9c44a437a 100644 --- a/README.md +++ b/README.md @@ -341,7 +341,9 @@ scores = metric( ) ``` -In this example, we rely on the built-in `quantus.explain` function in order to recompute the explanations during the robustness evaluation procedure. Further hyperparameters for generating the explanations can be specified through a dictionary passed to `explain_func_kwargs`. Please find more details on how to use `quantus.explain` at [API documentation](https://quantus.readthedocs.io/en/latest/docs_api/quantus.functions.explanation_func.html). +#### Use quantus.explain + +Since a re-computation of the explanations is necessary for robustness evaluation, in this example, we also pass an explanation function (`explain_func`) to the metric call. Here, we rely on the built-in `quantus.explain` function to recompute the explanations. The hyperparameters are set with the `explain_func_kwargs` dictionary. Please find more details on how to use `quantus.explain` at [API documentation](https://quantus.readthedocs.io/en/latest/docs_api/quantus.functions.explanation_func.html). #### Employ customised functions From 911e3225067a01c7bc16f8a15830ee6538c96b8d Mon Sep 17 00:00:00 2001 From: annahedstroem Date: Fri, 23 Jun 2023 13:10:04 +0200 Subject: [PATCH 5/5] updated docs! 
--- .../getting_started/getting_started_example.md | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/docs/source/getting_started/getting_started_example.md b/docs/source/getting_started/getting_started_example.md index 11a38e67f..d12216e1b 100644 --- a/docs/source/getting_started/getting_started_example.md +++ b/docs/source/getting_started/getting_started_example.md @@ -126,19 +126,6 @@ metric = quantus.MaxSensitivity() and then applied to your model, data, and (pre-computed) explanations: -```python -scores = metric( - model=model, - x_batch=x_batch, - y_batch=y_batch, - a_batch=a_batch_saliency, - device=device -) -``` - -Alternatively, instead of providing pre-computed explanations, you can employ the `quantus.explain` function, -which can be specified through a dictionary passed to `explain_func_kwargs`. - ```python scores = metric( model=model, @@ -149,6 +136,9 @@ scores = metric( explain_func_kwargs={"method": "Saliency"} ) ``` + +Since a re-computation of the explanations is necessary for robustness evaluation, in this example, we also pass an explanation function (`explain_func`) to the metric call. Here, we rely on the built-in `quantus.explain` function to recompute the explanations. The hyperparameters are set with the `explain_func_kwargs` dictionary. Please find more details on how to use `quantus.explain` at [API documentation](https://quantus.readthedocs.io/en/latest/docs_api/quantus.functions.explanation_func.html). + You can alternatively use your own customised explanation function (assuming it returns an `np.ndarray` in a shape that matches the input `x_batch`). This is done as follows: