diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml
deleted file mode 100644
index 0a8b8e9..0000000
--- a/.github/workflows/black.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: Lint
-
-on: [push, pull_request]
-
-jobs:
- lint:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - uses: psf/black@stable
\ No newline at end of file
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 0000000..b268138
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,8 @@
+name: Ruff
+on: [push, pull_request]
+jobs:
+ ruff:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: chartboost/ruff-action@v1
diff --git a/.github/workflows/readthedocs-pr.yaml b/.github/workflows/readthedocs-pr.yaml
new file mode 100644
index 0000000..f66cf6a
--- /dev/null
+++ b/.github/workflows/readthedocs-pr.yaml
@@ -0,0 +1,16 @@
+name: readthedocs/actions
+on:
+ pull_request_target:
+ types:
+ - opened
+
+permissions:
+ pull-requests: write
+
+jobs:
+ pull-request-links:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: readthedocs/actions/preview@v1
+ with:
+        project-slug: "secml-torch"  # TODO(review): confirm actual Read the Docs project slug
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
new file mode 100644
index 0000000..7b03f27
--- /dev/null
+++ b/.github/workflows/tests.yml
@@ -0,0 +1,25 @@
+name: Test and codecov report
+on: [push, pull_request]
+jobs:
+ run:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.10'
+ - name: Install dependencies
+ run: pip install -r requirements.txt
+ - name: Install test requirements
+ run: pip install -r requirements-test.txt
+ - name: Run tests and collect coverage
+ run: pytest --cov src/
+ - name: Upload coverage reports to Codecov
+ uses: codecov/codecov-action@v4.0.1
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
+ slug: pralab/secml-torch
diff --git a/.gitignore b/.gitignore
index 446d990..b3aad53 100644
--- a/.gitignore
+++ b/.gitignore
@@ -244,4 +244,7 @@ $RECYCLE.BIN/
*.msp
# Windows shortcuts
-*.lnk
\ No newline at end of file
+*.lnk
+
+# Example models and datasets
+examples/example_data/*
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..80e9050
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,23 @@
+default_language_version:
+ python: python3.9
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.4.0
+ hooks:
+ - id: check-added-large-files
+ - id: check-toml
+ - id: check-yaml
+ args:
+ - --unsafe
+ - id: end-of-file-fixer
+ - id: trailing-whitespace
+- repo: https://github.com/charliermarsh/ruff-pre-commit
+ rev: v0.1.2
+ hooks:
+ - id: ruff
+ args:
+ - --fix
+ - id: ruff-format
+ci:
+ autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks
+ autoupdate_commit_msg: ⬆ [pre-commit.ci] pre-commit autoupdate
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
new file mode 100644
index 0000000..31b6209
--- /dev/null
+++ b/.readthedocs.yaml
@@ -0,0 +1,28 @@
+# .readthedocs.yaml
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+# Set the OS, Python version and other tools you might need
+build:
+ os: ubuntu-22.04
+ tools:
+ python: "3.10"
+
+# Build documentation in the "docs/" directory with Sphinx
+sphinx:
+  configuration: docs/source/conf.py
+
+# Optionally build your docs in additional formats such as PDF and ePub
+# formats:
+# - pdf
+# - epub
+
+# Optional but recommended, declare the Python requirements required
+# to build your documentation
+# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
+# python:
+# install:
+# - requirements: docs/requirements.txt
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 0a99704..598525c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,6 @@
-# SecMLT: Contribution Guide for Adversarial Machine Learning and Robustness Evaluation
+# SecMLT: Contribution Guide
-SecMLT is an open-source Python library for Adversarial Machine Learning and robustness evaluation. We welcome contributions from the research community to expand its capabilities, improve its functionality, or add new features. In this guide, we will discuss how to contribute to SecMLT through forks, pull requests, and code formatting using Black.
+SecMLT is an open-source Python library for Adversarial Machine Learning and robustness evaluation. We welcome contributions from the research community to expand its capabilities, improve its functionality, or add new features. In this guide, we will discuss how to contribute to SecMLT through forks, pull requests, and code formatting using Ruff.
## Prerequisites
@@ -39,10 +39,55 @@ copy of the SecMLT repository under your GitHub account.
## Formatting your code
-SecMLT uses Black for ensuring high-quality code formatting. Before submitting a pull request, make sure your code adheres to the SecMLT style guide by running the following command in the root directory:
- ```bash
- black .
- ```
+In our project, we leverage [Ruff](https://docs.astral.sh/ruff/) and [pre-commit](https://pre-commit.com) to enhance code quality and streamline the development process.
+Ruff is a static code linter, while Pre-commit is a framework for defining pre-commit hooks.
+
+### Using Ruff
+
+Ruff is integrated into our project to perform code linting.
+It helps ensure adherence to coding standards, identifies potential bugs, and enhances overall code quality. Here's how to use Ruff:
+
+1. **Installation**: Make sure you have Ruff installed in your development environment. You can install it via pip:
+ ```
+ pip install ruff
+ ```
+
+2. **Running Ruff**: To analyze your codebase using Ruff, navigate to the project directory and run the following command:
+ ```
+ ruff check
+ ```
+ Ruff will analyze the codebase and provide feedback on potential issues and areas for improvement.
+
+### Using Pre-commit
+
+Pre-commit is employed to automate various tasks such as code formatting, linting, and ensuring code consistency across different environments. We use it to enforce Ruff formatting *before* commit.
+Here's how to utilize Pre-commit:
+
+1. **Installation**: Ensure that Pre-commit is installed in your environment. You can install it using pip:
+ ```
+ pip install pre-commit
+ ```
+
+2. **Configuration**: The project includes a `.pre-commit-config.yaml` file that specifies the hooks to be executed by Pre-commit. These hooks can include tasks such as code formatting, static analysis, and more.
+
+3. **Installation of Hooks**: Run the following command in the project directory to install the Pre-commit hooks:
+ ```
+ pre-commit install
+ ```
+ This command will set up the hooks specified in the configuration file to run automatically before each commit.
+
+4. **Running Pre-commit**: Whenever you make changes and attempt to commit them, Pre-commit will automatically execute the configured hooks. If any issues are found, Pre-commit will prevent the commit from proceeding and provide feedback on the detected issues.
+
+### Contributing with your Code
+
+When contributing code to the project, follow these guidelines to ensure a smooth and efficient contribution process:
+
+1. **Run Ruff and Pre-commit Locally**: Before making a pull request, run Ruff and Pre-commit locally to identify and fix potential issues in your code.
+
+2. **Address Ruff and Pre-commit Warnings**: If Ruff or Pre-commit identifies any issues, address them before submitting your code for review. This ensures that the codebase maintains high standards of quality and consistency.
+
+3. **Document Changes**: Clearly document any changes you make, including the rationale behind the changes and any potential impact on existing functionality.
+
4. If there are no issues with your code, commit the changes using the `git add` command and push them to your forked repository:
```bash
git add .
@@ -58,4 +103,4 @@ SecMLT uses Black for ensuring high-quality code formatting. Before submitting a
4. Submit your pull request by clicking "Create pull request".
5. The SecMLT maintainers will review your pull request, provide feedback, or merge it into the main repository as appropriate.
-We appreciate your contributions to SecMLT! If you have any questions or need assistance during the process, please don't hesitate to reach out to us on GitHub or other communication channels.
\ No newline at end of file
+We appreciate your contributions to SecMLT! If you have any questions or need assistance during the process, please don't hesitate to reach out to us on GitHub or other communication channels.
diff --git a/README.md b/README.md
index 64e12a0..e5f7d5b 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,11 @@
# SecML-Torch: A Library for Robustness Evaluation of Deep Learning Models
-SecML-Torch (SecMLT) is an open-source Python library designed to facilitate research in the area of Adversarial Machine Learning (AML) and robustness evaluation.
+
+
+
+
+
+SecML-Torch (SecMLT) is an open-source Python library designed to facilitate research in the area of Adversarial Machine Learning (AML) and robustness evaluation.
The library provides a simple yet powerful interface for generating various types of adversarial examples, as well as tools for evaluating the robustness of machine learning models against such attacks.
## Installation
@@ -14,7 +19,7 @@ This will install the core version of SecMLT, including only the main functional
### Install with extras
-The library can be installed together with other plugins that enable further functionalities.
+The library can be installed together with other plugins that enable further functionalities.
* [Foolbox](https://github.com/bethgelab/foolbox), a Python toolbox to create adversarial examples.
* [Tensorboard](https://www.tensorflow.org/tensorboard), a visualization toolkit for machine learning experimentation.
@@ -65,7 +70,5 @@ For more detailed usage instructions and examples, please refer to the [official
## Contributing
-We welcome contributions from the research community to expand the library's capabilities or add new features.
+We welcome contributions from the research community to expand the library's capabilities or add new features.
If you would like to contribute to SecMLT, please follow our [contribution guidelines](https://github.com/pralab/secml-torch/blob/main/CONTRIBUTING.md).
-
-
diff --git a/conftest.py b/conftest.py
new file mode 100644
index 0000000..971d955
--- /dev/null
+++ b/conftest.py
@@ -0,0 +1,5 @@
+"""Configuration for tests."""
+
+pytest_plugins = [
+ "secmlt.tests.fixtures",
+]
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..d0c3cbf
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 0000000..747ffb7
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.https://www.sphinx-doc.org/
+ exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 0000000..f9c6aa2
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,48 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# For the full list of built-in configuration values, see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Project information -----------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
+import sys, os
+
+sys.path.insert(0, os.path.abspath("../../src"))
+
+project = "SecML-Torch"
+copyright = "2024, Maura Pintor, Luca Demetrio"
+author = "Maura Pintor, Luca Demetrio"
+release = "v0.1"
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+extensions = [
+ "sphinx.ext.napoleon",
+]
+
+napoleon_google_docstring = False
+napoleon_use_param = False
+napoleon_use_ivar = True
+
+templates_path = ["_templates"]
+exclude_patterns = ["*tests*"]
+
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+html_theme = "sphinx_rtd_theme"
+html_static_path = ["_static"]
+
+
+# -- Add readme and contribution guide -------------------------------------------------
+
+import pathlib
+
+for m in ["Readme.md", "CONTRIBUTING.md"]:
+ source_path = pathlib.Path(__file__).parent.resolve().parent.parent / m
+ target_path = pathlib.Path(__file__).parent / m.lower().replace(".md", ".rst")
+ from m2r import convert
+
+ with target_path.open("w") as outf: # Change the title to "Readme"
+ outf.write(convert(source_path.read_text()))
diff --git a/docs/source/contributing.rst b/docs/source/contributing.rst
new file mode 100644
index 0000000..679c343
--- /dev/null
+++ b/docs/source/contributing.rst
@@ -0,0 +1,145 @@
+
+SecMLT: Contribution Guide
+==========================
+
+SecMLT is an open-source Python library for Adversarial Machine Learning and robustness evaluation. We welcome contributions from the research community to expand its capabilities, improve its functionality, or add new features. In this guide, we will discuss how to contribute to SecMLT through forks, pull requests, and code formatting using Ruff.
+
+Prerequisites
+-------------
+
+Before contributing to SecMLT:
+
+
+#. Familiarize yourself with the library by reviewing the `official documentation `_ and exploring the existing codebase.
+#. Install the required dependencies (refer to `the installation guide `_\ ).
+
+Setting up your development environment
+---------------------------------------
+
+To contribute to SecMLT, follow these steps:
+
+
+#.
+  **Fork the repository**\ : Go to the `SecMLT GitHub page <https://github.com/pralab/secml-torch>`_ and click "Fork" in the upper-right corner. This will create a
+ copy of the SecMLT repository under your GitHub account.
+
+#.
+ **Clone your forked repository**\ : Clone your forked repository to your local machine using ``git clone`` command:
+
+ .. code-block:: bash
+
+      git clone <your-fork-url> secmlt
+
+#. **Set up remote repositories**\ : Add the original SecMLT repository as an upstream remote and set the tracking branch to be ``master``\ :
+ .. code-block:: bash
+
+ cd secmlt
+      git remote add upstream https://github.com/pralab/secml-torch.git
+ git fetch upstream
+ git checkout master --track upstream/master
+
+Making changes
+--------------
+
+
+#. Create a new branch for your feature, bug fix, or documentation update:
+ .. code-block:: bash
+
+      git checkout -b <your-branch-name>
+
+#. Make the necessary changes to the codebase (add features, fix bugs, improve documentation, etc.). Be sure to write clear and descriptive commit messages.
+#. Test your changes locally using appropriate testing frameworks and tools.
+
+Formatting your code
+--------------------
+
+In our project, we leverage `Ruff <https://docs.astral.sh/ruff/>`_ and `pre-commit <https://pre-commit.com>`_ to enhance code quality and streamline the development process.
+Ruff is a static code linter, while Pre-commit is a framework for defining pre-commit hooks.
+
+Using Ruff
+^^^^^^^^^^
+
+Ruff is integrated into our project to perform code linting.
+It helps ensure adherence to coding standards, identifies potential bugs, and enhances overall code quality. Here's how to use Ruff:
+
+
+#.
+ **Installation**\ : Make sure you have Ruff installed in your development environment. You can install it via pip:
+
+ .. code-block::
+
+ pip install ruff
+
+#.
+ **Running Ruff**\ : To analyze your codebase using Ruff, navigate to the project directory and run the following command:
+
+ .. code-block::
+
+ ruff check
+
+ Ruff will analyze the codebase and provide feedback on potential issues and areas for improvement.
+
+Using Pre-commit
+^^^^^^^^^^^^^^^^
+
+Pre-commit is employed to automate various tasks such as code formatting, linting, and ensuring code consistency across different environments. We use it to enforce Ruff formatting *before* commit.
+Here's how to utilize Pre-commit:
+
+
+#.
+ **Installation**\ : Ensure that Pre-commit is installed in your environment. You can install it using pip:
+
+ .. code-block::
+
+ pip install pre-commit
+
+#.
+ **Configuration**\ : The project includes a ``.pre-commit-config.yaml`` file that specifies the hooks to be executed by Pre-commit. These hooks can include tasks such as code formatting, static analysis, and more.
+
+#.
+ **Installation of Hooks**\ : Run the following command in the project directory to install the Pre-commit hooks:
+
+ .. code-block::
+
+ pre-commit install
+
+ This command will set up the hooks specified in the configuration file to run automatically before each commit.
+
+#.
+ **Running Pre-commit**\ : Whenever you make changes and attempt to commit them, Pre-commit will automatically execute the configured hooks. If any issues are found, Pre-commit will prevent the commit from proceeding and provide feedback on the detected issues.
+
+Contributing with your Code
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When contributing code to the project, follow these guidelines to ensure a smooth and efficient contribution process:
+
+
+#.
+ **Run Ruff and Pre-commit Locally**\ : Before making a pull request, run Ruff and Pre-commit locally to identify and fix potential issues in your code.
+
+#.
+ **Address Ruff and Pre-commit Warnings**\ : If Ruff or Pre-commit identifies any issues, address them before submitting your code for review. This ensures that the codebase maintains high standards of quality and consistency.
+
+#.
+ **Document Changes**\ : Clearly document any changes you make, including the rationale behind the changes and any potential impact on existing functionality.
+
+#.
+ If there are no issues with your code, commit the changes using the ``git add`` command and push them to your forked repository:
+
+ .. code-block:: bash
+
+ git add .
+ git commit -m "Your commit message"
+      git push origin <your-branch-name>
+
+Submitting a pull request
+-------------------------
+
+
+#. Go to your forked repository on GitHub and click the "New pull request" button.
+#. Choose the branch you've created as the source branch, and select ``master`` as the target branch.
+#. Review the changes you're submitting and write a clear and descriptive pull request title and description.
+#. Submit your pull request by clicking "Create pull request".
+#. The SecMLT maintainers will review your pull request, provide feedback, or merge it into the main repository as appropriate.
+
+We appreciate your contributions to SecMLT! If you have any questions or need assistance during the process, please don't hesitate to reach out to us on GitHub or other communication channels.
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 0000000..c63ed31
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,29 @@
+.. SecML-Torch documentation master file, created by
+ sphinx-quickstart on Sun Mar 17 21:27:29 2024.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to SecML-Torch's documentation!
+=======================================
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Contents:
+
+ readme
+ contributing
+ secmlt.adv
+ secmlt.data
+ secmlt.manipulations
+ secmlt.metrics
+ secmlt.models
+ secmlt.optimization
+ secmlt.trackers
+ secmlt.utils
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/docs/source/modules.rst b/docs/source/modules.rst
new file mode 100644
index 0000000..eb91da9
--- /dev/null
+++ b/docs/source/modules.rst
@@ -0,0 +1,7 @@
+secmlt
+======
+
+.. toctree::
+ :maxdepth: 4
+
+ secmlt
diff --git a/docs/source/readme.rst b/docs/source/readme.rst
new file mode 100644
index 0000000..6c9c820
--- /dev/null
+++ b/docs/source/readme.rst
@@ -0,0 +1,87 @@
+.. role:: raw-html-m2r(raw)
+ :format: html
+
+
+SecML-Torch: A Library for Robustness Evaluation of Deep Learning Models
+========================================================================
+
+ :raw-html-m2r:`
`
+
+:raw-html-m2r:`
`
+
+SecML-Torch (SecMLT) is an open-source Python library designed to facilitate research in the area of Adversarial Machine Learning (AML) and robustness evaluation.
+The library provides a simple yet powerful interface for generating various types of adversarial examples, as well as tools for evaluating the robustness of machine learning models against such attacks.
+
+Installation
+------------
+
+You can install SecMLT via pip:
+
+.. code-block:: bash
+
+ pip install secml-torch
+
+This will install the core version of SecMLT, including only the main functionalities such as native implementation of attacks and PyTorch wrappers.
+
+Install with extras
+^^^^^^^^^^^^^^^^^^^
+
+The library can be installed together with other plugins that enable further functionalities.
+
+
+* `Foolbox <https://github.com/bethgelab/foolbox>`_\ , a Python toolbox to create adversarial examples.
+* `Tensorboard <https://www.tensorflow.org/tensorboard>`_\ , a visualization toolkit for machine learning experimentation.
+
+Install one or more extras with the command:
+
+.. code-block:: bash
+
+ pip install secml-torch[foolbox,tensorboard]
+
+Key Features
+------------
+
+
+* **Built for Deep Learning:** SecMLT is compatible with the popular machine learning framework PyTorch.
+* **Various types of adversarial attacks:** SecMLT includes support for a wide range of attack methods (evasion, poisoning, ...) such as different implementations imported from popular AML libraries (Foolbox, Adversarial Library).
+* **Customizable attacks:** SecMLT offers several levels of analysis for the models, including modular implementations of existing attacks to extend with different loss functions, optimizers, and more.
+* **Attack debugging:** Built-in debugging of evaluations by logging events and metrics along the attack runs (even on Tensorboard).
+
+Usage
+-----
+
+Here's a brief example of using SecMLT to evaluate the robustness of a trained classifier:
+
+.. code-block:: python
+
+ from secmlt.adv.evasion.pgd import PGD
+ from secmlt.metrics.classification import Accuracy
+ from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier
+
+
+ model = ...
+ torch_data_loader = ...
+
+ # Wrap model
+ model = BasePytorchClassifier(model)
+
+ # create and run attack
+ attack = PGD(
+ perturbation_model="l2",
+ epsilon=0.4,
+ num_steps=100,
+ step_size=0.01,
+ )
+
+ adversarial_loader = attack(model, torch_data_loader)
+
+ # Test accuracy on adversarial examples
+ robust_accuracy = Accuracy()(model, adversarial_loader)
+
+For more detailed usage instructions and examples, please refer to the `official documentation `_ or to the `examples `_.
+
+Contributing
+------------
+
+We welcome contributions from the research community to expand the library's capabilities or add new features.
+If you would like to contribute to SecMLT, please follow our `contribution guidelines <https://github.com/pralab/secml-torch/blob/main/CONTRIBUTING.md>`_.
diff --git a/docs/source/secmlt.adv.evasion.aggregators.rst b/docs/source/secmlt.adv.evasion.aggregators.rst
new file mode 100644
index 0000000..18081dd
--- /dev/null
+++ b/docs/source/secmlt.adv.evasion.aggregators.rst
@@ -0,0 +1,21 @@
+secmlt.adv.evasion.aggregators
+======================================
+
+Submodules
+----------
+
+secmlt.adv.evasion.aggregators.ensemble
+----------------------------------------------
+
+.. automodule:: secmlt.adv.evasion.aggregators.ensemble
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: secmlt.adv.evasion.aggregators
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/secmlt.adv.evasion.foolbox_attacks.rst b/docs/source/secmlt.adv.evasion.foolbox_attacks.rst
new file mode 100644
index 0000000..e1e0350
--- /dev/null
+++ b/docs/source/secmlt.adv.evasion.foolbox_attacks.rst
@@ -0,0 +1,29 @@
+secmlt.adv.evasion.foolbox\_attacks
+===========================================
+
+Submodules
+----------
+
+secmlt.adv.evasion.foolbox\_attacks.foolbox\_base
+--------------------------------------------------------
+
+.. automodule:: secmlt.adv.evasion.foolbox_attacks.foolbox_base
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.adv.evasion.foolbox\_attacks.foolbox\_pgd
+-------------------------------------------------------
+
+.. automodule:: secmlt.adv.evasion.foolbox_attacks.foolbox_pgd
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: secmlt.adv.evasion.foolbox_attacks
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/secmlt.adv.evasion.rst b/docs/source/secmlt.adv.evasion.rst
new file mode 100644
index 0000000..a05a789
--- /dev/null
+++ b/docs/source/secmlt.adv.evasion.rst
@@ -0,0 +1,54 @@
+secmlt.adv.evasion
+==========================
+
+Subpackages
+-----------
+
+.. toctree::
+ :maxdepth: 4
+
+ secmlt.adv.evasion.aggregators
+ secmlt.adv.evasion.foolbox_attacks
+
+Submodules
+----------
+
+secmlt.adv.evasion.base\_evasion\_attack
+-----------------------------------------------
+
+.. automodule:: secmlt.adv.evasion.base_evasion_attack
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.adv.evasion.modular\_attack
+-----------------------------------------
+
+.. automodule:: secmlt.adv.evasion.modular_attack
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.adv.evasion.perturbation\_models
+----------------------------------------------
+
+.. automodule:: secmlt.adv.evasion.perturbation_models
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.adv.evasion.pgd
+-----------------------------
+
+.. automodule:: secmlt.adv.evasion.pgd
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: secmlt.adv.evasion
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/secmlt.adv.rst b/docs/source/secmlt.adv.rst
new file mode 100644
index 0000000..7ab4df4
--- /dev/null
+++ b/docs/source/secmlt.adv.rst
@@ -0,0 +1,29 @@
+secmlt.adv
+==================
+
+Subpackages
+-----------
+
+.. toctree::
+ :maxdepth: 4
+
+ secmlt.adv.evasion
+
+Submodules
+----------
+
+secmlt.adv.backends module
+--------------------------
+
+.. automodule:: secmlt.adv.backends
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: secmlt.adv
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/secmlt.data.rst b/docs/source/secmlt.data.rst
new file mode 100644
index 0000000..02fd32f
--- /dev/null
+++ b/docs/source/secmlt.data.rst
@@ -0,0 +1,10 @@
+secmlt.data
+===================
+
+Module contents
+---------------
+
+.. automodule:: secmlt.data
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/secmlt.manipulations.rst b/docs/source/secmlt.manipulations.rst
new file mode 100644
index 0000000..643ad82
--- /dev/null
+++ b/docs/source/secmlt.manipulations.rst
@@ -0,0 +1,21 @@
+secmlt.manipulations
+============================
+
+Submodules
+----------
+
+secmlt.manipulations.manipulation
+----------------------------------------
+
+.. automodule:: secmlt.manipulations.manipulation
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: secmlt.manipulations
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/secmlt.metrics.rst b/docs/source/secmlt.metrics.rst
new file mode 100644
index 0000000..edf8f4d
--- /dev/null
+++ b/docs/source/secmlt.metrics.rst
@@ -0,0 +1,21 @@
+secmlt.metrics
+======================
+
+Submodules
+----------
+
+secmlt.metrics.classification
+------------------------------------
+
+.. automodule:: secmlt.metrics.classification
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: secmlt.metrics
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/secmlt.models.data_processing.rst b/docs/source/secmlt.models.data_processing.rst
new file mode 100644
index 0000000..df9b8ab
--- /dev/null
+++ b/docs/source/secmlt.models.data_processing.rst
@@ -0,0 +1,29 @@
+secmlt.models.data\_processing
+======================================
+
+Submodules
+----------
+
+secmlt.models.data\_processing.data\_processing
+------------------------------------------------------
+
+.. automodule:: secmlt.models.data_processing.data_processing
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.models.data\_processing.identity\_data\_processing
+----------------------------------------------------------------
+
+.. automodule:: secmlt.models.data_processing.identity_data_processing
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: secmlt.models.data_processing
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/secmlt.models.pytorch.rst b/docs/source/secmlt.models.pytorch.rst
new file mode 100644
index 0000000..667e197
--- /dev/null
+++ b/docs/source/secmlt.models.pytorch.rst
@@ -0,0 +1,29 @@
+secmlt.models.pytorch
+=============================
+
+Submodules
+----------
+
+secmlt.models.pytorch.base\_pytorch\_nn
+----------------------------------------------
+
+.. automodule:: secmlt.models.pytorch.base_pytorch_nn
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.models.pytorch.base\_pytorch\_trainer
+---------------------------------------------------
+
+.. automodule:: secmlt.models.pytorch.base_pytorch_trainer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: secmlt.models.pytorch
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/secmlt.models.rst b/docs/source/secmlt.models.rst
new file mode 100644
index 0000000..751bbde
--- /dev/null
+++ b/docs/source/secmlt.models.rst
@@ -0,0 +1,38 @@
+secmlt.models
+=====================
+
+Subpackages
+-----------
+
+.. toctree::
+ :maxdepth: 4
+
+ secmlt.models.data_processing
+ secmlt.models.pytorch
+
+Submodules
+----------
+
+secmlt.models.base\_model
+--------------------------------
+
+.. automodule:: secmlt.models.base_model
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.models.base\_trainer
+----------------------------------
+
+.. automodule:: secmlt.models.base_trainer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: secmlt.models
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/secmlt.optimization.rst b/docs/source/secmlt.optimization.rst
new file mode 100644
index 0000000..d5caa84
--- /dev/null
+++ b/docs/source/secmlt.optimization.rst
@@ -0,0 +1,53 @@
+secmlt.optimization
+===========================
+
+Submodules
+----------
+
+secmlt.optimization.constraints
+--------------------------------------
+
+.. automodule:: secmlt.optimization.constraints
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.optimization.gradient\_processing
+-----------------------------------------------
+
+.. automodule:: secmlt.optimization.gradient_processing
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.optimization.initializer
+--------------------------------------
+
+.. automodule:: secmlt.optimization.initializer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.optimization.optimizer\_factory
+---------------------------------------------
+
+.. automodule:: secmlt.optimization.optimizer_factory
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.optimization.random\_perturb
+------------------------------------------
+
+.. automodule:: secmlt.optimization.random_perturb
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: secmlt.optimization
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/secmlt.rst b/docs/source/secmlt.rst
new file mode 100644
index 0000000..60e3736
--- /dev/null
+++ b/docs/source/secmlt.rst
@@ -0,0 +1,25 @@
+secmlt
+==============
+
+Subpackages
+-----------
+
+.. toctree::
+ :maxdepth: 4
+
+ secmlt.adv
+ secmlt.data
+ secmlt.manipulations
+ secmlt.metrics
+ secmlt.models
+ secmlt.optimization
+ secmlt.trackers
+ secmlt.utils
+
+Module contents
+---------------
+
+.. automodule:: secmlt
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/secmlt.tests.rst b/docs/source/secmlt.tests.rst
new file mode 100644
index 0000000..2e32f92
--- /dev/null
+++ b/docs/source/secmlt.tests.rst
@@ -0,0 +1,85 @@
+secmlt.tests
+====================
+
+Submodules
+----------
+
+secmlt.tests.fixtures
+----------------------------
+
+.. automodule:: secmlt.tests.fixtures
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.tests.mocks
+-------------------------
+
+.. automodule:: secmlt.tests.mocks
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.tests.test\_aggregators
+-------------------------------------
+
+.. automodule:: secmlt.tests.test_aggregators
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.tests.test\_attacks
+---------------------------------
+
+.. automodule:: secmlt.tests.test_attacks
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.tests.test\_constants
+-----------------------------------
+
+.. automodule:: secmlt.tests.test_constants
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.tests.test\_metrics
+---------------------------------
+
+.. automodule:: secmlt.tests.test_metrics
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.tests.test\_trackers
+----------------------------------
+
+.. automodule:: secmlt.tests.test_trackers
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.tests.test\_trainer
+---------------------------------
+
+.. automodule:: secmlt.tests.test_trainer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.tests.test\_utils
+-------------------------------
+
+.. automodule:: secmlt.tests.test_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: secmlt.tests
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/secmlt.trackers.rst b/docs/source/secmlt.trackers.rst
new file mode 100644
index 0000000..2f2794a
--- /dev/null
+++ b/docs/source/secmlt.trackers.rst
@@ -0,0 +1,37 @@
+secmlt.trackers
+=======================
+
+Submodules
+----------
+
+secmlt.trackers.image\_trackers
+--------------------------------------
+
+.. automodule:: secmlt.trackers.image_trackers
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.trackers.tensorboard\_tracker
+-------------------------------------------
+
+.. automodule:: secmlt.trackers.tensorboard_tracker
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+secmlt.trackers.trackers
+-------------------------------
+
+.. automodule:: secmlt.trackers.trackers
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: secmlt.trackers
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/secmlt.utils.rst b/docs/source/secmlt.utils.rst
new file mode 100644
index 0000000..f6ab739
--- /dev/null
+++ b/docs/source/secmlt.utils.rst
@@ -0,0 +1,21 @@
+secmlt.utils
+====================
+
+Submodules
+----------
+
+secmlt.utils.tensor\_utils
+---------------------------------
+
+.. automodule:: secmlt.utils.tensor_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: secmlt.utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/examples/loaders/get_loaders.py b/examples/loaders/get_loaders.py
new file mode 100644
index 0000000..51b0fb9
--- /dev/null
+++ b/examples/loaders/get_loaders.py
@@ -0,0 +1,13 @@
+import torchvision
+from torch.utils.data import DataLoader, Subset
+
+
+def get_mnist_loader(path):
+ test_dataset = torchvision.datasets.MNIST(
+ transform=torchvision.transforms.ToTensor(),
+ train=False,
+ root=path,
+ download=True,
+ )
+ test_dataset = Subset(test_dataset, list(range(10)))
+ return DataLoader(test_dataset, batch_size=10, shuffle=False)
diff --git a/examples/mnist_example.py b/examples/mnist_example.py
index 566f9b0..eaf5dcc 100644
--- a/examples/mnist_example.py
+++ b/examples/mnist_example.py
@@ -1,65 +1,35 @@
-import os
-from secmlt.trackers.trackers import (
- LossTracker,
- PredictionTracker,
- PerturbationNormTracker,
-)
import torch
-import torchvision.datasets
-from torch.utils.data import DataLoader, Subset
-from robustbench.utils import download_gdrive
+from loaders.get_loaders import get_mnist_loader
+from models.mnist_net import get_mnist_model
from secmlt.adv.backends import Backends
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
from secmlt.adv.evasion.pgd import PGD
-from secmlt.adv.evasion.perturbation_models import PerturbationModels
-
from secmlt.metrics.classification import Accuracy
from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier
-
-
-class MNISTNet(torch.nn.Module):
- def __init__(self):
- super(MNISTNet, self).__init__()
- self.fc1 = torch.nn.Linear(784, 200)
- self.fc2 = torch.nn.Linear(200, 200)
- self.fc3 = torch.nn.Linear(200, 10)
-
- def forward(self, x):
- x = x.flatten(1)
- x = torch.relu(self.fc1(x))
- x = torch.relu(self.fc2(x))
- return self.fc3(x)
-
+from secmlt.trackers.trackers import (
+ LossTracker,
+ PerturbationNormTracker,
+ PredictionTracker,
+)
device = "cpu"
-net = MNISTNet()
-model_folder = "models/mnist"
-model_weights_path = os.path.join("mnist_model.pt")
-if not os.path.exists(model_weights_path):
- os.makedirs(model_folder, exist_ok=True)
- MODEL_ID = "12h1tXK442jHSE7wtsPpt8tU8f04R4nHM"
- download_gdrive(MODEL_ID, model_weights_path)
-
-model_weigths = torch.load(model_weights_path, map_location=device)
-net.eval()
-net.load_state_dict(model_weigths)
-test_dataset = torchvision.datasets.MNIST(
- transform=torchvision.transforms.ToTensor(), train=False, root=".", download=True
-)
-test_dataset = Subset(test_dataset, list(range(5)))
-test_data_loader = DataLoader(test_dataset, batch_size=5, shuffle=False)
+model_path = "example_data/models/mnist"
+dataset_path = "example_data/datasets/"
+net = get_mnist_model(model_path).to(device)
+test_loader = get_mnist_loader(dataset_path)
# Wrap model
model = BasePytorchClassifier(net)
# Test accuracy on original data
-accuracy = Accuracy()(model, test_data_loader)
-print("accuracy: ", accuracy)
+accuracy = Accuracy()(model, test_loader)
+print(f"test accuracy: {accuracy.item():.2f}")
# Create and run attack
epsilon = 0.3
num_steps = 10
step_size = 0.05
-perturbation_model = PerturbationModels.LINF
+perturbation_model = LpPerturbationModels.LINF
y_target = None
trackers = [
@@ -78,7 +48,8 @@ def forward(self, x):
backend=Backends.NATIVE,
trackers=trackers,
)
-native_adv_ds = native_attack(model, test_data_loader)
+
+native_adv_ds = native_attack(model, test_loader)
for tracker in trackers:
print(tracker.name)
@@ -98,7 +69,7 @@ def forward(self, x):
y_target=y_target,
backend=Backends.FOOLBOX,
)
-f_adv_ds = foolbox_attack(model, test_data_loader)
+f_adv_ds = foolbox_attack(model, test_loader)
# Test accuracy on adversarial examples
f_robust_accuracy = Accuracy()(model, f_adv_ds)
@@ -106,7 +77,7 @@ def forward(self, x):
native_data, native_labels = next(iter(native_adv_ds))
f_data, f_labels = next(iter(f_adv_ds))
-real_data, real_labels = next(iter(test_data_loader))
+real_data, real_labels = next(iter(test_loader))
distance = torch.linalg.norm(
native_data.detach().cpu().flatten(start_dim=1)
diff --git a/examples/mnist_example_random_inits.py b/examples/mnist_example_random_inits.py
index f5c1f5d..42232e5 100644
--- a/examples/mnist_example_random_inits.py
+++ b/examples/mnist_example_random_inits.py
@@ -1,12 +1,10 @@
-import os
-from secmlt.adv.evasion.aggregators.ensemble import FixedEpsilonEnsemble
import torch
-import torchvision.datasets
-from torch.utils.data import DataLoader, Subset
-from robustbench.utils import download_gdrive
+from loaders.get_loaders import get_mnist_loader
+from models.mnist_net import get_mnist_model
from secmlt.adv.backends import Backends
+from secmlt.adv.evasion.aggregators.ensemble import FixedEpsilonEnsemble
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
from secmlt.adv.evasion.pgd import PGD
-from secmlt.adv.evasion.perturbation_models import PerturbationModels
from secmlt.metrics.classification import (
Accuracy,
AccuracyEnsemble,
@@ -15,51 +13,24 @@
)
from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier
-
-class MNISTNet(torch.nn.Module):
- def __init__(self):
- super(MNISTNet, self).__init__()
- self.fc1 = torch.nn.Linear(784, 200)
- self.fc2 = torch.nn.Linear(200, 200)
- self.fc3 = torch.nn.Linear(200, 10)
-
- def forward(self, x):
- x = x.flatten(1)
- x = torch.relu(self.fc1(x))
- x = torch.relu(self.fc2(x))
- return self.fc3(x)
-
-
device = "cpu"
-net = MNISTNet()
-model_folder = "models/mnist"
-model_weights_path = os.path.join("mnist_model.pt")
-if not os.path.exists(model_weights_path):
- os.makedirs(model_folder, exist_ok=True)
- MODEL_ID = "12h1tXK442jHSE7wtsPpt8tU8f04R4nHM"
- download_gdrive(MODEL_ID, model_weights_path)
-
-model_weigths = torch.load(model_weights_path, map_location=device)
-net.eval()
-net.load_state_dict(model_weigths)
-test_dataset = torchvision.datasets.MNIST(
- transform=torchvision.transforms.ToTensor(), train=False, root=".", download=True
-)
-test_dataset = Subset(test_dataset, list(range(10)))
-test_data_loader = DataLoader(test_dataset, batch_size=10, shuffle=False)
+model_path = "example_data/models/mnist"
+dataset_path = "example_data/datasets/"
+net = get_mnist_model(model_path).to(device)
+test_loader = get_mnist_loader(dataset_path)
# Wrap model
model = BasePytorchClassifier(net)
# Test accuracy on original data
-accuracy = Accuracy()(model, test_data_loader)
+accuracy = Accuracy()(model, test_loader)
print(f"test accuracy: {accuracy.item():.2f}")
# Create and run attack
epsilon = 0.15
num_steps = 3
step_size = 0.05
-perturbation_model = PerturbationModels.LINF
+perturbation_model = LpPerturbationModels.LINF
y_target = None
pgd_attack = PGD(
@@ -72,9 +43,9 @@ def forward(self, x):
backend=Backends.NATIVE,
)
-multiple_attack_results = [pgd_attack(model, test_data_loader) for i in range(3)]
+multiple_attack_results = [pgd_attack(model, test_loader) for i in range(3)]
criterion = FixedEpsilonEnsemble(loss_fn=torch.nn.CrossEntropyLoss())
-best_advs = criterion(model, test_data_loader, multiple_attack_results)
+best_advs = criterion(model, test_loader, multiple_attack_results)
# Test accuracy on best adversarial examples
n_robust_accuracy = Accuracy()(model, best_advs)
diff --git a/examples/mnist_example_sequential.py b/examples/mnist_example_sequential.py
index c2d6eb2..2fc8466 100644
--- a/examples/mnist_example_sequential.py
+++ b/examples/mnist_example_sequential.py
@@ -1,67 +1,30 @@
-import os
-from secmlt.adv.evasion.modular_attack import ModularEvasionAttackFixedEps
-from secmlt.trackers.trackers import (
- LossTracker,
- PredictionTracker,
- PerturbationNormTracker,
-)
-import torch
-import torchvision.datasets
-from torch.utils.data import DataLoader, Subset
-from robustbench.utils import download_gdrive
+from loaders.get_loaders import get_mnist_loader
+from models.mnist_net import get_mnist_model
from secmlt.adv.backends import Backends
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
from secmlt.adv.evasion.pgd import PGD
-from secmlt.adv.evasion.perturbation_models import PerturbationModels
-
from secmlt.metrics.classification import Accuracy
from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier
-
-class MNISTNet(torch.nn.Module):
- def __init__(self):
- super(MNISTNet, self).__init__()
- self.fc1 = torch.nn.Linear(784, 200)
- self.fc2 = torch.nn.Linear(200, 200)
- self.fc3 = torch.nn.Linear(200, 10)
-
- def forward(self, x):
- x = x.flatten(1)
- x = torch.relu(self.fc1(x))
- x = torch.relu(self.fc2(x))
- return self.fc3(x)
-
-
device = "cpu"
-net = MNISTNet()
-model_folder = "models/mnist"
-model_weights_path = os.path.join("mnist_model.pt")
-if not os.path.exists(model_weights_path):
- os.makedirs(model_folder, exist_ok=True)
- MODEL_ID = "12h1tXK442jHSE7wtsPpt8tU8f04R4nHM"
- download_gdrive(MODEL_ID, model_weights_path)
-
-model_weigths = torch.load(model_weights_path, map_location=device)
-net.eval()
-net.load_state_dict(model_weigths)
-test_dataset = torchvision.datasets.MNIST(
- transform=torchvision.transforms.ToTensor(), train=False, root=".", download=True
-)
-test_dataset = Subset(test_dataset, list(range(5)))
-test_data_loader = DataLoader(test_dataset, batch_size=5, shuffle=False)
+model_path = "example_data/models/mnist"
+dataset_path = "example_data/datasets/"
+net = get_mnist_model(model_path).to(device)
+test_loader = get_mnist_loader(dataset_path)
# Wrap model
model = BasePytorchClassifier(net)
# Test accuracy on original data
-accuracy = Accuracy()(model, test_data_loader)
-print("accuracy: ", accuracy)
+accuracy = Accuracy()(model, test_loader)
+print(f"test accuracy: {accuracy.item():.2f}")
# Create and run attack
epsilon = 0.3
num_steps = 10
step_size = 0.05
-perturbation_model = PerturbationModels.LINF
+perturbation_model = LpPerturbationModels.LINF
y_target = None
attack_1 = PGD(
@@ -86,9 +49,9 @@ def forward(self, x):
attack_2.initializer = attack_1
-adv_ds = attack_2(model, test_data_loader)
+adv_ds = attack_2(model, test_loader)
# Test accuracy on adversarial examples
n_robust_accuracy = Accuracy()(model, adv_ds)
-print("robust accuracy: ", n_robust_accuracy)
+print("robust accuracy: ", n_robust_accuracy.item())
diff --git a/examples/mnist_example_tensorboard.py b/examples/mnist_example_tensorboard.py
index 3f14dd2..a51bb60 100644
--- a/examples/mnist_example_tensorboard.py
+++ b/examples/mnist_example_tensorboard.py
@@ -1,70 +1,39 @@
-import os
+from loaders.get_loaders import get_mnist_loader
+from models.mnist_net import get_mnist_model
+from secmlt.adv.backends import Backends
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
+from secmlt.adv.evasion.pgd import PGD
+from secmlt.metrics.classification import Accuracy
+from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier
from secmlt.trackers import (
GradientNormTracker,
+ GradientsTracker,
LossTracker,
- PredictionTracker,
PerturbationNormTracker,
- TensorboardTracker,
- GradientsTracker,
+ PredictionTracker,
SampleTracker,
ScoresTracker,
+ TensorboardTracker,
)
-import torch
-import torchvision.datasets
-from torch.utils.data import DataLoader, Subset
-from robustbench.utils import download_gdrive
-from secmlt.adv.backends import Backends
-from secmlt.adv.evasion.pgd import PGD
-from secmlt.adv.evasion.perturbation_models import PerturbationModels
-
-from secmlt.metrics.classification import Accuracy
-from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier
-
-
-class MNISTNet(torch.nn.Module):
- def __init__(self):
- super(MNISTNet, self).__init__()
- self.fc1 = torch.nn.Linear(784, 200)
- self.fc2 = torch.nn.Linear(200, 200)
- self.fc3 = torch.nn.Linear(200, 10)
-
- def forward(self, x):
- x = x.flatten(1)
- x = torch.relu(self.fc1(x))
- x = torch.relu(self.fc2(x))
- return self.fc3(x)
-
device = "cpu"
-net = MNISTNet()
-model_folder = "models/mnist"
-model_weights_path = os.path.join("mnist_model.pt")
-if not os.path.exists(model_weights_path):
- os.makedirs(model_folder, exist_ok=True)
- MODEL_ID = "12h1tXK442jHSE7wtsPpt8tU8f04R4nHM"
- download_gdrive(MODEL_ID, model_weights_path)
-
-model_weigths = torch.load(model_weights_path, map_location=device)
-net.eval()
-net.load_state_dict(model_weigths)
-test_dataset = torchvision.datasets.MNIST(
- transform=torchvision.transforms.ToTensor(), train=False, root=".", download=True
-)
-test_dataset = Subset(test_dataset, list(range(5)))
-test_data_loader = DataLoader(test_dataset, batch_size=5, shuffle=False)
+model_path = "example_data/models/mnist"
+dataset_path = "example_data/datasets/"
+net = get_mnist_model(model_path).to(device)
+test_loader = get_mnist_loader(dataset_path)
# Wrap model
model = BasePytorchClassifier(net)
# Test accuracy on original data
-accuracy = Accuracy()(model, test_data_loader)
-print(accuracy)
+accuracy = Accuracy()(model, test_loader)
+print(f"test accuracy: {accuracy.item():.2f}")
# Create and run attack
epsilon = 0.2
num_steps = 200
step_size = 0.01
-perturbation_model = PerturbationModels.LINF
+perturbation_model = LpPerturbationModels.LINF
y_target = None
trackers = [
@@ -77,7 +46,7 @@ def forward(self, x):
GradientsTracker(),
]
-tensorboard_tracker = TensorboardTracker("logs/pgd", trackers)
+tensorboard_tracker = TensorboardTracker("example_data/logs/pgd", trackers)
native_attack = PGD(
perturbation_model=perturbation_model,
@@ -89,4 +58,4 @@ def forward(self, x):
backend=Backends.NATIVE,
trackers=tensorboard_tracker,
)
-native_adv_ds = native_attack(model, test_data_loader)
+native_adv_ds = native_attack(model, test_loader)
diff --git a/examples/models/mnist_net.py b/examples/models/mnist_net.py
new file mode 100644
index 0000000..2bc6e2f
--- /dev/null
+++ b/examples/models/mnist_net.py
@@ -0,0 +1,33 @@
+from pathlib import Path
+
+import torch
+from robustbench.utils import download_gdrive
+
+
+class MNISTNet(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.fc1 = torch.nn.Linear(784, 200)
+ self.fc2 = torch.nn.Linear(200, 200)
+ self.fc3 = torch.nn.Linear(200, 10)
+
+ def forward(self, x):
+ x = x.flatten(1)
+ x = torch.relu(self.fc1(x))
+ x = torch.relu(self.fc2(x))
+ return self.fc3(x)
+
+
+def get_mnist_model(path):
+ net = MNISTNet()
+ path = Path(path)
+ model_weights_path = path / "mnist_model.pt"
+ if not model_weights_path.exists():
+ path.mkdir(exist_ok=True, parents=True)
+ model_id = "12h1tXK442jHSE7wtsPpt8tU8f04R4nHM"
+ download_gdrive(model_id, model_weights_path)
+
+ model_weigths = torch.load(model_weights_path, map_location="cpu")
+ net.eval()
+ net.load_state_dict(model_weigths)
+ return net
diff --git a/examples/run_evasion_attack.py b/examples/run_evasion_attack.py
index b36ef61..151f3d9 100644
--- a/examples/run_evasion_attack.py
+++ b/examples/run_evasion_attack.py
@@ -1,19 +1,20 @@
import torchvision.datasets
-from torch.utils.data import DataLoader, Subset
+from robustbench.utils import load_model
from secmlt.adv.backends import Backends
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
from secmlt.adv.evasion.pgd import PGD
-from secmlt.adv.evasion.perturbation_models import PerturbationModels
-
from secmlt.metrics.classification import Accuracy
from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier
-
-from robustbench.utils import load_model
+from torch.utils.data import DataLoader, Subset
net = load_model(model_name="Rony2019Decoupling", dataset="cifar10", threat_model="L2")
device = "cpu"
net.to(device)
test_dataset = torchvision.datasets.CIFAR10(
- transform=torchvision.transforms.ToTensor(), train=False, root=".", download=True
+ transform=torchvision.transforms.ToTensor(),
+ train=False,
+ root=".",
+ download=True,
)
test_dataset = Subset(test_dataset, list(range(5)))
test_data_loader = DataLoader(test_dataset, batch_size=5, shuffle=False)
@@ -29,7 +30,7 @@
epsilon = 0.5
num_steps = 10
step_size = 0.005
-perturbation_model = PerturbationModels.LINF
+perturbation_model = LpPerturbationModels.LINF
y_target = None
native_attack = PGD(
perturbation_model=perturbation_model,
diff --git a/examples/train_model.py b/examples/train_model.py
index 2b40fce..af4727d 100644
--- a/examples/train_model.py
+++ b/examples/train_model.py
@@ -1,37 +1,31 @@
+from pathlib import Path
+
import torch
import torchvision.datasets
-from torch.optim import Adam
-from torch.utils.data import DataLoader
-
+from models.mnist_net import MNISTNet
from secmlt.metrics.classification import Accuracy
from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier
from secmlt.models.pytorch.base_pytorch_trainer import BasePyTorchTrainer
+from torch.optim import Adam
+from torch.utils.data import DataLoader
-
-class MNISTNet(torch.nn.Module):
- def __init__(self):
- super(MNISTNet, self).__init__()
- self.fc1 = torch.nn.Linear(784, 200)
- self.fc2 = torch.nn.Linear(200, 200)
- self.fc3 = torch.nn.Linear(200, 10)
-
- def forward(self, x):
- x = x.flatten(1)
- x = torch.relu(self.fc1(x))
- x = torch.relu(self.fc2(x))
- return self.fc3(x)
-
-
+dataset_path = "example_data/datasets/"
device = "cpu"
net = MNISTNet()
net.to(device)
optimizer = Adam(lr=1e-3, params=net.parameters())
training_dataset = torchvision.datasets.MNIST(
- transform=torchvision.transforms.ToTensor(), train=True, root=".", download=True
+ transform=torchvision.transforms.ToTensor(),
+ train=True,
+ root=dataset_path,
+ download=True,
)
training_data_loader = DataLoader(training_dataset, batch_size=64, shuffle=False)
test_dataset = torchvision.datasets.MNIST(
- transform=torchvision.transforms.ToTensor(), train=False, root=".", download=True
+ transform=torchvision.transforms.ToTensor(),
+ train=False,
+ root=dataset_path,
+ download=True,
)
test_data_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
@@ -42,6 +36,7 @@ def forward(self, x):
# Test MNIST model
accuracy = Accuracy()(model, test_data_loader)
-print(accuracy)
+print("test accuracy: ", accuracy)
-torch.save(model.model.state_dict(), "mnist_model.pt")
+model_path = Path("example_data/models/mnist")
+torch.save(model.model.state_dict(), model_path / "mnist_model.pt")
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..748eaf7
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,4 @@
+[tool.pytest.ini_options]
+pythonpath = [
+ "src"
+]
diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644
index 0000000..4abaaa8
--- /dev/null
+++ b/requirements-dev.txt
@@ -0,0 +1,4 @@
+ruff
+pre-commit
+flake8-docstrings
+sphinx
diff --git a/requirements-test.txt b/requirements-test.txt
new file mode 100644
index 0000000..d56c563
--- /dev/null
+++ b/requirements-test.txt
@@ -0,0 +1,3 @@
+pytest
+pytest-cov
+foolbox
diff --git a/ruff.toml b/ruff.toml
new file mode 100644
index 0000000..995966a
--- /dev/null
+++ b/ruff.toml
@@ -0,0 +1,57 @@
+# same as black
+line-length = 88
+indent-width = 4
+
+# assume Python 3.10
+target-version = "py310"
+
+[lint]
+select = ["ALL", "D107"]
+ignore = [
+ "FA102", # dynamic type hinting
+ "S101", # use of assert
+ "ARG001", # unused function argument
+ "ARG002", # unused method argument
+ "ANN101", # annotation for self
+ "ANN102", # annotation of cls
+ "PLR0913", # too many arguments
+ "ANN002", # type annotations for args
+ "ANN003", # type annotations for kwargs
+ "ARG004", # unused kwargs
+ "PLW2901", # for loop variable overwritten
+ "SLF001", # use of private methods,
+ "FBT001", # boolean type positional argument
+ "FBT002", # boolean type default argument
+ ]
+
+[lint.per-file-ignores]
+"test_*.py" = [
+ "D", # force docstrings
+ "ANN", # annotations for tests
+ "PT006", # mark parametrize
+ ]
+"*/tests/*.py" = ["D104"]
+"setup.py" = ["D"]
+"examples/*" = [
+ "D", # docstrings
+ "INP001", # init file in folder
+ "ANN", # annotations
+ "T20" # print
+ ]
+"docs/*" = ["ALL"]
+
+[lint.pydocstyle]
+convention = "numpy"
+
+[format]
+# like black, use double quotes for strings.
+quote-style = "double"
+
+# like black, indent with spaces, rather than tabs.
+indent-style = "space"
+
+# like black, respect magic trailing commas.
+skip-magic-trailing-comma = false
+
+# like black, automatically detect the appropriate line ending.
+line-ending = "auto"
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 6036d43..0000000
--- a/setup.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from setuptools import setup, find_packages
-import pathlib
-
-here = pathlib.Path(__file__).parent.resolve()
-
-# Get the long description from the README file
-long_description = (here / "README.md").read_text(encoding="utf-8")
-
-CLASSIFIERS = """\
-Development Status :: 3 - Alpha
-Intended Audience :: Science/Research
-Intended Audience :: Developers
-License :: OSI Approved
-Programming Language :: Python
-Programming Language :: Python :: 3
-Programming Language :: Python :: 3.6
-Programming Language :: Python :: 3.7
-Programming Language :: Python :: 3.8
-Programming Language :: Python :: 3.9
-Programming Language :: Python :: 3.10
-Programming Language :: Python :: Implementation :: PyPy
-Topic :: Software Development
-Topic :: Scientific/Engineering
-"""
-
-setup(
- name="secml-torch",
- version="1.0.0",
- description="SecML-Torch Library",
- classifiers=[_f for _f in CLASSIFIERS.split("\n") if _f],
- long_description=long_description,
- long_description_context_type="text/markdown",
- package_dir={"": "src"},
- packages=find_packages(
- where="src", exclude=["*.tests", "*.tests.*", "tests.*", "tests"]
- ),
- include_package_data=True,
- url="",
- license="MIT",
- author="Maura Pintor, Luca Demetrio",
- author_email="maura.pintor@unica.it, luca.demetrio@unige.it",
- install_requires=["torch>=1.4,!=1.5.*", "torchvision>=0.5,!=0.6.*"],
- extras_require={"foolbox": ["foolbox>=3.3.0"], "tensorboard": ["tensorboard"]},
- python_requires=">=3.7",
-)
diff --git a/src/__init__.py b/src/__init__.py
index e69de29..6e03199 100644
--- a/src/__init__.py
+++ b/src/__init__.py
@@ -0,0 +1 @@
+# noqa: D104
diff --git a/src/secmlt/__init__.py b/src/secmlt/__init__.py
index e69de29..6e03199 100644
--- a/src/secmlt/__init__.py
+++ b/src/secmlt/__init__.py
@@ -0,0 +1 @@
+# noqa: D104
diff --git a/src/secmlt/adv/__init__.py b/src/secmlt/adv/__init__.py
index e69de29..6e76442 100644
--- a/src/secmlt/adv/__init__.py
+++ b/src/secmlt/adv/__init__.py
@@ -0,0 +1 @@
+"""Adversarial functionalities."""
diff --git a/src/secmlt/adv/backends.py b/src/secmlt/adv/backends.py
index 7431a01..faa33fe 100644
--- a/src/secmlt/adv/backends.py
+++ b/src/secmlt/adv/backends.py
@@ -1,3 +1,8 @@
+"""Available backends for running adversarial attacks."""
+
+
class Backends:
+ """Available backends."""
+
FOOLBOX = "foolbox"
NATIVE = "native"
diff --git a/src/secmlt/adv/evasion/__init__.py b/src/secmlt/adv/evasion/__init__.py
index 1b3328b..0d31991 100644
--- a/src/secmlt/adv/evasion/__init__.py
+++ b/src/secmlt/adv/evasion/__init__.py
@@ -1,6 +1,6 @@
-try:
- import foolbox
-except ImportError:
- pass # foolbox is an extra component and requires the foolbox library
-else:
- from .foolbox_attacks import *
+"""Evasion attack functionalities."""
+
+import importlib
+
+if importlib.util.find_spec("foolbox", None) is not None:
+ from .foolbox_attacks import * # noqa: F403
diff --git a/src/secmlt/adv/evasion/aggregators/__init__.py b/src/secmlt/adv/evasion/aggregators/__init__.py
new file mode 100644
index 0000000..818d605
--- /dev/null
+++ b/src/secmlt/adv/evasion/aggregators/__init__.py
@@ -0,0 +1 @@
+"""Aggregator functions for multiple attacks or multiple attack runs."""
diff --git a/src/secmlt/adv/evasion/aggregators/ensemble.py b/src/secmlt/adv/evasion/aggregators/ensemble.py
index 247a1f4..be4671f 100644
--- a/src/secmlt/adv/evasion/aggregators/ensemble.py
+++ b/src/secmlt/adv/evasion/aggregators/ensemble.py
@@ -1,12 +1,40 @@
+"""Ensemble metrics for getting best results across multiple attacks."""
+
from abc import ABC, abstractmethod
-from secmlt.adv.evasion.perturbation_models import PerturbationModels
-from secmlt.utils.tensor_utils import atleast_kd
+
import torch
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
+from secmlt.models.base_model import BaseModel
+from secmlt.utils.tensor_utils import atleast_kd
from torch.utils.data import DataLoader, TensorDataset
class Ensemble(ABC):
- def __call__(self, model, data_loader, adv_loaders):
+ """Abstract class for creating an ensemble metric."""
+
+ def __call__(
+ self,
+ model: BaseModel,
+ data_loader: DataLoader,
+ adv_loaders: list[DataLoader],
+ ) -> DataLoader[torch.Tuple[torch.Tensor]]:
+ """
+ Get the worst-case of the metric with the given implemented criterion.
+
+ Parameters
+ ----------
+ model : BaseModel
+ Model to use for predictions.
+ data_loader : DataLoader
+ Test dataloader.
+ adv_loaders : list[DataLoader]
+ List of dataloaders returned by multiple attacks.
+
+ Returns
+ -------
+ DataLoader[torch.Tuple[torch.Tensor]]
+ The worst-case metric computed on the multiple attacks.
+ """
best_x_adv_data = []
original_labels = []
adv_loaders = [iter(a) for a in adv_loaders]
@@ -14,49 +42,170 @@ def __call__(self, model, data_loader, adv_loaders):
best_x_adv = samples.clone()
for adv_loader in adv_loaders:
x_adv, _ = next(adv_loader)
- best_x_adv = self.get_best(model, samples, labels, x_adv, best_x_adv)
+ best_x_adv = self._get_best(model, samples, labels, x_adv, best_x_adv)
best_x_adv_data.append(best_x_adv)
original_labels.append(labels)
best_x_adv_dataset = TensorDataset(
- torch.vstack(best_x_adv_data), torch.hstack(original_labels)
+ torch.vstack(best_x_adv_data),
+ torch.hstack(original_labels),
)
- best_x_adv_loader = DataLoader(
- best_x_adv_dataset, batch_size=data_loader.batch_size
+ return DataLoader(
+ best_x_adv_dataset,
+ batch_size=data_loader.batch_size,
)
- return best_x_adv_loader
@abstractmethod
- def get_best(self, model, samples, labels, x_adv): ...
+ def _get_best(
+ self,
+ model: BaseModel,
+ samples: torch.Tensor,
+ labels: torch.Tensor,
+ x_adv: torch.Tensor,
+ best_x_adv: torch.Tensor,
+ ) -> torch.Tensor:
+ """
+ Get the best result from multiple attacks.
+
+ Parameters
+ ----------
+ model : BaseModel
+ Model to use to predict.
+ samples : torch.Tensor
+ Input samples.
+ labels : torch.Tensor
+ Labels for the samples.
+ x_adv : torch.Tensor
+ Adversarial examples.
+ best_x_adv : torch.Tensor
+ Best adversarial examples found so far.
+
+ Returns
+ -------
+ torch.Tensor
+ Best adversarial examples between the current x_adv
+ and the ones already tested on the given model.
+ """
+ ...
class MinDistanceEnsemble(Ensemble):
- def get_best(self, model, samples, labels, x_adv, best_x_adv):
+ """Wrapper for ensembling results of multiple minimum-distance attacks."""
+
+ def __init__(self, perturbation_model: str) -> None:
+ """
+ Create MinDistance Ensemble.
+
+ Parameters
+ ----------
+ perturbation_model : str
+ Perturbation model to use to compute the distance.
+ """
+ self.perturbation_model = perturbation_model
+
+ def _get_best(
+ self,
+ model: BaseModel,
+ samples: torch.Tensor,
+ labels: torch.Tensor,
+ x_adv: torch.Tensor,
+ best_x_adv: torch.Tensor,
+ ) -> torch.Tensor:
+ """
+ Get the adversarial examples with minimal perturbation.
+
+ Parameters
+ ----------
+ model : BaseModel
+ Model to use to predict.
+ samples : torch.Tensor
+ Input samples.
+ labels : torch.Tensor
+ Labels for the samples.
+ x_adv : torch.Tensor
+ Adversarial examples.
+ best_x_adv : torch.Tensor
+ Best adversarial examples found so far.
+
+ Returns
+ -------
+ torch.Tensor
+ The minimum-distance adversarial examples found so far.
+ """
preds = model(x_adv).argmax(dim=1)
is_adv = preds.type(labels.dtype) == labels
norms = (
(samples - x_adv)
.flatten(start_dim=1)
- .norm(PerturbationModels.get_p(self.perturbation_model), dim=-1)
+ .norm(LpPerturbationModels.get_p(self.perturbation_model), dim=-1)
)
best_adv_norms = (
(samples - best_x_adv)
.flatten(start_dim=1)
- .norm(PerturbationModels.get_p(self.perturbation_model))
+ .norm(LpPerturbationModels.get_p(self.perturbation_model))
)
is_best = torch.logical_and(norms < best_adv_norms, is_adv)
- best_x_adv = torch.where(
- atleast_kd(is_best, len(x_adv.shape)), x_adv, best_x_adv
+
+ return torch.where(
+ atleast_kd(is_best, len(x_adv.shape)),
+ x_adv,
+ best_x_adv,
)
- return best_x_adv
class FixedEpsilonEnsemble(Ensemble):
- def __init__(self, loss_fn, maximize=True, y_target=None) -> None:
+ """Wrapper for ensembling results of multiple fixed-epsilon attacks."""
+
+ def __init__(
+ self,
+ loss_fn: torch.nn.Module,
+ maximize: bool = True,
+ y_target: torch.Tensor | None = None,
+ ) -> None:
+ """
+ Create fixed epsilon ensemble.
+
+ Parameters
+ ----------
+ loss_fn : torch.nn.Module
+ Loss function to maximize (or minimize).
+ maximize : bool, optional
+ If True maximizes the loss otherwise it minimizes it, by default True.
+ y_target : torch.Tensor | None, optional
+ Target label for targeted attacks, None for untargeted, by default None.
+ """
self.maximize = maximize
self.loss_fn = loss_fn
self.y_target = y_target
- def get_best(self, model, samples, labels, x_adv, best_x_adv):
+ def _get_best(
+ self,
+ model: BaseModel,
+ samples: torch.Tensor,
+ labels: torch.Tensor,
+ x_adv: torch.Tensor,
+ best_x_adv: torch.Tensor,
+ ) -> torch.Tensor:
+ """
+ Get the adversarial examples with maximum (or minimum) loss.
+
+ Parameters
+ ----------
+ model : BaseModel
+ Model to use to predict.
+ samples : torch.Tensor
+ Input samples.
+ labels : torch.Tensor
+ Labels for the samples.
+ x_adv : torch.Tensor
+ Adversarial examples.
+ best_x_adv : torch.Tensor
+ Best adversarial examples found so far.
+
+ Returns
+ -------
+ torch.Tensor
+ The maximum-loss adversarial examples found so far.
+ """
if self.y_target is None:
targets = labels
else:
@@ -67,7 +216,8 @@ def get_best(self, model, samples, labels, x_adv, best_x_adv):
is_best = loss > best_adv_loss
else:
is_best = loss < best_adv_loss
- best_x_adv = torch.where(
- atleast_kd(is_best, len(x_adv.shape)), x_adv, best_x_adv
+ return torch.where(
+ atleast_kd(is_best, len(x_adv.shape)),
+ x_adv,
+ best_x_adv,
)
- return best_x_adv
diff --git a/src/secmlt/adv/evasion/base_evasion_attack.py b/src/secmlt/adv/evasion/base_evasion_attack.py
index c0fa9ca..918ab29 100644
--- a/src/secmlt/adv/evasion/base_evasion_attack.py
+++ b/src/secmlt/adv/evasion/base_evasion_attack.py
@@ -1,66 +1,138 @@
+"""Base classes for implementing attacks and wrapping backends."""
+
+import importlib
from abc import abstractmethod
-from typing import Callable, List, Union
+from typing import Literal
import torch
-from torch.utils.data import DataLoader, TensorDataset
-
from secmlt.adv.backends import Backends
from secmlt.models.base_model import BaseModel
+from torch.utils.data import DataLoader, TensorDataset
# lazy evaluation to avoid circular imports
TRACKER_TYPE = "secmlt.trackers.tracker.Tracker"
class BaseEvasionAttackCreator:
+ """Generic creator for attacks."""
+
@classmethod
- def get_implementation(cls, backend: str) -> Callable:
+ def get_implementation(cls, backend: str) -> "BaseEvasionAttack":
+ """
+ Get the implementation of the attack with the given backend.
+
+ Parameters
+ ----------
+ backend : str
+ The backend for the attack. See secmlt.adv.backends for
+ available backends.
+
+ Returns
+ -------
+ BaseEvasionAttack
+ Attack implementation.
+ """
implementations = {
Backends.FOOLBOX: cls.get_foolbox_implementation,
- Backends.NATIVE: cls.get_native_implementation,
+ Backends.NATIVE: cls._get_native_implementation,
}
cls.check_backend_available(backend)
return implementations[backend]()
@classmethod
def check_backend_available(cls, backend: str) -> bool:
+ """
+ Check if a given backend is available for the attack.
+
+ Parameters
+ ----------
+ backend : str
+ Backend string.
+
+ Returns
+ -------
+ bool
+ True if the given backend is implemented.
+
+ Raises
+ ------
+ NotImplementedError
+ Raises NotImplementedError if the requested backend is not in
+ the list of the possible backends (check secmlt.adv.backends).
+ """
if backend in cls.get_backends():
return True
- raise NotImplementedError("Unsupported or not-implemented backend.")
+ msg = "Unsupported or not-implemented backend."
+ raise NotImplementedError(msg)
@classmethod
- def get_foolbox_implementation(cls):
- try:
- import foolbox
- except ImportError:
- raise ImportError("Foolbox extra not installed.")
- else:
+ def get_foolbox_implementation(cls) -> "BaseEvasionAttack":
+ """
+ Get the Foolbox implementation of the attack.
+
+ Returns
+ -------
+ BaseEvasionAttack
+ Foolbox implementation of the attack.
+
+ Raises
+ ------
+ ImportError
+ Raises ImportError if Foolbox extra is not installed.
+ """
+ if importlib.util.find_spec("foolbox", None) is not None:
return cls._get_foolbox_implementation()
+ msg = "Foolbox extra not installed."
+ raise ImportError(msg)
@staticmethod
- def _get_foolbox_implementation():
- raise NotImplementedError("Foolbox implementation not available.")
+ def _get_foolbox_implementation() -> "BaseEvasionAttack":
+ msg = "Foolbox implementation not available."
+ raise NotImplementedError(msg)
@staticmethod
- def get_native_implementation():
- raise NotImplementedError("Native implementation not available.")
+ def _get_native_implementation() -> "BaseEvasionAttack":
+ msg = "Native implementation not available."
+ raise NotImplementedError(msg)
@staticmethod
@abstractmethod
- def get_backends():
- raise NotImplementedError("Backends should be specified in inherited class.")
+ def get_backends() -> set[str]:
+ """
+ Get the available backends for the given attack.
+
+ Returns
+ -------
+ set[str]
+ Set of implemented backends available for the attack.
+
+ Raises
+ ------
+ NotImplementedError
+ Raises NotImplementedError if not implemented in the inherited class.
+ """
+ msg = "Backends should be specified in inherited class."
+ raise NotImplementedError(msg)
class BaseEvasionAttack:
+ """Base class for evasion attacks."""
+
def __call__(self, model: BaseModel, data_loader: DataLoader) -> DataLoader:
"""
Compute the attack against the model, using the input data.
- It returns a dataset with the adversarial examples and the original labels
- :param model: model to test
- :type model: BaseModel
- :param data_loader: input data
- :type data_loader: DataLoader
- :return: Data loader with adversarial examples and original labels
- :rtype: DataLoader
+
+ Parameters
+ ----------
+ model : BaseModel
+ Model to test.
+ data_loader : DataLoader
+ Test dataloader.
+
+ Returns
+ -------
+ DataLoader
+ Dataloader with adversarial examples and original labels.
"""
adversarials = []
original_labels = []
@@ -71,45 +143,87 @@ def __call__(self, model: BaseModel, data_loader: DataLoader) -> DataLoader:
adversarials = torch.vstack(adversarials)
original_labels = torch.hstack(original_labels)
adversarial_dataset = TensorDataset(adversarials, original_labels)
- adversarial_loader = DataLoader(
- adversarial_dataset, batch_size=data_loader.batch_size
+ return DataLoader(
+ adversarial_dataset,
+ batch_size=data_loader.batch_size,
)
- return adversarial_loader
@property
- def trackers(self) -> Union[List[TRACKER_TYPE], None]:
+ def trackers(self) -> list[TRACKER_TYPE] | None:
+ """
+ Get the trackers set for this attack.
+
+ Returns
+ -------
+ list[TRACKER_TYPE] | None
+ Trackers set for the attack, if any.
+ """
return self._trackers
@trackers.setter
- def trackers(self, trackers: Union[List[TRACKER_TYPE], None] = None) -> None:
- if self.trackers_allowed():
+ def trackers(self, trackers: list[TRACKER_TYPE] | None = None) -> None:
+ if self._trackers_allowed():
if trackers is not None and not isinstance(trackers, list):
trackers = [trackers]
self._trackers = trackers
elif trackers is not None:
- raise NotImplementedError("Trackers not implemented for this attack.")
+ msg = "Trackers not implemented for this attack."
+ raise NotImplementedError(msg)
+ @classmethod
@abstractmethod
- def trackers_allowed(cls):
+ def _trackers_allowed(cls) -> Literal[False]:
return False
@classmethod
def check_perturbation_model_available(cls, perturbation_model: str) -> bool:
+ """
+ Check whether the given perturbation model is available for the attack.
+
+ Parameters
+ ----------
+ perturbation_model : str
+ A perturbation model.
+
+ Returns
+ -------
+ bool
+ True if the attack implements the given perturbation model.
+
+ Raises
+ ------
+ NotImplementedError
+ Raises NotImplementedError if not implemented in the inherited class.
+ """
if perturbation_model in cls.get_perturbation_models():
return
- raise NotImplementedError("Unsupported or not-implemented perturbation model.")
+ msg = "Unsupported or not-implemented perturbation model."
+ raise NotImplementedError(msg)
@staticmethod
@abstractmethod
- def get_perturbation_models():
- raise NotImplementedError(
- "Perturbation models should be specified in inherited class."
- )
-
- @abstractmethod
- def _run(self, model: BaseModel, samples: torch.Tensor, labels: torch.Tensor):
+ def get_perturbation_models() -> set[str]:
"""
- Compute the attack against the model, using the input data (batch).
- It returns the batch of adversarial examples and the perturbation delta.
+ Check the perturbation models implemented for the given attack.
+
+ Returns
+ -------
+ set[str]
+ The set of perturbation models for which the attack is implemented.
+
+ Raises
+ ------
+ NotImplementedError
+ Raises NotImplementedError if not implemented in the inherited class.
"""
+ msg = "Perturbation models should be specified in inherited class."
+ raise NotImplementedError(msg)
+
+ @abstractmethod
+ def _run(
+ self,
+ model: BaseModel,
+ samples: torch.Tensor,
+ labels: torch.Tensor,
+ ) -> torch.Tensor:
...
diff --git a/src/secmlt/adv/evasion/foolbox_attacks/__init__.py b/src/secmlt/adv/evasion/foolbox_attacks/__init__.py
index d5313cb..ac84718 100644
--- a/src/secmlt/adv/evasion/foolbox_attacks/__init__.py
+++ b/src/secmlt/adv/evasion/foolbox_attacks/__init__.py
@@ -1,6 +1,6 @@
-try:
- import foolbox
-except ImportError:
- pass # foolbox is an extra component and requires the foolbox library
-else:
- from .foolbox_pgd import *
+"""Wrappers of Foolbox library for evasion attacks."""
+
+import importlib
+
+if importlib.util.find_spec("foolbox", None) is not None:
+ from .foolbox_pgd import * # noqa: F403
diff --git a/src/secmlt/adv/evasion/foolbox_attacks/foolbox_base.py b/src/secmlt/adv/evasion/foolbox_attacks/foolbox_base.py
index 21665e2..b3a7989 100644
--- a/src/secmlt/adv/evasion/foolbox_attacks/foolbox_base.py
+++ b/src/secmlt/adv/evasion/foolbox_attacks/foolbox_base.py
@@ -1,23 +1,46 @@
-from typing import Optional, Type, Union
-from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier
-from secmlt.models.base_model import BaseModel
+"""Generic wrapper for Foolbox evasion attacks."""
+
+from typing import Literal
+
+import torch
from foolbox.attacks.base import Attack
-from foolbox.models.pytorch import PyTorchModel
from foolbox.criteria import Misclassification, TargetedMisclassification
-from secmlt.adv.evasion.base_evasion_attack import BaseEvasionAttack, TRACKER_TYPE
-import torch
+from foolbox.models.pytorch import PyTorchModel
+from secmlt.adv.evasion.base_evasion_attack import TRACKER_TYPE, BaseEvasionAttack
+from secmlt.models.base_model import BaseModel
+from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier
class BaseFoolboxEvasionAttack(BaseEvasionAttack):
+ """Generic wrapper for Foolbox Evasion attacks."""
+
def __init__(
self,
- foolbox_attack: Attack,
+ foolbox_attack: type[Attack],
epsilon: float = torch.inf,
- y_target: Optional[int] = None,
+ y_target: int | None = None,
lb: float = 0.0,
ub: float = 1.0,
- trackers: Union[Type[TRACKER_TYPE], None] = None,
+ trackers: type[TRACKER_TYPE] | None = None,
) -> None:
+ """
+ Wrap Foolbox attacks.
+
+ Parameters
+ ----------
+ foolbox_attack : Type[Attack]
+ Foolbox attack class to wrap.
+ epsilon : float, optional
+ Perturbation constraint, by default torch.inf.
+ y_target : int | None, optional
+ Target label for the attack, None if untargeted, by default None.
+ lb : float, optional
+ Lower bound of the input space, by default 0.0.
+ ub : float, optional
+ Upper bound of the input space, by default 1.0.
+ trackers : type[TRACKER_TYPE] | None, optional
+ Trackers for the attack (unallowed in Foolbox), by default None.
+ """
self.foolbox_attack = foolbox_attack
self.lb = lb
self.ub = ub
@@ -27,16 +50,19 @@ def __init__(
super().__init__()
@classmethod
- def trackers_allowed(cls):
+ def _trackers_allowed(cls) -> Literal[False]:
return False
def _run(
- self, model: BaseModel, samples: torch.Tensor, labels: torch.Tensor
+ self,
+ model: BaseModel,
+ samples: torch.Tensor,
+ labels: torch.Tensor,
) -> torch.Tensor:
- # TODO get here the correct model if not pytorch
if not isinstance(model, BasePytorchClassifier):
- raise NotImplementedError("Model type not supported.")
- device = model.get_device()
+ msg = "Model type not supported."
+ raise NotImplementedError(msg)
+ device = model._get_device()
foolbox_model = PyTorchModel(model.model, (self.lb, self.ub), device=device)
if self.y_target is None:
criterion = Misclassification(labels)
diff --git a/src/secmlt/adv/evasion/foolbox_attacks/foolbox_pgd.py b/src/secmlt/adv/evasion/foolbox_attacks/foolbox_pgd.py
index d67c8ef..7fc0100 100644
--- a/src/secmlt/adv/evasion/foolbox_attacks/foolbox_pgd.py
+++ b/src/secmlt/adv/evasion/foolbox_attacks/foolbox_pgd.py
@@ -1,15 +1,17 @@
-from typing import Optional
-from secmlt.adv.evasion.foolbox_attacks.foolbox_base import BaseFoolboxEvasionAttack
-from secmlt.adv.evasion.perturbation_models import PerturbationModels
+"""Wrapper of the PGD attack implemented in Foolbox."""
from foolbox.attacks.projected_gradient_descent import (
L1ProjectedGradientDescentAttack,
L2ProjectedGradientDescentAttack,
LinfProjectedGradientDescentAttack,
)
+from secmlt.adv.evasion.foolbox_attacks.foolbox_base import BaseFoolboxEvasionAttack
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
class PGDFoolbox(BaseFoolboxEvasionAttack):
+ """Wrapper of the Foolbox implementation of the PGD attack."""
+
def __init__(
self,
perturbation_model: str,
@@ -17,20 +19,44 @@ def __init__(
num_steps: int,
step_size: float,
random_start: bool,
- y_target: Optional[int] = None,
+ y_target: int | None = None,
lb: float = 0.0,
ub: float = 1.0,
- **kwargs
+ **kwargs,
) -> None:
+ """
+ Create PGD attack with Foolbox backend.
+
+ Parameters
+ ----------
+ perturbation_model : str
+ Perturbation model for the attack.
+ epsilon : float
+ Maximum perturbation allowed.
+ num_steps : int
+ Number of iterations for the attack.
+ step_size : float
+ Attack step size.
+ random_start : bool
+ True for randomly initializing the perturbation.
+ y_target : int | None, optional
+ Target label for the attack, None for untargeted, by default None.
+ lb : float, optional
+ Lower bound of the input space, by default 0.0.
+ ub : float, optional
+ Upper bound of the input space, by default 1.0.
+ """
perturbation_models = {
- PerturbationModels.L1: L1ProjectedGradientDescentAttack,
- PerturbationModels.L2: L2ProjectedGradientDescentAttack,
- PerturbationModels.LINF: LinfProjectedGradientDescentAttack,
+ LpPerturbationModels.L1: L1ProjectedGradientDescentAttack,
+ LpPerturbationModels.L2: L2ProjectedGradientDescentAttack,
+ LpPerturbationModels.LINF: LinfProjectedGradientDescentAttack,
}
foolbox_attack_cls = perturbation_models.get(perturbation_model)
foolbox_attack = foolbox_attack_cls(
- abs_stepsize=step_size, steps=num_steps, random_start=random_start
+ abs_stepsize=step_size,
+ steps=num_steps,
+ random_start=random_start,
)
super().__init__(
@@ -42,5 +68,17 @@ def __init__(
)
@staticmethod
- def get_perturbation_models():
- return {PerturbationModels.L1, PerturbationModels.L2, PerturbationModels.LINF}
+ def get_perturbation_models() -> set[str]:
+ """
+ Check the perturbation models implemented for this attack.
+
+ Returns
+ -------
+ set[str]
+ The list of perturbation models implemented for this attack.
+ """
+ return {
+ LpPerturbationModels.L1,
+ LpPerturbationModels.L2,
+ LpPerturbationModels.LINF,
+ }
diff --git a/src/secmlt/adv/evasion/modular_attack.py b/src/secmlt/adv/evasion/modular_attack.py
index 7ab9343..c9da2bb 100644
--- a/src/secmlt/adv/evasion/modular_attack.py
+++ b/src/secmlt/adv/evasion/modular_attack.py
@@ -1,12 +1,11 @@
-from typing import Union, List, Type
-from secmlt.adv.evasion.perturbation_models import PerturbationModels
-from secmlt.utils.tensor_utils import atleast_kd
+"""Implementation of modular iterative attacks with customizable components."""
-import torch.nn
-from torch.nn import CrossEntropyLoss
-from torch.optim import Optimizer
from functools import partial
+from typing import Literal
+
+import torch.nn
from secmlt.adv.evasion.base_evasion_attack import BaseEvasionAttack
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
from secmlt.manipulations.manipulation import Manipulation
from secmlt.models.base_model import BaseModel
from secmlt.optimization.constraints import Constraint
@@ -14,6 +13,9 @@
from secmlt.optimization.initializer import Initializer
from secmlt.optimization.optimizer_factory import OptimizerFactory
from secmlt.trackers.trackers import Tracker
+from secmlt.utils.tensor_utils import atleast_kd
+from torch.nn import CrossEntropyLoss
+from torch.optim import Optimizer
CE_LOSS = "ce_loss"
LOGIT_LOSS = "logit_loss"
@@ -24,18 +26,50 @@
class ModularEvasionAttackFixedEps(BaseEvasionAttack):
+ """Modular evasion attack for fixed-epsilon attacks."""
+
def __init__(
self,
- y_target: Union[int, None],
+ y_target: int | None,
num_steps: int,
step_size: float,
- loss_function: Union[str, torch.nn.Module],
- optimizer_cls: Union[str, Type[partial[Optimizer]]],
+ loss_function: str | torch.nn.Module,
+ optimizer_cls: str | partial[Optimizer],
manipulation_function: Manipulation,
initializer: Initializer,
gradient_processing: GradientProcessing,
- trackers: Union[List[Type[Tracker]], Type[Tracker]] = None,
+ trackers: list[Tracker] | Tracker | None = None,
) -> None:
+ """
+ Create modular evasion attack.
+
+ Parameters
+ ----------
+ y_target : int | None
+ Target label for the attack, None for untargeted.
+ num_steps : int
+ Number of iterations for the attack.
+ step_size : float
+ Attack step size.
+ loss_function : str | torch.nn.Module
+ Loss function to minimize.
+ optimizer_cls : str | partial[Optimizer]
+ Algorithm for solving the attack optimization problem.
+ manipulation_function : Manipulation
+ Manipulation function to perturb the inputs.
+ initializer : Initializer
+ Initialization for the perturbation delta.
+ gradient_processing : GradientProcessing
+ Gradient transformation function.
+ trackers : list[Tracker] | Tracker | None, optional
+ Trackers for logging, by default None.
+
+ Raises
+ ------
+ ValueError
+ Raises ValueError if the loss is not in allowed
+ list of loss functions.
+ """
self.y_target = y_target
self.num_steps = num_steps
self.step_size = step_size
@@ -44,15 +78,17 @@ def __init__(
if loss_function in LOSS_FUNCTIONS:
self.loss_function = LOSS_FUNCTIONS[loss_function](reduction="none")
else:
- raise ValueError(
- f"{loss_function} not in list of init from string. Use one among {LOSS_FUNCTIONS.values()}"
+ msg = (
+ f"Loss function not found. Use one among {LOSS_FUNCTIONS.values()}"
)
+ raise ValueError(msg)
else:
self.loss_function = loss_function
if isinstance(optimizer_cls, str):
optimizer_cls = OptimizerFactory.create_from_name(
- optimizer_cls, lr=step_size
+ optimizer_cls,
+ lr=step_size,
)
self.optimizer_cls = optimizer_cls
@@ -64,17 +100,26 @@ def __init__(
super().__init__()
@classmethod
- def get_perturbation_models(self):
- return {PerturbationModels.L2, PerturbationModels.LINF}
+ def get_perturbation_models(cls) -> set[str]:
+ """
+ Check if a given perturbation model is implemented.
+
+ Returns
+ -------
+ set[str]
+ Set of perturbation models available for this attack.
+ """
+ return {LpPerturbationModels.L2, LpPerturbationModels.LINF}
@classmethod
- def trackers_allowed(cls):
+ def _trackers_allowed(cls) -> Literal[True]:
return True
- def init_perturbation_constraints(self) -> List[Constraint]:
- raise NotImplementedError("Must be implemented accordingly")
+ def _init_perturbation_constraints(self) -> list[Constraint]:
+ msg = "Must be implemented accordingly"
+ raise NotImplementedError(msg)
- def create_optimizer(self, delta: torch.Tensor, **kwargs) -> Optimizer:
+ def _create_optimizer(self, delta: torch.Tensor, **kwargs) -> Optimizer:
return self.optimizer_cls([delta], **kwargs)
def _run(
@@ -100,7 +145,7 @@ def _run(
delta = self.initializer(samples.data)
delta.requires_grad = True
- optimizer = self.create_optimizer(delta, **optim_kwargs)
+ optimizer = self._create_optimizer(delta, **optim_kwargs)
x_adv, delta = self.manipulation_function(samples, delta)
x_adv.data, delta.data = self.manipulation_function(samples.data, delta.data)
best_losses = torch.zeros(samples.shape[0]).fill_(torch.inf)
@@ -117,7 +162,8 @@ def _run(
delta.grad.data = self.gradient_processing(delta.grad.data)
optimizer.step()
x_adv.data, delta.data = self.manipulation_function(
- samples.data, delta.data
+ samples.data,
+ delta.data,
)
if self.trackers is not None:
for tracker in self.trackers:
@@ -137,7 +183,9 @@ def _run(
best_delta.data,
)
best_losses.data = torch.where(
- losses < best_losses, losses.data, best_losses.data
+ losses < best_losses,
+ losses.data,
+ best_losses.data,
)
x_adv, _ = self.manipulation_function(samples.data, best_delta.data)
return x_adv, best_delta
diff --git a/src/secmlt/adv/evasion/perturbation_models.py b/src/secmlt/adv/evasion/perturbation_models.py
index 5df0d71..9d68b87 100644
--- a/src/secmlt/adv/evasion/perturbation_models.py
+++ b/src/secmlt/adv/evasion/perturbation_models.py
@@ -1,16 +1,55 @@
-class PerturbationModels:
+"""Implementation of perturbation models for perturbations of adversarial examples."""
+
+from typing import ClassVar
+
+
+class LpPerturbationModels:
+ """Lp perturbation models."""
+
L0 = "l0"
L1 = "l1"
L2 = "l2"
LINF = "linf"
- pert_models = {L0: 0, L1: 1, L2: 2, LINF: float("inf")}
+ pert_models: ClassVar[dict[str, float]] = {L0: 0, L1: 1, L2: 2, LINF: float("inf")}
@classmethod
- def is_perturbation_model_available(cls, perturbation_model) -> bool:
+ def is_perturbation_model_available(cls, perturbation_model: str) -> bool:
+ """
+ Check availability of the perturbation model requested.
+
+ Parameters
+ ----------
+ perturbation_model : str
+ A perturbation model as a string.
+
+ Returns
+ -------
+ bool
+ True if the perturbation model is found in PerturbationModels.pert_models.
+ """
return perturbation_model in (cls.pert_models)
@classmethod
- def get_p(cls, perturbation_model) -> float:
+ def get_p(cls, perturbation_model: str) -> float:
+ """
+ Get the float representation of p from the given string.
+
+ Parameters
+ ----------
+ perturbation_model : str
+ One of the strings defined in PerturbationModels.pert_models.
+
+ Returns
+ -------
+ float
+ The float representation of p, to use. e.g., in torch.norm(p=...).
+
+ Raises
+ ------
+ ValueError
+ Raises ValueError if the norm given is not in PerturbationModels.pert_models
+ """
if cls.is_perturbation_model_available(perturbation_model):
return cls.pert_models[perturbation_model]
- raise ValueError("Perturbation model not implemented")
+ msg = "Perturbation model not implemented"
+ raise ValueError(msg)
diff --git a/src/secmlt/adv/evasion/pgd.py b/src/secmlt/adv/evasion/pgd.py
index 0f4d254..63a37a5 100644
--- a/src/secmlt/adv/evasion/pgd.py
+++ b/src/secmlt/adv/evasion/pgd.py
@@ -1,18 +1,16 @@
-from typing import Optional
+"""Implementations of the Projected Gradient Descent evasion attack."""
-from foolbox.attacks import (
- L1ProjectedGradientDescentAttack,
- L2ProjectedGradientDescentAttack,
- LinfProjectedGradientDescentAttack,
-)
+import importlib
from secmlt.adv.backends import Backends
from secmlt.adv.evasion import BaseFoolboxEvasionAttack
from secmlt.adv.evasion.base_evasion_attack import (
+ BaseEvasionAttack,
BaseEvasionAttackCreator,
)
-from secmlt.adv.evasion.modular_attack import ModularEvasionAttackFixedEps, CE_LOSS
-from secmlt.adv.evasion.perturbation_models import PerturbationModels
+from secmlt.adv.evasion.foolbox_attacks.foolbox_pgd import PGDFoolbox
+from secmlt.adv.evasion.modular_attack import CE_LOSS, ModularEvasionAttackFixedEps
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
from secmlt.manipulations.manipulation import AdditiveManipulation
from secmlt.optimization.constraints import (
ClipConstraint,
@@ -27,6 +25,8 @@
class PGD(BaseEvasionAttackCreator):
+ """Creator for the Projected Gradient Descent (PGD) attack."""
+
def __new__(
cls,
perturbation_model: str,
@@ -34,13 +34,47 @@ def __new__(
num_steps: int,
step_size: float,
random_start: bool = False,
- y_target: Optional[int] = None,
+ y_target: int | None = None,
lb: float = 0.0,
ub: float = 1.0,
backend: str = Backends.FOOLBOX,
- trackers: list[Tracker] = None,
- **kwargs
- ):
+ trackers: list[Tracker] | None = None,
+ **kwargs,
+ ) -> BaseEvasionAttack:
+ """
+ Create the PGD attack.
+
+ Parameters
+ ----------
+ perturbation_model : str
+ Perturbation model for the attack. Available: 1, 2, inf.
+ epsilon : float
+ Radius of the constraint for the Lp ball.
+ num_steps : int
+ Number of iterations for the attack.
+ step_size : float
+ Attack step size.
+ random_start : bool, optional
+ Whether to use a random initialization onto the Lp ball, by
+ default False.
+ y_target : int | None, optional
+ Target label for a targeted attack, None
+ for untargeted attack, by default None.
+ lb : float, optional
+ Lower bound of the input space, by default 0.0.
+ ub : float, optional
+ Upper bound of the input space, by default 1.0.
+ backend : str, optional
+ Backend to use to run the attack, by default Backends.FOOLBOX
+ trackers : list[Tracker] | None, optional
+ Trackers to check various attack metrics (see secmlt.trackers),
+ available only for native implementation, by default None.
+
+ Returns
+ -------
+ BaseEvasionAttack
+ PGD attack instance.
+ """
cls.check_backend_available(backend)
implementation = cls.get_implementation(backend)
implementation.check_perturbation_model_available(perturbation_model)
@@ -54,27 +88,29 @@ def __new__(
lb=lb,
ub=ub,
trackers=trackers,
- **kwargs
+ **kwargs,
)
@staticmethod
- def get_backends():
+ def get_backends() -> list[str]:
+ """Get available implementations for the PGD attack."""
return [Backends.FOOLBOX, Backends.NATIVE]
@staticmethod
- def _get_foolbox_implementation():
- try:
- from .foolbox_attacks.foolbox_pgd import PGDFoolbox
- except ImportError:
- raise ImportError("Foolbox extra not installed")
- return PGDFoolbox
+ def _get_foolbox_implementation() -> type[PGDFoolbox]:
+ if importlib.util.find_spec("foolbox", None) is not None:
+ return PGDFoolbox
+ msg = "Foolbox extra not installed"
+ raise ImportError(msg)
@staticmethod
- def get_native_implementation():
+ def _get_native_implementation() -> type["PGDNative"]:
return PGDNative
class PGDFoolbox(BaseFoolboxEvasionAttack):
+ """Foolbox implementation of the PGD attack."""
+
def __init__(
self,
perturbation_model: str,
@@ -82,25 +118,64 @@ def __init__(
num_steps: int,
step_size: float,
random_start: bool,
- y_target: Optional[int] = None,
+ y_target: int | None = None,
lb: float = 0.0,
ub: float = 1.0,
- trackers: list[Tracker] = None,
- **kwargs
+ trackers: list[Tracker] | None = None,
+ **kwargs,
) -> None:
+ """
+ Create Foolbox PGD attack.
+
+ Parameters
+ ----------
+ perturbation_model : str
+ Perturbation model for the attack. Available: 1, 2, inf.
+ epsilon : float
+ Radius of the constraint for the Lp ball.
+ num_steps : int
+ Number of iterations for the attack.
+ step_size : float
+ Attack step size.
+ random_start : bool
+ Whether to use a random initialization onto the Lp ball.
+ y_target : int | None, optional
+ Target label for a targeted attack, None
+ for untargeted attack, by default None.
+ lb : float, optional
+ Lower bound of the input space, by default 0.0.
+ ub : float, optional
+ Upper bound of the input space, by default 1.0.
+ trackers : list[Tracker] | None, optional
+ Trackers to check various attack metrics (see secmlt.trackers),
+ available only for native implementation, by default None.
+
+ Raises
+ ------
+ NotImplementedError
+ Raises NotImplementedError if the requested perturbation
+ model is not defined for this attack.
+ """
+ from foolbox.attacks import (
+ L1ProjectedGradientDescentAttack,
+ L2ProjectedGradientDescentAttack,
+ LinfProjectedGradientDescentAttack,
+ )
+
perturbation_models = {
- PerturbationModels.L1: L1ProjectedGradientDescentAttack,
- PerturbationModels.L2: L2ProjectedGradientDescentAttack,
- PerturbationModels.LINF: LinfProjectedGradientDescentAttack,
+ LpPerturbationModels.L1: L1ProjectedGradientDescentAttack,
+ LpPerturbationModels.L2: L2ProjectedGradientDescentAttack,
+ LpPerturbationModels.LINF: LinfProjectedGradientDescentAttack,
}
- foolbox_attack_cls = perturbation_models.get(perturbation_model, None)
+ foolbox_attack_cls = perturbation_models.get(perturbation_model)
if foolbox_attack_cls is None:
- raise NotImplementedError(
- "This perturbation model is not implemented in foolbox."
- )
+ msg = "This perturbation model is not implemented in foolbox."
+ raise NotImplementedError(msg)
foolbox_attack = foolbox_attack_cls(
- abs_stepsize=step_size, steps=num_steps, random_start=random_start
+ abs_stepsize=step_size,
+ steps=num_steps,
+ random_start=random_start,
)
super().__init__(
@@ -112,8 +187,31 @@ def __init__(
trackers=trackers,
)
+ @staticmethod
+ def get_perturbation_models() -> set[str]:
+ """
+ Check the perturbation models implemented for the given attack.
+
+ Returns
+ -------
+ set[str]
+ The set of perturbation models for which the attack is implemented.
+
+ Raises
+ ------
+ NotImplementedError
+ Raises NotImplementedError if not implemented in the inherited class.
+ """
+ return {
+ LpPerturbationModels.L1,
+ LpPerturbationModels.L2,
+ LpPerturbationModels.LINF,
+ }
+
class PGDNative(ModularEvasionAttackFixedEps):
+ """Native implementation of the Projected Gradient Descent attack."""
+
def __init__(
self,
perturbation_model: str,
@@ -121,28 +219,55 @@ def __init__(
num_steps: int,
step_size: float,
random_start: bool,
- y_target: Optional[int] = None,
+ y_target: int | None = None,
lb: float = 0.0,
ub: float = 1.0,
- trackers: list[Tracker] = None,
- **kwargs
+ trackers: list[Tracker] | None = None,
+ **kwargs,
) -> None:
+ """
+ Create Native PGD attack.
+
+ Parameters
+ ----------
+ perturbation_model : str
+ Perturbation model for the attack. Available: 1, 2, inf.
+ epsilon : float
+ Radius of the constraint for the Lp ball.
+ num_steps : int
+ Number of iterations for the attack.
+ step_size : float
+ Attack step size.
+ random_start : bool
+ Whether to use a random initialization onto the Lp ball.
+ y_target : int | None, optional
+ Target label for a targeted attack, None
+ for untargeted attack, by default None.
+ lb : float, optional
+ Lower bound of the input space, by default 0.0.
+ ub : float, optional
+ Upper bound of the input space, by default 1.0.
+ trackers : list[Tracker] | None, optional
+ Trackers to check various attack metrics (see secmlt.trackers),
+ available only for native implementation, by default None.
+ """
perturbation_models = {
- PerturbationModels.L1: L1Constraint,
- PerturbationModels.L2: L2Constraint,
- PerturbationModels.LINF: LInfConstraint,
+ LpPerturbationModels.L1: L1Constraint,
+ LpPerturbationModels.L2: L2Constraint,
+ LpPerturbationModels.LINF: LInfConstraint,
}
if random_start:
initializer = RandomLpInitializer(
- perturbation_model=perturbation_model, radius=epsilon
+ perturbation_model=perturbation_model,
+ radius=epsilon,
)
else:
initializer = Initializer()
self.epsilon = epsilon
gradient_processing = LinearProjectionGradientProcessing(perturbation_model)
perturbation_constraints = [
- perturbation_models[perturbation_model](radius=self.epsilon)
+ perturbation_models[perturbation_model](radius=self.epsilon),
]
domain_constraints = [ClipConstraint(lb=lb, ub=ub)]
manipulation_function = AdditiveManipulation(
diff --git a/src/secmlt/data/__init__.py b/src/secmlt/data/__init__.py
index e69de29..055bc15 100644
--- a/src/secmlt/data/__init__.py
+++ b/src/secmlt/data/__init__.py
@@ -0,0 +1 @@
+"""Functionalities for handling data."""
diff --git a/src/secmlt/data/sklearn_dataset.py b/src/secmlt/data/sklearn_dataset.py
deleted file mode 100644
index b183437..0000000
--- a/src/secmlt/data/sklearn_dataset.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import numpy as np
-from torch.utils.data import Dataset
-from torch.utils.data.dataset import T_co
-
-
-class SklearnDataset(Dataset):
- def __init__(self, x: np.ndarray, y: np.ndarray):
- if x.shape[0] != y.shape[0]:
- raise ValueError(
- f"x and y must have the same number of rows (mismatch {x.shape} and {y.shape})"
- )
- self._x: np.ndarray = x
- self._y: np.ndarray = y
-
- @property
- def x(self) -> np.ndarray:
- return self._x
-
- @property
- def y(self) -> np.ndarray:
- return self._y
-
- @x.setter
- def x(self, data: np.ndarray):
- self._x = data
-
- @y.setter
- def y(self, data: np.ndarray):
- self._y = data
-
- def __getitem__(self, index) -> T_co:
- return self._x[index, ...], self._y[index, ...]
-
- def __len__(self) -> int:
- return self._x.shape[0]
diff --git a/src/secmlt/manipulations/__init__.py b/src/secmlt/manipulations/__init__.py
index e69de29..8b91b0d 100644
--- a/src/secmlt/manipulations/__init__.py
+++ b/src/secmlt/manipulations/__init__.py
@@ -0,0 +1 @@
+"""Functionalities for applying manipulations to input data."""
diff --git a/src/secmlt/manipulations/manipulation.py b/src/secmlt/manipulations/manipulation.py
index 3027cca..276c948 100644
--- a/src/secmlt/manipulations/manipulation.py
+++ b/src/secmlt/manipulations/manipulation.py
@@ -1,17 +1,29 @@
-from abc import ABC
-from typing import Tuple
+"""Manipulations for perturbing input samples."""
-import torch
+from abc import ABC, abstractmethod
+import torch
from secmlt.optimization.constraints import Constraint
class Manipulation(ABC):
+ """Abstract class for manipulations."""
+
def __init__(
self,
domain_constraints: list[Constraint],
perturbation_constraints: list[Constraint],
- ):
+ ) -> None:
+ """
+ Create manipulation object.
+
+ Parameters
+ ----------
+ domain_constraints : list[Constraint]
+ Constraints for the domain bounds (x_adv).
+ perturbation_constraints : list[Constraint]
+ Constraints for the perturbation (delta).
+ """
self.domain_constraints = domain_constraints
self.perturbation_constraints = perturbation_constraints
@@ -25,13 +37,50 @@ def _apply_perturbation_constraints(self, delta: torch.Tensor) -> torch.Tensor:
delta = constraint(delta)
return delta
+ @abstractmethod
def _apply_manipulation(
- self, x: torch.Tensor, delta: torch.Tensor
- ) -> torch.Tensor: ...
+ self,
+ x: torch.Tensor,
+ delta: torch.Tensor,
+ ) -> torch.Tensor:
+ """
+ Apply the manipulation.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples.
+ delta : torch.Tensor
+ Manipulation to apply.
+
+ Returns
+ -------
+ torch.Tensor
+ Perturbed samples.
+ """
+ ...
def __call__(
- self, x: torch.Tensor, delta: torch.Tensor
- ) -> Tuple[torch.Tensor, torch.Tensor]:
+ self,
+ x: torch.Tensor,
+ delta: torch.Tensor,
+ ) -> tuple[torch.Tensor, torch.Tensor]:
+ """
+ Apply the manipulation to the input data.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input data.
+ delta : torch.Tensor
+ Perturbation to apply.
+
+ Returns
+ -------
+ tuple[torch.Tensor, torch.Tensor]
+ Perturbed data and perturbation after the
+ application of constraints.
+ """
delta.data = self._apply_perturbation_constraints(delta.data)
x_adv, delta = self._apply_manipulation(x, delta)
x_adv.data = self._apply_domain_constraints(x_adv.data)
@@ -39,7 +88,11 @@ def __call__(
class AdditiveManipulation(Manipulation):
+ """Additive manipulation for input data."""
+
def _apply_manipulation(
- self, x: torch.Tensor, delta: torch.Tensor
- ) -> Tuple[torch.Tensor, torch.Tensor]:
+ self,
+ x: torch.Tensor,
+ delta: torch.Tensor,
+ ) -> tuple[torch.Tensor, torch.Tensor]:
return x + delta, delta
diff --git a/src/secmlt/metrics/__init__.py b/src/secmlt/metrics/__init__.py
index e69de29..dd293b0 100644
--- a/src/secmlt/metrics/__init__.py
+++ b/src/secmlt/metrics/__init__.py
@@ -0,0 +1 @@
+"""Metrics to evaluate machine learning models and attacks."""
diff --git a/src/secmlt/metrics/classification.py b/src/secmlt/metrics/classification.py
index a1afe3f..8936c91 100644
--- a/src/secmlt/metrics/classification.py
+++ b/src/secmlt/metrics/classification.py
@@ -1,89 +1,152 @@
-from typing import List
-import torch
-from torch.utils.data import DataLoader
+"""Classification metrics for machine-learning models and for attack performance."""
+import torch
from secmlt.models.base_model import BaseModel
+from torch.utils.data import DataLoader
def accuracy(y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
- acc = (y_pred.type(y_true.dtype) == y_true).mean()
- return acc
+ """
+ Compute the accuracy on a batch of predictions and targets.
+
+ Parameters
+ ----------
+ y_pred : torch.Tensor
+ Predictions from the model.
+ y_true : torch.Tensor
+ Target labels.
+
+ Returns
+ -------
+ torch.Tensor
+ The percentage of predictions that match the targets.
+ """
+ return (y_pred.type(y_true.dtype) == y_true).mean()
+
+class Accuracy:
+ """Class for computing accuracy of a model on a dataset."""
-class Accuracy(object):
- def __init__(self):
+ def __init__(self) -> None:
+ """Create Accuracy metric."""
self._num_samples = 0
self._accumulated_accuracy = 0.0
- def __call__(self, model: BaseModel, dataloader: DataLoader):
- for batch_idx, (x, y) in enumerate(dataloader):
+ def __call__(self, model: BaseModel, dataloader: DataLoader) -> torch.Tensor:
+ """
+ Compute the metric on a single attack run or a dataloader.
+
+ Parameters
+ ----------
+ model : BaseModel
+ Model to use for prediction.
+ dataloader : DataLoader
+ A dataloader, can be the result of an attack or a generic
+ test dataloader.
+
+ Returns
+ -------
+ torch.Tensor
+ The metric computed on the given dataloader.
+ """
+ for _, (x, y) in enumerate(dataloader):
y_pred = model.predict(x).cpu().detach()
- self.accumulate(y_pred, y)
- accuracy = self.compute()
- return accuracy
+ self._accumulate(y_pred, y)
+ return self._compute()
- def accumulate(self, y_pred: torch.Tensor, y_true: torch.Tensor):
+ def _accumulate(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> None:
self._num_samples += y_true.shape[0]
self._accumulated_accuracy += torch.sum(
- y_pred.type(y_true.dtype).cpu() == y_true.cpu()
+ y_pred.type(y_true.dtype).cpu() == y_true.cpu(),
)
- def compute(self):
+ def _compute(self) -> torch.Tensor:
return self._accumulated_accuracy / self._num_samples
class AttackSuccessRate(Accuracy):
- def __init__(self, y_target=None):
+ """Single attack success rate from attack results."""
+
+ def __init__(self, y_target: float | torch.Tensor | None = None) -> None:
+ """
+ Create attack success rate metric.
+
+ Parameters
+ ----------
+ y_target : float | torch.Tensor | None, optional
+ Target label for the attack, None for untargeted, by default None
+ """
super().__init__()
self.y_target = y_target
- def accumulate(self, y_pred: torch.Tensor, y_true: torch.Tensor):
+ def _accumulate(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> None:
if self.y_target is None:
- super().accumulate(y_pred, y_true)
+ super()._accumulate(y_pred, y_true)
else:
- super().accumulate(y_pred, torch.ones_like(y_true) * self.y_target)
+ super()._accumulate(y_pred, torch.ones_like(y_true) * self.y_target)
- def compute(self):
+ def _compute(self) -> torch.Tensor:
if self.y_target is None:
- return 1 - super().compute()
- else:
- return super().compute()
+ return 1 - super()._compute()
+ return super()._compute()
class AccuracyEnsemble(Accuracy):
-
- def __call__(self, model: BaseModel, dataloaders: List[DataLoader]):
- for advs in zip(*dataloaders):
+ """Robust accuracy of a model on multiple attack runs."""
+
+ def __call__(self, model: BaseModel, dataloaders: list[DataLoader]) -> torch.Tensor:
+ """
+ Compute the metric on an ensemble of attacks from their results.
+
+ Parameters
+ ----------
+ model : BaseModel
+ Model to use for prediction.
+ dataloaders : list[DataLoader]
+ List of loaders returned from multiple attack runs.
+
+ Returns
+ -------
+ torch.Tensor
+ The metric computed across multiple attack runs.
+ """
+ for advs in zip(*dataloaders, strict=False):
y_pred = []
for x, y in advs:
y_pred.append(model.predict(x).cpu().detach())
# verify that the samples order correspond
assert (y - advs[0][1]).sum() == 0
y_pred = torch.vstack(y_pred)
- self.accumulate(y_pred, advs[0][1])
- accuracy = self.compute()
- return accuracy
+ self._accumulate(y_pred, advs[0][1])
+ return self._compute()
- def accumulate(self, y_pred: torch.Tensor, y_true: torch.Tensor):
+ def _accumulate(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> None:
self._num_samples += y_true.shape[0]
self._accumulated_accuracy += torch.sum(
# take worst over predictions
- (y_pred.type(y_true.dtype).cpu() == y_true.cpu())
- .min(dim=0)
- .values
+ (y_pred.type(y_true.dtype).cpu() == y_true.cpu()).min(dim=0).values,
)
class EnsembleSuccessRate(AccuracyEnsemble):
- def __init__(self, y_target=None):
+ """Worst-case success rate of multiple attack runs."""
+
+ def __init__(self, y_target: float | torch.Tensor | None = None) -> None:
+ """
+ Create ensemble success rate metric.
+
+ Parameters
+ ----------
+ y_target : float | torch.Tensor | None, optional
+            Target label for the attack, None for untargeted, by default None
+ """
super().__init__()
self.y_target = y_target
- def accumulate(self, y_pred: torch.Tensor, y_true: torch.Tensor):
+ def _accumulate(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> None:
if self.y_target is None:
- super().accumulate(y_pred, y_true)
+ super()._accumulate(y_pred, y_true)
else:
- print(y_pred)
self._num_samples += y_true.shape[0]
self._accumulated_accuracy += torch.sum(
# take worst over predictions
@@ -92,11 +155,10 @@ def accumulate(self, y_pred: torch.Tensor, y_true: torch.Tensor):
== (torch.ones_like(y_true) * self.y_target).cpu()
)
.max(dim=0)
- .values
+ .values,
)
- def compute(self):
+ def _compute(self) -> torch.Tensor:
if self.y_target is None:
- return 1 - super().compute()
- else:
- return super().compute()
+ return 1 - super()._compute()
+ return super()._compute()
diff --git a/src/secmlt/models/__init__.py b/src/secmlt/models/__init__.py
index e69de29..073f505 100644
--- a/src/secmlt/models/__init__.py
+++ b/src/secmlt/models/__init__.py
@@ -0,0 +1 @@
+"""Machine learning models and wrappers."""
diff --git a/src/secmlt/models/base_model.py b/src/secmlt/models/base_model.py
index b1659ad..f544378 100644
--- a/src/secmlt/models/base_model.py
+++ b/src/secmlt/models/base_model.py
@@ -1,28 +1,32 @@
+"""Basic wrapper for generic model."""
+
from abc import ABC, abstractmethod
-from typing import Callable
import torch
-from torch.utils.data import DataLoader
-
+from secmlt.models.data_processing.data_processing import DataProcessing
from secmlt.models.data_processing.identity_data_processing import (
IdentityDataProcessing,
)
-from secmlt.models.data_processing.data_processing import DataProcessing
+from torch.utils.data import DataLoader
class BaseModel(ABC):
+ """Basic model wrapper."""
+
def __init__(
self,
preprocessing: DataProcessing = None,
postprocessing: DataProcessing = None,
- ):
+ ) -> None:
"""
- Create base abstract model
+ Create base model.
+
Parameters
----------
- preprocessing : DataProcessing
- postprocessing: DataProcessing
-
+ preprocessing : DataProcessing, optional
+ Preprocessing to apply before the forward, by default None.
+ postprocessing : DataProcessing, optional
+ Postprocessing to apply after the forward, by default None.
"""
self._preprocessing = (
preprocessing if preprocessing is not None else IdentityDataProcessing()
@@ -32,22 +36,100 @@ def __init__(
)
@abstractmethod
- def predict(self, x: torch.Tensor) -> torch.Tensor: ...
+ def predict(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Return output predictions for given model.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples.
+
+ Returns
+ -------
+ torch.Tensor
+ Predictions from the model.
+ """
+ ...
def decision_function(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Return the decision function from the model.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+            Input samples.
+
+ Returns
+ -------
+ torch.Tensor
+ Model output scores.
+ """
x = self._preprocessing(x)
x = self._decision_function(x)
- x = self._postprocessing(x)
- return x
+ return self._postprocessing(x)
@abstractmethod
- def _decision_function(self, x: torch.Tensor) -> torch.Tensor: ...
+ def _decision_function(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Specific decision function of the model (data already preprocessed).
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Preprocessed input samples.
+
+ Returns
+ -------
+ torch.Tensor
+ Model output scores.
+ """
+ ...
@abstractmethod
- def gradient(self, x: torch.Tensor, y: int) -> torch.Tensor: ...
+ def gradient(self, x: torch.Tensor, y: int) -> torch.Tensor:
+ """
+ Compute gradients of the score y w.r.t. x.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples.
+ y : int
+ Target score.
+
+ Returns
+ -------
+ torch.Tensor
+ Input gradients of the target score y.
+ """
+ ...
@abstractmethod
- def train(self, dataloader: DataLoader): ...
+ def train(self, dataloader: DataLoader) -> "BaseModel":
+ """
+ Train the model with the given dataloader.
+
+ Parameters
+ ----------
+ dataloader : DataLoader
+ Train data loader.
+ """
+ ...
def __call__(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Forward function of the model.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples.
+
+ Returns
+ -------
+ torch.Tensor
+            Model output scores.
+ """
return self.decision_function(x)
diff --git a/src/secmlt/models/base_trainer.py b/src/secmlt/models/base_trainer.py
index 32bd71d..5988989 100644
--- a/src/secmlt/models/base_trainer.py
+++ b/src/secmlt/models/base_trainer.py
@@ -1,7 +1,29 @@
+"""Model trainers."""
+
from abc import ABCMeta, abstractmethod
+from secmlt.models.base_model import BaseModel
+from torch.utils.data import DataLoader
+
class BaseTrainer(metaclass=ABCMeta):
+ """Abstract class for model trainers."""
+
@abstractmethod
- def train(self, model, dataloader):
- pass
+ def train(self, model: BaseModel, dataloader: DataLoader) -> BaseModel:
+ """
+ Train a model with the given dataloader.
+
+ Parameters
+ ----------
+ model : BaseModel
+ Model to train.
+ dataloader : DataLoader
+ Training dataloader.
+
+ Returns
+ -------
+ BaseModel
+ The trained model.
+ """
+ ...
diff --git a/src/secmlt/models/data_processing/__init__.py b/src/secmlt/models/data_processing/__init__.py
index e69de29..df5cd2e 100644
--- a/src/secmlt/models/data_processing/__init__.py
+++ b/src/secmlt/models/data_processing/__init__.py
@@ -0,0 +1 @@
+"""Functionalities for data transformations."""
diff --git a/src/secmlt/models/data_processing/data_processing.py b/src/secmlt/models/data_processing/data_processing.py
index 2ea30c2..0142beb 100644
--- a/src/secmlt/models/data_processing/data_processing.py
+++ b/src/secmlt/models/data_processing/data_processing.py
@@ -1,13 +1,46 @@
+"""Interface for the data processing functionalities."""
+
from abc import ABC, abstractmethod
import torch
class DataProcessing(ABC):
+ """Abstract data processing class."""
+
+ @abstractmethod
+ def _process(self, x: torch.Tensor) -> torch.Tensor:
+ ...
+
@abstractmethod
- def process(self, x: torch.Tensor) -> torch.Tensor: ...
+ def invert(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Apply the inverted transform (if defined).
- def invert(self, x: torch.Tensor) -> torch.Tensor: ...
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples.
+
+ Returns
+ -------
+ torch.Tensor
+ The samples in the input space before the transformation.
+ """
+ ...
def __call__(self, x: torch.Tensor) -> torch.Tensor:
- return self.process(x)
+ """
+ Apply the forward transformation.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples.
+
+ Returns
+ -------
+ torch.Tensor
+ The samples after transformation.
+ """
+ return self._process(x)
diff --git a/src/secmlt/models/data_processing/identity_data_processing.py b/src/secmlt/models/data_processing/identity_data_processing.py
index c3005aa..b472150 100644
--- a/src/secmlt/models/data_processing/identity_data_processing.py
+++ b/src/secmlt/models/data_processing/identity_data_processing.py
@@ -1,11 +1,40 @@
-import torch
+"""Identity data processing, returns the samples as they are."""
+import torch
from secmlt.models.data_processing.data_processing import DataProcessing
class IdentityDataProcessing(DataProcessing):
- def process(self, x: torch.Tensor) -> torch.Tensor:
+ """Identity transformation."""
+
+ def _process(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Identity transformation. Returns the samples unchanged.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples.
+
+ Returns
+ -------
+ torch.Tensor
+ Unchanged samples.
+ """
return x
def invert(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Return the sample as it is.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples.
+
+ Returns
+ -------
+ torch.Tensor
+ Unchanged samples for identity inverse transformation.
+ """
return x
diff --git a/src/secmlt/models/data_processing/test/__init__.py b/src/secmlt/models/data_processing/test/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/secmlt/models/pytorch/__init__.py b/src/secmlt/models/pytorch/__init__.py
index e69de29..a851dd1 100644
--- a/src/secmlt/models/pytorch/__init__.py
+++ b/src/secmlt/models/pytorch/__init__.py
@@ -0,0 +1 @@
+"""PyTorch model wrappers."""
diff --git a/src/secmlt/models/pytorch/base_pytorch_nn.py b/src/secmlt/models/pytorch/base_pytorch_nn.py
index 54a5c20..73d5098 100644
--- a/src/secmlt/models/pytorch/base_pytorch_nn.py
+++ b/src/secmlt/models/pytorch/base_pytorch_nn.py
@@ -1,61 +1,104 @@
-import torch
-from torch.utils.data import DataLoader
+"""Wrappers for PyTorch models."""
+import torch
from secmlt.models.base_model import BaseModel
from secmlt.models.data_processing.data_processing import DataProcessing
from secmlt.models.pytorch.base_pytorch_trainer import BasePyTorchTrainer
+from torch.utils.data import DataLoader
class BasePytorchClassifier(BaseModel):
+ """Wrapper for PyTorch classifier."""
+
def __init__(
self,
model: torch.nn.Module,
preprocessing: DataProcessing = None,
postprocessing: DataProcessing = None,
trainer: BasePyTorchTrainer = None,
- ):
+ ) -> None:
+ """
+ Create wrapped PyTorch classifier.
+
+ Parameters
+ ----------
+ model : torch.nn.Module
+ PyTorch model.
+ preprocessing : DataProcessing, optional
+            Preprocessing to apply before the forward, by default None.
+ postprocessing : DataProcessing, optional
+ Postprocessing to apply after the forward, by default None.
+ trainer : BasePyTorchTrainer, optional
+ Trainer object to train the model, by default None.
+ """
super().__init__(preprocessing=preprocessing, postprocessing=postprocessing)
self._model: torch.nn.Module = model
self._trainer = trainer
@property
- def model(self):
+ def model(self) -> torch.nn.Module:
+ """
+ Get the wrapped instance of PyTorch model.
+
+ Returns
+ -------
+ torch.nn.Module
+ Wrapped PyTorch model.
+ """
return self._model
- def get_device(self):
+ def _get_device(self) -> torch.device:
return next(self._model.parameters()).device
def predict(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Return the predicted class for the given samples.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples.
+
+ Returns
+ -------
+ torch.Tensor
+ Predicted class for the samples.
+ """
scores = self.decision_function(x)
- labels = torch.argmax(scores, dim=-1)
- return labels
+ return torch.argmax(scores, dim=-1)
def _decision_function(self, x: torch.Tensor) -> torch.Tensor:
"""
- Returns the decision function of the model.
+ Compute decision function of the model.
+
Parameters
----------
- x : input samples
+ x : torch.Tensor
+ Input samples.
Returns
-------
- output of decision function
+ torch.Tensor
+ Output scores from the model.
"""
- x = x.to(device=self.get_device())
+ x = x.to(device=self._get_device())
return self._model(x)
def gradient(self, x: torch.Tensor, y: int) -> torch.Tensor:
"""
- The functions computes gradients of class with label y w.r.t. x.
- Gradients are computed in batch.
+ Compute batch gradients of class y w.r.t. x.
+
Parameters
----------
- x : input samples
- y : class label
+ x : torch.Tensor
+ Input samples.
+ y : int
+ Class label.
Returns
-------
- Gradient of class y w.r.t. input x
+ torch.Tensor
+ Gradient of class y w.r.t. input x.
"""
x = x.clone().requires_grad_()
if x.grad is not None:
@@ -63,10 +106,28 @@ def gradient(self, x: torch.Tensor, y: int) -> torch.Tensor:
output = self.decision_function(x)
output = output[:, y].sum()
output.backward()
- grad = x.grad
- return grad
+ return x.grad
- def train(self, dataloader: DataLoader):
+ def train(self, dataloader: DataLoader) -> torch.nn.Module:
+ """
+ Train the model with given dataloader, if trainer is set.
+
+ Parameters
+ ----------
+ dataloader : DataLoader
+ Training PyTorch dataloader to use for training.
+
+ Returns
+ -------
+ torch.nn.Module
+ Trained PyTorch model.
+
+ Raises
+ ------
+ ValueError
+ Raises ValueError if the trainer is not set.
+ """
if self._trainer is None:
- raise ValueError("Cannot train without a trainer.")
+ msg = "Cannot train without a trainer."
+ raise ValueError(msg)
return self._trainer.train(self._model, dataloader)
diff --git a/src/secmlt/models/pytorch/base_pytorch_trainer.py b/src/secmlt/models/pytorch/base_pytorch_trainer.py
index 5e89779..0ddda72 100644
--- a/src/secmlt/models/pytorch/base_pytorch_trainer.py
+++ b/src/secmlt/models/pytorch/base_pytorch_trainer.py
@@ -1,28 +1,60 @@
-import torch.nn
-from torch.optim.lr_scheduler import _LRScheduler # noqa
-from torch.utils.data import DataLoader
+"""PyTorch model trainers."""
+import torch.nn
from secmlt.models.base_trainer import BaseTrainer
+from torch.optim.lr_scheduler import _LRScheduler
+from torch.utils.data import DataLoader
class BasePyTorchTrainer(BaseTrainer):
+ """Trainer for PyTorch models."""
+
def __init__(
self,
optimizer: torch.optim.Optimizer,
epochs: int = 5,
loss: torch.nn.Module = None,
scheduler: _LRScheduler = None,
- ):
+ ) -> None:
+ """
+ Create PyTorch trainer.
+
+ Parameters
+ ----------
+ optimizer : torch.optim.Optimizer
+ Optimizer to use for training the model.
+ epochs : int, optional
+ Number of epochs, by default 5.
+ loss : torch.nn.Module, optional
+ Loss to minimize, by default None.
+ scheduler : _LRScheduler, optional
+ Scheduler for the optimizer, by default None.
+ """
self._epochs = epochs
self._optimizer = optimizer
self._loss = loss if loss is not None else torch.nn.CrossEntropyLoss()
self._scheduler = scheduler
- def train(self, model: torch.nn.Module, dataloader: DataLoader):
+ def train(self, model: torch.nn.Module, dataloader: DataLoader) -> torch.nn.Module:
+ """
+ Train model with given loader.
+
+ Parameters
+ ----------
+ model : torch.nn.Module
+ Pytorch model to be trained.
+ dataloader : DataLoader
+ Train data loader.
+
+ Returns
+ -------
+ torch.nn.Module
+ Trained model.
+ """
device = next(model.parameters()).device
model = model.train()
- for epoch in range(self._epochs):
- for batch_idx, (x, y) in enumerate(dataloader):
+ for _ in range(self._epochs):
+ for _, (x, y) in enumerate(dataloader):
x, y = x.to(device), y.to(device)
self._optimizer.zero_grad()
outputs = model(x)
diff --git a/src/secmlt/models/pytorch/test/__init__.py b/src/secmlt/models/pytorch/test/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/secmlt/models/pytorch/test/base_pytorch.py b/src/secmlt/models/pytorch/test/base_pytorch.py
deleted file mode 100644
index 4d7a030..0000000
--- a/src/secmlt/models/pytorch/test/base_pytorch.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import unittest
-from functools import reduce
-
-import torch.nn
-from torch.nn import Module
-from torch.utils.data import DataLoader
-from torchvision.datasets import FakeData
-from torchvision.transforms import ToTensor
-
-
-class Net(Module):
- def __init__(self, input_size, output_size):
- super(Net, self).__init__()
- self._l1 = torch.nn.Linear(
- reduce(lambda x, y: x * y, input_size), out_features=output_size
- )
-
- def forward(self, x: torch.Tensor):
- return self._l1(x.view(x.shape[0], -1))
-
-
-class BasePytorchTests(unittest.TestCase):
- def setUp(self):
- self.input_shape = (3, 224, 224)
- self.output_shape = 2
- self._data = FakeData(
- size=10,
- num_classes=self.output_shape,
- image_size=self.input_shape,
- transform=ToTensor(),
- )
- self._dataloader = DataLoader(self._data)
- self._net = Net(self.input_shape, self.output_shape)
diff --git a/src/secmlt/models/pytorch/test/test_pytorch_nn.py b/src/secmlt/models/pytorch/test/test_pytorch_nn.py
deleted file mode 100644
index ba94e59..0000000
--- a/src/secmlt/models/pytorch/test/test_pytorch_nn.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import torch
-from torch.optim import Adam
-
-from src.models.pytorch.base_pytorch_nn import BasePytorchClassifier
-from src.models.pytorch.base_pytorch_trainer import BasePyTorchTrainer
-from src.models.pytorch.test.base_pytorch import BasePytorchTests
-
-
-class TestPytorchNN(BasePytorchTests):
- def setUp(self):
- super(TestPytorchNN, self).setUp()
- self._trainer = BasePyTorchTrainer(
- optimizer=Adam(lr=1e-3, params=self._net.parameters()),
- loss=torch.nn.CrossEntropyLoss(),
- )
-
- def test_fit_raise_exception_no_trainer(self):
- test_net = BasePytorchClassifier(self._net, trainer=None)
- with self.assertRaises(ValueError):
- test_net.train(self._dataloader)
-
- def test_fit_ok(self):
- test_net = BasePytorchClassifier(self._net, trainer=self._trainer)
- test_net.train(self._dataloader)
- self.assertTrue(test_net)
diff --git a/src/secmlt/models/pytorch/test/test_pytorch_trainer.py b/src/secmlt/models/pytorch/test/test_pytorch_trainer.py
deleted file mode 100644
index 7835e8d..0000000
--- a/src/secmlt/models/pytorch/test/test_pytorch_trainer.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from torch.optim import Adam
-
-from src.models.pytorch.base_pytorch_trainer import BasePyTorchTrainer
-from src.models.pytorch.test.base_pytorch import BasePytorchTests
-
-
-class TestPytorchTrainer(BasePytorchTests):
- def test_train_net(self):
- old_weights = [t.clone() for t in list(self._net.parameters())]
- trainer = BasePyTorchTrainer(
- optimizer=Adam(lr=1e-3, params=self._net.parameters())
- )
- fit_net = trainer.train(self._net, self._dataloader)
-
- sum_old_weights = sum([t.sum().item() for t in old_weights])
- sum_new_weights = sum([t.sum().item() for t in fit_net.parameters()])
-
- self.assertNotEqual(sum_old_weights, sum_new_weights)
-
- def test_test_accuracy(self):
- # TODO: implement test with mock network
- raise NotImplementedError("test yet to be implemented")
diff --git a/src/secmlt/models/sklearn/__init__.py b/src/secmlt/models/sklearn/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/secmlt/models/sklearn/base_sklearn_model.py b/src/secmlt/models/sklearn/base_sklearn_model.py
deleted file mode 100644
index 7e7830f..0000000
--- a/src/secmlt/models/sklearn/base_sklearn_model.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from typing import Callable
-
-import numpy as np
-import sklearn
-import torch
-from torch.utils.data import DataLoader
-
-from src.data.sklearn_dataset import SklearnDataset
-from src.models.base_model import BaseModel
-from src.models.data_processing.data_processing import DataProcessing
-
-
-class BaseSklearnModel(BaseModel):
- def __init__(
- self,
- model: sklearn.base.BaseEstimator,
- preprocessing: DataProcessing = None,
- postprocessing: DataProcessing = None,
- ):
- super().__init__(preprocessing=preprocessing, postprocessing=postprocessing)
- self._model: sklearn.base.BaseEstimator = model
-
- def _decision_function(self, x: torch.Tensor) -> torch.Tensor:
- if hasattr(self._model, "decision_function"):
- return self.to_tensor(self._model.decision_function(self.to_2d_numpy(x)))
- elif hasattr(self._model, "predict_proba"):
- return self.to_tensor(self._model.predict_proba(self.to_2d_numpy(x)))
- raise AttributeError(
- "This model has neither decision_function nor predict_proba."
- )
-
- def gradient(self, x: torch.Tensor, y: int) -> torch.Tensor:
- raise NotImplementedError(
- "Custom sklearn model do not implement gradients. "
- "Use specific class or create subclass with custom definition."
- )
-
- def train(self, dataloader: DataLoader):
- if not isinstance(dataloader.dataset, SklearnDataset):
- raise ValueError(
- f"Internal dataset is not SklearnDataset, but {type(dataloader.dataset)}"
- )
- x, y = dataloader.dataset.x, dataloader.dataset.y
- self._model.fit(x, y)
- return self
-
- def predict(self, x: torch.Tensor) -> torch.Tensor:
- x_numpy = self.to_2d_numpy(x)
- y = self._model.predict(x_numpy)
- y = self.to_tensor(y)
- return y
-
- @classmethod
- def to_numpy(cls, x: torch.Tensor) -> np.ndarray:
- return x.detach().cpu().numpy()
-
- @classmethod
- def to_tensor(cls, x: np.ndarray) -> torch.Tensor:
- return torch.tensor(x)
-
- @classmethod
- def to_2d_numpy(cls, x: torch.Tensor) -> np.ndarray:
- return x.view(x.shape[0], -1).cpu().detach().numpy()
diff --git a/src/secmlt/models/sklearn/test/__init__.py b/src/secmlt/models/sklearn/test/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/secmlt/models/tensorflow/__init__.py b/src/secmlt/models/tensorflow/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/secmlt/models/tensorflow/base_tensorflow_nn.py b/src/secmlt/models/tensorflow/base_tensorflow_nn.py
deleted file mode 100644
index 87f67c5..0000000
--- a/src/secmlt/models/tensorflow/base_tensorflow_nn.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import tensorflow as tf
-import torch
-
-from src.models.base_model import BaseModel
-from src.models.data_processing.data_processing import DataProcessing
-from src.models.tensorflow.base_tensorflow_trainer import BaseTensorflowTrainer
-
-
-class BaseTensorflowClassifier(BaseModel):
- def __init__(
- self,
- model: tf.keras.Model,
- preprocessing: DataProcessing = None,
- postprocessing: DataProcessing = None,
- trainer: BaseTensorflowTrainer = None,
- ):
- super().__init__(preprocessing=preprocessing, postprocessing=postprocessing)
- self._model = model
- self._trainer = trainer
-
- def predict(self, x: torch.Tensor) -> torch.Tensor:
- """
- TODO
- Parameters
- ----------
- x :
-
- Returns
- -------
-
- """
- pass
-
- def _decision_function(self, x: torch.Tensor) -> torch.Tensor:
- """
- TODO
- Parameters
- ----------
- x :
-
- Returns
- -------
-
- """
- pass
-
- def gradient(self, x: torch.Tensor, y: int) -> torch.Tensor:
- """
- TODO
- Parameters
- ----------
- x :
-
- Returns
- -------
-
- """
- pass
-
- def train(self, dataloader: torch.Tensor):
- """
- TODO
- Parameters
- ----------
- dataloader :
-
- Returns
- -------
-
- """
- pass
diff --git a/src/secmlt/models/tensorflow/base_tensorflow_trainer.py b/src/secmlt/models/tensorflow/base_tensorflow_trainer.py
deleted file mode 100644
index 4bdf65f..0000000
--- a/src/secmlt/models/tensorflow/base_tensorflow_trainer.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from src.models.base_trainer import BaseTrainer
-
-
-class BaseTensorflowTrainer(BaseTrainer):
- def __init__(self):
- pass
-
- def train(self, model, dataloader):
- pass
diff --git a/src/secmlt/models/test/__init__.py b/src/secmlt/models/test/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/secmlt/optimization/__init__.py b/src/secmlt/optimization/__init__.py
index e69de29..2b2f01e 100644
--- a/src/secmlt/optimization/__init__.py
+++ b/src/secmlt/optimization/__init__.py
@@ -0,0 +1 @@
+"""Optimization functionalities."""
diff --git a/src/secmlt/optimization/constraints.py b/src/secmlt/optimization/constraints.py
index 366ac94..0116584 100644
--- a/src/secmlt/optimization/constraints.py
+++ b/src/secmlt/optimization/constraints.py
@@ -1,72 +1,220 @@
-from abc import abstractmethod
+"""Constraints for tensors and the corresponding batch-wise projections."""
+
+from abc import ABC, abstractmethod
import torch
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
-class Constraint:
- def __call__(self, x: torch.Tensor, *args, **kwargs): ...
+class Constraint(ABC):
+ """Generic constraint."""
+
+ @abstractmethod
+ def __call__(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
+ """
+ Project onto the constraint.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor.
+
+ Returns
+ -------
+ torch.Tensor
+ Tensor projected onto the constraint.
+ """
+ ...
class ClipConstraint(Constraint):
- def __init__(self, lb=0, ub=1):
+ """Box constraint, usually for the input space."""
+
+ def __init__(self, lb: float = 0.0, ub: float = 1.0) -> None:
+ """
+ Create box constraint.
+
+ Parameters
+ ----------
+ lb : float, optional
+ Lower bound of the domain, by default 0.0.
+ ub : float, optional
+ Upper bound of the domain, by default 1.0.
+ """
self.lb = lb
self.ub = ub
def __call__(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
+ """
+ Call the projection function.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples.
+
+ Returns
+ -------
+ torch.Tensor
+ Tensor projected onto the box constraint.
+ """
return x.clamp(self.lb, self.ub)
-class LpConstraint(Constraint):
- def __init__(self, radius=0, center=0, p=torch.inf):
- self.p = p
+class LpConstraint(Constraint, ABC):
+ """Abstract class for Lp constraint."""
+
+ def __init__(
+ self,
+ radius: float = 0.0,
+ center: float = 0.0,
+ p: str = LpPerturbationModels.LINF,
+ ) -> None:
+ """
+ Create Lp constraint.
+
+ Parameters
+ ----------
+ radius : float, optional
+ Radius of the constraint, by default 0.0.
+ center : float, optional
+ Center of the constraint, by default 0.0.
+ p : str, optional
+ Value of p for Lp norm, by default LpPerturbationModels.LINF.
+ """
+ self.p = LpPerturbationModels.get_p(p)
self.center = center
self.radius = radius
@abstractmethod
- def project(self, x: torch.Tensor) -> torch.Tensor: ...
+ def project(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Project onto the Lp constraint.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor.
+
+ Returns
+ -------
+ torch.Tensor
+ Tensor projected onto the Lp constraint.
+ """
+ ...
def __call__(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
+ """
+ Project the samples onto the Lp constraint.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor.
+
+ Returns
+ -------
+ torch.Tensor
+ Tensor projected onto the Lp constraint.
+ """
x = x + self.center
with torch.no_grad():
norm = torch.linalg.norm(x.flatten(start_dim=1), ord=self.p, dim=1)
to_normalize = (norm > self.radius).view(-1, 1)
proj_delta = self.project(x).flatten(start_dim=1)
delta = torch.where(to_normalize, proj_delta, x.flatten(start_dim=1))
- delta = delta.view(x.shape)
- return delta
-
- def __repr__(self) -> str:
- return f"L{self.p} constraint with {'nonzero' if self.center!=0 else 'zero'} center with radius {self.radius}."
+ return delta.view(x.shape)
class L2Constraint(LpConstraint):
- def __init__(self, radius=0, center=0):
- super().__init__(radius=radius, center=center, p=2)
+ """L2 constraint."""
+
+ def __init__(self, radius: float = 0.0, center: float = 0.0) -> None:
+ """
+ Create L2 constraint.
+
+ Parameters
+ ----------
+ radius : float, optional
+ Radius of the constraint, by default 0.0.
+ center : float, optional
+ Center of the constraint, by default 0.0.
+ """
+ super().__init__(radius=radius, center=center, p=LpPerturbationModels.L2)
+
+ def project(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Project onto the L2 constraint.
- def project(self, x):
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor.
+
+ Returns
+ -------
+ torch.Tensor
+ Tensor projected onto the L2 constraint.
+ """
flat_x = x.flatten(start_dim=1)
diff_norm = flat_x.norm(p=2, dim=1, keepdim=True).clamp_(min=1e-12)
flat_x = torch.where(diff_norm <= 1, flat_x, flat_x / diff_norm) * self.radius
- x = flat_x.reshape(x.shape)
- return x
+ return flat_x.reshape(x.shape)
class LInfConstraint(LpConstraint):
- def __init__(self, radius=0, center=0):
- super().__init__(radius=radius, center=center, p=float("inf"))
+ """Linf constraint."""
- def project(self, x):
- x = x.clamp(min=-self.radius, max=self.radius)
- return x
+ def __init__(self, radius: float = 0.0, center: float = 0.0) -> None:
+ """
+ Create Linf constraint.
+
+ Parameters
+ ----------
+ radius : float, optional
+ Radius of the constraint, by default 0.0.
+ center : float, optional
+ Center of the constraint, by default 0.0.
+ """
+ super().__init__(radius=radius, center=center, p=LpPerturbationModels.LINF)
+
+ def project(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Project onto the Linf constraint.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor.
+
+ Returns
+ -------
+ torch.Tensor
+ Tensor projected onto the Linf constraint.
+ """
+ return x.clamp(min=-self.radius, max=self.radius)
class L1Constraint(LpConstraint):
- def __init__(self, radius=0, center=0) -> None:
+ """L1 constraint."""
+
+ def __init__(self, radius: float = 0.0, center: float = 0.0) -> None:
+ """
+ Create L1 constraint.
+
+ Parameters
+ ----------
+ radius : float, optional
+ Radius of the constraint, by default 0.0.
+ center : float, optional
+ Center of the constraint, by default 0.0.
+ """
super().__init__(radius=radius, center=center, p=1)
- def project(self, x):
+ def project(self, x: torch.Tensor) -> torch.Tensor:
"""
Compute Euclidean projection onto the L1 ball for a batch.
+
Source: https://gist.github.com/tonyduan/1329998205d88c566588e57e3e2c0c55
min ||x - u||_2 s.t. ||u||_1 <= eps
@@ -75,16 +223,13 @@ def project(self, x):
Parameters
----------
- x: (batch_size, *) torch array
- batch of arbitrary-size tensors to project, possibly on GPU
-
- eps: float
- radius of l-1 ball to project onto
+ x : torch.Tensor
+ Input tensor.
Returns
-------
- u: (batch_size, *) torch array
- batch of projected tensors, reshaped to match the original
+ torch.Tensor
+ Projected tensor.
Notes
-----
@@ -106,15 +251,42 @@ def project(self, x):
theta = (cumsum[torch.arange(x.shape[0]), rho.cpu() - 1] - self.radius) / rho
proj = (torch.abs(x) - theta.unsqueeze(1)).clamp(min=0)
x = mask * x + (1 - mask) * proj * torch.sign(x)
- x = x.view(original_shape)
- return x
+ return x.view(original_shape)
class L0Constraint(LpConstraint):
- def __init__(self, radius=0, center=0):
+ """L0 constraint."""
+
+ def __init__(self, radius: float = 0.0, center: float = 0.0) -> None:
+ """
+ Create L0 constraint.
+
+ Parameters
+ ----------
+ radius : float, optional
+ Radius of the constraint, by default 0.0.
+ center : float, optional
+ Center of the constraint, by default 0.0.
+ """
super().__init__(radius=radius, center=center, p=0)
- def project(self, x):
+ def project(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Project the samples onto the L0 constraint.
+
+ Returns the sample with the top-k components preserved,
+ and the rest set to zero.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples.
+
+ Returns
+ -------
+ torch.Tensor
+ Samples projected onto L0 constraint.
+ """
flat_x = x.flatten(start_dim=1)
positions, topk = torch.topk(flat_x, k=self.radius)
return torch.zeros_like(flat_x).scatter_(positions, topk).reshape(x.shape)
diff --git a/src/secmlt/optimization/gradient_processing.py b/src/secmlt/optimization/gradient_processing.py
index 6875400..bde8e1e 100644
--- a/src/secmlt/optimization/gradient_processing.py
+++ b/src/secmlt/optimization/gradient_processing.py
@@ -1,30 +1,87 @@
+"""Processing functions for gradients."""
+
+from abc import ABC, abstractmethod
+
import torch.linalg
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
from torch.nn.functional import normalize
-from secmlt.adv.evasion.perturbation_models import PerturbationModels
+class GradientProcessing(ABC):
+ """Gradient processing base class."""
+
+ @abstractmethod
+ def __call__(self, grad: torch.Tensor) -> torch.Tensor:
+ """
+ Process the gradient with the given transformation.
+
+ Parameters
+ ----------
+ grad : torch.Tensor
+ Input gradients.
-class GradientProcessing:
- def __call__(self, grad: torch.Tensor) -> torch.Tensor: ...
+ Returns
+ -------
+ torch.Tensor
+ The processed gradients.
+ """
+ ...
class LinearProjectionGradientProcessing(GradientProcessing):
- def __init__(self, perturbation_model: str = PerturbationModels.L2):
+ """Linear projection of the gradient onto Lp balls."""
+
+ def __init__(self, perturbation_model: str = LpPerturbationModels.L2) -> None:
+ """
+ Create linear projection for the gradient.
+
+ Parameters
+ ----------
+ perturbation_model : str, optional
+ Perturbation model for the Lp ball, by default LpPerturbationModels.L2.
+
+ Raises
+ ------
+ ValueError
+ Raises ValueError if the perturbation model is not implemented.
+ Available, l1, l2, linf.
+ """
perturbations_models = {
- PerturbationModels.L1: 1,
- PerturbationModels.L2: 2,
- PerturbationModels.LINF: float("inf"),
+ LpPerturbationModels.L1: 1,
+ LpPerturbationModels.L2: 2,
+ LpPerturbationModels.LINF: float("inf"),
}
if perturbation_model not in perturbations_models:
- raise ValueError(
- f"{perturbation_model} not included in normalizers. Available: {perturbations_models.values()}"
- )
+ msg = f"{perturbation_model} not available. \
+ Use one of: {perturbations_models.values()}"
+ raise ValueError(msg)
self.p = perturbations_models[perturbation_model]
def __call__(self, grad: torch.Tensor) -> torch.Tensor:
- if self.p == 2:
- grad = normalize(grad.data, p=self.p, dim=0)
- return grad
+ """
+ Process gradient with linear projection onto the Lp ball.
+
+ Sets the direction by maximizing the scalar product with the
+ gradient over the Lp ball.
+
+ Parameters
+ ----------
+ grad : torch.Tensor
+ Input gradients.
+
+ Returns
+ -------
+ torch.Tensor
+ The gradient linearly projected onto the Lp ball.
+
+ Raises
+ ------
+ NotImplementedError
+ Raises NotImplementedError if the norm is not in 2, inf.
+ """
+ if self.p == 2: # noqa: PLR2004
+ return normalize(grad.data, p=self.p, dim=0)
if self.p == float("inf"):
return torch.sign(grad)
- raise NotImplementedError("Only L2 and LInf norms implemented now")
+ msg = "Only L2 and LInf norms implemented now"
+ raise NotImplementedError(msg)
diff --git a/src/secmlt/optimization/initializer.py b/src/secmlt/optimization/initializer.py
index b37114f..471d52a 100644
--- a/src/secmlt/optimization/initializer.py
+++ b/src/secmlt/optimization/initializer.py
@@ -1,20 +1,64 @@
-from secmlt.optimization.random_perturb import RandomPerturb
-import torch
+"""Initializers for the attacks."""
-from secmlt.adv.evasion.perturbation_models import PerturbationModels
+import torch
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
+from secmlt.optimization.random_perturb import RandomPerturb
class Initializer:
+ """Initialization for the perturbation delta."""
+
def __call__(self, x: torch.Tensor) -> torch.Tensor:
- init = torch.zeros_like(x)
- return init
+ """
+ Get initialization for the perturbation.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples.
+
+ Returns
+ -------
+ torch.Tensor
+ Initialized perturbation.
+ """
+ return torch.zeros_like(x)
class RandomLpInitializer(Initializer):
- def __init__(self, radius: torch.Tensor, perturbation_model: PerturbationModels):
+ """Random perturbation initialization in Lp ball."""
+
+ def __init__(
+ self,
+ radius: torch.Tensor,
+ perturbation_model: LpPerturbationModels,
+ ) -> None:
+ """
+ Create random perturbation initializer.
+
+ Parameters
+ ----------
+ radius : torch.Tensor
+ Radius of the Lp ball for the constraint.
+ perturbation_model : LpPerturbationModels
+ Perturbation model for the constraint.
+ """
self.radius = radius
self.perturbation_model = perturbation_model
self.initializer = RandomPerturb(p=self.perturbation_model, epsilon=self.radius)
def __call__(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Get random perturbations.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples.
+
+ Returns
+ -------
+ torch.Tensor
+ Initialized random perturbations.
+ """
return self.initializer(x)
diff --git a/src/secmlt/optimization/optimizer_factory.py b/src/secmlt/optimization/optimizer_factory.py
index c6ccb44..f67a360 100644
--- a/src/secmlt/optimization/optimizer_factory.py
+++ b/src/secmlt/optimization/optimizer_factory.py
@@ -1,28 +1,88 @@
+"""Optimizer creation tools."""
+
import functools
+from typing import ClassVar
-from torch.optim import Adam, SGD
+import torch
+from torch.optim import SGD, Adam
ADAM = "adam"
StochasticGD = "sgd"
class OptimizerFactory:
- OPTIMIZERS = {ADAM: Adam, StochasticGD: SGD}
+ """Creator class for optimizers."""
+
+ OPTIMIZERS: ClassVar[dict[str, torch.optim.Optimizer]] = {
+ ADAM: Adam,
+ StochasticGD: SGD,
+ }
@staticmethod
- def create_from_name(optimizer_name, lr: float, **kwargs):
+ def create_from_name(
+ optimizer_name: str,
+ lr: float,
+ **kwargs,
+ ) -> functools.partial[Adam] | functools.partial[SGD]:
+ """
+ Create an optimizer.
+
+ Parameters
+ ----------
+ optimizer_name : str
+ One of the available optimizer names. Available: `adam`, `sgd`.
+ lr : float
+ Learning rate.
+
+ Returns
+ -------
+ functools.partial[Adam] | functools.partial[SGD]
+ The created optimizer.
+
+ Raises
+ ------
+ ValueError
+ Raises ValueError when the requested optimizer is not in the list
+ of implemented optimizers.
+ """
if optimizer_name == ADAM:
return OptimizerFactory.create_adam(lr)
if optimizer_name == StochasticGD:
return OptimizerFactory.create_sgd(lr)
- raise ValueError(
- f"No optimizer known as {optimizer_name}, or not implemented yet. Available: {list(OptimizerFactory.OPTIMIZERS.keys())} "
- )
+ msg = f"Optimizer not found. Use one of: \
+ {list(OptimizerFactory.OPTIMIZERS.keys())}"
+ raise ValueError(msg)
@staticmethod
- def create_adam(lr: float):
+ def create_adam(lr: float) -> functools.partial[Adam]:
+ """
+ Create the Adam optimizer.
+
+ Parameters
+ ----------
+ lr : float
+ Learning rate.
+
+ Returns
+ -------
+ functools.partial[Adam]
+ Adam optimizer.
+ """
return functools.partial(Adam, lr=lr)
@staticmethod
- def create_sgd(lr: float):
+ def create_sgd(lr: float) -> functools.partial[SGD]:
+ """
+ Create the SGD optimizer.
+
+ Parameters
+ ----------
+ lr : float
+ Learning rate.
+
+ Returns
+ -------
+ functools.partial[SGD]
+ SGD optimizer.
+ """
return functools.partial(SGD, lr=lr)
diff --git a/src/secmlt/optimization/random_perturb.py b/src/secmlt/optimization/random_perturb.py
index 83ae453..32c1357 100644
--- a/src/secmlt/optimization/random_perturb.py
+++ b/src/secmlt/optimization/random_perturb.py
@@ -1,87 +1,200 @@
+"""Random pertubations in Lp balls."""
+
from abc import ABC, abstractmethod
-from secmlt.adv.evasion.perturbation_models import PerturbationModels
+
+import torch
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
from secmlt.optimization.constraints import (
L0Constraint,
L1Constraint,
L2Constraint,
LInfConstraint,
+ LpConstraint,
)
-import torch
from torch.distributions.laplace import Laplace
class RandomPerturbBase(ABC):
- def __init__(self, epsilon):
+ """Class implementing the random perturbations in Lp balls."""
+
+ def __init__(self, epsilon: float) -> None:
+ """
+ Create random perturbation object.
+
+ Parameters
+ ----------
+ epsilon : float
+ Constraint radius.
+ """
self.epsilon = epsilon
- def __call__(self, x):
+ def __call__(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Get the perturbations for the given samples.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples to perturb.
+
+ Returns
+ -------
+ torch.Tensor
+ Perturbations (to apply) to the given samples.
+ """
perturbations = self.get_perturb(x)
- perturbations = self.constraint(
- radius=self.epsilon, center=torch.zeros_like(perturbations)
+ return self._constraint(
+ radius=self.epsilon,
+ center=torch.zeros_like(perturbations),
).project(perturbations)
- return perturbations
@abstractmethod
- def get_perturb(self, x): ...
+ def get_perturb(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Generate random perturbation for the Lp norm.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples to perturb.
+ """
+ ...
@abstractmethod
- def constraint(self, x): ...
+ def _constraint(self) -> LpConstraint:
+ ...
class RandomPerturbLinf(RandomPerturbBase):
- def get_perturb(self, x):
- x = torch.randn_like(x)
- return x
+ """Random Perturbations for Linf norm."""
+
+ def get_perturb(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Generate random perturbation for the Linf norm.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples to perturb.
+
+ Returns
+ -------
+ torch.Tensor
+ Perturbed samples.
+ """
+ return torch.randn_like(x)
@property
- def constraint(self):
+ def _constraint(self) -> type[LInfConstraint]:
return LInfConstraint
class RandomPerturbL1(RandomPerturbBase):
- def __init__(self, epsilon):
- super().__init__(epsilon)
-
- def get_perturb(self, x):
- s = Laplace(0, 1)
+ """Random Perturbations for L1 norm."""
+
+ def get_perturb(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Generate random perturbation for the L1 norm.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples to perturb.
+
+ Returns
+ -------
+ torch.Tensor
+ Perturbed samples.
+ """
+ s = Laplace(loc=0, scale=1)
return s.sample(x.shape)
@property
- def constraint(self):
+ def _constraint(self) -> type[L1Constraint]:
return L1Constraint
class RandomPerturbL2(RandomPerturbBase):
- def get_perturb(self, x):
- perturbations = torch.randn_like(x)
- return perturbations
+ """Random Perturbations for L2 norm."""
+
+ def get_perturb(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Generate random perturbation for the L2 norm.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples to perturb.
+
+ Returns
+ -------
+ torch.Tensor
+ Perturbed samples.
+ """
+ return torch.randn_like(x)
@property
- def constraint(self):
+ def _constraint(self) -> type[L2Constraint]:
return L2Constraint
class RandomPerturbL0(RandomPerturbBase):
- def get_perturb(self, x):
+ """Random Perturbations for L0 norm."""
+
+ def get_perturb(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Generate random perturbation for the L0 norm.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input samples to perturb.
+
+ Returns
+ -------
+ torch.Tensor
+ Perturbed samples.
+ """
perturbations = torch.randn_like(x)
return perturbations.sign()
@property
- def constraint(self):
+ def _constraint(self) -> type[L0Constraint]:
return L0Constraint
class RandomPerturb:
- def __new__(cls, p, epsilon) -> RandomPerturbBase:
+ """Random perturbation creator."""
+
+ def __new__(cls, p: str, epsilon: float) -> RandomPerturbBase:
+ """
+ Creator for random perturbation in Lp norms.
+
+ Parameters
+ ----------
+ p : str
+ p-norm used for the random perturbation shape.
+ epsilon : float
+ Radius of the random perturbation constraint.
+
+ Returns
+ -------
+ RandomPerturbBase
+ Random perturbation object.
+
+ Raises
+ ------
+ ValueError
+ Raises ValueError if the norm is not in 0, 1, 2, inf.
+ """
random_inits = {
- PerturbationModels.L0: RandomPerturbL0,
- PerturbationModels.L1: RandomPerturbL1,
- PerturbationModels.L2: RandomPerturbL2,
- PerturbationModels.LINF: RandomPerturbLinf,
+ LpPerturbationModels.L0: RandomPerturbL0,
+ LpPerturbationModels.L1: RandomPerturbL1,
+ LpPerturbationModels.L2: RandomPerturbL2,
+ LpPerturbationModels.LINF: RandomPerturbLinf,
}
- selected = random_inits.get(p, None)
+ selected = random_inits.get(p)
if selected is not None:
return selected(epsilon=epsilon)
- raise ValueError(
- "Random Perturbation not implemented for this perturbation model."
- )
+ msg = "Perturbation model not available."
+ raise ValueError(msg)
diff --git a/src/secmlt/metrics/test/__init__.py b/src/secmlt/tests/__init__.py
similarity index 100%
rename from src/secmlt/metrics/test/__init__.py
rename to src/secmlt/tests/__init__.py
diff --git a/src/secmlt/tests/fixtures.py b/src/secmlt/tests/fixtures.py
new file mode 100644
index 0000000..4de64bb
--- /dev/null
+++ b/src/secmlt/tests/fixtures.py
@@ -0,0 +1,109 @@
+"""Fixtures used for testing."""
+
+import pytest
+import torch
+from secmlt.models.pytorch.base_pytorch_nn import BasePytorchClassifier
+from secmlt.tests.mocks import MockModel
+from torch.utils.data import DataLoader, TensorDataset
+
+
+@pytest.fixture()
+def data_loader() -> DataLoader[tuple[torch.Tensor]]:
+ """
+ Create fake data loader.
+
+ Returns
+ -------
+ DataLoader[tuple[torch.Tensor]]
+ A loader with random samples and labels.
+ """
+ # Create a dummy dataset loader for testing
+ data = torch.randn(100, 3, 32, 32).clamp(0, 1)
+ labels = torch.randint(0, 10, (100,))
+ dataset = TensorDataset(data, labels)
+ return DataLoader(dataset, batch_size=10)
+
+
+@pytest.fixture()
+def adv_loaders() -> list[DataLoader[tuple[torch.Tensor, ...]]]:
+ """
+ Create fake adversarial loaders.
+
+ Returns
+ -------
+ list[DataLoader[Tuple[torch.Tensor, ...]]]
+ A list of multiple loaders (with same ordered labels).
+ """
+ # Create a list of dummy adversarial example loaders for testing
+ loaders = []
+ adv_labels = torch.randint(0, 10, (100,))
+ for _ in range(3):
+ adv_data = torch.randn(100, 3, 32, 32)
+ adv_dataset = TensorDataset(adv_data, adv_labels)
+ loaders.append(DataLoader(adv_dataset, batch_size=10))
+ return loaders
+
+
+@pytest.fixture()
+def model() -> torch.nn.Module:
+ """
+ Create fake model.
+
+ Returns
+ -------
+ torch.nn.Module
+ Fake model.
+ """
+ return BasePytorchClassifier(model=MockModel())
+
+
+@pytest.fixture()
+def data() -> torch.Tensor:
+ """
+ Get random samples.
+
+ Returns
+ -------
+ torch.Tensor
+ A fake tensor with samples.
+ """
+ return torch.randn(10, 3, 32, 32).clamp(0.0, 1.0)
+
+
+@pytest.fixture()
+def labels() -> torch.Tensor:
+ """
+ Get random labels.
+
+ Returns
+ -------
+ torch.Tensor
+ A fake tensor with labels.
+ """
+ return torch.randint(0, 9, 10)
+
+
+@pytest.fixture()
+def loss_values() -> torch.Tensor:
+ """
+ Get random model outputs.
+
+ Returns
+ -------
+ torch.Tensor
+ A fake tensor with model outputs.
+ """
+ return torch.randn(10)
+
+
+@pytest.fixture()
+def output_values() -> torch.Tensor:
+ """
+ Get random model outputs.
+
+ Returns
+ -------
+ torch.Tensor
+ A fake tensor with model outputs.
+ """
+ return torch.randn(10, 10)
diff --git a/src/secmlt/tests/mocks.py b/src/secmlt/tests/mocks.py
new file mode 100644
index 0000000..90c6498
--- /dev/null
+++ b/src/secmlt/tests/mocks.py
@@ -0,0 +1,55 @@
+"""Mock classes for testing."""
+
+from collections.abc import Iterator
+
+import torch
+
+
+class MockLayer(torch.autograd.Function):
+ """Fake layer that returns the input."""
+
+ @staticmethod
+ def forward(ctx, inputs: torch.Tensor) -> torch.Tensor: # noqa: ANN001
+ """Fake forward, returns 10 scores."""
+ ctx.save_for_backward(inputs)
+ return torch.randn(inputs.size(0), 10)
+
+ @staticmethod
+ def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor: # noqa: ANN001
+ """Fake backward, returns inputs."""
+ (inputs,) = ctx.saved_tensors
+ return inputs
+
+
+class MockModel(torch.nn.Module):
+ """Mock class for torch model."""
+
+ @staticmethod
+ def parameters() -> Iterator[torch.Tensor]:
+ """Return fake parameters."""
+ params = torch.rand(10, 10)
+ return iter(
+ [
+ params,
+ ],
+ )
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Return random outputs for classification and add fake gradients to x."""
+ # Mock output shape (batch_size, 10)
+ fake_layer = MockLayer.apply
+ return fake_layer(x)
+
+ def decision_function(self, *args, **kwargs) -> torch.Tensor:
+ """Return random outputs for classification and add fake gradients to x."""
+ return self.forward(*args, **kwargs)
+
+
+class MockLoss(torch.nn.Module):
+ """Fake loss function."""
+
+ def forward(*args) -> torch.Tensor:
+ """Override forward."""
+ x = torch.rand(10)
+ x.backward = lambda: x
+ return x
diff --git a/src/secmlt/tests/test_aggregators.py b/src/secmlt/tests/test_aggregators.py
new file mode 100644
index 0000000..26e9914
--- /dev/null
+++ b/src/secmlt/tests/test_aggregators.py
@@ -0,0 +1,32 @@
+import torch
+from secmlt.adv.evasion.aggregators.ensemble import (
+ FixedEpsilonEnsemble,
+ MinDistanceEnsemble,
+)
+
+
+def test_min_distance_ensemble(model, data_loader, adv_loaders) -> None:
+ ensemble = MinDistanceEnsemble("l2")
+ result_loader = ensemble(model, data_loader, adv_loaders)
+ for batch in result_loader:
+ assert batch[0].shape == (
+ 10,
+ 3,
+ 32,
+ 32,
+ ) # Expected shape of adversarial examples
+ assert batch[1].shape == (10,) # Expected shape of original labels
+
+
+def test_fixed_epsilon_ensemble(model, data_loader, adv_loaders) -> None:
+ loss_fn = torch.nn.CrossEntropyLoss()
+ ensemble = FixedEpsilonEnsemble(loss_fn)
+ result_loader = ensemble(model, data_loader, adv_loaders)
+ for batch in result_loader:
+ assert batch[0].shape == (
+ 10,
+ 3,
+ 32,
+ 32,
+ ) # Expected shape of adversarial examples
+ assert batch[1].shape == (10,) # Expected shape of original labels
diff --git a/src/secmlt/tests/test_attacks.py b/src/secmlt/tests/test_attacks.py
new file mode 100644
index 0000000..6a0e87d
--- /dev/null
+++ b/src/secmlt/tests/test_attacks.py
@@ -0,0 +1,63 @@
+import pytest
+from secmlt.adv.evasion.base_evasion_attack import BaseEvasionAttack
+from secmlt.adv.evasion.foolbox_attacks.foolbox_pgd import PGDFoolbox
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
+from secmlt.adv.evasion.pgd import PGD, PGDNative
+from torch.utils.data import DataLoader
+
+
+@pytest.mark.parametrize(
+ "random_start",
+ [True, False],
+)
+@pytest.mark.parametrize(
+ "y_target",
+ [None, 1],
+)
+@pytest.mark.parametrize(
+ (
+ "backend",
+ "perturbation_models",
+ ),
+ [
+ (
+ "foolbox",
+ PGDFoolbox.get_perturbation_models(),
+ ),
+ (
+ "native",
+ PGDNative.get_perturbation_models(),
+ ),
+ ],
+)
+def test_pgd_attack(
+ backend,
+ perturbation_models,
+ random_start,
+ y_target,
+ model,
+ data_loader,
+) -> BaseEvasionAttack:
+ for perturbation_model in LpPerturbationModels.pert_models:
+ if perturbation_model in perturbation_models:
+ attack = PGD(
+ perturbation_model=perturbation_model,
+ epsilon=0.5,
+ num_steps=10,
+ step_size=0.1,
+ random_start=random_start,
+ y_target=y_target,
+ backend=backend,
+ )
+ assert isinstance(attack(model, data_loader), DataLoader)
+ else:
+ with pytest.raises(NotImplementedError):
+ attack = PGD(
+ perturbation_model=perturbation_model,
+ epsilon=0.5,
+ num_steps=10,
+ step_size=0.1,
+ random_start=random_start,
+ y_target=y_target,
+ backend=backend,
+ )
diff --git a/src/secmlt/tests/test_constants.py b/src/secmlt/tests/test_constants.py
new file mode 100644
index 0000000..9009736
--- /dev/null
+++ b/src/secmlt/tests/test_constants.py
@@ -0,0 +1,16 @@
+from secmlt.adv.backends import Backends
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
+
+
+def test_backends() -> None:
+ assert hasattr(Backends, "FOOLBOX")
+ assert hasattr(Backends, "NATIVE")
+ assert Backends.FOOLBOX == "foolbox"
+ assert Backends.NATIVE == "native"
+
+
+def test_perturbation_models() -> None:
+ assert hasattr(LpPerturbationModels, "L0")
+ assert hasattr(LpPerturbationModels, "L1")
+ assert hasattr(LpPerturbationModels, "L2")
+ assert hasattr(LpPerturbationModels, "LINF")
diff --git a/src/secmlt/tests/test_metrics.py b/src/secmlt/tests/test_metrics.py
new file mode 100644
index 0000000..411888a
--- /dev/null
+++ b/src/secmlt/tests/test_metrics.py
@@ -0,0 +1,31 @@
+import torch
+from secmlt.metrics.classification import (
+ Accuracy,
+ AccuracyEnsemble,
+ AttackSuccessRate,
+ EnsembleSuccessRate,
+)
+
+
+def test_accuracy(model, data_loader) -> None:
+ acc_metric = Accuracy()
+ acc = acc_metric(model, data_loader)
+ assert torch.is_tensor(acc)
+
+
+def test_attack_success_rate(model, adv_loaders):
+ attack_acc = AttackSuccessRate()
+ acc = attack_acc(model, adv_loaders[0])
+ assert torch.is_tensor(acc)
+
+
+def test_accuracy_ensemble(model, adv_loaders):
+ acc_ensemble = AccuracyEnsemble()
+ acc = acc_ensemble(model, adv_loaders)
+ assert torch.is_tensor(acc)
+
+
+def test_ensemble_success_rate(model, adv_loaders):
+ ensemble_acc = EnsembleSuccessRate()
+ acc = ensemble_acc(model, adv_loaders)
+ assert torch.is_tensor(acc)
diff --git a/src/secmlt/tests/test_trackers.py b/src/secmlt/tests/test_trackers.py
new file mode 100644
index 0000000..ffe961e
--- /dev/null
+++ b/src/secmlt/tests/test_trackers.py
@@ -0,0 +1,36 @@
+import pytest
+import torch
+from secmlt.trackers.image_trackers import (
+ GradientsTracker,
+ SampleTracker,
+)
+from secmlt.trackers.trackers import (
+ GradientNormTracker,
+ LossTracker,
+ PerturbationNormTracker,
+ PredictionTracker,
+ ScoresTracker,
+)
+
+NUM_STEPS = 5
+
+
+@pytest.mark.parametrize(
+ "tracker",
+ [
+ GradientsTracker(),
+ SampleTracker(),
+ GradientNormTracker(),
+ LossTracker(),
+ PerturbationNormTracker(),
+ PredictionTracker(),
+ ScoresTracker(y=0),
+ ScoresTracker(y=None),
+ ],
+)
+def test_tracker(data, loss_values, output_values, tracker) -> None:
+ for i in range(NUM_STEPS):
+ tracker.track(i, loss_values, output_values, data, data, data)
+ assert len(tracker.tracked) == NUM_STEPS
+ assert all(torch.is_tensor(x) for x in tracker.tracked)
+ assert torch.is_tensor(tracker.get_last_tracked())
diff --git a/src/secmlt/tests/test_trainer.py b/src/secmlt/tests/test_trainer.py
new file mode 100644
index 0000000..c09e9ad
--- /dev/null
+++ b/src/secmlt/tests/test_trainer.py
@@ -0,0 +1,17 @@
+import torch
+from secmlt.models.pytorch.base_pytorch_trainer import BasePyTorchTrainer
+from secmlt.tests.mocks import MockLoss
+from torch.optim import SGD
+
+
+def test_pytorch_trainer(model, data_loader) -> None:
+ pytorch_model = model._model
+ optimizer = SGD(pytorch_model.parameters(), lr=0.01)
+ criterion = MockLoss()
+
+ # Create the trainer instance
+ trainer = BasePyTorchTrainer(optimizer=optimizer, loss=criterion)
+
+ # Train the model
+ trained_model = trainer.train(pytorch_model, data_loader)
+ assert isinstance(trained_model, torch.nn.Module)
diff --git a/src/secmlt/tests/test_utils.py b/src/secmlt/tests/test_utils.py
new file mode 100644
index 0000000..8969605
--- /dev/null
+++ b/src/secmlt/tests/test_utils.py
@@ -0,0 +1,23 @@
+import pytest
+import torch
+from secmlt.utils.tensor_utils import atleast_kd
+
+
+@pytest.mark.parametrize(
+ "input_tensor, desired_dims, expected_shape",
+ [
+ (torch.tensor([1, 2, 3]), 2, (3, 1)),
+ (torch.tensor([[1, 2], [3, 4]]), 3, (2, 2, 1)),
+ (torch.tensor([[[1], [2]], [[3], [4]]]), 4, (2, 2, 1, 1)),
+ ],
+)
+def test_atleast_kd(input_tensor, desired_dims, expected_shape):
+ output_tensor = atleast_kd(input_tensor, desired_dims)
+ assert output_tensor.shape == expected_shape
+
+
+def test_atleast_kd_raises_error():
+ x = torch.tensor([[1, 2], [3, 4]])
+ msg = "The number of desired dimensions should be > x.dim()"
+ with pytest.raises(ValueError, match=msg):
+ atleast_kd(x, 1)
diff --git a/src/secmlt/trackers/__init__.py b/src/secmlt/trackers/__init__.py
index a51fbab..7faa612 100644
--- a/src/secmlt/trackers/__init__.py
+++ b/src/secmlt/trackers/__init__.py
@@ -1,9 +1,9 @@
-try:
- import tensorboard
-except ImportError:
- pass # tensorboard is an extra
-else:
- from .tensorboard_tracker import TensorboardTracker
+"""Module implementing trackers for adversarial attacks."""
-from .trackers import *
-from .image_trackers import *
+import importlib
+
+if importlib.util.find_spec("tensorboard", None) is not None:
+ from .tensorboard_tracker import TensorboardTracker # noqa: F401
+
+from .image_trackers import * # noqa: F403
+from .trackers import * # noqa: F403
diff --git a/src/secmlt/trackers/image_trackers.py b/src/secmlt/trackers/image_trackers.py
index 77ffa83..1fc261d 100644
--- a/src/secmlt/trackers/image_trackers.py
+++ b/src/secmlt/trackers/image_trackers.py
@@ -1,10 +1,14 @@
-from secmlt.adv.evasion.perturbation_models import PerturbationModels
-from secmlt.trackers.trackers import IMAGE, Tracker
+"""Image-specific trackers."""
+
import torch
+from secmlt.trackers.trackers import IMAGE, Tracker
class SampleTracker(Tracker):
+ """Tracker for adversarial images."""
+
def __init__(self) -> None:
+ """Create adversarial image tracker."""
super().__init__("Sample", IMAGE)
self.tracked = []
@@ -18,12 +22,33 @@ def track(
delta: torch.Tensor,
grad: torch.Tensor,
) -> None:
+ """
+ Track the adversarial examples at the current iteration as images.
+
+ Parameters
+ ----------
+ iteration : int
+ The attack iteration number.
+ loss : torch.Tensor
+ The value of the (per-sample) loss of the attack.
+ scores : torch.Tensor
+ The output scores from the model.
+ x_adv : torch.Tensor
+ The adversarial examples at the current iteration.
+ delta : torch.Tensor
+ The adversarial perturbations at the current iteration.
+ grad : torch.Tensor
+ The gradient of delta at the given iteration.
+ """
self.tracked.append(x_adv)
class GradientsTracker(Tracker):
+ """Tracker for gradient images."""
+
def __init__(self) -> None:
- super().__init__("Grad", IMAGE)
+ """Create gradients tracker."""
+ super().__init__(name="Grad", tracker_type=IMAGE)
self.tracked = []
@@ -36,4 +61,22 @@ def track(
delta: torch.Tensor,
grad: torch.Tensor,
) -> None:
+ """
+ Track the gradients at the current iteration as images.
+
+ Parameters
+ ----------
+ iteration : int
+ The attack iteration number.
+ loss : torch.Tensor
+ The value of the (per-sample) loss of the attack.
+ scores : torch.Tensor
+ The output scores from the model.
+ x_adv : torch.Tensor
+ The adversarial examples at the current iteration.
+ delta : torch.Tensor
+ The adversarial perturbations at the current iteration.
+ grad : torch.Tensor
+ The gradient of delta at the given iteration.
+ """
self.tracked.append(grad)
diff --git a/src/secmlt/trackers/tensorboard_tracker.py b/src/secmlt/trackers/tensorboard_tracker.py
index 3840257..4b73f4c 100644
--- a/src/secmlt/trackers/tensorboard_tracker.py
+++ b/src/secmlt/trackers/tensorboard_tracker.py
@@ -1,4 +1,6 @@
-from typing import List, Type
+"""Tensorboard tracking utilities."""
+
+import torch
from secmlt.trackers.trackers import (
IMAGE,
MULTI_SCALAR,
@@ -7,13 +9,24 @@
LossTracker,
Tracker,
)
-import torch
from torch.utils.tensorboard import SummaryWriter
class TensorboardTracker(Tracker):
- def __init__(self, logdir: str, trackers: List[Type[Tracker]] = None):
- super().__init__("Tensorboard")
+ """Tracker for Tensorboard. Uses other trackers as subscribers."""
+
+ def __init__(self, logdir: str, trackers: list[Tracker] | None = None) -> None:
+ """
+ Create tensorboard tracker.
+
+ Parameters
+ ----------
+ logdir : str
+ Folder to store tensorboard logs.
+ trackers : list[Tracker] | None, optional
+ List of trackers subscribed to the updates, by default None.
+ """
+ super().__init__(name="Tensorboard")
if trackers is None:
trackers = [
LossTracker(),
@@ -30,14 +43,34 @@ def track(
x_adv: torch.tensor,
delta: torch.Tensor,
grad: torch.Tensor,
- ):
+ ) -> None:
+ """
+ Update all subscribed trackers.
+
+ Parameters
+ ----------
+ iteration : int
+ The attack iteration number.
+ loss : torch.Tensor
+ The value of the (per-sample) loss of the attack.
+ scores : torch.Tensor
+ The output scores from the model.
+ x_adv : torch.Tensor
+ The adversarial examples at the current iteration.
+ delta : torch.Tensor
+ The adversarial perturbations at the current iteration.
+ grad : torch.Tensor
+ The gradient of delta at the given iteration.
+ """
for tracker in self.trackers:
tracker.track(iteration, loss, scores, x_adv, delta, grad)
tracked_value = tracker.get_last_tracked()
for i, sample in enumerate(tracked_value):
if tracker.tracked_type == SCALAR:
self.writer.add_scalar(
- f"Sample #{i}/{tracker.name}", sample, global_step=iteration
+ f"Sample #{i}/{tracker.name}",
+ sample,
+ global_step=iteration,
)
elif tracker.tracked_type == MULTI_SCALAR:
self.writer.add_scalars(
@@ -50,10 +83,13 @@ def track(
)
elif tracker.tracked_type == IMAGE:
self.writer.add_image(
- f"Sample #{i}/{tracker.name}", sample, global_step=iteration
+ f"Sample #{i}/{tracker.name}",
+ sample,
+ global_step=iteration,
)
- def get_last_tracked(self):
+ def get_last_tracked(self) -> NotImplementedError:
+ """Not implemented for this tracker."""
return NotImplementedError(
- "Last tracked value is not available for this tracker."
+ "Last tracked value is not available for this tracker.",
)
diff --git a/src/secmlt/trackers/trackers.py b/src/secmlt/trackers/trackers.py
index d6accb1..e279090 100644
--- a/src/secmlt/trackers/trackers.py
+++ b/src/secmlt/trackers/trackers.py
@@ -1,8 +1,9 @@
-from abc import ABC
-from typing import Union
-from secmlt.adv.evasion.perturbation_models import PerturbationModels
+"""Trackers for attack metrics."""
+
+from abc import ABC, abstractmethod
import torch
+from secmlt.adv.evasion.perturbation_models import LpPerturbationModels
SCALAR = "scalar"
IMAGE = "image"
@@ -10,11 +11,25 @@
class Tracker(ABC):
- def __init__(self, name, tracker_type=SCALAR) -> None:
+ """Class implementing the trackers for the attacks."""
+
+ def __init__(self, name: str, tracker_type: str = SCALAR) -> None:
+ """
+ Create tracker.
+
+ Parameters
+ ----------
+ name : str
+ Tracker name.
+ tracker_type : str, optional
+ Type of tracker (mostly used for tensorboard functionalities),
+ by default SCALAR. Available: SCALAR, IMAGE, MULTI_SCALAR.
+ """
self.name = name
self.tracked = None
self.tracked_type = tracker_type
+ @abstractmethod
def track(
self,
iteration: int,
@@ -23,19 +38,56 @@ def track(
x_adv: torch.tensor,
delta: torch.Tensor,
grad: torch.Tensor,
- ) -> None: ...
+ ) -> None:
+ """
+ Track the history of given attack observable parameters.
+
+ Parameters
+ ----------
+ iteration : int
+ The attack iteration number.
+ loss : torch.Tensor
+ The value of the (per-sample) loss of the attack.
+ scores : torch.Tensor
+ The output scores from the model.
+ x_adv : torch.Tensor
+ The adversarial examples at the current iteration.
+ delta : torch.Tensor
+ The adversarial perturbations at the current iteration.
+ grad : torch.Tensor
+ The gradient of delta at the given iteration.
+ """
def get(self) -> torch.Tensor:
+ """
+ Get the current tracking history.
+
+ Returns
+ -------
+ torch.Tensor
+ History of tracked parameters.
+ """
return torch.stack(self.tracked, -1)
- def get_last_tracked(self) -> Union[None, torch.Tensor]:
+ def get_last_tracked(self) -> None | torch.Tensor:
+ """
+ Get last element tracked.
+
+ Returns
+ -------
+ None | torch.Tensor
+ Returns the last tracked element if anything was tracked.
+ """
if self.tracked is not None:
return self.get()[..., -1] # return last tracked value
return None
class LossTracker(Tracker):
+ """Tracker for attack loss."""
+
def __init__(self) -> None:
+ """Create loss tracker."""
super().__init__("Loss")
self.tracked = []
@@ -48,11 +100,32 @@ def track(
delta: torch.Tensor,
grad: torch.Tensor,
) -> None:
+ """
+ Track the sample-wise loss of the attack at the current iteration.
+
+ Parameters
+ ----------
+ iteration : int
+ The attack iteration number.
+ loss : torch.Tensor
+ The value of the (per-sample) loss of the attack.
+ scores : torch.Tensor
+ The output scores from the model.
+ x_adv : torch.Tensor
+ The adversarial examples at the current iteration.
+ delta : torch.Tensor
+ The adversarial perturbations at the current iteration.
+ grad : torch.Tensor
+ The gradient of delta at the given iteration.
+ """
self.tracked.append(loss.data)
class ScoresTracker(Tracker):
- def __init__(self, y: Union[int, torch.Tensor] = None) -> None:
+ """Tracker for model scores."""
+
+ def __init__(self, y: int | torch.Tensor = None) -> None:
+ """Create scores tracker."""
if y is None:
super().__init__("Scores", MULTI_SCALAR)
else:
@@ -69,6 +142,24 @@ def track(
delta: torch.Tensor,
grad: torch.Tensor,
) -> None:
+ """
+ Track the sample-wise model scores at the current iteration.
+
+ Parameters
+ ----------
+ iteration : int
+ The attack iteration number.
+ loss : torch.Tensor
+ The value of the (per-sample) loss of the attack.
+ scores : torch.Tensor
+ The output scores from the model.
+ x_adv : torch.Tensor
+ The adversarial examples at the current iteration.
+ delta : torch.Tensor
+ The adversarial perturbations at the current iteration.
+ grad : torch.Tensor
+ The gradient of delta at the given iteration.
+ """
if self.y is None:
self.tracked.append(scores.data)
else:
@@ -76,7 +167,10 @@ def track(
class PredictionTracker(Tracker):
+ """Tracker for model predictions."""
+
def __init__(self) -> None:
+ """Create prediction tracker."""
super().__init__("Prediction")
self.tracked = []
@@ -89,13 +183,41 @@ def track(
delta: torch.Tensor,
grad: torch.Tensor,
) -> None:
+ """
+ Track the sample-wise model predictions at the current iteration.
+
+ Parameters
+ ----------
+ iteration : int
+ The attack iteration number.
+ loss : torch.Tensor
+ The value of the (per-sample) loss of the attack.
+ scores : torch.Tensor
+ The output scores from the model.
+ x_adv : torch.Tensor
+ The adversarial examples at the current iteration.
+ delta : torch.Tensor
+ The adversarial perturbations at the current iteration.
+ grad : torch.Tensor
+ The gradient of delta at the given iteration.
+ """
self.tracked.append(scores.data.argmax(dim=1))
class PerturbationNormTracker(Tracker):
- def __init__(self, p: PerturbationModels = PerturbationModels.L2) -> None:
+ """Tracker for perturbation norm."""
+
+ def __init__(self, p: LpPerturbationModels = LpPerturbationModels.L2) -> None:
+ """
+ Create perturbation norm tracker.
+
+ Parameters
+ ----------
+ p : LpPerturbationModels, optional
+ Perturbation model to compute the norm, by default LpPerturbationModels.L2.
+ """
super().__init__("PertNorm")
- self.p = PerturbationModels.get_p(p)
+ self.p = LpPerturbationModels.get_p(p)
self.tracked = []
def track(
@@ -107,14 +229,42 @@ def track(
delta: torch.Tensor,
grad: torch.Tensor,
) -> None:
+ """
+ Track the perturbation norm at the current iteration.
+
+ Parameters
+ ----------
+ iteration : int
+ The attack iteration number.
+ loss : torch.Tensor
+ The value of the (per-sample) loss of the attack.
+ scores : torch.Tensor
+ The output scores from the model.
+ x_adv : torch.Tensor
+ The adversarial examples at the current iteration.
+ delta : torch.Tensor
+ The adversarial perturbations at the current iteration.
+ grad : torch.Tensor
+ The gradient of delta at the given iteration.
+ """
self.tracked.append(delta.flatten(start_dim=1).norm(p=self.p, dim=-1))
class GradientNormTracker(Tracker):
- def __init__(self, p: PerturbationModels = PerturbationModels.L2) -> None:
+ """Tracker for gradients."""
+
+ def __init__(self, p: LpPerturbationModels = LpPerturbationModels.L2) -> None:
+ """
+ Create gradient norm tracker.
+
+ Parameters
+ ----------
+ p : LpPerturbationModels, optional
+ Perturbation model to compute the norm, by default LpPerturbationModels.L2.
+ """
super().__init__("GradNorm")
- self.p = PerturbationModels.get_p(p)
+ self.p = LpPerturbationModels.get_p(p)
self.tracked = []
def track(
@@ -126,5 +276,23 @@ def track(
delta: torch.Tensor,
grad: torch.Tensor,
) -> None:
+ """
+ Track the sample-wise gradient of the loss w.r.t delta.
+
+ Parameters
+ ----------
+ iteration : int
+ The attack iteration number.
+ loss : torch.Tensor
+ The value of the (per-sample) loss of the attack.
+ scores : torch.Tensor
+ The output scores from the model.
+ x_adv : torch.Tensor
+ The adversarial examples at the current iteration.
+ delta : torch.Tensor
+ The adversarial perturbations at the current iteration.
+ grad : torch.Tensor
+ The gradient of delta at the given iteration.
+ """
norm = grad.data.flatten(start_dim=1).norm(p=self.p, dim=1)
self.tracked.append(norm)
diff --git a/src/secmlt/utils/__init__.py b/src/secmlt/utils/__init__.py
new file mode 100644
index 0000000..9cce67d
--- /dev/null
+++ b/src/secmlt/utils/__init__.py
@@ -0,0 +1 @@
+"""Utilities for the use of the library."""
diff --git a/src/secmlt/utils/tensor_utils.py b/src/secmlt/utils/tensor_utils.py
index 95bf0b3..60a5389 100644
--- a/src/secmlt/utils/tensor_utils.py
+++ b/src/secmlt/utils/tensor_utils.py
@@ -1,6 +1,26 @@
+"""Basic utils for tensor handling."""
+
import torch
def atleast_kd(x: torch.Tensor, k: int) -> torch.Tensor:
+ """
+ Add dimensions to the tensor x until it reaches k dimensions.
+
+ Parameters
+ ----------
+ x : torch.Tensor
+ Input tensor.
+ k : int
+ Number of desired dimensions.
+
+ Returns
+ -------
+ torch.Tensor
+ The input tensor x but with k dimensions.
+ """
+ if k <= x.dim():
+ msg = "The number of desired dimensions should be > x.dim()"
+ raise ValueError(msg)
shape = x.shape + (1,) * (k - x.ndim)
return x.reshape(shape)