diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 8d41016a..00000000 --- a/.coveragerc +++ /dev/null @@ -1,2 +0,0 @@ -[run] -source = pyroengine \ No newline at end of file diff --git a/.flake8 b/.flake8 index 9b10d8a6..8d31f5a5 100644 --- a/.flake8 +++ b/.flake8 @@ -1,4 +1,5 @@ [flake8] max-line-length = 120 -ignore = F401, E402, E265, F403, W503, W504, F821 -exclude = venv*, .circleci, .git, docs +ignore = E203, E402, E265, F403, W503, W504, E731 +exclude = .git, venv*, docs, build +per-file-ignores = **/__init__.py:F401 diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 08b44077..366684e9 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -28,12 +28,12 @@ Steps to reproduce the behavior: ## Environment Please copy and paste the output from our -[environment collection script](https://raw.githubusercontent.com/pyronear/pyro-engine/master/scripts/collect_env.py) +[environment collection script](https://raw.githubusercontent.com/pyronear/pyro-engine/main/.github/collect_env.py) (or fill out the checklist below manually). You can get the script and run it with: ``` -wget https://raw.githubusercontent.com/pyronear/pyro-engine/master/scripts/collect_env.py +wget https://raw.githubusercontent.com/pyronear/pyro-engine/main/.github/collect_env.py # For security purposes, please check the contents of collect_env.py before running it. python collect_env.py ``` diff --git a/.github/collect_env.py b/.github/collect_env.py new file mode 100644 index 00000000..16662758 --- /dev/null +++ b/.github/collect_env.py @@ -0,0 +1,218 @@ +# Copyright (C) 2020-2022, Pyronear. + +# This program is licensed under the Apache License 2.0. +# See LICENSE or go to for full license details. + +""" +Based on https://github.com/pytorch/pytorch/blob/master/torch/utils/collect_env.py +This script outputs relevant system environment info +Run it with `python collect_env.py`. 
+""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +import locale +import re +import subprocess +import sys +from collections import namedtuple + +try: + import pyroengine + + ENGINE_AVAILABLE = True +except (ImportError, NameError, AttributeError): + ENGINE_AVAILABLE = False + +try: + import onnxruntime + + ONNX_AVAILABLE = True +except (ImportError, NameError, AttributeError): + ONNX_AVAILABLE = False + +PY3 = sys.version_info >= (3, 0) + + +# System Environment Information +SystemEnv = namedtuple( + "SystemEnv", + [ + "pyroengine_version", + "onnxruntime_version", + "os", + "python_version", + ], +) + + +def run(command): + """Returns (return-code, stdout, stderr)""" + p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + output, err = p.communicate() + rc = p.returncode + if PY3: + enc = locale.getpreferredencoding() + output = output.decode(enc) + err = err.decode(enc) + return rc, output.strip(), err.strip() + + +def run_and_read_all(run_lambda, command): + """Runs command using run_lambda; reads and returns entire output if rc is 0""" + rc, out, _ = run_lambda(command) + if rc != 0: + return None + return out + + +def run_and_parse_first_match(run_lambda, command, regex): + """Runs command using run_lambda, returns the first regex match if it exists""" + rc, out, _ = run_lambda(command) + if rc != 0: + return None + match = re.search(regex, out) + if match is None: + return None + return match.group(1) + + +def get_platform(): + if sys.platform.startswith("linux"): + return "linux" + elif sys.platform.startswith("win32"): + return "win32" + elif sys.platform.startswith("cygwin"): + return "cygwin" + elif sys.platform.startswith("darwin"): + return "darwin" + else: + return sys.platform + + +def get_mac_version(run_lambda): + return run_and_parse_first_match(run_lambda, "sw_vers -productVersion", r"(.*)") + + +def get_windows_version(run_lambda): + return 
run_and_read_all(run_lambda, "wmic os get Caption | findstr /v Caption") + + +def get_lsb_version(run_lambda): + return run_and_parse_first_match(run_lambda, "lsb_release -a", r"Description:\t(.*)") + + +def check_release_file(run_lambda): + return run_and_parse_first_match(run_lambda, "cat /etc/*-release", r'PRETTY_NAME="(.*)"') + + +def get_os(run_lambda): + platform = get_platform() + + if platform == "win32" or platform == "cygwin": + return get_windows_version(run_lambda) + + if platform == "darwin": + version = get_mac_version(run_lambda) + if version is None: + return None + return "Mac OSX {}".format(version) + + if platform == "linux": + # Ubuntu/Debian based + desc = get_lsb_version(run_lambda) + if desc is not None: + return desc + + # Try reading /etc/*-release + desc = check_release_file(run_lambda) + if desc is not None: + return desc + + return platform + + # Unknown platform + return platform + + +def get_env_info(): + run_lambda = run + + if ENGINE_AVAILABLE: + pyroengine_str = pyroengine.__version__ + else: + pyroengine_str = "N/A" + + if ONNX_AVAILABLE: + onnxruntime_str = onnxruntime.__version__ + else: + onnxruntime_str = "N/A" + + return SystemEnv( + pyroengine_version=pyroengine_str, + onnxruntime_version=onnxruntime_str, + python_version=".".join(map(str, sys.version_info[:3])), + os=get_os(run_lambda), + ) + + +env_info_fmt = """ +PyroEngine version: {pyroengine_version} +ONNX runtime version: {onnxruntime_version} + +OS: {os} + +Python version: {python_version} +""".strip() + + +def pretty_str(envinfo): + def replace_nones(dct, replacement="Could not collect"): + for key in dct.keys(): + if dct[key] is not None: + continue + dct[key] = replacement + return dct + + def replace_bools(dct, true="Yes", false="No"): + for key in dct.keys(): + if dct[key] is True: + dct[key] = true + elif dct[key] is False: + dct[key] = false + return dct + + def maybe_start_on_next_line(string): + # If `string` is multiline, prepend a \n to it. 
+ if string is not None and len(string.split("\n")) > 1: + return "\n{}\n".format(string) + return string + + mutable_dict = envinfo._asdict() + + # Replace True with Yes, False with No + mutable_dict = replace_bools(mutable_dict) + + # Replace all None objects with 'Could not collect' + mutable_dict = replace_nones(mutable_dict) + + return env_info_fmt.format(**mutable_dict) + + +def get_pretty_env_info(): + """Collects environment information for debugging purposes + + Returns: + str: environment information + """ + return pretty_str(get_env_info()) + + +def main(): + print("Collecting environment information...") + output = get_pretty_env_info() + print(output) + + +if __name__ == "__main__": + main() diff --git a/.github/validate_headers.py b/.github/validate_headers.py deleted file mode 100644 index ccd09027..00000000 --- a/.github/validate_headers.py +++ /dev/null @@ -1,67 +0,0 @@ -from datetime import datetime -from pathlib import Path - -shebang = ["#!usr/bin/python\n"] -blank_line = "\n" - -# Possible years -starting_year = 2020 -current_year = datetime.now().year - -year_options = [f"{current_year}"] + [ - f"{year}-{current_year}" for year in range(starting_year, current_year) -] -copyright_notices = [ - [f"# Copyright (C) {year_str}, Pyronear.\n"] for year_str in year_options -] -license_notice = [ - "# This program is licensed under the Apache License version 2.\n", - "# See LICENSE or go to for full license details.\n", -] - -# Define all header options -HEADERS = [ - shebang + [blank_line] + copyright_notice + [blank_line] + license_notice - for copyright_notice in copyright_notices -] + [ - copyright_notice + [blank_line] + license_notice - for copyright_notice in copyright_notices -] - - -IGNORED_FILES = ["version.py", "__init__.py"] -FOLDERS = ["pyroengine", "scripts", "server", "docs", "test"] - - -def main(): - - invalid_files = [] - - # For every python file in the repository - for folder in FOLDERS: - for source_path in ( - 
Path(__file__).parent.parent.joinpath(folder).rglob("**/*.py") - ): - if source_path.name not in IGNORED_FILES: - # Parse header - header_length = max(len(option) for option in HEADERS) - current_header = [] - with open(source_path) as f: - for idx, line in enumerate(f): - current_header.append(line) - if idx == header_length - 1: - break - # Validate it - if not any( - "".join(current_header[: min(len(option), len(current_header))]) - == "".join(option) - for option in HEADERS - ): - invalid_files.append(source_path) - if len(invalid_files) > 0: - invalid_str = "\n- " + "\n- ".join(map(str, invalid_files)) - raise AssertionError(f"Invalid header in the following files:{invalid_str}") - - -if __name__ == "__main__": - main() diff --git a/.github/workflows/builds.yml b/.github/workflows/builds.yml new file mode 100644 index 00000000..8800259d --- /dev/null +++ b/.github/workflows/builds.yml @@ -0,0 +1,34 @@ +name: builds + +on: + push: + branches: main + pull_request: + branches: main + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest] + python: [3.8] + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python }} + architecture: x64 + - name: Cache python modules + uses: actions/cache@v2 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-python-${{ matrix.python }}-${{ hashFiles('pyproject.toml') }}-build + - name: Install package + run: | + python -m pip install --upgrade pip + pip install -e . 
--upgrade + - name: Import package + run: python -c "import pyroengine; print(pyroengine.__version__)" diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 00000000..955ecb8b --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,17 @@ +name: docker + +on: + push: + branches: main + pull_request: + branches: main + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Build docker image + run: docker build . -t pyronear/pyro-engine:python3.8.1-slim + - name: Run docker container + run: docker run pyronear/pyro-engine:python3.8.1-slim python -c 'import pyroengine' diff --git a/.github/workflows/doc-deploy.yml b/.github/workflows/docs.yml similarity index 98% rename from .github/workflows/doc-deploy.yml rename to .github/workflows/docs.yml index b370ab6c..6b4b2ef1 100644 --- a/.github/workflows/doc-deploy.yml +++ b/.github/workflows/docs.yml @@ -1,7 +1,7 @@ name: doc-deploy on: push: - branches: master + branches: main jobs: docs-publish: diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml deleted file mode 100644 index 1e3115cf..00000000 --- a/.github/workflows/main.yml +++ /dev/null @@ -1,130 +0,0 @@ -name: github-runner-python-package - -on: - push: - branches: [ master, develop] - pull_request: - branches: [ master, develop ] - -jobs: - pkg-install: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.8 - uses: actions/setup-python@v1 - with: - python-version: 3.8 - architecture: x64 - - name: Cache python modules - uses: actions/cache@v2 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pkg-deps-${{ hashFiles('requirements.txt') }}-${{ hashFiles('**/*.py') }} - restore-keys: | - ${{ runner.os }}-pkg-deps-${{ hashFiles('requirements.txt') }}- - ${{ runner.os }}-pkg-deps- - ${{ runner.os }}- - - name: Install package - run: | - python -m pip install --upgrade pip - pip install -e . 
- - pkg-test: - runs-on: ubuntu-latest - needs: pkg-install - steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.8 - uses: actions/setup-python@v1 - with: - python-version: 3.8 - architecture: x64 - - name: Cache python modules - uses: actions/cache@v2 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pkg-deps-${{ hashFiles('requirements.txt') }}-${{ hashFiles('**/*.py') }} - restore-keys: | - ${{ runner.os }}-pkg-deps-${{ hashFiles('requirements.txt') }}- - ${{ runner.os }}-pkg-deps- - ${{ runner.os }}- - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -e . - pip install -r .github/workflows/requirements.txt - - - name: Run unittests - run: | - coverage run -m unittest discover test/ - coverage xml - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v1 - with: - file: ./coverage.xml - flags: unittests - fail_ci_if_error: true - - flake8-py3: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.8 - uses: actions/setup-python@v1 - with: - python-version: 3.8 - architecture: x64 - - name: Run flake8 - run: | - pip install -r .github/workflows/requirements.txt - flake8 --version - flake8 ./ - - docs-build: - runs-on: ubuntu-latest - needs: pkg-install - steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.8 - uses: actions/setup-python@v1 - with: - python-version: 3.8 - architecture: x64 - - name: Cache python modules - uses: actions/cache@v2 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pkg-deps-${{ hashFiles('requirements.txt') }}-${{ hashFiles('**/*.py') }} - restore-keys: | - ${{ runner.os }}-pkg-deps-${{ hashFiles('requirements.txt') }}- - ${{ runner.os }}-pkg-deps- - ${{ runner.os }}- - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -e . 
- pip install -r docs/requirements.txt - - - name: Build documentation - run: | - sphinx-build docs/source docs/build -a - - headers: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest] - python: [3.7] - steps: - - uses: actions/checkout@v2 - with: - persist-credentials: false - - name: Set up Python - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python }} - architecture: x64 - - name: Run unittests - run: python .github/validate_headers.py \ No newline at end of file diff --git a/.github/workflows/markdown-links.yml b/.github/workflows/markdown-links.yml index ad02fff2..c6b5bea1 100644 --- a/.github/workflows/markdown-links.yml +++ b/.github/workflows/markdown-links.yml @@ -3,7 +3,7 @@ name: Check Broken Markdown Links on: pull_request: branches: - - master, develop + - main jobs: markdown-link-check: diff --git a/.github/workflows/pi.yml b/.github/workflows/pi.yml index a934e0f3..97da8bc5 100644 --- a/.github/workflows/pi.yml +++ b/.github/workflows/pi.yml @@ -1,40 +1,36 @@ -name: pi-runner-python-package +name: tests-RPI on: push: - branches: [ master, develop] + branches: main pull_request: - branches: [ master, develop ] + branches: main jobs: - pkg-install: - runs-on: self-hosted + build: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [self-hosted] steps: - uses: actions/checkout@v2 - name: Install package run: | - python --version - pip install -e . + python -m pip install --upgrade pip + pip install -e . --upgrade - pkg-test: - runs-on: self-hosted - needs: pkg-install + pytest: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [self-hosted] + needs: build steps: - uses: actions/checkout@v2 - name: Install dependencies - run: | - pip install -e . 
- pip install -r .github/workflows/requirements.txt + run: pip install -e ".[test]" --upgrade - name: Run unittests - run: | - coverage run -m unittest discover test/ - coverage xml - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v1 - with: - file: ./coverage.xml - flags: unittests - fail_ci_if_error: true - + run: coverage run -m pytest tests/ diff --git a/.github/workflows/publish-docker-image.yml b/.github/workflows/publish-docker-image.yml deleted file mode 100644 index b3f94a2e..00000000 --- a/.github/workflows/publish-docker-image.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: Publish Docker image -on: - release: - types: [published] -jobs: - push_to_registry: - name: Push Docker image to Docker Hub - runs-on: ubuntu-latest - steps: - - name: Check out the repo - uses: actions/checkout@v2 - - name: Push to Docker Hub - uses: docker/build-push-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME}} - password: ${{ secrets.DOCKERHUB_PASSWORD}} - repository: pyronear/pyro-engine - tag_with_ref: true diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000..4f27e9c7 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,70 @@ +name: release + +on: + release: + types: [published] + +jobs: + pypi-publish: + if: "!github.event.release.prerelease" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: 3.8 + architecture: x64 + - name: Cache python modules + uses: actions/cache@v2 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-python-${{ matrix.python }}-${{ hashFiles('pyproject.toml') }}-build + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install setuptools wheel twine --upgrade + - name: Get release tag + id: release_tag + run: | + echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//} + - name: Build and publish + env: + TWINE_USERNAME: ${{ 
secrets.PYPI_USERNAME }} + TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} + VERSION: ${{ steps.release_tag.outputs.VERSION }} + run: | + BUILD_VERSION=${VERSION:1} python setup.py sdist bdist_wheel + twine check dist/* + twine upload dist/* + + pypi-check: + if: "!github.event.release.prerelease" + runs-on: ubuntu-latest + needs: pypi-publish + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: 3.8 + architecture: x64 + - name: Install package + run: | + python -m pip install --upgrade pip + pip install pyroengine + python -c "import pyroengine; print(pyroengine.__version__)" + + dockerhub-publish: + name: Push Docker image to Docker Hub + runs-on: ubuntu-latest + steps: + - name: Check out the repo + uses: actions/checkout@v2 + - name: Push to Docker Hub + uses: docker/build-push-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME}} + password: ${{ secrets.DOCKERHUB_PASSWORD}} + repository: pyronear/pyro-engine + tag_with_ref: true diff --git a/.github/workflows/requirements.txt b/.github/workflows/requirements.txt deleted file mode 100644 index 58382921..00000000 --- a/.github/workflows/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -coverage>=4.5.4 -flake8>=3.6.0 diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml new file mode 100644 index 00000000..b659dadc --- /dev/null +++ b/.github/workflows/style.yml @@ -0,0 +1,113 @@ +name: style + +on: + push: + branches: main + pull_request: + branches: main + +jobs: + flake8: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + python: [3.8] + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python }} + architecture: x64 + - name: Run flake8 + run: | + pip install flake8 + flake8 --version + flake8 + + isort: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + python: [3.8] + steps: + - uses: 
actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python }} + architecture: x64 + - name: Run isort + run: | + pip install isort + isort --version + isort . + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then exit 1; else echo "All clear"; fi + + mypy: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + python: [3.8] + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python }} + architecture: x64 + - name: Cache python modules + uses: actions/cache@v2 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-python-${{ matrix.python }}-${{ hashFiles('pyproject.toml') }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -e . --upgrade + pip install mypy + - name: Run mypy + run: | + mypy --version + mypy + + pydocstyle: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + python: [3.8] + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + architecture: x64 + - name: Run pydocstyle + run: | + pip install pydocstyle[toml] + pydocstyle --version + pydocstyle + + black: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + python: [3.8] + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + architecture: x64 + - name: Run black + run: | + pip install black + black --version + black --check --diff . 
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 00000000..1f91ed30 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,102 @@ +name: tests-GH + +on: + push: + branches: main + pull_request: + branches: main + +jobs: + pytest: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + python: [3.8] + steps: + - uses: actions/checkout@v2 + with: + persist-credentials: false + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python }} + architecture: x64 + - name: Cache python modules + uses: actions/cache@v2 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-python-${{ matrix.python }}-${{ hashFiles('pyproject.toml') }}-test + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -e ".[test]" --upgrade + - name: Run unittests + run: | + coverage run -m pytest tests/ + coverage xml + - uses: actions/upload-artifact@v2 + with: + name: coverage-main + path: ./coverage.xml + + codecov-upload: + runs-on: ubuntu-latest + needs: pytest + steps: + - uses: actions/checkout@v2 + - uses: actions/download-artifact@v2 + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1 + with: + flags: unittests + fail_ci_if_error: true + + docs: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + python: [3.8] + steps: + - uses: actions/checkout@v2 + with: + persist-credentials: false + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python }} + architecture: x64 + - name: Cache python modules + uses: actions/cache@v2 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-python-${{ matrix.python }}-${{ hashFiles('pyproject.toml') }}-docs + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -e ".[docs]" --upgrade + + - name: Build documentation + run: sphinx-build docs/source docs/build -a + + - name: Documentation sanity check 
+ run: test -e docs/build/index.html || exit + + headers: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + steps: + - uses: actions/checkout@v2 + with: + persist-credentials: false + - name: Check the headers + uses: frgfm/validate-python-headers@main + with: + license: 'Apache-2.0' + owner: 'Pyronear' + starting-year: 2020 + folders: 'pyroengine,docs,scripts,.github,src' + ignores: 'version.py,__init__.py' diff --git a/.gitignore b/.gitignore index efbef31b..c0527cb3 100644 --- a/.gitignore +++ b/.gitignore @@ -112,4 +112,7 @@ test/fixtures/videos/ pyroengine/version.py # Ignore config file -runner/data +./data +# Checkpoints +*.pth +*.onnx diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8ffc3448..d3f557da 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -7,21 +7,25 @@ Whatever the way you wish to contribute to the project, please respect the [code ## Codebase structure -- [pyroengine](https://github.com/pyronear/pyro-engine/blob/master/pyroengine) - The actual pyroengine library +- [pyroengine](https://github.com/pyronear/pyro-engine/blob/main/pyroengine) - The actual pyroengine library +- [docs](https://github.com/pyronear/pyro-engine/blob/main/docs) - Sphinx documentation building +- [tests](https://github.com/pyronear/pyro-engine/blob/main/tests) - Python unit tests +- [scripts](https://github.com/pyronear/pyro-engine/blob/main/scripts) - Scripts for Raspberry Pi +- [src](https://github.com/pyronear/pyro-engine/blob/main/src) - code running on Raspberry Pi + ## Continuous Integration This project uses the following integrations to ensure proper codebase maintenance: -- [CircleCI](https://circleci.com/) - run jobs for package build and coverage +- [Github Worklow](https://help.github.com/en/actions/configuring-and-managing-workflows/configuring-a-workflow) - run jobs for package build and coverage - [Codacy](https://www.codacy.com/) - analyzes commits for code quality - [Codecov](https://codecov.io/) - reports back coverage 
results As a contributor, you will only have to ensure coverage of your code by adding appropriate unit testing of your code. - ## Issues Use Github [issues](https://github.com/pyronear/pyro-engine/issues) for feature requests, or bug reporting. When doing so, use issue templates whenever possible and provide enough information for other contributors to jump in. diff --git a/Dockerfile b/Dockerfile index 4f33e768..34360f47 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,20 +1,26 @@ FROM python:3.8.1-slim +# set environment variables +ENV PYTHONPATH "${PYTHONPATH}:/usr/src/app" +ENV PATH /usr/local/bin:$PATH +ENV LANG C.UTF-8 ENV PYTHONUNBUFFERED 1 ENV PYTHONDONTWRITEBYTECODE 1 +# set work directory +WORKDIR /usr/src/app + +COPY ./pyproject.toml /tmp/pyproject.toml +COPY ./src/requirements.txt /tmp/requirements.txt COPY ./README.md /tmp/README.md COPY ./setup.py /tmp/setup.py COPY ./pyroengine /tmp/pyroengine -COPY ./requirements.txt /tmp/requirements.txt -RUN apt update \ - && apt install -y git \ - && apt install ffmpeg libsm6 libxext6 -y \ - && apt install -y gcc python3-dev \ - && pip install --upgrade pip setuptools wheel \ +RUN pip install --upgrade pip setuptools wheel \ && pip install -e /tmp/. \ + && pip install -r /tmp/requirements.txt \ && pip cache purge \ - && rm -rf /root/.cache/pip \ - && rm -rf /var/lib/apt/lists/* + && rm -rf /root/.cache/pip + +COPY ./src/run.py /usr/src/app/run.py diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..c35cb7dc --- /dev/null +++ b/Makefile @@ -0,0 +1,33 @@ +# this target runs checks on all files +quality: + isort . -c + flake8 + mypy + pydocstyle + black --check . + +# this target runs checks on all files and potentially modifies some of them +style: + isort . + black . + +# Run tests for the library +test: + coverage run -m pytest tests/ + +# Build documentation for current version +single-docs: + sphinx-build docs/source docs/_build -a + +# Build the docker +docker: + docker build . 
-t pyronear/pyro-engine:python3.8.1-slim + +# Run the engine wrapper +run: + docker build . -t pyronear/pyro-engine:latest + docker-compose up -d + +# Stop the engine wrapper +stop: + docker-compose down diff --git a/README.md b/README.md index f2d82e3c..74a79b1f 100644 --- a/README.md +++ b/README.md @@ -5,8 +5,8 @@ - - + + @@ -16,64 +16,63 @@ -# pyroengine: Deploy Pyronear wildfire detection +# PyroEngine: Wildfire detection on edge devices -The increasing adoption of mobile phones have significantly shortened the time required for firefighting agents to be alerted of a starting wildfire. In less dense areas, limiting and minimizing this duration remains critical to preserve forest areas. +PyroEngine provides a high-level interface to use Deep learning models in production while being connected to the alert API. -![pyrovision](https://github.com/pyronear/pyro-vision) aims at providing the means to create a wildfire early detection system with state-of-the-art performances at minimal deployment costs. +## Quick Tour -pyroengine aims to deploy pyrovision wildfire detection system +### Running your engine locally +You can use the library like any other python package to detect wildfires as follows: +```python +from pyroengine.core import Engine +from PIL import Image -## Table of Contents - -* [Getting Started](#getting-started) - * [Prerequisites](#prerequisites) - * [Installation](#installation) -* [Documentation](#documentation) -* [Contributing](#contributing) -* [Credits](#credits) -* [License](#license) - +engine = Engine("pyronear/rexnet1_3x") +im = Image.open("path/to/your/image.jpg").convert('RGB') -## Getting started +prediction = engine.predict(image) +``` -### Prerequisites +## Setup -- Python 3.6 (or more recent) -- [pip](https://pip.pypa.io/en/stable/) +Python 3.6 (or higher) and [pip](https://pip.pypa.io/en/stable/)/[conda](https://docs.conda.io/en/latest/miniconda.html) are required to install PyroVision. 
-### Installation +### Stable release -You can install the package using [pypi](https://pypi.org/project/pyronear/) as follows: +You can install the last stable release of the package using [pypi](https://pypi.org/project/pyroengine/) as follows: ```shell pip install pyroengine ``` -### Environment files - -The `pyroengine/pi_utils/python.env` file must contain: -- `WEBSERVER_IP`: the IP address of the main rpi once it is installed on site -- `WEBSERVER_PORT`: the port exposed on the main rpi for the local webserver -### Test Engine +### Developer installation -You can test to run a prediction using our Pyronear Engine using the following: +Alternatively, if you wish to use the latest features of the project that haven't made their way to a release yet, you can install the package from source: ```shell -from pyroengine.engine import PyronearEngine -from PIL import Image - -engine = PyronearEngine() +git clone https://github.com/pyronear/pyro-engine.git +pip install -e pyro-engine/. +``` -im = Image.open("path/to/your/image.jpg").convert('RGB') +### Full docker orchestration -prediction = engine.predict(image) +Finally, you will need a `.env` file to enable camera & Alert API interactions. 
Your file should include a few mandatory entries: +``` +API_URL=http://my-api.myhost.com +LAT=48.88 +LON=2.38 +CAM_USER=my_dummy_login +CAM_PWD=my_dummy_pwd ``` -This is a quick demo without api setup, so without sending the alert +Additionally, you'll need a `./data` folder which contains: +- `credentials.json`: a dictionary with the IP address of your cameras as key, and dictionary with entries `login` & `password` for their Alert API credentials +- `model.onnx`: optional, will overrides the model weights download from HuggingFace Hub +- `config.json`: optional, will overrides the model config download from HuggingFace Hub ## Documentation @@ -83,7 +82,7 @@ The full package documentation is available [here](https://pyronear.github.io/py ## Contributing -Please refer to `CONTRIBUTING` if you wish to contribute to this project. +Please refer to [`CONTRIBUTING`](CONTRIBUTING.md) if you wish to contribute to this project. @@ -95,4 +94,4 @@ This project is developed and maintained by the repo owner and volunteers from [ ## License -Distributed under the Apache 2 License. See `LICENSE` for more information. +Distributed under the Apache 2 License. See [`LICENSE`](LICENSE) for more information. 
diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..23fe6e0e --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,15 @@ +version: '3.8' + +services: + pyro-engine: + image: pyronear/pyro-engine:latest + environment: + - API_URL=${API_URL} + - LAT=${LAT} + - LON=${LON} + - CAM_USER=${CAM_USER} + - CAM_PWD=${CAM_PWD} + command: python run.py + volumes: + - ./data:/usr/src/app/data + restart: always diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index 2c7349d5..00000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -sphinx-rtd-theme==0.4.3 -sphinx>=3.0.0,!=3.5.0 -Jinja2<3.1 diff --git a/docs/source/_static/css/custom.css b/docs/source/_static/css/custom.css new file mode 100644 index 00000000..b202a3cf --- /dev/null +++ b/docs/source/_static/css/custom.css @@ -0,0 +1,10 @@ +h1 { + font-size: 200%; +} + +/* Github button */ + +.github-repo { + display: flex; + justify-content: center; +} diff --git a/docs/source/_static/css/custom_theme.css b/docs/source/_static/css/custom_theme.css deleted file mode 100644 index dbc5fe7e..00000000 --- a/docs/source/_static/css/custom_theme.css +++ /dev/null @@ -1,177 +0,0 @@ -h1, h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend, p.caption { - font-family: "Lato","proxima-nova","Helvetica Neue",Arial,sans-serif; - font-weight: normal; -} - -h1 { - text-transform: uppercase; - letter-spacing: 1.78px -} - -body { - font-family: "Lato","proxima-nova","Helvetica Neue",Arial,sans-serif; - background: #fff; -} - -/* Use white for docs background */ -.wy-side-nav-search { - background-color: #fff; -} - -.wy-nav-shift { - background-color: #f3f4f7; -} -.wy-nav-side { - background-color: #f3f4f7; -} - -.wy-nav-content { - max-width: 900px; -} - -.wy-nav-content-wrap, .wy-menu li.current > a { - background-color: #fff; -} - -a { - color: #B91444; -} - -.wy-menu-vertical header, .wy-menu-vertical p.caption { - color: #B91444; -} - -@media screen 
and (min-width: 1100px) { - .wy-nav-content-wrap { - background-color: #fff; - } - .wy-nav-side { - background-color: #f3f4f7; - } - .wy-nav-content { - background: #fff; - } -} - -@media screen and (min-width: 1400px) { - .wy-nav-content-wrap { - background-color: #fff; - } - .wy-nav-side { - background-color: #f3f4f7; - } - .wy-nav-content { - background: #fff; - } -} - -/* Fixes for mobile */ -.wy-nav-top { - background-color: #fff; - background-repeat: no-repeat; - background-position: center; - padding: 0; - margin: 0.4045em 0.809em; - color: #333; -} - -.wy-nav-top > a { - display: none; -} - -@media screen and (max-width: 768px) { - .wy-side-nav-search>a img.logo { - height: 60px; - } -} - -/* This is needed to ensure that logo above search scales properly */ -.wy-side-nav-search a { - display: block; -} - -.wy-side-nav-search .wy-dropdown>a, .wy-side-nav-search>a { - color: #B91444; -} - -/* This ensures that multiple constructors will remain in separate lines. */ -.rst-content dl:not(.docutils) dt { - display: block; - color: #797A7B; - border-top: 0px; - background: #f0f0f0; -} - -.rst-content dl:not(.docutils) { -/* display: table; - color: #B91444;*/ - border-top: solid 3px #B91444; -} - -.rst-content dl:not(.docutils) dl dt { - border-left: 0px; -} - -.rst-content .viewcode-link, .rst-content .viewcode-back { - color: #B91444; -} - -/* Use our red for literals (it's very similar to the original color) */ -.rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal { - color: #B91444; -} - -.rst-content tt.xref, a .rst-content tt, .rst-content tt.xref, -.rst-content code.xref, a .rst-content tt, a .rst-content code { - color: #404040; -} - -/* Change link colors (except for the menu) */ - -a { - color: #900C30; -} - -a:hover { - color: #900C30; -} - - -a:visited { - color: #780a35; -} - -.wy-menu a { - color: #b3b3b3; -} - -.wy-menu a:hover { - color: #b3b3b3; -} - -/* Default footer text is quite big */ -footer { - font-size: 80%; -} - 
-footer .rst-footer-buttons { - font-size: 125%; /* revert footer settings - 1/80% = 125% */ -} - -footer p { - font-size: 100%; -} - -/* For hidden headers that appear in TOC tree */ -/* see http://stackoverflow.com/a/32363545/3343043 */ -.rst-content .hidden-section { - display: none; -} - -nav .hidden-section { - display: inherit; -} - -.wy-side-nav-search>div.version { - color: #000; -} \ No newline at end of file diff --git a/docs/source/_static/js/custom.js b/docs/source/_static/js/custom.js new file mode 100644 index 00000000..bdeb5462 --- /dev/null +++ b/docs/source/_static/js/custom.js @@ -0,0 +1,32 @@ +// Based on https://github.com/huggingface/transformers/blob/master/docs/source/_static/js/custom.js + +function addGithubButton() { + const div = ` +
+ Star +
+ `; + document.querySelector(".sidebar-brand").insertAdjacentHTML('afterend', div); +} + +/*! + * github-buttons v2.2.10 + * (c) 2019 なつき + * @license BSD-2-Clause + */ +/** + * modified to run programmatically + */ +function parseGithubButtons (){"use strict";var e=window.document,t=e.location,o=window.encodeURIComponent,r=window.decodeURIComponent,n=window.Math,a=window.HTMLElement,i=window.XMLHttpRequest,l="https://unpkg.com/github-buttons@2.2.10/dist/buttons.html",c=i&&i.prototype&&"withCredentials"in i.prototype,d=c&&a&&a.prototype.attachShadow&&!a.prototype.attachShadow.prototype,s=function(e,t,o){e.addEventListener?e.addEventListener(t,o):e.attachEvent("on"+t,o)},u=function(e,t,o){e.removeEventListener?e.removeEventListener(t,o):e.detachEvent("on"+t,o)},h=function(e,t,o){var r=function(n){return u(e,t,r),o(n)};s(e,t,r)},f=function(e,t,o){var r=function(n){if(t.test(e.readyState))return u(e,"readystatechange",r),o(n)};s(e,"readystatechange",r)},p=function(e){return function(t,o,r){var n=e.createElement(t);if(o)for(var a in o){var i=o[a];null!=i&&(null!=n[a]?n[a]=i:n.setAttribute(a,i))}if(r)for(var l=0,c=r.length;l'},eye:{width:16,height:16,path:''},star:{width:14,height:16,path:''},"repo-forked":{width:10,height:16,path:''},"issue-opened":{width:14,height:16,path:''},"cloud-download":{width:16,height:16,path:''}},w={},x=function(e,t,o){var r=p(e.ownerDocument),n=e.appendChild(r("style",{type:"text/css"}));n.styleSheet?n.styleSheet.cssText=m:n.appendChild(e.ownerDocument.createTextNode(m));var a,l,d=r("a",{className:"btn",href:t.href,target:"_blank",innerHTML:(a=t["data-icon"],l=/^large$/i.test(t["data-size"])?16:14,a=(""+a).toLowerCase().replace(/^octicon-/,""),{}.hasOwnProperty.call(v,a)||(a="mark-github"),'"),"aria-label":t["aria-label"]||void 0},[" 
",r("span",{},[t["data-text"]||""])]);/\.github\.com$/.test("."+d.hostname)?/^https?:\/\/((gist\.)?github\.com\/[^\/?#]+\/[^\/?#]+\/archive\/|github\.com\/[^\/?#]+\/[^\/?#]+\/releases\/download\/|codeload\.github\.com\/)/.test(d.href)&&(d.target="_top"):(d.href="#",d.target="_self");var u,h,g,x,y=e.appendChild(r("div",{className:"widget"+(/^large$/i.test(t["data-size"])?" lg":"")},[d]));/^(true|1)$/i.test(t["data-show-count"])&&"github.com"===d.hostname&&(u=d.pathname.replace(/^(?!\/)/,"/").match(/^\/([^\/?#]+)(?:\/([^\/?#]+)(?:\/(?:(subscription)|(fork)|(issues)|([^\/?#]+)))?)?(?:[\/?#]|$)/))&&!u[6]?(u[2]?(h="/repos/"+u[1]+"/"+u[2],u[3]?(x="subscribers_count",g="watchers"):u[4]?(x="forks_count",g="network"):u[5]?(x="open_issues_count",g="issues"):(x="stargazers_count",g="stargazers")):(h="/users/"+u[1],g=x="followers"),function(e,t){var o=w[e]||(w[e]=[]);if(!(o.push(t)>1)){var r=b(function(){for(delete w[e];t=o.shift();)t.apply(null,arguments)});if(c){var n=new i;s(n,"abort",r),s(n,"error",r),s(n,"load",function(){var e;try{e=JSON.parse(n.responseText)}catch(e){return void r(e)}r(200!==n.status,e)}),n.open("GET",e),n.send()}else{var a=this||window;a._=function(e){a._=null,r(200!==e.meta.status,e.data)};var l=p(a.document)("script",{async:!0,src:e+(/\?/.test(e)?"&":"?")+"callback=_"}),d=function(){a._&&a._({meta:{}})};s(l,"load",d),s(l,"error",d),l.readyState&&f(l,/de|m/,d),a.document.getElementsByTagName("head")[0].appendChild(l)}}}.call(this,"https://api.github.com"+h,function(e,t){if(!e){var n=t[x];y.appendChild(r("a",{className:"social-count",href:t.html_url+"/"+g,target:"_blank","aria-label":n+" "+x.replace(/_count$/,"").replace("_"," ").slice(0,n<2?-1:void 0)+" on 
GitHub"},[r("b"),r("i"),r("span",{},[(""+n).replace(/\B(?=(\d{3})+(?!\d))/g,",")])]))}o&&o(y)})):o&&o(y)},y=window.devicePixelRatio||1,C=function(e){return(y>1?n.ceil(n.round(e*y)/y*2)/2:n.ceil(e))||0},F=function(e,t){e.style.width=t[0]+"px",e.style.height=t[1]+"px"},k=function(t,r){if(null!=t&&null!=r)if(t.getAttribute&&(t=function(e){for(var t={href:e.href,title:e.title,"aria-label":e.getAttribute("aria-label")},o=["icon","text","size","show-count"],r=0,n=o.length;r`_ diff --git a/docs/source/conf.py b/docs/source/conf.py index 8589b322..3d149c64 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,7 +1,7 @@ # Copyright (C) 2020-2022, Pyronear. -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. +# This program is licensed under the Apache License 2.0. +# See LICENSE or go to for full license details. # Configuration file for the Sphinx documentation builder. # @@ -15,19 +15,19 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
# -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) -import pyroengine -import sphinx_rtd_theme +import os +import sys +from datetime import datetime +sys.path.insert(0, os.path.abspath("../..")) +import pyroengine # -- Project information ----------------------------------------------------- master_doc = "index" project = "pyroengine" -copyright = "2019, Pyronear Contributors" -author = "Pyronear Contributors" +author = "Pyronear" +copyright = f"2020-{datetime.now().year}, {author}" # The full version, including alpha/beta/rc tags version = pyroengine.__version__ @@ -44,11 +44,8 @@ "sphinx.ext.napoleon", "sphinx.ext.viewcode", "sphinx.ext.autosummary", - "sphinx.ext.doctest", - "sphinx.ext.intersphinx", - "sphinx.ext.todo", - "sphinx.ext.coverage", "sphinx.ext.mathjax", + "sphinx_copybutton", ] napoleon_use_ivar = True @@ -63,7 +60,8 @@ # The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" +pygments_style = "friendly" +pygments_dark_style = "monokai" highlight_language = "python3" # -- Options for HTML output ------------------------------------------------- @@ -71,19 +69,33 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = "sphinx_rtd_theme" -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] +html_theme = "furo" + +html_title = "PyroEngine" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
# html_theme_options = { - "collapse_navigation": False, - "display_version": True, - "logo_only": True, + "footer_icons": [ + { + "name": "GitHub", + "url": "https://github.com/pyronear/pyro-engine", + "html": """ + + + + """, + "class": "", + }, + ], + "source_repository": "https://github.com/pyronear/pyro-engine/", + "source_branch": "develop", + "source_directory": "docs/source/", } + html_logo = "_static/img/pyronear-logo-dark.png" # Add any paths that contain custom static files (such as style sheets) here, @@ -91,9 +103,7 @@ # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] -html_context = { - "css_files": [ - "https://fonts.googleapis.com/css?family=Lato", - "_static/css/custom_theme.css", - ], -} + +def setup(app): + app.add_css_file("css/custom_theme.css") + app.add_js_file("js/custom.js") diff --git a/docs/source/core.rst b/docs/source/core.rst new file mode 100644 index 00000000..6f3fbb31 --- /dev/null +++ b/docs/source/core.rst @@ -0,0 +1,16 @@ +pyroengine.core +=============== + +The models subpackage contains everything to manage the whole Fire Detection process by capturing and saving the image and +by predicting whether there is a fire on this image + + +.. currentmodule:: pyroengine.core + +Engine +------ + +.. autoclass:: Engine + .. automethod:: clear_cache + .. automethod:: predict + .. automethod:: heartbeat \ No newline at end of file diff --git a/docs/source/engine.rst b/docs/source/engine.rst deleted file mode 100644 index a1d7b1bb..00000000 --- a/docs/source/engine.rst +++ /dev/null @@ -1,20 +0,0 @@ -pyroengine.engine -================= - -The models subpackage contains everything to manage the whole Fire Detection process by capturing and saving the image and -by predicting whether there is a fire on this image - - -.. currentmodule:: pyroengine.engine - -Pyronear Predictor ------------------- - -.. autoclass:: PyronearPredictor - - -Pyronear Engine ---------------- - -.. 
autoclass:: PyronearEngine - diff --git a/docs/source/index.rst b/docs/source/index.rst index 0e246dd9..be22fd44 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,21 +1,27 @@ -Pyroengine documentation -======================== +********************************************** +PyroEngine: Wildfire detection on edge devices +********************************************** -The :mod:`pyroengine` package provides everything usefull to deploy pyronear +PyroEngine provides a high-level interface to use Deep learning models in production while being connected to the alert API. .. toctree:: - :maxdepth: 2 - :caption: Package Reference + :maxdepth: 1 + :caption: Getting Started + :hidden: - engine + installing +.. toctree:: + :maxdepth: 2 + :caption: Package Reference + :hidden: -.. automodule:: pyroengine - :members: + core -Indices and tables -================== +.. toctree:: + :maxdepth: 1 + :caption: Notes + :hidden: -* :ref:`genindex` -* :ref:`modindex` + changelog diff --git a/docs/source/installation.rst b/docs/source/installation.rst new file mode 100644 index 00000000..c473fe16 --- /dev/null +++ b/docs/source/installation.rst @@ -0,0 +1,36 @@ + +************ +Installation +************ + +This library requires `Python `_ 3.6 or higher. + +Via Python Package +================== + +Install the last stable release of the package using `pip `_: + +.. code:: bash + + pip install pyroengine + + +Via Conda +========= + +Install the last stable release of the package using `conda `_: + +.. code:: bash + + conda install -c pyronear pyroengine + + +Via Git +======= + +Install the library in developer mode: + +.. code:: bash + + git clone https://github.com/pyronear/pyro-engine.git + pip install -e pyro-engine/. 
diff --git a/main.rpi.requirements.txt b/main.rpi.requirements.txt deleted file mode 100644 index 60b4f1f0..00000000 --- a/main.rpi.requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -psutil==5.8.0 -pyroclient@git+https://github.com/pyronear/pyro-api.git#egg=pyroclient&subdirectory=client -python-dotenv==0.17.0 -requests==2.25.1 diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..f09293fe --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,128 @@ +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "pyroengine" +description = "Wildfire detection on edge devices" +authors = [ + {name = "Pyronear", email = "contact@pyronear.org"} +] +readme = "README.md" +requires-python = ">=3.6,<4" +license = {file = "LICENSE"} +keywords = ["pytorch", "deep learning", "vision", "cnn", "wildfire"] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Natural Language :: English", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Scientific/Engineering :: Artificial Intelligence", +] +dynamic = ["version"] +dependencies = [ + "Pillow>=8.4.0", + "onnxruntime>=1.10.0,<2.0.0", + "numpy>=1.19.5,<2.0.0", + "huggingface-hub>=0.4.0,<1.0.0", + "pyroclient>=0.1.2" +] + +[project.optional-dependencies] +test = [ + "pytest>=5.3.2", + "coverage[toml]>=4.5.4", + "requests>=2.20.0,<3.0.0", +] +quality = [ + "flake8>=3.9.0", + "isort>=5.7.0", + "mypy>=0.812", + "pydocstyle[toml]>=6.0.0", + "black>=22.1,<23.0", +] +docs = [ + "sphinx>=3.0.0,!=3.5.0", + "furo>=2022.3.4", + 
"sphinxemoji>=0.1.8", + "sphinx-copybutton>=0.3.1", + # Indirect deps + # cf. https://github.com/readthedocs/readthedocs.org/issues/9038 + "Jinja2<3.1", +] +dev = [ + # test + "pytest>=5.3.2", + "coverage[toml]>=4.5.4", + "requests>=2.20.0,<3.0.0", + # style + "flake8>=3.9.0", + "isort>=5.7.0", + "mypy>=0.812", + "pydocstyle[toml]>=6.0.0", + "black>=22.1,<23.0", + # docs + "sphinx>=3.0.0,!=3.5.0", + "furo>=2022.3.4", + "sphinxemoji>=0.1.8", + "sphinx-copybutton>=0.3.1", + "Jinja2<3.1", +] + +[project.urls] +documentation = "https://pyronear.org/pyro-engine" +repository = "https://github.com/pyronear/pyro-engine" +tracker = "https://github.com/pyronear/pyro-engine/issues" + +[tool.setuptools] +zip-safe = true + +[tool.setuptools.packages.find] +exclude = ["docs*", "scripts*", "tests*", "src*"] + + +[tool.mypy] +files = "pyroengine/" +show_error_codes = true +pretty = true +warn_unused_ignores = true +warn_redundant_casts = true +no_implicit_optional = true +check_untyped_defs = true +implicit_reexport = false + +[[tool.mypy.overrides]] +module = [ + "onnxruntime.*", + "requests.*", + "PIL.*", + "huggingface_hub.*", + "pyroclient.*", + "psutil.*", +] +ignore_missing_imports = true + +[tool.isort] +line_length = 120 +src_paths = ["pyroengine", "tests", "src", "docs", ".github", "scripts"] +skip_glob = "**/__init__.py" + +[tool.pydocstyle] +select = "D300,D301,D417" +match = ".*\\.py" + +[tool.coverage.run] +source = ["pyroengine"] + +[tool.black] +line-length = 120 +target-version = ['py38'] diff --git a/pyroengine/__init__.py b/pyroengine/__init__.py index 4c5e645e..21ce24a5 100644 --- a/pyroengine/__init__.py +++ b/pyroengine/__init__.py @@ -1,6 +1,2 @@ -# Copyright (C) 2020-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. 
- +from .core import * from .version import __version__ diff --git a/pyroengine/core.py b/pyroengine/core.py new file mode 100644 index 00000000..962ff2e6 --- /dev/null +++ b/pyroengine/core.py @@ -0,0 +1,293 @@ +# Copyright (C) 2020-2022, Pyronear. + +# This program is licensed under the Apache License 2.0. +# See LICENSE or go to for full license details. + +import io +import json +import logging +import os +from collections import deque +from datetime import datetime, timedelta +from pathlib import Path +from typing import Any, Dict, Optional, Tuple + +from PIL import Image +from pyroclient import client +from requests.exceptions import ConnectionError + +from .vision import Classifier + +__all__ = ["Engine"] + +logging.basicConfig(format="%(asctime)s | %(levelname)s: %(message)s", level=logging.INFO, force=True) + + +class Engine: + """This implements an object to manage predictions and API interactions for wildfire alerts. + + Args: + hub_repo: repository on HF Hub to load the ONNX model from + conf_thresh: confidence threshold to send an alert + api_url: url of the pyronear API + client_creds: api credectials for each pizero, the dictionary should be as the one in the example + latitude: device latitude + longitude: device longitude + alert_relaxation: number of consecutive positive detections required to send the first alert, and also + the number of consecutive negative detections before stopping the alert + frame_size: Resize frame to frame_size before sending it to the api in order to save bandwidth + cache_backup_period: number of minutes between each cache backup to disk + frame_saving_period: Send one frame over N to the api for our dataset + cache_size: maximum number of alerts to save in cache + kwargs: keyword args of Classifier + + Examples: + >>> from pyroengine import Engine + >>> client_creds ={ + "cam_id_1": {'login':'log1', 'password':'pwd1'}, + "cam_id_2": {'login':'log2', 'password':'pwd2'}, + } + >>> pyroEngine = 
Engine("pyronear/rexnet1_3x", 0.5, 'https://api.pyronear.org', client_creds, 48.88, 2.38) + """ + + def __init__( + self, + hub_repo: str, + conf_thresh: float = 0.5, + api_url: Optional[str] = None, + client_creds: Optional[Dict[str, Dict[str, str]]] = None, + latitude: Optional[float] = None, + longitude: Optional[float] = None, + alert_relaxation: int = 3, + frame_size: Optional[Tuple[int, int]] = None, + cache_backup_period: int = 60, + frame_saving_period: Optional[int] = None, + cache_size: int = 100, + cache_folder: str = "data/", + **kwargs: Any, + ) -> None: + """Init engine""" + # Engine Setup + + self.model = Classifier(hub_repo, **kwargs) + self.conf_thresh = conf_thresh + + # API Setup + if isinstance(api_url, str): + assert isinstance(latitude, float) and isinstance(longitude, float) and isinstance(client_creds, dict) + self.latitude = latitude + self.longitude = longitude + self.api_client = {} + if isinstance(api_url, str) and isinstance(client_creds, dict): + # Instantiate clients for each camera + for _id, vals in client_creds.items(): + self.api_client[_id] = client.Client(api_url, vals["login"], vals["password"]) + + # Cache & relaxation + self.frame_saving_period = frame_saving_period + self.alert_relaxation = alert_relaxation + self.frame_size = frame_size + self.cache_backup_period = cache_backup_period + + # Var initialization + self._states: Dict[str, Dict[str, Any]] = {} + if isinstance(client_creds, dict): + for cam_id in client_creds: + self._states[cam_id] = {"consec": 0, "frame_count": 0, "ongoing": False} + else: + self._states["-1"] = {"consec": 0, "frame_count": 0, "ongoing": False} + + # Restore pending alerts cache + self._alerts: deque = deque([], cache_size) + self._cache = Path(cache_folder) # with Docker, the path has to be a bind volume + assert self._cache.is_dir() + self._load_cache() + self.last_cache_dump = datetime.utcnow() + + def clear_cache(self) -> None: + """Clear local cache""" + for file in self._cache.rglob("*"): 
+ file.unlink() + + def _dump_cache(self) -> None: + + # Remove previous dump + json_path = self._cache.joinpath("pending_alerts.json") + if json_path.is_file(): + with open(json_path, "rb") as f: + data = json.load(f) + + for entry in data: + os.remove(entry["frame_path"]) + os.remove(json_path) + + data = [] + for idx, info in enumerate(self._alerts): + # Save frame to disk + info["frame"].save(self._cache.joinpath(f"pending_frame{idx}.jpg")) + + # Save path in JSON + data.append( + { + "frame_path": str(self._cache.joinpath(f"pending_frame{idx}.jpg")), + "cam_id": info["cam_id"], + "ts": info["ts"], + } + ) + + # JSON dump + if len(data) > 0: + with open(json_path, "w") as f: + json.dump(data, f) + + def _load_cache(self) -> None: + # Read json + json_path = self._cache.joinpath("pending_alerts.json") + if json_path.is_file(): + with open(json_path, "rb") as f: + data = json.load(f) + + for entry in data: + # Open image + frame = Image.open(entry["frame_path"], mode="r") + self._alerts.append({"frame": frame, "cam_id": entry["cam_id"], "ts": entry["ts"]}) + + def heartbeat(self, cam_id: str) -> None: + """Updates last ping of device""" + self.api_client[cam_id].heartbeat() + + def _update_states(self, conf: float, cam_key: str) -> bool: + """Updates the detection states""" + # Detection + if conf >= self.conf_thresh: + # Don't increment beyond relaxation + if not self._states[cam_key]["ongoing"] and self._states[cam_key]["consec"] < self.alert_relaxation: + self._states[cam_key]["consec"] += 1 + + if self._states[cam_key]["consec"] == self.alert_relaxation: + self._states[cam_key]["ongoing"] = True + + return self._states[cam_key]["ongoing"] + # No wildfire + else: + if self._states[cam_key]["consec"] > 0: + self._states[cam_key]["consec"] -= 1 + # Consider event as finished + if self._states[cam_key]["consec"] == 0: + self._states[cam_key]["ongoing"] = False + + return False + + def predict(self, frame: Image.Image, cam_id: Optional[str] = None) -> float: + 
"""Computes the confidence that the image contains wildfire cues + + Args: + frame: a PIL image + cam_id: the name of the camera that sent this image + Returns: + the predicted confidence + """ + + # Heartbeat + if len(self.api_client) > 0 and isinstance(cam_id, str): + self.heartbeat(cam_id) + + # Inference with ONNX + pred = float(self.model(frame.convert("RGB"))) + # Log analysis result + device_str = f"Camera {cam_id} - " if isinstance(cam_id, str) else "" + pred_str = "Wildfire detected" if pred >= self.conf_thresh else "No wildfire" + logging.info(f"{device_str}{pred_str} (confidence: {pred:.2%})") + + # Reduce image size to save bandwidth + if isinstance(self.frame_size, tuple): + frame = frame.resize(self.frame_size, Image.BILINEAR) + + # Alert + cam_key = cam_id or "-1" + to_be_staged = self._update_states(pred, cam_key) + if to_be_staged and len(self.api_client) > 0 and isinstance(cam_id, str): + # Save the alert in cache to avoid connection issues + self._stage_alert(frame, cam_id) + + # Uploading pending alerts + if len(self._alerts) > 0: + self._process_alerts() + + # Check if it's time to backup pending alerts + ts = datetime.utcnow() + if ts > self.last_cache_dump + timedelta(minutes=self.cache_backup_period): + self._dump_cache() + self.last_cache_dump = ts + + # save frame + if len(self.api_client) > 0 and isinstance(self.frame_saving_period, int) and isinstance(cam_id, str): + self._states[cam_key]["frame_count"] += 1 + if self._states[cam_key]["frame_count"] == self.frame_saving_period: + # Send frame to the api + stream = io.BytesIO() + frame.save(stream, format="JPEG") + try: + self._upload_frame(cam_id, stream.getvalue()) + # Reset frame counter + self._states[cam_key]["frame_count"] = 0 + except ConnectionError: + stream.seek(0) # "Rewind" the stream to the beginning so we can read its content + + return pred + + def _upload_frame(self, cam_id: str, media_data: bytes) -> None: + """Save frame""" + logging.info("Uploading media...") + # Create 
a media + media_id = self.api_client[cam_id].create_media_from_device().json()["id"] + # Send media + self.api_client[cam_id].upload_media(media_id=media_id, media_data=media_data) + + def _stage_alert(self, frame: Image.Image, cam_id: str) -> None: + # Store information in the queue + self._alerts.append( + { + "frame": frame, + "cam_id": cam_id, + "ts": datetime.utcnow().isoformat(), + "media_id": None, + "alert_id": None, + } + ) + + def _process_alerts(self) -> None: + + for _ in range(len(self._alerts)): + # try to upload the oldest element + frame_info = self._alerts[0] + cam_id = frame_info["cam_id"] + logging.info("Sending alert...") + + try: + # Media creation + if not isinstance(self._alerts[0]["media_id"], int): + self._alerts[0]["media_id"] = self.api_client[cam_id].create_media_from_device().json()["id"] + # Alert creation + if not isinstance(self._alerts[0]["alert_id"], int): + self._alerts[0]["alert_id"] = ( + self.api_client[cam_id] + .send_alert_from_device( + self.latitude, + self.longitude, + self._alerts[0]["media_id"], + ) + .json()["id"] + ) + + # Media upload + stream = io.BytesIO() + frame_info["frame"].save(stream, format="JPEG") + self.api_client[cam_id].upload_media(self._alerts[0]["media_id"], media_data=stream.getvalue()) + # Clear + self._alerts.popleft() + logging.info(f"Camera {frame_info['cam_id']} - alert sent") + stream.seek(0) # "Rewind" the stream to the beginning so we can read its content + except (KeyError, ConnectionError): + logging.warning(f"Camera {cam_id} - unable to upload cache") + break diff --git a/pyroengine/engine/__init__.py b/pyroengine/engine/__init__.py deleted file mode 100644 index 331ad89e..00000000 --- a/pyroengine/engine/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (C) 2020-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. 
- -from .predictor import PyronearPredictor -from .engine import PyronearEngine diff --git a/pyroengine/engine/engine.py b/pyroengine/engine/engine.py deleted file mode 100644 index 67145956..00000000 --- a/pyroengine/engine/engine.py +++ /dev/null @@ -1,297 +0,0 @@ -# Copyright (C) 2020-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. - -import io -import os -import json -import logging -from PIL import Image -from pathlib import Path -from requests.exceptions import ConnectionError -from datetime import datetime, timedelta -from collections import deque -from typing import Optional, Dict -import numpy as np - -from pyroclient import client -from .predictor import PyronearPredictor - -logging.basicConfig( - format="%(asctime)s | %(levelname)s: %(message)s", level=logging.INFO, force=True -) - - -class PyronearEngine: - """ - This class is the Pyronear Engine. This engine manage the whole Fire Detection - process by capturing and saving the image and by predicting if there is a fire or - not based on this image. 
- - Args: - detection_thresh: wildfire detection threshold in [0, 1] - api_url: url of the pyronear API - client_creds: api credectials for each pizero, the dictionary should be as the one in the example - frame_saving_period: Send one frame over N to the api for our dataset - latitude: device latitude - longitude: device longitude - cache_size: maximum number of alerts to save in cache - alert_relaxation: number of consecutive positive detections required to send the first alert, and also - the number of consecutive negative detections before stopping the alert - cache_backup_period: number of minutes between each cache backup to disk - frame_size: Resize frame to frame_size before sending it to the api in order to save bandwidth - model_weights: Path / url model yolov5 model weights - - Examples: - >>> client_creds ={} - >>> client_creds['cam_id_1']={'login':'log1', 'password':'pwd1'} - >>> client_creds['cam_id_2']={'login':'log2', 'password':'pwd2'} - >>> pyroEngine = PyronearEngine(0.6, 'https://api.pyronear.org', client_creds, 50) - """ - - def __init__( - self, - detection_thresh: float = 0.25, - api_url: Optional[str] = None, - client_creds: Optional[Dict[str, str]] = None, - frame_saving_period: Optional[int] = None, - latitude: Optional[float] = None, - longitude: Optional[float] = None, - cache_size: int = 100, - alert_relaxation: int = 3, - cache_backup_period: int = 60, - frame_size: tuple = None, - model_weights: str = None, - ) -> None: - """Init engine""" - # Engine Setup - - self.pyronearPredictor = PyronearPredictor(model_weights, detection_thresh) - self.detection_thresh = detection_thresh - self.frame_saving_period = frame_saving_period - self.alert_relaxation = alert_relaxation - self.frame_size = frame_size - - # API Setup - self.api_url = api_url - self.latitude = latitude - self.longitude = longitude - - # Var initialization - self.stream = io.BytesIO() - self.consec_dets = {} - self.ongoing_alert = {} - self.frames_counter = {} - if 
isinstance(client_creds, dict): - for cam_id in client_creds.keys(): - self.consec_dets[cam_id] = 0 - self.frames_counter[cam_id] = 0 - self.ongoing_alert[cam_id] = False - else: - self.consec_dets["-1"] = 0 - self.ongoing_alert["-1"] = 0 - - if self.api_url is not None: - # Instantiate clients for each camera - self.api_client = {} - for _id, vals in client_creds.items(): - self.api_client[_id] = client.Client( - self.api_url, vals["login"], vals["password"] - ) - - # Restore pending alerts cache - self.pending_alerts = deque([], cache_size) - self._backup_folder = Path( - "data/" - ) # with Docker, the path has to be a bind volume - self.load_cache_from_disk() - self.cache_backup_period = cache_backup_period - self.last_cache_dump = datetime.utcnow() - - def predict(self, frame: Image.Image, cam_id: Optional[int] = None) -> float: - """run prediction on comming frame""" - pred = self.pyronearPredictor.predict(frame.convert("RGB")) # run prediction - - if len(pred) > 0: - prob = np.max(pred[:, 4]) - if cam_id is None: - logging.info(f"Wildfire detected with score ({prob:.2%})") - else: - self.heartbeat(cam_id) - logging.info( - f"Wildfire detected with score ({prob:.2%}), on device {cam_id}" - ) - - else: - prob = 0 - if cam_id is None: - logging.info("No wildfire detected") - else: - self.heartbeat(cam_id) - logging.info(f"No wildfire detected on device {cam_id}") - - # Reduce image size to save bandwidth - if isinstance(self.frame_size, tuple): - frame = frame.resize(self.frame_size) - - # Alert - if prob > self.detection_thresh: - if cam_id is None: - cam_id = "-1" # add default key value - - # Don't increment beyond relaxation - if ( - not self.ongoing_alert[cam_id] - and self.consec_dets[cam_id] < self.alert_relaxation - ): - self.consec_dets[cam_id] += 1 - - if self.consec_dets[cam_id] == self.alert_relaxation: - self.ongoing_alert[cam_id] = True - - if isinstance(self.api_url, str) and self.ongoing_alert[cam_id]: - # Save the alert in cache to avoid 
connection issues - self.save_to_cache(frame, cam_id) - - # No wildfire - else: - if cam_id is None: - cam_id = "-1" # add default key value - - if self.consec_dets[cam_id] > 0: - self.consec_dets[cam_id] -= 1 - # Consider event as finished - if self.consec_dets[cam_id] == 0: - self.ongoing_alert[cam_id] = False - - # Uploading pending alerts - if len(self.pending_alerts) > 0: - self.upload_pending_alerts() - - # Check if it's time to backup pending alerts - ts = datetime.utcnow() - if ts > self.last_cache_dump + timedelta(minutes=self.cache_backup_period): - self.save_cache_to_disk() - self.last_cache_dump = ts - - # save frame - if ( - isinstance(self.api_url, str) - and isinstance(self.frame_saving_period, int) - and isinstance(cam_id, int) - ): - self.frames_counter[cam_id] += 1 - if self.frames_counter[cam_id] == self.frame_saving_period: - # Reset frame counter - self.frames_counter[cam_id] = 0 - # Send frame to the api - frame.save(self.stream, format="JPEG") - self.save_frame(cam_id) - self.stream.seek( - 0 - ) # "Rewind" the stream to the beginning so we can read its content - - return prob - - def send_alert(self, cam_id: int) -> None: - """Send alert""" - logging.info("Sending alert...") - # Create a media - media_id = self.api_client[cam_id].create_media_from_device().json()["id"] - # Create an alert linked to the media and the event - self.api_client[cam_id].send_alert_from_device( - lat=self.latitude, lon=self.longitude, media_id=media_id - ) - self.api_client[cam_id].upload_media( - media_id=media_id, media_data=self.stream.getvalue() - ) - - def upload_frame(self, cam_id: int) -> None: - """Save frame""" - logging.info("Uploading media...") - # Create a media - media_id = self.api_client[cam_id].create_media_from_device().json()["id"] - # Send media - self.api_client[cam_id].upload_media( - media_id=media_id, media_data=self.stream.getvalue() - ) - - def heartbeat(self, cam_id: int) -> None: - """Updates last ping of device""" - 
self.api_client[cam_id].heartbeat() - - def save_to_cache(self, frame: Image.Image, cam_id: int) -> None: - # Store information in the queue - self.pending_alerts.append( - {"frame": frame, "cam_id": cam_id, "ts": datetime.utcnow()} - ) - - def upload_pending_alerts(self) -> None: - - for _ in range(len(self.pending_alerts)): - # try to upload the oldest element - frame_info = self.pending_alerts[0] - - try: - frame_info["frame"].save(self.stream, format="JPEG") - # Send alert to the api - self.send_alert(frame_info["cam_id"]) - # No need to upload it anymore - self.pending_alerts.popleft() - logging.info(f"Alert sent by device {frame_info['cam_id']}") - except ConnectionError: - logging.warning( - f"Unable to upload cache for device {frame_info['cam_id']}" - ) - self.stream.seek( - 0 - ) # "Rewind" the stream to the beginning so we can read its content - break - - def save_cache_to_disk(self) -> None: - - # Remove previous dump - json_path = self._backup_folder.joinpath("pending_alerts.json") - if json_path.is_file(): - with open(json_path, "rb") as f: - data = json.load(f) - - for entry in data: - os.remove(entry["frame_path"]) - os.remove(json_path) - - data = [] - for idx, info in enumerate(self.pending_alerts): - # Save frame to disk - info["frame"].save(self._backup_folder.joinpath(f"pending_frame{idx}.jpg")) - - # Save path in JSON - data.append( - { - "frame_path": str( - self._backup_folder.joinpath(f"pending_frame{idx}.jpg") - ), - "cam_id": info["cam_id"], - "ts": info["ts"], - } - ) - - # JSON dump - if len(data) > 0: - with open(json_path, "w") as f: - json.dump(data, f) - - def load_cache_from_disk(self) -> None: - # Read json - json_path = self._backup_folder.joinpath("pending_alerts.json") - if json_path.is_file(): - with open(json_path, "rb") as f: - data = json.load(f) - - for entry in data: - # Open image - frame = Image.open(entry["frame_path"], mode="r") - self.pending_alerts.append( - {"frame": frame, "cam_id": entry["cam_id"], "ts": 
entry["ts"]} - ) diff --git a/pyroengine/engine/predictor.py b/pyroengine/engine/predictor.py deleted file mode 100644 index 95b9bd69..00000000 --- a/pyroengine/engine/predictor.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2020-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. - -import torch - - -class PyronearPredictor: - """This class use the last pyronear model and run our smoke detection model on it - Examples: - >>> pyronearPredictor = PyronearPredictor() - >>> im = Image.open("image.jpg") - >>> res = pyronearPredictor.predict(im) - """ - - def __init__(self, model_weights: str = None, conf: float = 0.25): - """Init predictor""" - # Model definition - if model_weights is None: - model_weights = "https://github.com//pyronear//pyro-vision//releases//download//v0.1.2//yolov5s_v001.onnx" - self.model = torch.hub.load( - "ultralytics/yolov5", "custom", path=model_weights, force_reload=True - ) # local model - self.model.conf = conf - - def predict(self, im): - """Run prediction""" - pred = self.model(im) - - return pred.xyxy[0].numpy() diff --git a/pyroengine/pi_utils/monitor_pi.py b/pyroengine/pi_utils/monitor_pi.py deleted file mode 100644 index a029c38d..00000000 --- a/pyroengine/pi_utils/monitor_pi.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (C) 2020-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. - -import logging -import os -from time import sleep - -import psutil - -import requests -from dotenv import load_dotenv -from gpiozero import CPUTemperature -from requests import RequestException - -load_dotenv() - -WEBSERVER_IP = os.environ.get("WEBSERVER_IP") -WEBSERVER_PORT = os.environ.get("WEBSERVER_PORT") - - -class MonitorPi: - """This class aims to monitor some metrics from Raspberry Pi system. 
- Example - -------- - monitor = MonitorPi(url_of_webserver) - monitor.record(5) # record metrics every 5 seconds - """ - - logger = logging.getLogger(__name__) - - def __init__(self, webserver_url): - """Initialize parameters for MonitorPi.""" - self.cpu_temp = CPUTemperature() - self.webserver_url = webserver_url - self.is_running = True - - def get_record(self): - metrics = { - "id": 0, - "cpu_temperature_C": self.cpu_temp.temperature, - "mem_available_GB": psutil.virtual_memory().available / 1024**3, - "cpu_usage_percent": psutil.cpu_percent(), - } - - response = requests.post(self.webserver_url, json=metrics) - response.raise_for_status() - - def record(self, time_step): - while self.is_running: - try: - self.get_record() - sleep(time_step) - except RequestException as e: - self.logger.error(f"Unexpected error in get_record(): {e!r}") - - def stop_monitoring(self): - self.is_running = False - - -if __name__ == "__main__": - webserver_local_url = f"http://{WEBSERVER_IP}:{WEBSERVER_PORT}/metrics" - monitor = MonitorPi(webserver_local_url) - monitor.record(30) diff --git a/pyroengine/vision.py b/pyroengine/vision.py new file mode 100644 index 00000000..13027b98 --- /dev/null +++ b/pyroengine/vision.py @@ -0,0 +1,66 @@ +# Copyright (C) 2022, Pyronear. + +# This program is licensed under the Apache License 2.0. +# See LICENSE or go to for full license details. + +import json +from typing import Optional + +import numpy as np +import onnxruntime +from huggingface_hub import hf_hub_download +from PIL import Image + +__all__ = ["Classifier"] + + +class Classifier: + """Implements an image classification model using ONNX backend. 
+ + Examples: + >>> from pyroengine.vision import Classifier + >>> model = Classifier("pyronear/rexnet1_3x") + + Args: + hub_repo: repository from HuggingFace Hub to load the model from + model_path: overrides the model path + cfg_path: overrides the configuration file from the model + """ + + def __init__(self, hub_repo: str, model_path: Optional[str] = None, cfg_path: Optional[str] = None) -> None: + # Download model config & checkpoint + _path = cfg_path or hf_hub_download(hub_repo, filename="config.json") + with open(_path, "rb") as f: + self.cfg = json.load(f) + + _path = model_path or hf_hub_download(hub_repo, filename="model.onnx") + self.ort_session = onnxruntime.InferenceSession(_path) + + def preprocess_image(self, pil_img: Image.Image) -> np.ndarray: + """Preprocess an image for inference + + Args: + pil_img: a valid pillow image + + Returns: + the resized and normalized image of shape (1, C, H, W) + """ + + # Resizing + img = pil_img.resize(self.cfg["input_shape"][-2:], Image.BILINEAR) + # (H, W, C) --> (C, H, W) + img = np.asarray(img).transpose((2, 0, 1)).astype(np.float32) / 255 + # Normalization + img -= np.array(self.cfg["mean"])[:, None, None] + img /= np.array(self.cfg["std"])[:, None, None] + + return img[None, ...] 
+ + def __call__(self, pil_img: Image.Image) -> np.ndarray: + np_img = self.preprocess_image(pil_img) + + # ONNX inference + ort_input = {self.ort_session.get_inputs()[0].name: np_img} + ort_out = self.ort_session.run(None, ort_input) + # Sigmoid + return 1 / (1 + np.exp(-ort_out[0][0])) diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index acaa4ae1..00000000 --- a/requirements.txt +++ /dev/null @@ -1,16 +0,0 @@ -pyroclient@git+https://github.com/pyronear/pyro-api.git#egg=pyroclient&subdirectory=client -python-dotenv==0.15.0 -requests==2.25.1 -psutil==5.9.0 -# From https://github.com/ultralytics/yolov5/blob/master/requirements.txt -torch==1.11.0 -torchvision==0.12.0 -opencv-python==4.5.4.60 -pandas==1.1.4 -PyYAML==5.3.1 -Pillow==9.2.0 -tqdm==4.64.0 -onnx==1.12.0 -onnxruntime==1.11.1 -ipython==8.4.0 -seaborn==0.11.0 \ No newline at end of file diff --git a/runner/docker-compose.yml b/runner/docker-compose.yml deleted file mode 100644 index c37f0387..00000000 --- a/runner/docker-compose.yml +++ /dev/null @@ -1,12 +0,0 @@ -version: '3.8' - -services: - pyro-engine: - build: - context: .. - dockerfile: ./runner/src/Dockerfile - command: python runner.py - volumes: - - ./data:/usr/src/app/data - - restart: always diff --git a/runner/src/Dockerfile b/runner/src/Dockerfile deleted file mode 100644 index dea5c7b3..00000000 --- a/runner/src/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM pyronear/pyro-engine:latest - -# set work directory -WORKDIR /usr/src/app - -# set environment variables -ENV PYTHONDONTWRITEBYTECODE 1 -ENV PYTHONUNBUFFERED 1 -ENV PYTHONPATH "${PYTHONPATH}:/usr/src/app" -ENV PATH /usr/local/bin:$PATH -ENV LANG C.UTF-8 - -COPY runner/src/runner.py /usr/src/app/runner.py diff --git a/runner/src/capture.py b/runner/src/capture.py deleted file mode 100644 index 19c60d64..00000000 --- a/runner/src/capture.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2020-2022, Pyronear. 
- -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. - - -from PIL import Image -import requests -from io import BytesIO -from dotenv import load_dotenv -import os -import time -import logging -import json -import urllib3 - - -urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) - -logging.basicConfig( - format="%(asctime)s | %(levelname)s: %(message)s", level=logging.INFO, force=True -) - - -def capture(ip, CAM_USER, CAM_PWD): - url = f"https://{ip}/cgi-bin/api.cgi?cmd=Snap&channel=0&rs=wuuPhkmUCeI9WG7C&user={CAM_USER}&password={CAM_PWD}" - - response = requests.get(url, verify=False, timeout=3) - return Image.open(BytesIO(response.content)) - - -load_dotenv("/home/pi/pyro-engine/runner/data/.env") - -CAM_USER = os.environ.get("CAM_USER") -CAM_PWD = os.environ.get("CAM_PWD") - -with open("/home/pi/pyro-engine/runner/data/cameras_credentials.json") as json_file: - cameras_credentials = json.load(json_file) - -for ip in cameras_credentials.keys(): - try: - img = capture(ip, CAM_USER, CAM_PWD) - file = os.path.join( - "/home/pi/captured_images", ip, f"{time.strftime('%Y%m%d-%H%M%S')}.jpg" - ) - os.makedirs(os.path.split(file)[0], exist_ok=True) - img.save(file) - - time.sleep(1) - except Exception: - logging.warning(f"Unable to get image from camera {ip}") diff --git a/runner/src/runner.py b/runner/src/runner.py deleted file mode 100644 index 6b6eb0c9..00000000 --- a/runner/src/runner.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (C) 2020-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. 
- - -from PIL import Image -import requests -from io import BytesIO -from pyroengine.engine import PyronearEngine -from dotenv import load_dotenv -import os -import time -import json -import logging -import urllib3 - - -urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) - -logging.basicConfig( - format="%(asctime)s | %(levelname)s: %(message)s", level=logging.INFO, force=True -) - - -def setup_engine(): - with open("data/config_data.json") as json_file: - config_data = json.load(json_file) - - # Loading config datas - detection_threshold = config_data["detection_threshold"] - api_url = config_data["api_url"] - save_evry_n_frame = config_data["save_evry_n_frame"] - loop_time = config_data["loop_time"] - latitude = config_data["latitude"] - longitude = config_data["longitude"] - model_weights = config_data["model_weights"] - - # Loading pi zeros datas - with open("data/cameras_credentials.json") as json_file: - cameras_credentials = json.load(json_file) - - engine = PyronearEngine( - detection_threshold, - api_url, - cameras_credentials, - save_evry_n_frame, - latitude, - longitude, - model_weights=model_weights, - ) - - return engine, cameras_credentials, loop_time - - -def capture(ip, CAM_USER, CAM_PWD): - url = f"https://{ip}/cgi-bin/api.cgi?cmd=Snap&channel=0&rs=wuuPhkmUCeI9WG7C&user={CAM_USER}&password={CAM_PWD}" - - response = requests.get(url, verify=False, timeout=3) - return Image.open(BytesIO(response.content)) - - -load_dotenv("data/.env") - -CAM_USER = os.environ.get("CAM_USER") -CAM_PWD = os.environ.get("CAM_PWD") - -engine, cameras_credentials, loop_time = setup_engine() - -while True: - for ip in cameras_credentials.keys(): - try: - start_time = time.time() - img = capture(ip, CAM_USER, CAM_PWD) - pred = engine.predict(img, ip) - - time.sleep(max(loop_time - time.time() + start_time, 0)) - except Exception: - logging.warning(f"Unable to get image from camera {ip}") diff --git a/scripts/collect_env.py b/scripts/collect_env.py deleted 
file mode 100644 index d89ab70f..00000000 --- a/scripts/collect_env.py +++ /dev/null @@ -1,348 +0,0 @@ -# Copyright (C) 2020-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. - -""" -Based on https://github.com/pytorch/pytorch/blob/master/torch/utils/collect_env.py -This script outputs relevant system environment info -Run it with `python collect_env.py`. -""" - -from __future__ import absolute_import, division, print_function, unicode_literals -import locale -import re -import subprocess -import sys -import os -from collections import namedtuple - -try: - import pyroengine - - PYROVISION_AVAILABLE = True -except (ImportError, NameError, AttributeError): - PYROVISION_AVAILABLE = False - -try: - import torch - - TORCH_AVAILABLE = True -except (ImportError, NameError, AttributeError): - TORCH_AVAILABLE = False - -try: - import torchvision - - VISION_AVAILABLE = True -except (ImportError, NameError, AttributeError): - VISION_AVAILABLE = False - -PY3 = sys.version_info >= (3, 0) - - -# System Environment Information -SystemEnv = namedtuple( - "SystemEnv", - [ - "pyroengine_version", - "torch_version", - "torchvision_version", - "os", - "python_version", - "is_cuda_available", - "cuda_runtime_version", - "nvidia_driver_version", - "nvidia_gpu_models", - "cudnn_version", - ], -) - - -def run(command): - """Returns (return-code, stdout, stderr)""" - p = subprocess.Popen( - command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True - ) - output, err = p.communicate() - rc = p.returncode - if PY3: - enc = locale.getpreferredencoding() - output = output.decode(enc) - err = err.decode(enc) - return rc, output.strip(), err.strip() - - -def run_and_read_all(run_lambda, command): - """Runs command using run_lambda; reads and returns entire output if rc is 0""" - rc, out, _ = run_lambda(command) - if rc != 0: - return None - return out - - -def run_and_parse_first_match(run_lambda, command, 
regex): - """Runs command using run_lambda, returns the first regex match if it exists""" - rc, out, _ = run_lambda(command) - if rc != 0: - return None - match = re.search(regex, out) - if match is None: - return None - return match.group(1) - - -def get_nvidia_driver_version(run_lambda): - if get_platform() == "darwin": - cmd = "kextstat | grep -i cuda" - return run_and_parse_first_match( - run_lambda, cmd, r"com[.]nvidia[.]CUDA [(](.*?)[)]" - ) - smi = get_nvidia_smi() - return run_and_parse_first_match(run_lambda, smi, r"Driver Version: (.*?) ") - - -def get_gpu_info(run_lambda): - if get_platform() == "darwin": - if TORCH_AVAILABLE and torch.cuda.is_available(): - return torch.cuda.get_device_name(None) - return None - smi = get_nvidia_smi() - uuid_regex = re.compile(r" \(UUID: .+?\)") - rc, out, _ = run_lambda(smi + " -L") - if rc != 0: - return None - # Anonymize GPUs by removing their UUID - return re.sub(uuid_regex, "", out) - - -def get_running_cuda_version(run_lambda): - return run_and_parse_first_match(run_lambda, "nvcc --version", r"release .+ V(.*)") - - -def get_cudnn_version(run_lambda): - """This will return a list of libcudnn.so; it's hard to tell which one is being used""" - if get_platform() == "win32": - cudnn_cmd = 'where /R "%CUDA_PATH%\\bin" cudnn*.dll' - elif get_platform() == "darwin": - # CUDA libraries and drivers can be found in /usr/local/cuda/. See - # https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#install - # https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac - # Use CUDNN_LIBRARY when cudnn library is installed elsewhere. 
- cudnn_cmd = "ls /usr/local/cuda/lib/libcudnn*" - else: - cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d" " -f1 | rev' - rc, out, _ = run_lambda(cudnn_cmd) - # find will return 1 if there are permission errors or if not found - if len(out) == 0 or (rc != 1 and rc != 0): - lib = os.environ.get("CUDNN_LIBRARY") - if lib is not None and os.path.isfile(lib): - return os.path.realpath(l) - return None - files = set() - for fn in out.split("\n"): - fn = os.path.realpath(fn) # eliminate symbolic links - if os.path.isfile(fn): - files.add(fn) - if not files: - return None - # Alphabetize the result because the order is non-deterministic otherwise - files = list(sorted(files)) - if len(files) == 1: - return files[0] - result = "\n".join(files) - return "Probably one of the following:\n{}".format(result) - - -def get_nvidia_smi(): - # Note: nvidia-smi is currently available only on Windows and Linux - smi = "nvidia-smi" - if get_platform() == "win32": - smi = '"C:\\Program Files\\NVIDIA Corporation\\NVSMI\\%s"' % smi - return smi - - -def get_platform(): - if sys.platform.startswith("linux"): - return "linux" - elif sys.platform.startswith("win32"): - return "win32" - elif sys.platform.startswith("cygwin"): - return "cygwin" - elif sys.platform.startswith("darwin"): - return "darwin" - else: - return sys.platform - - -def get_mac_version(run_lambda): - return run_and_parse_first_match(run_lambda, "sw_vers -productVersion", r"(.*)") - - -def get_windows_version(run_lambda): - return run_and_read_all(run_lambda, "wmic os get Caption | findstr /v Caption") - - -def get_lsb_version(run_lambda): - return run_and_parse_first_match( - run_lambda, "lsb_release -a", r"Description:\t(.*)" - ) - - -def check_release_file(run_lambda): - return run_and_parse_first_match( - run_lambda, "cat /etc/*-release", r'PRETTY_NAME="(.*)"' - ) - - -def get_os(run_lambda): - platform = get_platform() - - if platform == "win32" or platform == "cygwin": - return 
get_windows_version(run_lambda) - - if platform == "darwin": - version = get_mac_version(run_lambda) - if version is None: - return None - return "Mac OSX {}".format(version) - - if platform == "linux": - # Ubuntu/Debian based - desc = get_lsb_version(run_lambda) - if desc is not None: - return desc - - # Try reading /etc/*-release - desc = check_release_file(run_lambda) - if desc is not None: - return desc - - return platform - - # Unknown platform - return platform - - -def get_env_info(): - run_lambda = run - - if pyroengine_AVAILABLE: - pyroengine_str = pyroengine.__version__ - else: - pyroengine_str = "N/A" - - if TORCH_AVAILABLE: - torch_str = torch.__version__ - cuda_available_str = torch.cuda.is_available() - else: - torch_str = cuda_available_str = "N/A" - - if VISION_AVAILABLE: - torchvision_str = torchvision.__version__ - else: - torchvision_str = "N/A" - - return SystemEnv( - pyroengine_version=pyroengine_str, - torch_version=torch_str, - torchvision_version=torchvision_str, - python_version="{}.{}".format(sys.version_info[0], sys.version_info[1]), - is_cuda_available=cuda_available_str, - cuda_runtime_version=get_running_cuda_version(run_lambda), - nvidia_gpu_models=get_gpu_info(run_lambda), - nvidia_driver_version=get_nvidia_driver_version(run_lambda), - cudnn_version=get_cudnn_version(run_lambda), - os=get_os(run_lambda), - ) - - -env_info_fmt = """ -pyroengine version: {pyroengine_version} -PyTorch version: {torch_version} -Torchvision version: {torchvision_version} - -OS: {os} - -Python version: {python_version} -Is CUDA available: {is_cuda_available} -CUDA runtime version: {cuda_runtime_version} -GPU models and configuration: {nvidia_gpu_models} -Nvidia driver version: {nvidia_driver_version} -cuDNN version: {cudnn_version} -""".strip() - - -def pretty_str(envinfo): - def replace_nones(dct, replacement="Could not collect"): - for key in dct.keys(): - if dct[key] is not None: - continue - dct[key] = replacement - return dct - - def 
replace_bools(dct, true="Yes", false="No"): - for key in dct.keys(): - if dct[key] is True: - dct[key] = true - elif dct[key] is False: - dct[key] = false - return dct - - def maybe_start_on_next_line(string): - # If `string` is multiline, prepend a \n to it. - if string is not None and len(string.split("\n")) > 1: - return "\n{}\n".format(string) - return string - - mutable_dict = envinfo._asdict() - - # If nvidia_gpu_models is multiline, start on the next line - mutable_dict["nvidia_gpu_models"] = maybe_start_on_next_line( - envinfo.nvidia_gpu_models - ) - - # If the machine doesn't have CUDA, report some fields as 'No CUDA' - dynamic_cuda_fields = [ - "cuda_runtime_version", - "nvidia_gpu_models", - "nvidia_driver_version", - ] - all_cuda_fields = dynamic_cuda_fields + ["cudnn_version"] - all_dynamic_cuda_fields_missing = all( - mutable_dict[field] is None for field in dynamic_cuda_fields - ) - if ( - TORCH_AVAILABLE - and not torch.cuda.is_available() - and all_dynamic_cuda_fields_missing - ): - for field in all_cuda_fields: - mutable_dict[field] = "No CUDA" - - # Replace True with Yes, False with No - mutable_dict = replace_bools(mutable_dict) - - # Replace all None objects with 'Could not collect' - mutable_dict = replace_nones(mutable_dict) - - return env_info_fmt.format(**mutable_dict) - - -def get_pretty_env_info(): - """Collects environment information for debugging purposes - - Returns: - str: environment information - """ - return pretty_str(get_env_info()) - - -def main(): - print("Collecting environment information...") - output = get_pretty_env_info() - print(output) - - -if __name__ == "__main__": - main() diff --git a/scripts/update_script.sh b/scripts/update_script.sh index 2df90000..f9f36eba 100644 --- a/scripts/update_script.sh +++ b/scripts/update_script.sh @@ -1,6 +1,6 @@ #!/bin/bash # This script performs: -# pull origin master +# pull origin main #- if any change: # kill container # rebuild docker compose @@ -11,7 +11,7 @@ CID=$(docker ps | 
grep runner_pyro-engine_1 | awk '{print $1}') -if [ `git -C /home/pi/pyro-engine pull origin master | grep -c "up to date."` -ne 1 ]; +if [ `git -C /home/pi/pyro-engine pull origin main | grep -c "up to date."` -ne 1 ]; then echo "pyro-engine updated from github"; cd /home/pi/pyro-engine/runner/; diff --git a/server/data/config_data.json b/server/data/config_data.json deleted file mode 100644 index 9633113b..00000000 --- a/server/data/config_data.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "detection_threshold": 0.55, - "api_url": "https://api.pyronear.org", - "save_evry_n_frame": 50, - "latitude": 48.8673441, - "longitude": 2.3841824 -} diff --git a/server/data/ip_hostname_mapping.json b/server/data/ip_hostname_mapping.json deleted file mode 100644 index 4abaa9b0..00000000 --- a/server/data/ip_hostname_mapping.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "192.168.1.1": "device-hostname-1", - "192.168.1.2": "device-hostname-2", - "192.168.1.3": "device-hostname-3", - "192.168.1.4": "device-hostname-4" -} diff --git a/setup.py b/setup.py index ad3cb25f..ee8ae44b 100644 --- a/setup.py +++ b/setup.py @@ -1,93 +1,25 @@ -#!usr/bin/python - # Copyright (C) 2020-2022, Pyronear. -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. +# This program is licensed under the Apache License 2.0. +# See LICENSE or go to for full license details. 
-""" -Package installation setup -""" import os -import subprocess -from setuptools import setup, find_packages from pathlib import Path -version = "0.0.1" -sha = "Unknown" -src_folder = "pyroengine" -package_index = "pyroengine" +from setuptools import setup -cwd = Path(__file__).parent.absolute() +PKG_NAME = "pyroengine" +VERSION = os.getenv("BUILD_VERSION", "0.2.0.dev0") -if os.getenv("BUILD_VERSION"): - version = os.getenv("BUILD_VERSION") -else: - try: - sha = ( - subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd) - .decode("ascii") - .strip() - ) - except Exception: - pass - if sha != "Unknown": - version += "+" + sha[:7] -print(f"Building wheel {package_index}-{version}") -with open(cwd.joinpath(src_folder, "version.py"), "w") as f: - f.write(f"__version__ = '{version}'\n") +if __name__ == "__main__": -with open("README.md") as f: - readme = f.read() + print(f"Building wheel {PKG_NAME}-{VERSION}") -with open("requirements.txt") as f: - requirements = f.read().splitlines() + # Dynamically set the __version__ attribute + cwd = Path(__file__).parent.absolute() + with open(cwd.joinpath("pyroengine", "version.py"), "w", encoding="utf-8") as f: + f.write(f"__version__ = '{VERSION}'\n") -setup( - # Metadata - name=package_index, - version=version, - author="PyroNear Contributors", - author_email="pyronear.d4g@gmail.com", - maintainer="Pyronear", - description="Pyronear Engine is a repository that aims at deploying pyronear", - long_description=readme, - long_description_content_type="text/markdown", - url="https://github.com/pyronear/pyro-engine", - download_url="https://github.com/pyronear/pyro-engine/tags", - license="Apache 2.0", - classifiers=[ - "Development Status :: 4 - Beta", - "Intended Audience :: Developers", - "Intended Audience :: Science/Research", - "License :: OSI Approved :: Apache Software License", - "Natural Language :: English", - "Operating System :: OS Independent", - "Programming Language :: Python :: 3", - "Programming Language 
:: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Topic :: Scientific/Engineering", - "Topic :: Scientific/Engineering :: Mathematics", - "Topic :: Scientific/Engineering :: Artificial Intelligence", - "Topic :: Software Development", - "Topic :: Software Development :: Libraries", - "Topic :: Software Development :: Libraries :: Python Modules", - ], - keywords=[ - "pytorch", - "deep learning", - "vision", - "models", - "wildfire", - "object detection", - ], - # Package info - packages=find_packages(exclude=("test",)), - zip_safe=True, - python_requires=">=3.6.0", - include_package_data=True, - install_requires=requirements, - package_data={"": ["LICENSE"]}, -) + setup(name=PKG_NAME, version=VERSION) diff --git a/src/Makefile b/src/Makefile new file mode 100644 index 00000000..3cd04242 --- /dev/null +++ b/src/Makefile @@ -0,0 +1,4 @@ +# Pin the dependencies +lock: + poetry lock + poetry export -f requirements.txt --without-hashes --output requirements.txt diff --git a/src/capture.py b/src/capture.py new file mode 100644 index 00000000..522e0b8d --- /dev/null +++ b/src/capture.py @@ -0,0 +1,52 @@ +# Copyright (C) 2020-2022, Pyronear. + +# This program is licensed under the Apache License 2.0. +# See LICENSE or go to for full license details. 
+ + +import json +import logging +import os +import time +from io import BytesIO + +import requests +import urllib3 +from dotenv import load_dotenv +from PIL import Image + +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +logging.basicConfig(format="%(asctime)s | %(levelname)s: %(message)s", level=logging.INFO, force=True) + + +def capture(ip, CAM_USER, CAM_PWD): + url = f"https://{ip}/cgi-bin/api.cgi?cmd=Snap&channel=0&rs=wuuPhkmUCeI9WG7C&user={CAM_USER}&password={CAM_PWD}" + + response = requests.get(url, verify=False, timeout=3) + return Image.open(BytesIO(response.content)) + + +def main(): + load_dotenv("/home/pi/pyro-engine/runner/data/.env") + + CAM_USER = os.environ.get("CAM_USER") + CAM_PWD = os.environ.get("CAM_PWD") + + with open("/home/pi/pyro-engine/runner/data/cameras_credentials.json") as json_file: + cameras_credentials = json.load(json_file) + + for ip in cameras_credentials.keys(): + try: + img = capture(ip, CAM_USER, CAM_PWD) + file = os.path.join("/home/pi/captured_images", ip, f"{time.strftime('%Y%m%d-%H%M%S')}.jpg") + os.makedirs(os.path.split(file)[0], exist_ok=True) + img.save(file) + + time.sleep(1) + except Exception: + logging.warning(f"Unable to get image from camera {ip}") + + +if __name__ == "__main__": + main() diff --git a/src/pyproject.toml b/src/pyproject.toml new file mode 100644 index 00000000..fbdb7c25 --- /dev/null +++ b/src/pyproject.toml @@ -0,0 +1,16 @@ +[build-system] +requires = ["poetry>=1.0"] +build-backend = "poetry.masonry.api" + +[tool.poetry] +name = "pyro-engine" +version = "0.2.0.dev0" +description = "Wildfire detection on edge devices" +authors = ["Pyronear "] +license = "Apache-2.0" + +[tool.poetry.dependencies] +python = "^3.8" +pyroengine = "^0.2.0" +python-dotenv = ">=0.15.0" +requests = "^2.20.0" diff --git a/src/requirements.txt b/src/requirements.txt new file mode 100644 index 00000000..ee9a8351 --- /dev/null +++ b/src/requirements.txt @@ -0,0 +1,7 @@ +pyroengine 
+python-dotenv==0.15.0 +requests==2.25.1 +Pillow==9.2.0 +onnxruntime==1.11.1 +numpy==1.22.4 +huggingface-hub==0.7.0 diff --git a/src/run.py b/src/run.py new file mode 100644 index 00000000..dd2b3d23 --- /dev/null +++ b/src/run.py @@ -0,0 +1,155 @@ +# Copyright (C) 2022, Pyronear. + +# This program is licensed under the Apache License 2.0. +# See LICENSE or go to for full license details. + + +import argparse +import json +import logging +import os +import time +from io import BytesIO +from pathlib import Path +from typing import List + +import requests +import urllib3 +from dotenv import load_dotenv +from PIL import Image + +from pyroengine import Engine + +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +logging.basicConfig(format="%(asctime)s | %(levelname)s: %(message)s", level=logging.INFO, force=True) + +CAM_URL = "https://{ip_address}/cgi-bin/api.cgi?cmd=Snap&channel=0&rs=wuuPhkmUCeI9WG7C&user={login}&password={password}" + + +class ReolinkCamera: + """Implements a camera controller + + Args: + ip_address: the local IP address to reach the Reolink camera + login: login to access camera stream + password: password to access camera stream + """ + + def __init__(self, ip_address: str, login: str, password: str) -> None: + self.ip_address = ip_address + self.login = login + self._url = CAM_URL.format(ip_address=ip_address, login=login, password=password) + # Check the connection + assert isinstance(self.capture(), Image.Image) + + def capture(self) -> Image.Image: + """Retrieves the camera stream""" + response = requests.get(self._url, verify=False, timeout=3) + return Image.open(BytesIO(response.content)) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(ip_address={self.ip_address}, login={self.login})" + + +class SystemController: + """Implements the full system controller + + Args: + engine: the image analyzer + cameras: the cameras to get the visual streams from + """ + + def __init__(self, engine: Engine, cameras: 
List[ReolinkCamera]) -> None: + self.engine = engine + self.cameras = cameras + + def analyze_stream(self, idx: int) -> float: + assert 0 <= idx < len(self.cameras) + try: + img = self.cameras[idx].capture() + try: + self.engine.predict(img, self.cameras[idx].ip_address) + except Exception: + logging.warning(f"Unable to analyze stream from camera {self.cameras[idx]}") + except Exception: + logging.warning(f"Unable to fetch stream from camera {self.cameras[idx]}") + + def run(self): + """Analyzes all camera streams""" + for idx in range(len(self.cameras)): + self.analyze_stream(idx) + + +def main(args): + print(args) + + # .env loading + load_dotenv(".env") + API_URL = os.environ.get("API_URL") + LAT = float(os.environ.get("LAT")) + LON = float(os.environ.get("LON")) + assert isinstance(API_URL, str) and isinstance(LAT, float) and isinstance(LON, float) + CAM_USER = os.environ.get("CAM_USER") + CAM_PWD = os.environ.get("CAM_PWD") + assert isinstance(CAM_USER, str) and isinstance(CAM_PWD, str) + + # Loading camera creds + with open(args.creds, "rb") as json_file: + cameras_credentials = json.load(json_file) + + # Check if model is available in cache + cache = Path(args.cache) + _model, _config = args.model, args.config + if cache.is_dir(): + if cache.joinpath("model.onnx").is_file(): + _model = str(cache.joinpath("model.onnx")) + if cache.joinpath("config.json").is_file(): + _config = str(cache.joinpath("config.json")) + + if isinstance(_model, str): + logging.info(f"Loading model from: {_model}") + + engine = Engine( + args.hub, + args.thresh, + API_URL, + cameras_credentials, + LAT, + LON, + frame_saving_period=args.save_period // args.period, + model_path=_model, + cfg_path=_config, + cache_folder=args.cache, + ) + + sys_controller = SystemController( + engine, + [ReolinkCamera(_ip, CAM_USER, CAM_PWD) for _ip in cameras_credentials], + ) + + while True: + start_ts = time.time() + sys_controller.run() + # Sleep only once all images are processed + 
time.sleep(max(args.period - time.time() + start_ts, 0)) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Raspberry Pi system controller", formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + # Model + parser.add_argument("--hub", type=str, default="pyronear/rexnet1_3x", help="HF Hub repo to use") + parser.add_argument("--model", type=str, default=None, help="Overrides the ONNX model") + parser.add_argument("--config", type=str, default=None, help="Overrides the model config") + parser.add_argument("--thresh", type=float, default=0.5, help="Confidence threshold") + # Camera & cache + parser.add_argument("--creds", type=str, default="data/credentials.json", help="Camera credentials") + parser.add_argument("--cache", type=str, default="./data", help="Cache folder") + # Time config + parser.add_argument("--period", type=int, default=30, help="Number of seconds between each camera stream analysis") + parser.add_argument("--save-period", type=int, default=3600, help="Number of seconds between each media save") + args = parser.parse_args() + + main(args) diff --git a/test/fake_gpiozero.py b/test/fake_gpiozero.py deleted file mode 100644 index 72b56111..00000000 --- a/test/fake_gpiozero.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (C) 2020-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. - - -class CPUTemperature: - def __init__(self): - print("using fake CPUTemperature") - self.temperature = 40 diff --git a/test/fake_picamera.py b/test/fake_picamera.py deleted file mode 100644 index 42af846c..00000000 --- a/test/fake_picamera.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2020-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. 
- - -class PiCamera: - @property - def resolution(self): - pass - - @resolution.setter - def resolution(self, resolution): - pass - - def start_preview(self): - pass - - def capture(self, output, format=None): - pass diff --git a/test/pi_patch.py b/test/pi_patch.py deleted file mode 100644 index 81470e26..00000000 --- a/test/pi_patch.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (C) 2020-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. - -import sys -import fake_gpiozero -import fake_picamera - - -sys.modules["gpiozero"] = fake_gpiozero -sys.modules["picamera"] = fake_picamera diff --git a/test/test_engine.py b/test/test_engine.py deleted file mode 100644 index f8bf13cc..00000000 --- a/test/test_engine.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2020-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. - -import unittest -from PIL import Image -import io -import requests -from pyroengine.engine import PyronearEngine - - -url = "https://beta.ctvnews.ca/content/dam/ctvnews/images/2020/9/15/1_5105012.jpg?cache_timestamp=1600164224519" - - -class EngineTester(unittest.TestCase): - def test_engine(self): - # Init - engine = PyronearEngine() - # Get Image - response = requests.get(url) - image_bytes = io.BytesIO(response.content) - im = Image.open(image_bytes) - # Predict - res = engine.predict(im) - - self.assertGreater(res, 0.5) - - # Check backup - engine.save_cache_to_disk() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_raspberryPi_monitorPi.py b/test/test_raspberryPi_monitorPi.py deleted file mode 100644 index e6dae795..00000000 --- a/test/test_raspberryPi_monitorPi.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (C) 2020-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. 
- -import unittest -from threading import Thread -from unittest.mock import Mock, patch - -# noinspection PyUnresolvedReferences -import pi_patch -import requests -from requests import HTTPError - -from pyroengine.pi_utils.monitor_pi import MonitorPi - - -class MonitorPiTester(unittest.TestCase): - def setUp(self): - self.module_path = "pyroengine.pi_utils.monitor_pi" - - def test_get_record(self): - with patch(f"{self.module_path}.psutil") as mock_psutil, patch( - f"{self.module_path}.requests" - ) as mock_requests: - mock_response = requests.Response() - mock_response.status_code = 200 - mock_requests.post.return_value = mock_response - - # add "attributes" to psutil mock - mock_psutil.cpu_percent.return_value = 11 - mock_psutil.virtual_memory().available = 99 * 1024**3 - - record_logs = MonitorPi("my_url") - record_logs.cpu_temp = Mock(temperature=5) - - record_logs.get_record() - - test_metrics = { - "id": 0, - "cpu_temperature_C": record_logs.cpu_temp.temperature, - "mem_available_GB": mock_psutil.virtual_memory().available / 1024**3, - "cpu_usage_percent": mock_psutil.cpu_percent(), - } - mock_requests.post.assert_called_once_with("my_url", json=test_metrics) - - def test_get_record_raises_http_exception_on_400(self): - with patch(f"{self.module_path}.requests") as mock_requests: - mock_response = requests.Response() - mock_response.status_code = 400 - mock_requests.post.return_value = mock_response - - record_logs = MonitorPi("my_url") - - with self.assertRaises(HTTPError): - record_logs.get_record() - - def test_record(self): - record_logs = MonitorPi("my_url") - calls = [] - - def new_get_record(): - calls.append(True) - if len(calls) == 3: - record_logs.stop_monitoring() - - mock_get_record = Mock(side_effect=new_get_record) - with patch.object(record_logs, "get_record", new=mock_get_record): - record_logs.record(time_step=0) - - self.assertEqual(3, mock_get_record.call_count) - - def test_record_catches_exception(self): - record_logs = 
MonitorPi("my_url") - calls = [] - - def new_get_record(): - calls.append(True) - if len(calls) == 3: - record_logs.stop_monitoring() - elif len(calls) == 2: - raise HTTPError() - - mock_get_record = Mock(side_effect=new_get_record) - with patch.object(record_logs, "get_record", new=mock_get_record): - record_logs.record(time_step=0) - - self.assertEqual(3, mock_get_record.call_count) - - def test_stop_monitoring(self): - record_logs = MonitorPi("my_url") - monitoring_thread = Thread( - target=lambda: record_logs.record(time_step=0.1), daemon=True - ) - with patch(f"{self.module_path}.requests") as mock_requests: - mock_response = requests.Response() - mock_response.status_code = 400 - mock_requests.post.return_value = mock_response - - monitoring_thread.start() - - record_logs.stop_monitoring() - monitoring_thread.join(timeout=1) - self.assertFalse(monitoring_thread.is_alive()) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..923dd478 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,11 @@ +from io import BytesIO + +import pytest +import requests +from PIL import Image + + +@pytest.fixture(scope="session") +def mock_classification_image(tmpdir_factory): + url = "https://github.com/pyronear/pyro-vision/releases/download/v0.1.2/fire_sample_image.jpg" + return Image.open(BytesIO(requests.get(url).content)) diff --git a/tests/test_engine_core.py b/tests/test_engine_core.py new file mode 100644 index 00000000..4d3526a1 --- /dev/null +++ b/tests/test_engine_core.py @@ -0,0 +1,50 @@ +import json +from datetime import datetime + +from pyroengine.core import Engine + + +def test_engine(tmpdir_factory, mock_classification_image): + + # Cache + folder = str(tmpdir_factory.mktemp("engine_cache")) + + # No API + engine = Engine("pyronear/rexnet1_3x", cache_folder=folder) + + # Cache saving + _ts = datetime.utcnow().isoformat() + engine._stage_alert(mock_classification_image, 0) + assert 
len(engine._alerts) == 1 + assert engine._alerts[0]["ts"] < datetime.utcnow().isoformat() and _ts < engine._alerts[0]["ts"] + assert engine._alerts[0]["media_id"] is None + assert engine._alerts[0]["alert_id"] is None + + # Cache dump + engine._dump_cache() + assert engine._cache.joinpath("pending_alerts.json").is_file() + with open(engine._cache.joinpath("pending_alerts.json"), "rb") as f: + cache_dump = json.load(f) + assert isinstance(cache_dump, list) and len(cache_dump) == 1 and len(engine._alerts) == 1 + assert cache_dump[0] == { + "frame_path": str(engine._cache.joinpath("pending_frame0.jpg")), + "cam_id": 0, + "ts": engine._alerts[0]["ts"], + } + + # Cache dump loading + engine = Engine("pyronear/rexnet1_3x", cache_folder=folder) + assert len(engine._alerts) == 1 + engine.clear_cache() + + # inference + engine = Engine("pyronear/rexnet1_3x", cache_folder=folder) + out = engine.predict(mock_classification_image, 0) + assert isinstance(out, float) and 0 <= out <= 1 + # Alert relaxation + assert not engine._states["-1"]["ongoing"] + out = engine.predict(mock_classification_image, 0) + out = engine.predict(mock_classification_image, 0) + assert engine._states["-1"]["ongoing"] + + # With API diff --git a/tests/test_engine_vision.py b/tests/test_engine_vision.py new file mode 100644 index 00000000..d821004a --- /dev/null +++ b/tests/test_engine_vision.py @@ -0,0 +1,24 @@ +import numpy as np +from huggingface_hub import hf_hub_download + +from pyroengine.vision import Classifier + + +def test_classifier(mock_classification_image): + + # Instantiate the ONNX model + model = Classifier("pyronear/rexnet1_3x") + # Check preprocessing + out = model.preprocess_image(mock_classification_image) + assert isinstance(out, np.ndarray) and out.dtype == np.float32 + assert out.shape == (1, 3, 224, 224) + # Check inference + out = model(mock_classification_image) + assert isinstance(out, np.ndarray) and out.dtype == np.float32 + assert out.shape == (1,) + assert out >= 0 and out
<= 1 + + # Load static file + cfg_path = hf_hub_download("pyronear/rexnet1_3x", filename="config.json") + model_path = hf_hub_download("pyronear/rexnet1_3x", filename="model.onnx") + Classifier("pyronear/rexnet1_3x", cfg_path=cfg_path, model_path=model_path)