diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml
index 3fb4f20009..155b27add4 100644
--- a/.github/workflows/build-and-test.yml
+++ b/.github/workflows/build-and-test.yml
@@ -28,10 +28,10 @@ jobs:
name: Check for syntax errors
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Set up python
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: ${{ env.LATEST_SUPPORTED_PYTHON_VERSION }}
@@ -46,13 +46,13 @@ jobs:
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
python -m flake8 src --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- check-history-rst-syntax:
- name: Check HISTORY RST syntax
+ check-rst-syntax:
+ name: Check RST syntax
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@v5
name: Set up python
with:
python-version: ${{ env.LATEST_SUPPORTED_PYTHON_VERSION }}
@@ -63,7 +63,7 @@ jobs:
- name: Lint with doc8
run: |
# Skip line-too-long errors (D001)
- python -m doc8 --ignore D001 HISTORY.rst
+ python -m doc8 --ignore D001 HISTORY.rst README_PYTHON.rst
run-model-tests:
name: Run model tests
@@ -75,7 +75,7 @@ jobs:
python-version: [3.8, 3.9, "3.10", "3.11", "3.12"]
os: [windows-latest, macos-13]
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch complete history for accurate versioning
@@ -127,13 +127,13 @@ jobs:
run: make test
- name: Upload wheel artifact
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: Wheel for ${{ matrix.os }} ${{ matrix.python-version }}
path: dist
- name: Upload conda env artifact
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: Conda Env for ${{ matrix.os }} ${{ matrix.python-version }}
@@ -141,13 +141,13 @@ jobs:
- name: Authenticate GCP
if: github.event_name != 'pull_request'
- uses: google-github-actions/auth@v0
+ uses: google-github-actions/auth@v2
with:
credentials_json: ${{ secrets.GOOGLE_SERVICE_ACC_KEY }}
- name: Set up GCP
if: github.event_name != 'pull_request'
- uses: google-github-actions/setup-gcloud@v0
+ uses: google-github-actions/setup-gcloud@v2
- name: Deploy artifacts to GCS
if: github.event_name != 'pull_request'
@@ -162,7 +162,7 @@ jobs:
python-version: [3.8, 3.9, "3.10", "3.11", "3.12"]
os: [windows-latest, macos-13]
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch complete history for accurate versioning
@@ -170,7 +170,7 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
requirements-files: requirements.txt
- requirements: ${{ env.CONDA_DEFAULT_DEPENDENCIES }}
+ requirements: ${{ env.CONDA_DEFAULT_DEPENDENCIES }} twine
- name: Build source distribution
run: |
@@ -193,9 +193,12 @@ jobs:
# natcap.invest from source and that it imports.
python -c "from natcap.invest import *"
- - uses: actions/upload-artifact@v3
+ - name: Check long description with twine
+ run: twine check $(find dist -name "natcap[._-]invest*")
+
+ - uses: actions/upload-artifact@v4
with:
- name: Source distribution
+ name: Source distribution ${{ matrix.os }} ${{ matrix.python-version }}
path: dist
# Secrets not available in PR so don't use GCP.
@@ -204,13 +207,13 @@ jobs:
# different extensions)
- name: Authenticate GCP
if: github.event_name != 'pull_request' && matrix.os == 'macos-13' && matrix.python-version == env.LATEST_SUPPORTED_PYTHON_VERSION
- uses: google-github-actions/auth@v0
+ uses: google-github-actions/auth@v2
with:
credentials_json: ${{ secrets.GOOGLE_SERVICE_ACC_KEY }}
- name: Set up GCP
if: github.event_name != 'pull_request' && matrix.os == 'macos-13' && matrix.python-version == env.LATEST_SUPPORTED_PYTHON_VERSION
- uses: google-github-actions/setup-gcloud@v0
+ uses: google-github-actions/setup-gcloud@v2
- name: Deploy artifacts to GCS
if: github.event_name != 'pull_request' && matrix.os == 'macos-13' && matrix.python-version == env.LATEST_SUPPORTED_PYTHON_VERSION
@@ -221,7 +224,7 @@ jobs:
runs-on: windows-latest
needs: check-syntax-errors
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch complete history for accurate versioning
@@ -252,7 +255,7 @@ jobs:
steps:
- name: Check out repo
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch complete history for accurate versioning
@@ -309,7 +312,7 @@ jobs:
workspace-path: ${{ github.workspace }}
binary-extension: exe
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch complete history for accurate versioning
@@ -366,13 +369,13 @@ jobs:
- name: Authenticate GCP
if: github.event_name != 'pull_request'
- uses: google-github-actions/auth@v0
+ uses: google-github-actions/auth@v2
with:
credentials_json: ${{ secrets.GOOGLE_SERVICE_ACC_KEY }}
- name: Set up GCP
if: github.event_name != 'pull_request'
- uses: google-github-actions/setup-gcloud@v0
+ uses: google-github-actions/setup-gcloud@v2
- name: Build Workbench (PRs)
if: github.event_name == 'pull_request'
@@ -441,20 +444,20 @@ jobs:
- name: Upload workbench binary artifact
if: always()
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: Workbench-${{ runner.os }}-binary
path: workbench/dist/*.${{ matrix.binary-extension }}
- name: Upload user's guide artifact (Windows)
if: matrix.os == 'windows-latest'
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: InVEST-user-guide
path: dist/InVEST_*_userguide.zip
- name: Upload workbench logging from puppeteer
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
if: always()
with:
name: ${{ runner.os }}_puppeteer_log.zip
@@ -473,7 +476,7 @@ jobs:
- name: Upload workspace on failure
if: ${{ failure() }}
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: InVEST-failed-${{ runner.os }}-workspace
path: ${{ matrix.workspace-path}}
@@ -483,11 +486,11 @@ jobs:
needs: check-syntax-errors
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch complete history for accurate versioning
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@v5
with:
python-version: ${{ env.LATEST_SUPPORTED_PYTHON_VERSION }}
@@ -498,20 +501,20 @@ jobs:
- run: make sampledata sampledata_single
- name: Upload sample data artifact
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: InVEST-sample-data
path: dist/*.zip
- name: Authenticate GCP
if: github.event_name != 'pull_request'
- uses: google-github-actions/auth@v0
+ uses: google-github-actions/auth@v2
with:
credentials_json: ${{ secrets.GOOGLE_SERVICE_ACC_KEY }}
- name: Set up GCP
if: github.event_name != 'pull_request'
- uses: google-github-actions/setup-gcloud@v0
+ uses: google-github-actions/setup-gcloud@v2
- name: Deploy artifacts to GCS
if: github.event_name != 'pull_request'
diff --git a/HISTORY.rst b/HISTORY.rst
index 47406cd82c..5eb71f8807 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -37,8 +37,49 @@
Unreleased Changes
------------------
+* General
+ * InVEST has been updated to build against numpy 2.
+ https://github.com/natcap/invest/issues/1641
+ * Updated validation to handle a change in exceptions raised by GDAL in
+ ``pygeoprocessing.get_raster_info`` and
+ ``pygeoprocessing.get_vector_info``.
+ https://github.com/natcap/invest/issues/1645
+ * The scripting page of the InVEST API documentation has been updated to
+ reflect changes in how InVEST is installed on modern systems, and also to
+ include images of the InVEST workbench instead of just broken links.
+ https://github.com/natcap/invest/issues/1660
* Workbench
- * Several small updates to the model input form UI to improve usability and visual consistency (https://github.com/natcap/invest/issues/912)
+ * Several small updates to the model input form UI to improve usability
+ and visual consistency (https://github.com/natcap/invest/issues/912).
+ * Fixed a bug that caused the application to crash when attempting to
+ open a workspace without a valid logfile
+ (https://github.com/natcap/invest/issues/1598).
+ * Fixed a bug that was allowing readonly workspace directories on Windows
+ (https://github.com/natcap/invest/issues/1599).
+ * Fixed a bug that, in certain scenarios, caused a datastack to be saved
+ with relative paths when the Relative Paths checkbox was left unchecked
+ (https://github.com/natcap/invest/issues/1609).
+ * Improved error handling when a datastack cannot be saved with relative
+ paths across drives (https://github.com/natcap/invest/issues/1608).
+* Coastal Vulnerability
+ * Fixed a regression where an AOI with multiple features could raise a
+ TypeError after intersecting with the landmass polygon.
+ https://github.com/natcap/invest/issues/1657
+* Forest Carbon Edge Effects
+ * Updated vector reprojection to allow partial reprojection. Related to
+ https://github.com/natcap/invest/issues/1645
+* Habitat Quality
+ * Access raster is now generated from the reprojected access vector
+ (https://github.com/natcap/invest/issues/1615).
+ * Rarity values are now output in CSV format (as well as in raster format)
+ (https://github.com/natcap/invest/issues/721).
+* Urban Flood Risk
+ * Fields present on the input AOI vector are now retained in the output
+ (https://github.com/natcap/invest/issues/1600).
+* Urban Nature Access
+ * The model now works as expected when the user provides an LULC raster
+ that does not have a nodata value defined.
+ https://github.com/natcap/invest/issues/1293
3.14.2 (2024-05-29)
-------------------
diff --git a/Makefile b/Makefile
index 234e814bbf..97af79b9f5 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
DATA_DIR := data
GIT_SAMPLE_DATA_REPO := https://bitbucket.org/natcap/invest-sample-data.git
GIT_SAMPLE_DATA_REPO_PATH := $(DATA_DIR)/invest-sample-data
-GIT_SAMPLE_DATA_REPO_REV := ab8c74a62a93fd0019de2bca064abc0a5a07afab
+GIT_SAMPLE_DATA_REPO_REV := 0f8b41557753dad3670ba8220f41650b51435a93
GIT_TEST_DATA_REPO := https://bitbucket.org/natcap/invest-test-data.git
GIT_TEST_DATA_REPO_PATH := $(DATA_DIR)/invest-test-data
@@ -10,7 +10,7 @@ GIT_TEST_DATA_REPO_REV := 324abde73e1d770ad75921466ecafd1ec6297752
GIT_UG_REPO := https://github.com/natcap/invest.users-guide
GIT_UG_REPO_PATH := doc/users-guide
-GIT_UG_REPO_REV := 0404bc5d4d43085cdc58f50f8fc29944b10cefb1
+GIT_UG_REPO_REV := f203ec069f9f03560c9a85b268e67ebb6b994953
ENV = "./env"
ifeq ($(OS),Windows_NT)
diff --git a/constraints_tests.txt b/constraints_tests.txt
index 92920cea3f..b0056a12e1 100644
--- a/constraints_tests.txt
+++ b/constraints_tests.txt
@@ -6,5 +6,7 @@
# occur with regular use of invest. https://github.com/OSGeo/gdal/issues/8497
GDAL!=3.6.*,!=3.7.*
-# https://github.com/natcap/pygeoprocessing/issues/387
-GDAL<3.8.5
+# Pyinstaller 6.10 breaks our windows builds. Until we can figure out the
+# root cause, let's cap the versions to those that work.
+# https://github.com/natcap/invest/issues/1622
+pyinstaller<6.10
diff --git a/doc/api-docs/conf.py b/doc/api-docs/conf.py
index 495e68e8b8..46b1e83993 100644
--- a/doc/api-docs/conf.py
+++ b/doc/api-docs/conf.py
@@ -78,13 +78,11 @@
# -- Options for HTML output ----------------------------------------------
import sphinx_rtd_theme
+
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
-# Add any paths that contain custom themes here, relative to this directory.
-html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
-
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/invest-logo.png"
@@ -130,7 +128,7 @@
# dir menu entry, description, category)
texinfo_documents = [
('index', 'InVEST', 'InVEST Documentation',
- 'The Natural Capital Project', 'InVEST',
+ 'The Natural Capital Project', 'InVEST',
'Integrated Valuation of Ecosystem Services and Tradeoffs',
'Scientific Software'),
]
@@ -138,8 +136,8 @@
# -- Prepare for sphinx build ---------------------------------------------
-# Use sphinx apidoc tool to generate documentation for invest. Generated rst
-# files go into the api/ directory. Note that some apidoc options may not work
+# Use sphinx apidoc tool to generate documentation for invest. Generated rst
+# files go into the api/ directory. Note that some apidoc options may not work
# the same because we aren't using their values in the custom templates
apidoc.main([
'--force', # overwrite any files from previous run
@@ -164,7 +162,7 @@
All InVEST models share a consistent python API:
- - Every InVEST model has a corresponding module or subpackage in the
+ - Every InVEST model has a corresponding module or subpackage in the
``natcap.invest`` package
- The model modules contain a function called ``execute``
- The ``execute`` function takes a single argument (``args``), a dictionary
@@ -189,7 +187,7 @@
natcap.invest.carbon.execute(args)
For examples of scripting a model run, or multiple successive model runs,
-see :ref:`CreatingSamplePythonScripts`.
+see :ref:`CreatingPythonScripts`.
.. contents:: Available Models and Tools:
diff --git a/doc/api-docs/scripting.rst b/doc/api-docs/scripting.rst
index 393ddd2ef4..442a63375a 100644
--- a/doc/api-docs/scripting.rst
+++ b/doc/api-docs/scripting.rst
@@ -20,98 +20,71 @@ InVEST models from the command-line.
Setting up your Python environment
==================================
-1. **Install Python 3.6 or later.**
+We recommend using the ``conda`` or ``mamba`` package manager to create an
+environment. The easiest way to do so is to execute the following command::
- Python can be `downloaded from here `_. When installing,
- be sure to allow ``python.exe`` to be added to the path in the installation options.
+ mamba create -n invest-env -c conda-forge natcap.invest
+This will create a new environment called ``invest-env`` with the latest
+released versions of python and ``natcap.invest``, as well as all dependencies
+needed to execute ``natcap.invest``.
-2. **Put pip on the PATH.**
+Alternate installation methods are available. Please see the :ref:`installing`
+guide for further instructions on setting up your python environment and
+installing the ``natcap.invest`` package.
- The ``pip`` utility for installing python packages is already included with
- Python 2.7.9 and later. Be sure to add ``C:\Python37\Scripts`` (or your custom
- install location) to the Windows PATH environment variable so that ``pip`` can
- be called from the command line without needing to use its full path.
+.. _CreatingPythonScripts:
- After this is done (and you've opened a new command-line window), you will be
- able to use ``pip`` at the command-line to install packages like so::
+=======================
+Creating Python Scripts
+=======================
- > pip install
+1. **Launch InVEST Workbench and Open the Selected Model**
-3. **Install packages needed to run InVEST.**
-
- Most (maybe even all) of these packages can be downloaded as precompiled
- wheels from `Christoph Gohlke's build page `_.
- Others should be able to be installed via ``pip install ``.
-
- .. This ..include:: part will automatically pull the contents of requirements.txt
- and include the file from line 9 through the end.
-
- .. include:: ../../requirements.txt
- :literal:
- :start-line: 9
-
-
-4. **Install the InVEST python package**
-
- 4a. Download a release of the ``natcap.invest`` python package.
-
- * `Releases on the python package index `_
-
- 4b. Install the downloaded python package..
-
- * ``win32.whl`` files are prebuilt binary distributions and can be
- installed via pip.
- See the `pip docs for installing a package from a wheel
- `_
- * ``.zip`` and ``.tar.gz`` files are source archives.
- See :ref:`installing` for details, including how to
- install specific development versions of ``natcap.invest``.
-
-
-.. _CreatingSamplePythonScripts:
-
-==============================
-Creating Sample Python Scripts
-==============================
-
-1. **Launch InVEST Model**
-
- Once an InVEST model is selected for scripting,
- launch that model from the Windows Start menu. This example in this guide
- follows the NDR model.
+ Once you have selected an InVEST model to script, launch the workbench and
+ select the model from the list of available models. This guide will follow
+ the Nutrient Delivery Ratio (NDR) model.
2. **Fill in InVEST Model Input Parameters**
Once the user interface loads, populate the inputs in the model likely to
be used in the Python script. For testing purposes the default InVEST’s
- data is appropriate. However, if a user wishes to write a batch for several
- InVEST runs, it would be reasonable to populate the user interface with
- data for the first run.
+ data is appropriate. However, if you wish to write a script to execute
+ several InVEST model runs in sequence, it would be reasonable to populate
+ the user interface with data for the first run in the sequence.
-3. **Generate a sample Python Script from the User Interface**
+3. **Generate a Python Script from the User Interface**
- Open the Development menu at the top of the user interface and select
- *"Save to python script..."* and save the file to a known location.
+ In the model interface, select *"Save as..."* in the left-hand menu bar,
+ *"Python script"* in the dialog box, and then save the file to a known location.
-|2w7pilj.png|
+ |workbench_save_as.png|
+ |modal_save_as.png|
-.. |2w7pilj.png| image:: https://bitbucket.org/repo/ag8qdk/images/3205488039-2w7pilj.png
+.. |workbench_save_as.png| image:: scripting/workbench_save_as.png
+.. |modal_save_as.png| image:: scripting/modal_save_as.png
4. **Execute the script in the InVEST Python Environment**
- Launch a Windows PowerShell from the Start menu (type “powershell” in the
- search box), then invoke the Python interpreter on the InVEST Python
- script from that shell. In this example the Python interpreter is
- installed in ``C:\Python37\python.exe`` and the script was saved in
- ``C:\Users\rpsharp\Desktop\ndr.py``, thus the command to invoke the
- interpreter is::
+ Launch a shell of your choice and activate the InVEST environment you
+ created earlier. We will use the Windows command prompt ``cmd.exe`` as an
+ example, but the same commands should work in PowerShell, bash and zsh. If
+ you created an environment called ``invest-env``, you would activate it with
+ ``conda`` like so::
- > C:\Python37\python.exe C:\Users\rpsharp\Desktop\ndr.py
+ > conda activate invest-env
-|34ecba0.png|
+ You will know the environment activated correctly when you see its name
+ at the beginning of your shell prompt. For example::
-.. |34ecba0.png| image:: https://bitbucket.org/repo/ag8qdk/images/2939811460-34ecba0.png
+ (invest-env) C:\Users\demo >
+
+ Once the environment has been activated, we can run the python script we
+ saved in the previous step. If we're on Windows and the script was
+ saved to ``C:\Users\demo\Desktop\ndr.py``, the command to run the script
+ would be::
+
+ (invest-env) C:\Users\demo > python C:\Users\demo\Desktop\ndr.py
5. **Output Results**
@@ -133,50 +106,68 @@ For reference, consider the following script generated by the Nutrient model wit
.. code-block:: python
- """
- This is a saved model run from natcap.invest.ndr.ndr.
- Generated: Mon 16 May 2016 03:52:59 PM
- InVEST version: 3.3.0
- """
+ # coding=UTF-8
+ # -----------------------------------------------
+ # Generated by InVEST 3.14.2 on Wed Oct 23 10:49:40 2024
+ # Model: Nutrient Delivery Ratio
+
+ import logging
+ import sys
import natcap.invest.ndr.ndr
+ import natcap.invest.utils
+
+ LOGGER = logging.getLogger(__name__)
+ root_logger = logging.getLogger()
+
+ handler = logging.StreamHandler(sys.stdout)
+ formatter = logging.Formatter(
+ fmt=natcap.invest.utils.LOG_FMT,
+ datefmt='%m/%d/%Y %H:%M:%S ')
+ handler.setFormatter(formatter)
+ logging.basicConfig(level=logging.INFO, handlers=[handler])
args = {
- u'k_param': u'2',
- u'runoff_proxy_uri': u'C:\InVEST_3.3.0_x86\Base_Data\Freshwater\precip',
- u'subsurface_critical_length_n': u'150',
- u'subsurface_critical_length_p': u'150',
- u'subsurface_eff_n': u'0.8',
- u'subsurface_eff_p': u'0.8',
- u'threshold_flow_accumulation': u'1000',
- u'biophysical_table_uri': u'C:\InVEST_3.3.0_x86\WP_Nutrient_Retention\Input\water_biophysical_table.csv',
- u'calc_n': True,
- u'calc_p': True,
- u'suffix': '',
- u'dem_uri': u'C:\InVEST_3.3.0_x86\Base_Data\Freshwater\dem',
- u'lulc_uri': u'C:\InVEST_3.3.0_x86\Base_Data\Freshwater\landuse_90',
- u'watersheds_uri': u'C:\InVEST_3.3.0_x86\Base_Data\Freshwater\watersheds.shp',
- u'workspace_dir': u'C:\InVEST_3.3.0_x86\ndr_workspace',
+ 'biophysical_table_path': 'C:\\Users\\demo\\invest-sample-data\\NDR\\biophysical_table_gura.csv',
+ 'calc_n': True,
+ 'calc_p': True,
+ 'dem_path': 'C:\\Users\\demo\\invest-sample-data\\NDR\\DEM_gura.tif',
+ 'k_param': '2',
+ 'lulc_path': 'C:\\Users\\demo\\invest-sample-data\\NDR\\land_use_gura.tif',
+ 'results_suffix': 'gura',
+ 'runoff_proxy_path': 'C:\\Users\\demo\\invest-sample-data\\NDR\\precipitation_gura.tif',
+ 'subsurface_critical_length_n': '200',
+ 'subsurface_eff_n': '0.8',
+ 'threshold_flow_accumulation': '1000',
+ 'watersheds_path': 'C:\\Users\\demo\\invest-sample-data\\NDR\\watershed_gura.shp',
+ 'workspace_dir': 'C:\\Users\\demo\\invest-sample-data\\NDR\\workspace',
}
if __name__ == '__main__':
natcap.invest.ndr.ndr.execute(args)
-Elements to note:
-
-* *Parameter Python Dictionary*: Key elements include the ``‘args’`` dictionary. Note the similarities between the key values such as ``‘workspace_dir’`` and the equivalent “Workspace” input parameter in the user interface. Every key in the ``‘args’`` dictionary has a corresponding reference in the user interface.
-|95zj7p.png|
+Elements to note:
-.. |95zj7p.png| image:: https://bitbucket.org/repo/ag8qdk/images/22613179-95zj7p.png
+* *Parameter Python Dictionary*: Key elements include the ``'args'``
+ dictionary. Note the similarities between the key values such as
+ ``'workspace_dir'`` and the equivalent "Workspace" input parameter in the
+ user interface. Every key in the ``'args'`` dictionary has a corresponding
+ reference in the user interface.
-In the example below we’ll modify the script to execute the nutrient model for a parameter study of ``‘threshold_flow_accumulation’.``
+ In the example below we’ll modify the script to execute the nutrient model
+ for a parameter study of ``'threshold_flow_accumulation'``.
-* *Execution of the InVEST model*: The InVEST API invokes models with a consistent syntax where the module name that contains the InVEST model is listed first and is followed by a function called ‘execute’ that takes a single parameter called ``‘args’``. This parameter is the dictionary of input parameters discussed above. In this example, the line
+* *Execution of the InVEST model*: The InVEST API invokes models with a
+ consistent syntax where the module name that contains the InVEST model is
+ listed first and is followed by a function called ‘execute’ that takes a
+ single parameter called ``'args'``. This parameter is the dictionary of input
+ parameters discussed above. In this example, the line ::
-``natcap.invest.ndr.ndr.execute(args)``
+ natcap.invest.ndr.ndr.execute(args)
-executes the nutrient model end-to-end. If the user wishes to make batch calls to InVEST, this line will likely be placed inside a loop.
+ executes the nutrient model end-to-end. If the user wishes to make batch
+ calls to InVEST, this line will likely be placed inside a loop.
====================================================
Example: Threshold Flow Accumulation Parameter Study
@@ -212,7 +203,7 @@ example, replace the last line in the UI generated Python script with:
.. code-block:: python
import os
- landcover_dir = r'C:\User\Rich\Desktop\landcover_data'
+ landcover_dir = r'C:\Users\demo\Desktop\landcover_data'
if __name__ == '__main__':
#Loop over all the filenames in the landcover dir
@@ -225,43 +216,31 @@ example, replace the last line in the UI generated Python script with:
natcap.invest.ndr.ndr.execute(args)
This loop covers all the files located in
-``C:\User\Rich\Desktop\landcover_data``
-and updates the relevant ``lulc_uri`` key in the args dictionary to each
-of those files during execution as well as making a useful suffix so output
-files can be distinguished from each other.
+``C:\Users\demo\Desktop\landcover_data`` and updates the relevant ``lulc_uri``
+key in the args dictionary to each of those files during execution, as well as
+setting a useful suffix so output files can be distinguished from each other.
============================================
Example: Saving model log messages to a file
============================================
-There are many cases where you may want or need to capture all of the log
-messages generated by the model. When we run models through the InVEST user
-interface application, the UI captures all of this logging and saves it to a
-logfile. We can replicate this behavior through the python logging package,
-by adding the following code just after the ``import`` statements in the
-example script.
-
+When you save a model run to a python script, InVEST will automatically include
+code to write all logging to your console while it executes. But sometimes,
+it's useful to save log messages from the model to a file. This can be done by
+adding a few lines of code to the script, just after the existing logging
+code:
.. code-block:: python
- import logging
- import pygeoprocessing
-
- # Write all NDR log messages to logfile.txt
- MODEL_LOGGER = logging.getLogger('natcap.invest.ndr')
- handler = logging.FileHandler('logfile.txt')
- MODEL_LOGGER.addHandler(handler)
-
- # log pygeoprocessing messages to the same logfile
- PYGEO_LOGGER = logging.getLogger('pygeoprocessing')
- PYGEO_LOGGER.addHandler(handler)
+ file_handler = logging.FileHandler('logfile.txt')
+ root_logger.addHandler(file_handler)
-This will capture all logging generated by the ``ndr`` model and by
-``pygeoprocessing``, writing all messages to ``logfile.txt``. While
+This will capture all logging generated by any python package that executes
+while running the ``ndr`` model, writing all messages to ``logfile.txt``. While
this is a common use case, the ``logging`` package provides functionality
for many more complex logging features. For more
advanced use of the python logging module, refer to the Python project's
-`Logging Cookbook `_
+`Logging Cookbook `_
=====================================
Example: Enabling Parallel Processing
@@ -306,7 +285,8 @@ Using the parameter study example, this might look like:
Internationalization
====================
-If you use the InVEST python API to access model names, ``MODEL_SPEC``s, or validation messages, you can translate those strings using ``gettext``:
+If you use the InVEST python API to access model names, ``MODEL_SPEC``\ s, or
+validation messages, you can translate those strings using ``gettext``:
.. code-block:: python
diff --git a/doc/api-docs/scripting/modal_save_as.png b/doc/api-docs/scripting/modal_save_as.png
new file mode 100644
index 0000000000..b5f84c1bad
Binary files /dev/null and b/doc/api-docs/scripting/modal_save_as.png differ
diff --git a/doc/api-docs/scripting/workbench_save_as.png b/doc/api-docs/scripting/workbench_save_as.png
new file mode 100644
index 0000000000..2d3a6525d1
Binary files /dev/null and b/doc/api-docs/scripting/workbench_save_as.png differ
diff --git a/exe/hooks/rthook.py b/exe/hooks/rthook.py
index 53a9af14b6..ec0b3264a8 100644
--- a/exe/hooks/rthook.py
+++ b/exe/hooks/rthook.py
@@ -1,9 +1,6 @@
-import sys
import os
-import multiprocessing
import platform
-
-multiprocessing.freeze_support()
+import sys
os.environ['PROJ_LIB'] = os.path.join(sys._MEIPASS, 'proj')
diff --git a/exe/invest.spec b/exe/invest.spec
index ac4ec440bd..b0f3085ec7 100644
--- a/exe/invest.spec
+++ b/exe/invest.spec
@@ -30,7 +30,9 @@ kwargs = {
'pkg_resources.py2_warn',
'cmath',
'charset_normalizer',
- 'scipy.special._cdflib'
+ 'scipy.special._cdflib',
+ 'scipy.special._special_ufuncs',
+ 'scipy._lib.array_api_compat.numpy.fft',
],
'datas': [proj_datas],
'cipher': block_cipher,
diff --git a/pyproject.toml b/pyproject.toml
index fadad29609..b16e94eead 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -46,7 +46,8 @@ invest = "natcap.invest.cli:main"
# available at runtime.
requires = [
'setuptools>=61', 'wheel', 'setuptools_scm>=8.0', 'cython>=3.0.0', 'babel',
- 'oldest-supported-numpy'
+ 'oldest-supported-numpy; python_version<="3.8"',
+ 'numpy>=2; python_version>="3.9"', # numpy 2 only available for 3.9+
]
build-backend = "setuptools.build_meta"
diff --git a/requirements.txt b/requirements.txt
index cf2d3aeb76..0ab53af935 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -13,11 +13,11 @@
GDAL>=3.4.2
Pyro4==4.77 # pip-only
pandas>=1.2.1
-numpy>=1.11.0,!=1.16.0,<2.0
+numpy>=1.11.0,!=1.16.0
Rtree>=0.8.2,!=0.9.1
shapely>=2.0.0
scipy>=1.9.0,!=1.12.*
-pygeoprocessing>=2.4.2 # pip-only
+pygeoprocessing>=2.4.6 # pip-only
taskgraph>=0.11.0
psutil>=5.6.6
chardet>=3.0.4
diff --git a/scripts/invest-autovalidate.py b/scripts/invest-autovalidate.py
index 02ba843db9..d4a1a1a1de 100644
--- a/scripts/invest-autovalidate.py
+++ b/scripts/invest-autovalidate.py
@@ -1,14 +1,14 @@
#!python
-import os
-import tempfile
-import logging
import argparse
-import unittest
import glob
import importlib
-import shutil
+import logging
+import os
import pprint
+import shutil
+import tempfile
+import unittest
from natcap.invest import datastack
@@ -81,7 +81,7 @@ def main(sampledatadir):
model_warnings = [] # define here in case of uncaught exception.
try:
- LOGGER.info('validating %s ', datastack_path)
+ LOGGER.info('validating %s ', os.path.abspath(datastack_path))
model_warnings = getattr(
model_module, 'validate')(paramset.args)
except AttributeError as err:
diff --git a/src/natcap/invest/__main__.py b/src/natcap/invest/__main__.py
index 1f68c51913..167e0fc170 100644
--- a/src/natcap/invest/__main__.py
+++ b/src/natcap/invest/__main__.py
@@ -1,5 +1,11 @@
+import multiprocessing
import sys
+# We want to guarantee that this is called BEFORE any other processes start,
+# which could happen at import time.
+if __name__ == '__main__':
+ multiprocessing.freeze_support()
+
from . import cli
if __name__ == '__main__':
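
For context, ``multiprocessing.freeze_support()`` must execute in the main
module of a frozen (PyInstaller) Windows build before any child process is
created — here even before importing ``cli``, since an import could start a
process. A minimal sketch of the standard pattern (the worker function is
hypothetical, not InVEST code):

.. code-block:: python

    # In a frozen Windows executable, each child process re-runs the main
    # module, so freeze_support() must run before any process is spawned.
    # It is a no-op in a regular interpreter.
    import multiprocessing

    def double(value):  # hypothetical stand-in for real work
        return value * 2

    if __name__ == '__main__':
        multiprocessing.freeze_support()  # must precede any Pool/Process use
        with multiprocessing.Pool(processes=2) as pool:
            print(pool.map(double, range(4)))
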
diff --git a/src/natcap/invest/coastal_vulnerability.py b/src/natcap/invest/coastal_vulnerability.py
index ff30d3a24e..a1637451a6 100644
--- a/src/natcap/invest/coastal_vulnerability.py
+++ b/src/natcap/invest/coastal_vulnerability.py
@@ -21,6 +21,7 @@
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
+from shapely.geometry import LineString, MultiLineString
from shapely.geometry.base import BaseMultipartGeometry
from shapely.strtree import STRtree
@@ -1104,7 +1105,6 @@ def prepare_landmass_line_index_and_interpolate_shore_points(
# Get shapely geometries from landmass
landmass_polygon_shapely_list = _ogr_to_geometry_list(landmass_vector_path)
landmass_shapely = shapely.ops.unary_union(landmass_polygon_shapely_list)
-
landmass_polygon_shapely_list = None
# store polygon geom for point-in-poly check later in ray-casting
@@ -1170,19 +1170,13 @@ def prepare_landmass_line_index_and_interpolate_shore_points(
if aoi_shapely_prepped.intersects(landmass_line):
intersected_shapely_geom = aoi_shapely.intersection(
landmass_line)
- if intersected_shapely_geom.geom_type == 'LineString':
- lines_in_aoi_list.append(intersected_shapely_geom)
- elif intersected_shapely_geom.geom_type == 'MultiLineString':
- shapely_geom_explode = [
- shapely.geometry.LineString(x)
- for x in intersected_shapely_geom]
-
- lines_in_aoi_list.extend(shapely_geom_explode)
- else:
- # intersection could generate a point geom
- # or if somehow the intersection is empty,
- # type will be GeometryCollection.
- continue
+ # intersection could generate a point geom,
+ # or if somehow the intersection is empty,
+ # type will be GeometryCollection.
+ if isinstance(intersected_shapely_geom,
+ (LineString, MultiLineString)):
+ lines_in_aoi_list.extend(
+ _list_geometry(intersected_shapely_geom))
# if none of the lines were disjoint before this linemerge,
# unioned_line will now be a LineString.
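
The ``isinstance`` check replaces the old string comparison on ``geom_type``
and quietly skips Point and GeometryCollection results. A small sketch of the
behavior, using shapely's public ``.geoms`` accessor to flatten a
MultiLineString rather than the module's ``_list_geometry`` helper (an
approximation, not the helper's actual code):

.. code-block:: python

    # An AOI/line intersection may yield LineString, MultiLineString,
    # Point, or an empty GeometryCollection; only line types are kept.
    from shapely.geometry import LineString, MultiLineString, box

    aoi = box(0, 0, 2, 2)
    shore = LineString([(-1, 1), (3, 1)])  # crosses the AOI
    result = aoi.intersection(shore)

    lines = []
    if isinstance(result, (LineString, MultiLineString)):
        if isinstance(result, MultiLineString):
            lines.extend(result.geoms)
        else:
            lines.append(result)
    print(lines)  # one LineString clipped to the AOI: (0 1) -> (2 1)
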
diff --git a/src/natcap/invest/datastack.py b/src/natcap/invest/datastack.py
index 461a12d60e..0fb2cabed4 100644
--- a/src/natcap/invest/datastack.py
+++ b/src/natcap/invest/datastack.py
@@ -535,6 +535,9 @@ def build_parameter_set(args, model_name, paramset_path, relative=False):
Returns:
``None``
+
+ Raises:
+ ValueError if creating a relative path fails.
"""
def _recurse(args_param):
if isinstance(args_param, dict):
@@ -552,8 +555,16 @@ def _recurse(args_param):
if (normalized_path == '.' or
os.path.dirname(paramset_path) == normalized_path):
return '.'
- temp_rel_path = os.path.relpath(
- normalized_path, os.path.dirname(paramset_path))
+ try:
+ temp_rel_path = os.path.relpath(
+ normalized_path, os.path.dirname(paramset_path))
+ except ValueError:
+ # On Windows, ValueError is raised when ``path`` and
+ # ``start`` are on different drives
+ raise ValueError(
+ """Error: Cannot save datastack with relative
+ paths across drives. Choose a different save
+ location, or use absolute paths.""")
# Always save unix paths.
linux_style_path = temp_rel_path.replace('\\', '/')
else:
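
The underlying failure mode can be reproduced on any platform with
``ntpath``, which is the implementation behind ``os.path`` on Windows:

.. code-block:: python

    # Demonstrates the ValueError that the new try/except guards against:
    # relpath cannot produce a relative path across drive letters.
    import ntpath

    try:
        ntpath.relpath('D:\\data\\dem.tif', 'C:\\project')
    except ValueError as err:
        print(f'Cannot make a relative path: {err}')
        # -> path is on mount 'D:', start on mount 'C:'
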
diff --git a/src/natcap/invest/forest_carbon_edge_effect.py b/src/natcap/invest/forest_carbon_edge_effect.py
index bb0fb399d0..aa7a394ab8 100644
--- a/src/natcap/invest/forest_carbon_edge_effect.py
+++ b/src/natcap/invest/forest_carbon_edge_effect.py
@@ -583,13 +583,14 @@ def _aggregate_carbon_map(
for poly_feat in target_aggregate_layer:
poly_fid = poly_feat.GetFID()
poly_feat.SetField(
- 'c_sum', serviceshed_stats[poly_fid]['sum'])
+ 'c_sum', float(serviceshed_stats[poly_fid]['sum']))
# calculates mean pixel value per ha in for each feature in AOI
poly_geom = poly_feat.GetGeometryRef()
poly_area_ha = poly_geom.GetArea() / 1e4 # converts m^2 to hectare
poly_geom = None
poly_feat.SetField(
- 'c_ha_mean', serviceshed_stats[poly_fid]['sum']/poly_area_ha)
+ 'c_ha_mean',
+ float(serviceshed_stats[poly_fid]['sum'] / poly_area_ha))
target_aggregate_layer.SetFeature(poly_feat)
target_aggregate_layer.CommitTransaction()
@@ -765,9 +766,11 @@ def _build_spatial_index(
local_model_dir, 'local_carbon_shape.shp')
lulc_projection_wkt = pygeoprocessing.get_raster_info(
base_raster_path)['projection_wkt']
- pygeoprocessing.reproject_vector(
- tropical_forest_edge_carbon_model_vector_path, lulc_projection_wkt,
- carbon_model_reproject_path)
+
+ with utils._set_gdal_configuration('OGR_ENABLE_PARTIAL_REPROJECTION', 'TRUE'):
+ pygeoprocessing.reproject_vector(
+ tropical_forest_edge_carbon_model_vector_path, lulc_projection_wkt,
+ carbon_model_reproject_path)
model_vector = gdal.OpenEx(carbon_model_reproject_path)
model_layer = model_vector.GetLayer()
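
``utils._set_gdal_configuration`` is an InVEST-internal context manager; its
effect can be sketched with GDAL's public configuration API, assuming the
helper simply sets the option and restores the previous value:

.. code-block:: python

    # Set-and-restore pattern for a GDAL config option (assumed behavior
    # of utils._set_gdal_configuration, not its actual implementation).
    from osgeo import gdal

    previous = gdal.GetConfigOption('OGR_ENABLE_PARTIAL_REPROJECTION')
    gdal.SetConfigOption('OGR_ENABLE_PARTIAL_REPROJECTION', 'TRUE')
    try:
        pass  # call pygeoprocessing.reproject_vector(...) here
    finally:
        # Restore the prior value; passing None unsets the option.
        gdal.SetConfigOption('OGR_ENABLE_PARTIAL_REPROJECTION', previous)
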
diff --git a/src/natcap/invest/habitat_quality.py b/src/natcap/invest/habitat_quality.py
index 2bf024fe46..c167bca01f 100644
--- a/src/natcap/invest/habitat_quality.py
+++ b/src/natcap/invest/habitat_quality.py
@@ -1,6 +1,7 @@
# coding=UTF-8
"""InVEST Habitat Quality model."""
import collections
+import csv
import logging
import os
@@ -213,36 +214,36 @@
"output": {
"type": "directory",
"contents": {
- "deg_sum_out_c.tif": {
+ "deg_sum_c.tif": {
"about": (
"Relative level of habitat degradation on the current "
"landscape."),
"bands": {1: {"type": "ratio"}}
},
- "deg_sum_out_f.tif": {
+ "deg_sum_f.tif": {
"about": (
"Relative level of habitat degradation on the future "
"landscape."),
"bands": {1: {"type": "ratio"}},
"created_if": "lulc_fut_path"
},
- "quality_out_c.tif": {
+ "quality_c.tif": {
"about": (
"Relative level of habitat quality on the current "
"landscape."),
"bands": {1: {"type": "ratio"}}
},
- "quality_out_f.tif": {
+ "quality_f.tif": {
"about": (
"Relative level of habitat quality on the future "
"landscape."),
"bands": {1: {"type": "ratio"}},
"created_if": "lulc_fut_path"
},
- "rarity_out_c.tif": {
+ "rarity_c.tif": {
"about": (
"Relative habitat rarity on the current landscape "
- "vis-a-vis the baseline map. The grid cell’s values "
+ "vis-a-vis the baseline map. The grid cell's values "
"are defined between a range of 0 and 1 where 0.5 "
"indicates no abundance change between the baseline "
"and current or projected map. Values between 0 and 0.5 "
@@ -258,10 +259,10 @@
"created_if": "lulc_bas_path",
"bands": {1: {"type": "ratio"}}
},
- "rarity_out_f.tif": {
+ "rarity_f.tif": {
"about": (
"Relative habitat rarity on the future landscape "
- "vis-a-vis the baseline map. The grid cell’s values "
+ "vis-a-vis the baseline map. The grid cell's values "
"are defined between a range of 0 and 1 where 0.5 "
"indicates no abundance change between the baseline "
"and current or projected map. Values between 0 and "
@@ -278,6 +279,70 @@
"created_if": "lulc_bas_path and lulc_fut_path",
"bands": {1: {"type": "ratio"}}
},
+ "rarity_c.csv": {
+ "about": ("Table of rarity values by LULC code for the "
+ "current landscape."),
+ "index_col": "lulc_code",
+ "columns": {
+ "lulc_code": {
+ "type": "number",
+ "units": u.none,
+ "about": "LULC class",
+ },
+ "rarity_value": {
+ "type": "number",
+ "units": u.none,
+ "about": (
+ "Relative habitat rarity on the current landscape "
+ "vis-a-vis the baseline map. The rarity values "
+ "are defined between a range of 0 and 1 where 0.5 "
+ "indicates no abundance change between the baseline "
+ "and current or projected map. Values between 0 and 0.5 "
+ "indicate a habitat is more abundant and the closer "
+ "the value is to 0 the lesser the likelihood that the "
+ "preservation of that habitat type on the current or "
+ "future landscape is important to biodiversity conservation. "
+ "Values between 0.5 and 1 indicate a habitat is less "
+ "abundant and the closer the value is to 1 the greater "
+ "the likelihood that the preservation of that habitat "
+ "type on the current or future landscape is important "
+ "to biodiversity conservation."),
+ },
+ },
+ "created_if": "lulc_bas_path",
+ },
+ "rarity_f.csv": {
+ "about": ("Table of rarity values by LULC code for the "
+ "future landscape."),
+ "index_col": "lulc_code",
+ "columns": {
+ "lulc_code": {
+ "type": "number",
+ "units": u.none,
+ "about": "LULC class",
+ },
+ "rarity_value": {
+ "type": "number",
+ "units": u.none,
+ "about": (
+ "Relative habitat rarity on the future landscape "
+ "vis-a-vis the baseline map. The rarity values "
+ "are defined between a range of 0 and 1 where 0.5 "
+ "indicates no abundance change between the baseline "
+ "and current or projected map. Values between 0 and 0.5 "
+ "indicate a habitat is more abundant and the closer "
+ "the value is to 0 the lesser the likelihood that the "
+ "preservation of that habitat type on the current or "
+ "future landscape is important to biodiversity conservation. "
+ "Values between 0.5 and 1 indicate a habitat is less "
+ "abundant and the closer the value is to 1 the greater "
+ "the likelihood that the preservation of that habitat "
+ "type on the current or future landscape is important "
+ "to biodiversity conservation."),
+ },
+ },
+ "created_if": "lulc_bas_path and lulc_fut_path",
+ },
}
},
"intermediate": {
@@ -555,7 +620,7 @@ def execute(args):
rasterize_access_task = task_graph.add_task(
func=pygeoprocessing.rasterize,
- args=(args['access_vector_path'], access_raster_path),
+ args=(reprojected_access_path, access_raster_path),
kwargs={
'option_list': ['ATTRIBUTE=ACCESS'],
'burn_values': None
@@ -743,13 +808,16 @@ def execute(args):
intermediate_output_dir,
f'new_cover{lulc_key}{file_suffix}.tif')
- rarity_path = os.path.join(
+ rarity_raster_path = os.path.join(
output_dir, f'rarity{lulc_key}{file_suffix}.tif')
+ rarity_csv_path = os.path.join(
+ output_dir, f'rarity{lulc_key}{file_suffix}.csv')
+
_ = task_graph.add_task(
func=_compute_rarity_operation,
args=((lulc_base_path, 1), (lulc_path, 1), (new_cover_path, 1),
- rarity_path),
+ rarity_raster_path, rarity_csv_path),
dependent_task_list=[align_task],
task_name=f'rarity{lulc_time}')
@@ -773,7 +841,7 @@ def _calculate_habitat_quality(deg_hab_raster_list, quality_out_path, ksq):
pygeoprocessing.raster_map(
op=lambda degradation, habitat: (
habitat * (1 - (degradation**_SCALING_PARAM) /
- (degradation**_SCALING_PARAM + ksq))),
+ (degradation**_SCALING_PARAM + ksq))),
rasters=deg_hab_raster_list,
target_path=quality_out_path)
@@ -829,8 +897,9 @@ def total_degradation(*arrays):
def _compute_rarity_operation(
- base_lulc_path_band, lulc_path_band, new_cover_path, rarity_path):
- """Calculate habitat rarity.
+ base_lulc_path_band, lulc_path_band, new_cover_path,
+ rarity_raster_path, rarity_csv_path):
+ """Calculate habitat rarity and generate raster and CSV output.
Output rarity values will be an index from 0 - 1 where:
pixel > 0.5 - more rare
@@ -846,7 +915,8 @@ def _compute_rarity_operation(
new_cover_path (tuple): a 2 tuple for the path to intermediate
raster file for trimming ``lulc_path_band`` to
``base_lulc_path_band`` of the form (path, band index).
- rarity_path (string): path to output rarity raster.
+ rarity_raster_path (string): path to output rarity raster.
+ rarity_csv_path (string): path to output rarity CSV.
Returns:
None
@@ -857,7 +927,6 @@ def _compute_rarity_operation(
base_lulc_path_band[0])
base_pixel_size = base_raster_info['pixel_size']
base_area = float(abs(base_pixel_size[0]) * abs(base_pixel_size[1]))
- base_nodata = base_raster_info['nodata'][0]
lulc_code_count_b = _raster_pixel_count(base_lulc_path_band)
@@ -865,7 +934,6 @@ def _compute_rarity_operation(
lulc_raster_info = pygeoprocessing.get_raster_info(lulc_path_band[0])
lulc_pixel_size = lulc_raster_info['pixel_size']
lulc_area = float(abs(lulc_pixel_size[0]) * abs(lulc_pixel_size[1]))
- lulc_nodata = lulc_raster_info['nodata'][0]
# Trim cover_x to the mask of base.
pygeoprocessing.raster_map(
@@ -895,13 +963,34 @@ def _compute_rarity_operation(
code_index[code] = 0.0
pygeoprocessing.reclassify_raster(
- new_cover_path, code_index, rarity_path, gdal.GDT_Float32,
+ new_cover_path, code_index, rarity_raster_path, gdal.GDT_Float32,
_OUT_NODATA)
+ _generate_rarity_csv(code_index, rarity_csv_path)
+
LOGGER.info('Finished rarity computation on'
f' {os.path.basename(lulc_path_band[0])} land cover.')
+def _generate_rarity_csv(rarity_dict, target_csv_path):
+ """Generate CSV containing rarity values by LULC code.
+
+ Args:
+ rarity_dict (dict): dictionary containing LULC codes (as keys)
+ and their associated rarity values (as values).
+ target_csv_path (string): path to output CSV.
+
+ Returns:
+ None
+ """
+ lulc_codes = sorted(rarity_dict)
+ with open(target_csv_path, 'w', newline='') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(['lulc_code', 'rarity_value'])
+ for lulc_code in lulc_codes:
+ writer.writerow([lulc_code, rarity_dict[lulc_code]])
+
+
def _raster_pixel_count(raster_path_band):
"""Count unique pixel values in single band raster.
diff --git a/src/natcap/invest/pollination.py b/src/natcap/invest/pollination.py
index 60def5ec3e..e6b6895592 100644
--- a/src/natcap/invest/pollination.py
+++ b/src/natcap/invest/pollination.py
@@ -1036,28 +1036,28 @@ def execute(args):
# this is YT from the user's guide (y_tot)
farm_feature.SetField(
_TOTAL_FARM_YIELD_FIELD_ID,
- 1 - nu * (
+ float(1 - nu * (
1 - total_farm_results[fid]['sum'] /
- float(total_farm_results[fid]['count'])))
+ float(total_farm_results[fid]['count']))))
# this is PYW ('pdep_y_w')
farm_feature.SetField(
_POLLINATOR_PROPORTION_FARM_YIELD_FIELD_ID,
- (wild_pollinator_yield_aggregate[fid]['sum'] /
+ float(wild_pollinator_yield_aggregate[fid]['sum'] /
float(wild_pollinator_yield_aggregate[fid]['count'])))
# this is YW ('y_wild')
farm_feature.SetField(
_WILD_POLLINATOR_FARM_YIELD_FIELD_ID,
- nu * (wild_pollinator_yield_aggregate[fid]['sum'] /
- float(wild_pollinator_yield_aggregate[fid]['count'])))
+ float(nu * (wild_pollinator_yield_aggregate[fid]['sum'] /
+ float(wild_pollinator_yield_aggregate[fid]['count']))))
# this is PAT ('p_abund')
farm_season = farm_feature.GetField(_FARM_SEASON_FIELD)
farm_feature.SetField(
_POLLINATOR_ABUNDANCE_FARM_FIELD_ID,
- pollinator_abundance_results[farm_season][fid]['sum'] /
- float(pollinator_abundance_results[farm_season][fid]['count']))
+ float(pollinator_abundance_results[farm_season][fid]['sum'] /
+ float(pollinator_abundance_results[farm_season][fid]['count'])))
target_farm_layer.SetFeature(farm_feature)
target_farm_layer.SyncToDisk()
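
The explicit ``float()`` casts matter because arithmetic on numpy values
produces numpy scalar types, not builtin floats, and OGR's ``SetField``
bindings expect the latter. A quick illustration of the distinction:

.. code-block:: python

    # Arithmetic on a numpy scalar yields a numpy scalar; float() converts
    # it to the builtin type that SetField expects.
    import numpy

    total = numpy.float32(12.5)  # e.g. a zonal-statistics 'sum'
    mean = total / 5             # count arrives as a plain int here

    print(type(mean))         # <class 'numpy.float32'>
    print(type(float(mean)))  # <class 'float'>
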
diff --git a/src/natcap/invest/recreation/recmodel_client.py b/src/natcap/invest/recreation/recmodel_client.py
index c695eef54a..5250eabbe9 100644
--- a/src/natcap/invest/recreation/recmodel_client.py
+++ b/src/natcap/invest/recreation/recmodel_client.py
@@ -1029,10 +1029,12 @@ def _raster_sum_mean(
numpy.array(fid_raster_values['sum']) /
numpy.array(fid_raster_values['count']))
predictor_results = dict(
- zip(fid_raster_values['fid'], mean_results))
+ zip(fid_raster_values['fid'],
+ (i.item() for i in mean_results)))
else:
predictor_results = dict(
- zip(fid_raster_values['fid'], fid_raster_values['sum']))
+ zip(fid_raster_values['fid'],
+ (i.item() for i in fid_raster_values['sum'])))
with open(predictor_target_path, 'w') as jsonfile:
json.dump(predictor_results, jsonfile)
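
The generator of ``.item()`` calls converts numpy scalars to builtin types
before serialization, because the stdlib ``json`` encoder rejects most numpy
scalar types (``numpy.float64`` is the exception, being a ``float``
subclass):

.. code-block:: python

    # json.dumps cannot serialize numpy integer scalars; .item() converts
    # each value to the equivalent builtin int/float first.
    import json
    import numpy

    counts = numpy.array([3, 7])
    try:
        json.dumps({'fid_0': counts[0]})
    except TypeError as err:
        print(err)  # e.g. "Object of type int64 is not JSON serializable"

    print(json.dumps({'fid_0': counts[0].item()}))  # {"fid_0": 3}
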
diff --git a/src/natcap/invest/scenario_gen_proximity.py b/src/natcap/invest/scenario_gen_proximity.py
index bb62ce8fae..5f23f95269 100644
--- a/src/natcap/invest/scenario_gen_proximity.py
+++ b/src/natcap/invest/scenario_gen_proximity.py
@@ -490,7 +490,7 @@ def _convert_landscape(
def _mask_base_op(lulc_array):
"""Create a mask of valid non-base pixels only."""
- base_mask = numpy.in1d(
+ base_mask = numpy.isin(
lulc_array.flatten(), focal_landcover_codes).reshape(
lulc_array.shape)
if invert_mask:
@@ -535,7 +535,7 @@ def _combine_masks(base_distance_array, non_base_distance_array):
# turn inside and outside masks into a single mask
def _mask_to_convertible_codes(distance_from_base_edge, lulc):
"""Mask out the distance transform to a set of lucodes."""
- convertible_mask = numpy.in1d(
+ convertible_mask = numpy.isin(
lulc.flatten(), convertible_type_list).reshape(lulc.shape)
return numpy.where(
convertible_mask, distance_from_base_edge,
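
``numpy.in1d`` was deprecated in favor of ``numpy.isin`` in numpy 2.0.
``isin`` is the drop-in replacement and also preserves the input's shape, so
in principle the surrounding flatten/reshape pair is no longer needed:

.. code-block:: python

    # numpy.isin replaces the deprecated numpy.in1d and returns a mask
    # with the same shape as its first argument.
    import numpy

    lulc = numpy.array([[1, 2], [3, 4]])
    focal_codes = [2, 3]

    print(numpy.isin(lulc, focal_codes))
    # [[False  True]
    #  [ True False]]
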
diff --git a/src/natcap/invest/sdr/sdr_core.pyx b/src/natcap/invest/sdr/sdr_core.pyx
index be8e571a77..afe1925194 100644
--- a/src/natcap/invest/sdr/sdr_core.pyx
+++ b/src/natcap/invest/sdr/sdr_core.pyx
@@ -27,13 +27,13 @@ def calculate_sediment_deposition(
This algorithm outputs both sediment deposition (t_i) and flux (f_i)::
- t_i = dr_i * (sum over j ∈ J of f_j * p(i,j)) + E'_i
+ t_i = dt_i * (sum over j ∈ J of f_j * p(j,i))
- f_i = (1 - dr_i) * (sum over j ∈ J of f_j * p(i,j)) + E'_i
+ f_i = (1 - dt_i) * (sum over j ∈ J of f_j * p(j,i)) + E'_i
(sum over k ∈ K of SDR_k * p(i,k)) - SDR_i
- dr_i = --------------------------------------------
+ dt_i = --------------------------------------------
(1 - SDR_i)
where:
@@ -233,21 +233,21 @@ def calculate_sediment_deposition(
if sdr_i == 1:
# This reflects property B in the user's guide and is
# an edge case to avoid division-by-zero.
- dr_i = 1
+ dt_i = 1
else:
- dr_i = (downslope_sdr_weighted_sum - sdr_i) / (1 - sdr_i)
+ dt_i = (downslope_sdr_weighted_sum - sdr_i) / (1 - sdr_i)
# Lisa's modified equations
- t_i = dr_i * f_j_weighted_sum # deposition, a.k.a trapped sediment
- f_i = (1 - dr_i) * f_j_weighted_sum + e_prime_i # flux
+ t_i = dt_i * f_j_weighted_sum # deposition, a.k.a trapped sediment
+ f_i = (1 - dt_i) * f_j_weighted_sum + e_prime_i # flux
- # On large flow paths, it's possible for dr_i, f_i and t_i
+ # On large flow paths, it's possible for dt_i, f_i and t_i
# to have very small negative values that are numerically
# equivalent to 0. These negative values were raising
# questions on the forums and it's easier to clamp the
# values here than to explain IEEE 754.
- if dr_i < 0:
- dr_i = 0
+ if dt_i < 0:
+ dt_i = 0
if t_i < 0:
t_i = 0
if f_i < 0:
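
Restated in LaTeX for readability (same quantities as the docstring: f_j is
incoming flux, E'_i local erosion, p(.,.) flow proportions, and J/K the
upslope and downslope neighbors of pixel i):

.. code-block:: latex

    t_i = d_{t,i} \sum_{j \in J} f_j \, p(j, i)

    f_i = (1 - d_{t,i}) \sum_{j \in J} f_j \, p(j, i) + E'_i

    d_{t,i} = \frac{\sum_{k \in K} \mathrm{SDR}_k \, p(i, k)
                    - \mathrm{SDR}_i}{1 - \mathrm{SDR}_i}
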
diff --git a/src/natcap/invest/seasonal_water_yield/seasonal_water_yield.py b/src/natcap/invest/seasonal_water_yield/seasonal_water_yield.py
index 25228b3db4..728859a2d7 100644
--- a/src/natcap/invest/seasonal_water_yield/seasonal_water_yield.py
+++ b/src/natcap/invest/seasonal_water_yield/seasonal_water_yield.py
@@ -632,6 +632,7 @@ def execute(args):
# ValueError when n_workers is an empty string.
# TypeError when n_workers is None.
n_workers = -1 # Synchronous mode.
+ LOGGER.debug('n_workers: %s', n_workers)
task_graph = taskgraph.TaskGraph(
os.path.join(args['workspace_dir'], 'taskgraph_cache'),
n_workers, reporting_interval=5)
@@ -642,6 +643,9 @@ def execute(args):
(_INTERMEDIATE_BASE_FILES, intermediate_output_dir)], file_suffix)
LOGGER.info('Checking that the AOI is not the output aggregate vector')
+ LOGGER.debug("aoi_path: %s", args['aoi_path'])
+ LOGGER.debug("aggregate_vector_path: %s",
+ os.path.normpath(file_registry['aggregate_vector_path']))
if (os.path.normpath(args['aoi_path']) ==
os.path.normpath(file_registry['aggregate_vector_path'])):
raise ValueError(
@@ -1234,7 +1238,7 @@ def cn_op(lulc_array, soil_group_array):
# if lulc_array value not in lulc_to_soil[soil_group_id]['lulc_values']
# then numpy.digitize will not bin properly and cause an IndexError
# during the reshaping call
- lulc_unique = set(numpy.unique(lulc_array))
+ lulc_unique = set(i.item() for i in numpy.unique(lulc_array))
if not lulc_unique.issubset(lucodes_set):
# cast to list to conform with similar error messages in InVEST
missing_lulc_values = sorted(lulc_unique.difference(lucodes_set))
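
numpy 2 changed scalar reprs to include the type (``np.int64(5)`` rather
than ``5``), which would leak into the formatted validation message below;
converting with ``.item()`` keeps the message readable:

.. code-block:: python

    # Under numpy >= 2, sets/lists of numpy scalars format with the type
    # name, so values are converted to builtin ints before reporting.
    import numpy

    codes = numpy.unique(numpy.array([3, 1, 3]))
    print(sorted(set(codes)))                    # [np.int64(1), np.int64(3)]
    print(sorted(set(c.item() for c in codes)))  # [1, 3]
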
diff --git a/src/natcap/invest/ui_server.py b/src/natcap/invest/ui_server.py
index bfbe1874ec..7c4ad72af2 100644
--- a/src/natcap/invest/ui_server.py
+++ b/src/natcap/invest/ui_server.py
@@ -175,7 +175,9 @@ def write_parameter_set_file():
relativePaths: boolean
Returns:
- A string.
+ A dictionary with the following key/value pairs:
+ - message (string): for logging and/or rendering in the UI.
+ - error (boolean): True if an error occurred, otherwise False.
"""
payload = request.get_json()
filepath = payload['filepath']
@@ -183,9 +185,19 @@ def write_parameter_set_file():
args = json.loads(payload['args'])
relative_paths = payload['relativePaths']
- datastack.build_parameter_set(
- args, modulename, filepath, relative=relative_paths)
- return 'parameter set saved'
+ try:
+ datastack.build_parameter_set(
+ args, modulename, filepath, relative=relative_paths)
+ except ValueError as message:
+ LOGGER.error(str(message))
+ return {
+ 'message': str(message),
+ 'error': True
+ }
+ return {
+ 'message': 'Parameter set saved',
+ 'error': False
+ }
@app.route(f'/{PREFIX}/save_to_python', methods=['POST'])
@@ -221,15 +233,26 @@ def build_datastack_archive():
args: JSON string of InVEST model args keys and values
Returns:
- A string.
+ A dictionary with the following key/value pairs:
+ - message (string): for logging and/or rendering in the UI.
+ - error (boolean): True if an error occurred, otherwise False.
"""
payload = request.get_json()
- datastack.build_datastack_archive(
- json.loads(payload['args']),
- payload['moduleName'],
- payload['filepath'])
-
- return 'datastack archive created'
+ try:
+ datastack.build_datastack_archive(
+ json.loads(payload['args']),
+ payload['moduleName'],
+ payload['filepath'])
+ except ValueError as message:
+ LOGGER.error(str(message))
+ return {
+ 'message': str(message),
+ 'error': True
+ }
+ return {
+ 'message': 'Datastack archive created',
+ 'error': False
+ }
@app.route(f'/{PREFIX}/log_model_start', methods=['POST'])
@@ -251,6 +274,7 @@ def log_model_exit():
payload['status'])
return 'OK'
+
@app.route(f'/{PREFIX}/languages', methods=['GET'])
def get_supported_languages():
"""Return a mapping of supported languages to their display names."""
diff --git a/src/natcap/invest/urban_cooling_model.py b/src/natcap/invest/urban_cooling_model.py
index eea80d8a12..ed3084016e 100644
--- a/src/natcap/invest/urban_cooling_model.py
+++ b/src/natcap/invest/urban_cooling_model.py
@@ -963,13 +963,13 @@ def calculate_uhi_result_vector(
if cc_stats[feature_id]['count'] > 0:
mean_cc = (
cc_stats[feature_id]['sum'] / cc_stats[feature_id]['count'])
- feature.SetField('avg_cc', mean_cc)
+ feature.SetField('avg_cc', float(mean_cc))
mean_t_air = None
if t_air_stats[feature_id]['count'] > 0:
mean_t_air = (
t_air_stats[feature_id]['sum'] /
t_air_stats[feature_id]['count'])
- feature.SetField('avg_tmp_v', mean_t_air)
+ feature.SetField('avg_tmp_v', float(mean_t_air))
if mean_t_air:
feature.SetField(
@@ -979,7 +979,7 @@ def calculate_uhi_result_vector(
wbgt = (
wbgt_stats[feature_id]['sum'] /
wbgt_stats[feature_id]['count'])
- feature.SetField('avg_wbgt_v', wbgt)
+ feature.SetField('avg_wbgt_v', float(wbgt))
if light_loss_stats and light_loss_stats[feature_id]['count'] > 0:
light_loss = (
diff --git a/src/natcap/invest/urban_flood_risk_mitigation.py b/src/natcap/invest/urban_flood_risk_mitigation.py
index 7f865e5e3f..63ec19298e 100644
--- a/src/natcap/invest/urban_flood_risk_mitigation.py
+++ b/src/natcap/invest/urban_flood_risk_mitigation.py
@@ -173,14 +173,14 @@
"about": "Map of runoff volume.",
"bands": {1: {"type": "number", "units": u.meter**3}}
},
- "reprojected_aoi.gpkg": {
+ "reprojected_aoi.shp": {
"about": (
"Copy of AOI vector reprojected to the same spatial "
"reference as the LULC."),
"geometries": spec_utils.POLYGONS,
"fields": {}
},
- "structures_reprojected.gpkg": {
+ "structures_reprojected.shp": {
"about": (
"Copy of built infrastructure vector reprojected to "
"the same spatial reference as the LULC."),
@@ -408,14 +408,14 @@ def execute(args):
task_name='calculate service built raster')
reprojected_aoi_path = os.path.join(
- intermediate_dir, 'reprojected_aoi.gpkg')
+ intermediate_dir, 'reprojected_aoi')
reprojected_aoi_task = task_graph.add_task(
func=pygeoprocessing.reproject_vector,
args=(
args['aoi_watersheds_path'],
target_sr_wkt,
reprojected_aoi_path),
- kwargs={'driver_name': 'GPKG'},
+ kwargs={'driver_name': 'ESRI Shapefile'},
target_path_list=[reprojected_aoi_path],
task_name='reproject aoi/watersheds')
@@ -435,7 +435,7 @@ def execute(args):
(runoff_retention_raster_path, 1),
reprojected_aoi_path),
store_result=True,
- dependent_task_list=[runoff_retention_task],
+ dependent_task_list=[runoff_retention_task, reprojected_aoi_task],
task_name='zonal_statistics over runoff_retention raster')
runoff_retention_volume_stats_task = task_graph.add_task(
@@ -444,7 +444,7 @@ def execute(args):
(runoff_retention_vol_raster_path, 1),
reprojected_aoi_path),
store_result=True,
- dependent_task_list=[runoff_retention_vol_task],
+ dependent_task_list=[runoff_retention_vol_task, reprojected_aoi_task],
task_name='zonal_statistics over runoff_retention_volume raster')
damage_per_aoi_stats = None
@@ -457,13 +457,13 @@ def execute(args):
args['built_infrastructure_vector_path'] not in ('', None)):
# Reproject the built infrastructure vector to the target SRS.
reprojected_structures_path = os.path.join(
- intermediate_dir, 'structures_reprojected.gpkg')
+ intermediate_dir, 'structures_reprojected')
reproject_built_infrastructure_task = task_graph.add_task(
func=pygeoprocessing.reproject_vector,
args=(args['built_infrastructure_vector_path'],
target_sr_wkt,
reprojected_structures_path),
- kwargs={'driver_name': 'GPKG'},
+ kwargs={'driver_name': 'ESRI Shapefile'},
target_path_list=[reprojected_structures_path],
task_name='reproject built infrastructure to target SRS')
@@ -512,7 +512,7 @@ def _write_summary_vector(
runoff_ret_vol_stats, flood_volume_stats, damage_per_aoi_stats=None):
"""Write a vector with summary statistics.
- This vector will always contain two fields::
+ This vector will always contain three fields::
* ``'flood_vol'``: The volume of flood (runoff), in m3, per watershed.
* ``'rnf_rt_idx'``: Average of runoff retention values per watershed
@@ -553,21 +553,11 @@ def _write_summary_vector(
``None``
"""
source_aoi_vector = gdal.OpenEx(source_aoi_vector_path, gdal.OF_VECTOR)
- source_aoi_layer = source_aoi_vector.GetLayer()
- source_geom_type = source_aoi_layer.GetGeomType()
- source_srs_wkt = pygeoprocessing.get_vector_info(
- source_aoi_vector_path)['projection_wkt']
- source_srs = osr.SpatialReference()
- source_srs.ImportFromWkt(source_srs_wkt)
-
esri_driver = gdal.GetDriverByName('ESRI Shapefile')
- target_watershed_vector = esri_driver.Create(
- target_vector_path, 0, 0, 0, gdal.GDT_Unknown)
- layer_name = os.path.splitext(os.path.basename(
- target_vector_path))[0]
- LOGGER.debug(f"creating layer {layer_name}")
- target_watershed_layer = target_watershed_vector.CreateLayer(
- layer_name, source_srs, source_geom_type)
+ esri_driver.CreateCopy(target_vector_path, source_aoi_vector)
+ target_watershed_vector = gdal.OpenEx(target_vector_path,
+ gdal.OF_VECTOR | gdal.GA_Update)
+ target_watershed_layer = target_watershed_vector.GetLayer()
target_fields = ['rnf_rt_idx', 'rnf_rt_m3', 'flood_vol']
if damage_per_aoi_stats is not None:
@@ -579,13 +569,9 @@ def _write_summary_vector(
field_def.SetPrecision(11)
target_watershed_layer.CreateField(field_def)
- target_layer_defn = target_watershed_layer.GetLayerDefn()
- for base_feature in source_aoi_layer:
- feature_id = base_feature.GetFID()
- target_feature = ogr.Feature(target_layer_defn)
- base_geom_ref = base_feature.GetGeometryRef()
- target_feature.SetGeometry(base_geom_ref.Clone())
- base_geom_ref = None
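+ # Update the copied features in place; their FIDs are preserved by the
+ # copy, so the zonal statistics dicts keyed by FID apply directly.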
+ target_watershed_layer.ResetReading()
+ for target_feature in target_watershed_layer:
+ feature_id = target_feature.GetFID()
pixel_count = runoff_ret_stats[feature_id]['count']
if pixel_count > 0:
@@ -605,13 +591,14 @@ def _write_summary_vector(
# This is the service_built equation.
target_feature.SetField(
- 'serv_blt', (
+ 'serv_blt', float(
damage_sum * runoff_ret_vol_stats[feature_id]['sum']))
target_feature.SetField(
'flood_vol', float(flood_volume_stats[feature_id]['sum']))
- target_watershed_layer.CreateFeature(target_feature)
+ target_watershed_layer.SetFeature(target_feature)
+
target_watershed_layer.SyncToDisk()
target_watershed_layer = None
target_watershed_vector = None
@@ -949,7 +936,8 @@ def validate(args, limit_to=None):
nan_mask = cn_df.isna()
if nan_mask.any(axis=None):
nan_lucodes = nan_mask[nan_mask.any(axis=1)].index
- lucode_list = list(nan_lucodes.values)
+ # Convert numpy dtype values to native python types
+ lucode_list = [i.item() for i in nan_lucodes.values]
validation_warnings.append((
['curve_number_table_path'],
f'Missing curve numbers for lucode(s) {lucode_list}'))
diff --git a/src/natcap/invest/urban_nature_access.py b/src/natcap/invest/urban_nature_access.py
index 2831e65286..136575a956 100644
--- a/src/natcap/invest/urban_nature_access.py
+++ b/src/natcap/invest/urban_nature_access.py
@@ -755,13 +755,12 @@ def execute(args):
squared_lulc_pixel_size = _square_off_pixels(args['lulc_raster_path'])
lulc_alignment_task = graph.add_task(
- pygeoprocessing.warp_raster,
+ _warp_lulc,
kwargs={
- 'base_raster_path': args['lulc_raster_path'],
- 'target_pixel_size': squared_lulc_pixel_size,
- 'target_bb': target_bounding_box,
- 'target_raster_path': file_registry['aligned_lulc'],
- 'resample_method': 'near',
+ "source_lulc_path": args['lulc_raster_path'],
+ "target_lulc_path": file_registry['aligned_lulc'],
+ "target_pixel_size": squared_lulc_pixel_size,
+ "target_bounding_box": target_bounding_box,
},
target_path_list=[file_registry['aligned_lulc']],
task_name='Resample LULC to have square pixels'
@@ -2528,6 +2527,40 @@ def _create_mask(*raster_arrays):
_create_mask, target_mask_path, gdal.GDT_Byte, nodata_target=255)
+def _warp_lulc(source_lulc_path, target_lulc_path, target_pixel_size,
+ target_bounding_box):
+ """Warp a LULC raster and set a nodata if needed.
+
+ Args:
+ source_lulc_path (str): The path to a source LULC raster.
+ target_lulc_path (str): The path to the new LULC raster.
+ target_pixel_size (tuple): A 2-tuple of the target pixel size.
+ target_bounding_box (tuple): A 4-tuple of the target bounding box.
+
+ Returns:
+ ``None``.
+ """
+ source_raster_info = pygeoprocessing.get_raster_info(source_lulc_path)
+ target_nodata = source_raster_info['nodata'][0]
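+ # target_nodata may be None if the source LULC defines no nodata value.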
+
+ pygeoprocessing.warp_raster(
+ source_lulc_path, target_pixel_size, target_lulc_path,
+ 'near', target_bb=target_bounding_box,
+ target_projection_wkt=source_raster_info['projection_wkt'])
+
+ # if there is no defined nodata, set a default value
+ if target_nodata is None:
+ # Guarantee that our nodata cannot collide with valid data by
+ # selecting a value that is out of range for the source datatype.
+ target_nodata = pygeoprocessing.choose_nodata(
+ source_raster_info['numpy_type']) + 1
+ raster = gdal.OpenEx(target_lulc_path, gdal.GA_Update)
+ band = raster.GetRasterBand(1)
+ band.SetNoDataValue(target_nodata)
+ band = None
+ raster = None
+
+
def _mask_raster(source_raster_path, mask_raster_path, target_raster_path):
"""Convert pixels to nodata given an existing mask raster.
diff --git a/src/natcap/invest/utils.py b/src/natcap/invest/utils.py
index f34c25d9e5..dcae5734dd 100644
--- a/src/natcap/invest/utils.py
+++ b/src/natcap/invest/utils.py
@@ -121,6 +121,25 @@ def capture_gdal_logging():
gdal.PopErrorHandler()
+@contextlib.contextmanager
+def _set_gdal_configuration(opt, value):
+ """Temporarily set a GDAL configuration option.
+
+ Args:
+ opt (string): The GDAL configuration option to set.
+ value (string): The value to set the option to.
+
+ Yields:
+ ``None``
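+
+ A minimal usage sketch (``GDAL_CACHEMAX`` is just an illustrative
+ option name)::
+
+ with _set_gdal_configuration('GDAL_CACHEMAX', '1024'):
+ ...  # GDAL calls here see the temporary value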
+ """
+ prior_value = gdal.GetConfigOption(opt)
+ gdal.SetConfigOption(opt, value)
+ try:
+ yield
+ finally:
+ gdal.SetConfigOption(opt, prior_value)
+
+
def _format_time(seconds):
"""Render the integer number of seconds as a string. Returns a string."""
hours, remainder = divmod(seconds, 3600)
diff --git a/src/natcap/invest/validation.py b/src/natcap/invest/validation.py
index 56e053dd7f..d6f7df05fc 100644
--- a/src/natcap/invest/validation.py
+++ b/src/natcap/invest/validation.py
@@ -166,9 +166,8 @@ def check_directory(dirpath, must_exist=True, permissions='rx', **kwargs):
must_exist=True (bool): If ``True``, the directory at ``dirpath``
must already exist on the filesystem.
permissions='rx' (string): A string that includes the lowercase
- characters ``r``, ``w`` and/or ``x`` indicating required
- permissions for this folder . See ``check_permissions`` for
- details.
+ characters ``r``, ``w`` and/or ``x``, indicating read, write, and
+ execute permissions (respectively) required for this directory.
Returns:
A string error message if an error was found. ``None`` otherwise.
@@ -193,9 +192,33 @@ def check_directory(dirpath, must_exist=True, permissions='rx', **kwargs):
dirpath = parent
break
- permissions_warning = check_permissions(dirpath, permissions, True)
- if permissions_warning:
- return permissions_warning
+ MESSAGE_KEY = 'NEED_PERMISSION_DIRECTORY'
+
+ if 'r' in permissions:
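+ # Listing the directory's contents requires read permission.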
+ try:
+ os.scandir(dirpath).close()
+ except OSError:
+ return MESSAGES[MESSAGE_KEY].format(permission='read')
+
+ # Check for x access before checking for w,
+ # since w operations to a dir are dependent on x access
+ if 'x' in permissions:
+ # Capture cwd before the try block so the chdir in the finally
+ # clause is always defined.
+ cwd = os.getcwd()
+ try:
+ os.chdir(dirpath)
+ except OSError:
+ return MESSAGES[MESSAGE_KEY].format(permission='execute')
+ finally:
+ os.chdir(cwd)
+
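+ # Actually create a file rather than relying on os.access, which
+ # can report misleading results, e.g. on network filesystems.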
+ if 'w' in permissions:
+ try:
+ temp_path = os.path.join(dirpath, 'temp__workspace_validation.txt')
+ with open(temp_path, 'w'):
+ pass  # creating the file is enough to verify write access
+ os.remove(temp_path)
+ except OSError:
+ return MESSAGES[MESSAGE_KEY].format(permission='write')
def check_file(filepath, permissions='r', **kwargs):
@@ -204,9 +227,8 @@ def check_file(filepath, permissions='r', **kwargs):
Args:
filepath (string): The filepath to validate.
permissions='r' (string): A string that includes the lowercase
- characters ``r``, ``w`` and/or ``x`` indicating required
- permissions for this file. See ``check_permissions`` for
- details.
+ characters ``r``, ``w`` and/or ``x``, indicating read, write, and
+ execute permissions (respectively) required for this file.
Returns:
A string error message if an error was found. ``None`` otherwise.
@@ -215,36 +237,12 @@ def check_file(filepath, permissions='r', **kwargs):
if not os.path.exists(filepath):
return MESSAGES['FILE_NOT_FOUND']
- permissions_warning = check_permissions(filepath, permissions)
- if permissions_warning:
- return permissions_warning
-
-
-def check_permissions(path, permissions, is_directory=False):
- """Validate permissions on a filesystem object.
-
- This function uses ``os.access`` to determine permissions access.
-
- Args:
- path (string): The path to examine for permissions.
- permissions (string): a string including the characters ``r``, ``w``
- and/or ``x`` (lowercase), indicating read, write, and execute
- permissions (respectively) that the filesystem object at ``path``
- must have.
- is_directory (boolean): Indicates whether the path refers to a directory
- (True) or a file (False). Defaults to False.
-
- Returns:
- A string error message if an error was found. ``None`` otherwise.
-
- """
for letter, mode, descriptor in (
('r', os.R_OK, 'read'),
('w', os.W_OK, 'write'),
('x', os.X_OK, 'execute')):
- if letter in permissions and not os.access(path, mode):
- message_key = 'NEED_PERMISSION_DIRECTORY' if is_directory else 'NEED_PERMISSION_FILE'
- return MESSAGES[message_key].format(permission=descriptor)
+ if letter in permissions and not os.access(filepath, mode):
+ return MESSAGES['NEED_PERMISSION_FILE'].format(permission=descriptor)
def _check_projection(srs, projected, projection_units):
@@ -644,7 +642,7 @@ def get_validated_dataframe(
patterns.append(f'{groups[0]}(.+){groups[2]}')
else:
# for regular column names, use the exact name as the pattern
- patterns.append(column.replace('(', '\(').replace(')', '\)'))
+ patterns.append(column.replace('(', r'\(').replace(')', r'\)'))
# select only the columns that match a pattern
df = df[[col for col in df.columns if any(
@@ -801,7 +799,11 @@ def check_spatial_overlap(spatial_filepaths_list,
for filepath in spatial_filepaths_list:
try:
info = pygeoprocessing.get_raster_info(filepath)
- except ValueError:
+ except (ValueError, RuntimeError):
+ # ValueError is raised by PyGeoprocessing < 3.4.4 when the file is
+ # not a raster.
+ # RuntimeError is raised by GDAL in PyGeoprocessing >= 3.4.4 when
+ # the file is not a raster.
info = pygeoprocessing.get_vector_info(filepath)
if info['projection_wkt'] is None:
diff --git a/tests/test_coastal_vulnerability.py b/tests/test_coastal_vulnerability.py
index 0b3be853a2..a6e6f60887 100644
--- a/tests/test_coastal_vulnerability.py
+++ b/tests/test_coastal_vulnerability.py
@@ -1190,11 +1190,15 @@ def test_aoi_multiple_features(self):
srs = osr.SpatialReference()
srs.ImportFromEPSG(26910) # UTM Zone 10N
wkt = srs.ExportToWkt()
+ # These two disjoint AOI polygons intersect the same landmass line
+ # segment. This tests an edge case where a MultiLineString
+ # geometry is created when landmass lines are clipped by the AOI.
poly_a = Polygon([
(-200, -200), (-100, -200), (-100, -100), (-200, -100),
(-200, -200)])
poly_b = Polygon([
- (100, 100), (200, 100), (200, 200), (100, 200), (100, 100)])
+ (100, -200), (200, -200), (200, -100), (100, -100),
+ (100, -200)])
pygeoprocessing.shapely_geometry_to_vector(
[poly_a, poly_b], aoi_path, wkt, 'GeoJSON')
@@ -1358,6 +1362,48 @@ def test_aoi_invalid_geometry(self):
aoi_path, landmass_path, model_resolution, target_vector_path,
polygon_pickle, lines_pickle, lines_rtree)
+ def test_prepare_landmass_invalid_geometry(self):
+ """CV: test handling invalid geometries in landmass vector."""
+ from natcap.invest import coastal_vulnerability
+ aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
+ srs = osr.SpatialReference()
+ srs.ImportFromEPSG(26910) # UTM Zone 10N
+ wkt = srs.ExportToWkt()
+
+ aoi_geometries = [Polygon([
+ (-200, -200), (200, -200), (200, 200), (-200, 200), (-200, -200)])]
+ pygeoprocessing.shapely_geometry_to_vector(
+ aoi_geometries, aoi_path, wkt, 'GeoJSON')
+
+ landmass_vector_path = os.path.join(self.workspace_dir, 'vector.gpkg')
+ n_features = make_vector_of_invalid_geoms(landmass_vector_path)
+
+ target_polygon_pickle_path = os.path.join(
+ self.workspace_dir, 'polygon.pickle')
+ target_lines_pickle_path = os.path.join(
+ self.workspace_dir, 'lines.pickle')
+ target_rtree_path = os.path.join(self.workspace_dir, 'rtree.dat')
+ # Create rtree files to exercise the function's logic of removing
+ # pre-existing files
+ target_rtree_path_base = os.path.splitext(target_rtree_path)[0]
+ open(target_rtree_path, 'a').close()
+ open(target_rtree_path_base + '.idx', 'a').close()
+
+ model_resolution = 100
+ target_vector_path = os.path.join(
+ self.workspace_dir, 'temp-shore-pts.gpkg')
+ coastal_vulnerability.prepare_landmass_line_index_and_interpolate_shore_points(
+ aoi_path, landmass_vector_path, model_resolution,
+ target_vector_path, target_polygon_pickle_path,
+ target_lines_pickle_path, target_rtree_path)
+
+ with open(target_polygon_pickle_path, 'rb') as polygon_file:
+ shapely_geom = pickle.load(polygon_file)
+
+ # Expect 1 input geometry to be skipped, and the rest to be in
+ # the pickled geometry's ``geoms``.
+ self.assertEqual(len(shapely_geom.geoms), n_features - 1)
+
def test_no_wwiii_coverage(self):
"""CV: test exception when shore points are outside max wwiii dist."""
from natcap.invest import coastal_vulnerability
@@ -1434,48 +1480,6 @@ def test_projected_wwiii_input(self):
layer = None
vector = None
- def test_prepare_landmass_invalid_geometry(self):
- """CV: test handling invalid geometries in landmass vector."""
- from natcap.invest import coastal_vulnerability
- aoi_path = os.path.join(self.workspace_dir, 'aoi.geojson')
- srs = osr.SpatialReference()
- srs.ImportFromEPSG(26910) # UTM Zone 10N
- wkt = srs.ExportToWkt()
-
- aoi_geometries = [Polygon([
- (-200, -200), (200, -200), (200, 200), (-200, 200), (-200, -200)])]
- pygeoprocessing.shapely_geometry_to_vector(
- aoi_geometries, aoi_path, wkt, 'GeoJSON')
-
- landmass_vector_path = os.path.join(self.workspace_dir, 'vector.gpkg')
- n_features = make_vector_of_invalid_geoms(landmass_vector_path)
-
- target_polygon_pickle_path = os.path.join(
- self.workspace_dir, 'polygon.pickle')
- target_lines_pickle_path = os.path.join(
- self.workspace_dir, 'lines.pickle')
- target_rtree_path = os.path.join(self.workspace_dir, 'rtree.dat')
- # Create rtree files to exercise the function's logic of removing
- # pre-exisiting files
- target_rtree_path_base = os.path.splitext(target_rtree_path)[0]
- open(target_rtree_path, 'a').close()
- open(target_rtree_path_base + '.idx', 'a').close()
-
- model_resolution = 100
- target_vector_path = os.path.join(
- self.workspace_dir, 'temp-shore-pts.gpkg')
- coastal_vulnerability.prepare_landmass_line_index_and_interpolate_shore_points(
- aoi_path, landmass_vector_path, model_resolution,
- target_vector_path, target_polygon_pickle_path,
- target_lines_pickle_path, target_rtree_path)
-
- with open(target_polygon_pickle_path, 'rb') as polygon_file:
- shapely_geom = pickle.load(polygon_file)
-
- # Expect 1 input geometry to be skipped, and the rest to be in
- # shapely_geom_list.
- self.assertTrue(len(shapely_geom.geoms) == n_features - 1)
-
def test_clip_project_already_projected_raster(self):
"""CV: test clip_and_project_raster on an already projected raster."""
from natcap.invest import coastal_vulnerability
diff --git a/tests/test_datastack.py b/tests/test_datastack.py
index 45d2e590bd..35d0695fa7 100644
--- a/tests/test_datastack.py
+++ b/tests/test_datastack.py
@@ -6,10 +6,10 @@
import pprint
import shutil
import sys
-import tarfile
import tempfile
import textwrap
import unittest
+from unittest.mock import patch
import numpy
import pandas
@@ -397,6 +397,25 @@ def test_archive_extraction(self):
os.path.join(spatial_csv_dir, spatial_csv_dict[4]['path']),
target_csv_vector_path)
+ def test_relative_path_failure(self):
+ """Datastack: raise error when relative path creation fails."""
+ from natcap.invest import datastack
+ params = {
+ 'workspace_dir': self.workspace,
+ }
+
+ archive_path = os.path.join(self.workspace, 'archive.invs.tar.gz')
+
+ # Call build_datastack_archive and force build_parameter_set
+ # to raise an error
+ error_message = 'Error saving datastack'
+ with self.assertRaises(ValueError):
+ with patch('natcap.invest.datastack.build_parameter_set',
+ side_effect=ValueError(error_message)):
+ datastack.build_datastack_archive(
+ params, 'test_datastack_modules.simple_parameters',
+ archive_path)
+
class ParameterSetTest(unittest.TestCase):
"""Test Datastack."""
@@ -506,6 +525,26 @@ def test_relative_parameter_set(self):
self.assertEqual(invest_version, __version__)
self.assertEqual(callable_name, modelname)
+ def test_relative_path_failure(self):
+ """Datastack: raise error when relative path creation fails."""
+ from natcap.invest import datastack
+
+ params = {
+ 'data_dir': os.path.join(self.workspace, 'data_dir'),
+ }
+ modelname = 'natcap.invest.foo'
+ paramset_filename = os.path.join(self.workspace, 'paramset.json')
+
+ # make the sample data so filepaths are interpreted correctly
+ os.makedirs(params['data_dir'])
+
+ # Call build_parameter_set and force it into an error state
+ with self.assertRaises(ValueError):
+ with patch('natcap.invest.os.path.relpath',
+ side_effect=ValueError):
+ datastack.build_parameter_set(
+ params, modelname, paramset_filename, relative=True)
+
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_relative_parameter_set_windows(self):
"""Datastack: test relative parameter set paths saved linux style."""
diff --git a/tests/test_habitat_quality.py b/tests/test_habitat_quality.py
index 0924dc76c8..2359cfeef1 100644
--- a/tests/test_habitat_quality.py
+++ b/tests/test_habitat_quality.py
@@ -1,4 +1,5 @@
"""Module for Regression Testing the InVEST Habitat Quality model."""
+import csv
import os
import shutil
import tempfile
@@ -245,6 +246,33 @@ def test_habitat_quality_presence_absence_regression(self):
# so we should exclude those new nodata pixel values.
assert_array_sum(raster_path, assert_value, include_nodata=False)
+ # Based on the scenarios used to generate the rasters above,
+ # rarity values are calculated as follows:
+ # For LULC 1, rarity = 1.0 - (5000 / (10000 + 5000)) = 0.6667.
+ # For LULC 2 and 3, rarity = 0.0 because they are not in the baseline.
+ expected_csv_values = {
+ 'rarity_c_regression.csv': [
+ (1, 0.6667, 4),
+ (2, 0.0, 0),
+ ],
+ 'rarity_f_regression.csv': [
+ (1, 0.6667, 4),
+ (3, 0.0, 0),
+ ],
+ }
+ for csv_filename in expected_csv_values:
+ csv_path = os.path.join(args['workspace_dir'], csv_filename)
+ with open(csv_path, newline='') as csvfile:
+ reader = csv.DictReader(csvfile, delimiter=',')
+ self.assertEqual(reader.fieldnames,
+ ['lulc_code', 'rarity_value'])
+ for (exp_lulc, exp_rarity,
+ places_to_round) in expected_csv_values[csv_filename]:
+ row = next(reader)
+ self.assertEqual(int(row['lulc_code']), exp_lulc)
+ self.assertAlmostEqual(float(row['rarity_value']),
+ exp_rarity, places_to_round)
+
def test_habitat_quality_regression_different_projections(self):
"""Habitat Quality: base regression test with simplified data."""
from natcap.invest import habitat_quality
@@ -1047,7 +1075,6 @@ def test_habitat_quality_case_insensitivty(self):
habitat_quality.execute(args)
# Reasonable to just check quality out in this case
- #assert_array_sum(
assert_array_sum(
os.path.join(args['workspace_dir'], 'quality_c.tif'),
5852.088)
diff --git a/tests/test_seasonal_water_yield_regression.py b/tests/test_seasonal_water_yield_regression.py
index 899fc1446b..e5eafa0e98 100644
--- a/tests/test_seasonal_water_yield_regression.py
+++ b/tests/test_seasonal_water_yield_regression.py
@@ -495,7 +495,7 @@ def test_duplicate_aoi_assertion(self):
args = {
'workspace_dir': self.workspace_dir,
'aoi_path': os.path.join(
- self.workspace_dir, 'aggregated_results_foo.shp'),
+ self.workspace_dir, 'aggregated_results_swy_foo.shp'),
'results_suffix': 'foo',
'alpha_m': '1/12',
'beta_i': '1.0',
diff --git a/tests/test_ucm.py b/tests/test_ucm.py
index 1696cd4e58..ea6ef583b7 100644
--- a/tests/test_ucm.py
+++ b/tests/test_ucm.py
@@ -1,12 +1,12 @@
"""InVEST Urban Heat Island Mitigation model tests."""
-import unittest
-import tempfile
-import shutil
import os
+import shutil
+import tempfile
+import unittest
import numpy
-from osgeo import gdal
import pandas
+from osgeo import gdal
REGRESSION_DATA = os.path.join(
os.path.dirname(__file__), '..', 'data', 'invest-test-data', 'ucm')
@@ -79,11 +79,9 @@ def test_ucm_regression_factors(self):
for key, expected_value in expected_results.items():
actual_value = float(results_feature.GetField(key))
# These accumulated values (esp. avd_eng_cn) are accumulated
- # and may differ past about 4 decimal places.
- self.assertAlmostEqual(
- actual_value, expected_value, places=4,
- msg='%s should be close to %f, actual: %f' % (
- key, expected_value, actual_value))
+ # and may differ slightly from expected regression values.
+ numpy.testing.assert_allclose(actual_value, expected_value,
+ rtol=1e-4)
finally:
results_layer = None
results_vector = None
@@ -110,10 +108,7 @@ def test_ucm_regression_factors(self):
# Expected energy savings is an accumulated value and may differ
# past about 4 decimal places.
- self.assertAlmostEqual(
- energy_sav, expected_energy_sav, places=4, msg=(
- '%f should be close to %f' % (
- energy_sav, expected_energy_sav)))
+ numpy.testing.assert_allclose(energy_sav, expected_energy_sav, rtol=1e-4)
self.assertEqual(n_nonetype, 119)
finally:
buildings_layer = None
@@ -151,10 +146,8 @@ def test_ucm_regression_factors(self):
# These accumulated values are accumulated
# and may differ past about 4 decimal places.
- self.assertAlmostEqual(
- energy_sav, expected_energy_sav, places=4, msg=(
- '%f should be close to %f' % (
- energy_sav, expected_energy_sav)))
+ numpy.testing.assert_allclose(energy_sav, expected_energy_sav,
+ rtol=1e-4)
self.assertEqual(n_nonetype, 119)
finally:
buildings_layer = None
@@ -215,11 +208,9 @@ def test_ucm_regression_intensity(self):
for key, expected_value in expected_results.items():
actual_value = float(results_feature.GetField(key))
# These accumulated values (esp. avd_eng_cn) are accumulated
- # and may differ past about 4 decimal places.
- self.assertAlmostEqual(
- actual_value, expected_value, places=4,
- msg='%s should be close to %f, actual: %f' % (
- key, expected_value, actual_value))
+ # and may differ slightly.
+ numpy.testing.assert_allclose(actual_value, expected_value,
+ rtol=1e-4)
finally:
results_layer = None
results_vector = None
@@ -335,7 +326,8 @@ def test_missing_lulc_value_in_table(self):
def test_bad_args(self):
"""UCM: test validation of bad arguments."""
- from natcap.invest import urban_cooling_model, validation
+ from natcap.invest import urban_cooling_model
+ from natcap.invest import validation
args = {
'workspace_dir': self.workspace_dir,
'results_suffix': 'test_suffix',
diff --git a/tests/test_ufrm.py b/tests/test_ufrm.py
index 07df671573..eb2cef2f6a 100644
--- a/tests/test_ufrm.py
+++ b/tests/test_ufrm.py
@@ -59,6 +59,11 @@ def test_ufrm_regression(self):
"""UFRM: regression test."""
from natcap.invest import urban_flood_risk_mitigation
args = self._make_args()
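+ # Capture the input AOI's field names up front; the output summary
+ # vector should preserve all of them.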
+ input_vector = gdal.OpenEx(args['aoi_watersheds_path'],
+ gdal.OF_VECTOR)
+ input_layer = input_vector.GetLayer()
+ input_fields = [field.GetName() for field in input_layer.schema]
+
urban_flood_risk_mitigation.execute(args)
result_vector = gdal.OpenEx(os.path.join(
@@ -66,13 +71,15 @@ def test_ufrm_regression(self):
gdal.OF_VECTOR)
result_layer = result_vector.GetLayer()
- # Check that all four expected fields are there.
+ # Check that all expected fields are there.
+ output_fields = ['aff_bld', 'serv_blt', 'rnf_rt_idx',
+ 'rnf_rt_m3', 'flood_vol']
+ output_fields += input_fields
self.assertEqual(
- set(('aff_bld', 'serv_blt', 'rnf_rt_idx', 'rnf_rt_m3',
- 'flood_vol')),
+ set(output_fields),
set(field.GetName() for field in result_layer.schema))
- result_feature = result_layer.GetFeature(0)
+ result_feature = result_layer.GetNextFeature()
for fieldname, expected_value in (
('aff_bld', 187010830.32202843),
('serv_blt', 13253546667257.65),
@@ -85,6 +92,11 @@ def test_ufrm_regression(self):
self.assertAlmostEqual(
result_val, expected_value, places=-places_to_round)
+ input_feature = input_layer.GetNextFeature()
+ for fieldname in input_fields:
+ self.assertEqual(result_feature.GetField(fieldname),
+ input_feature.GetField(fieldname))
+
result_feature = None
result_layer = None
result_vector = None
@@ -94,6 +106,11 @@ def test_ufrm_regression_no_infrastructure(self):
from natcap.invest import urban_flood_risk_mitigation
args = self._make_args()
del args['built_infrastructure_vector_path']
+ input_vector = gdal.OpenEx(args['aoi_watersheds_path'],
+ gdal.OF_VECTOR)
+ input_layer = input_vector.GetLayer()
+ input_fields = [field.GetName() for field in input_layer.schema]
+
urban_flood_risk_mitigation.execute(args)
result_raster = gdal.OpenEx(os.path.join(
@@ -115,9 +132,11 @@ def test_ufrm_regression_no_infrastructure(self):
result_layer = result_vector.GetLayer()
result_feature = result_layer.GetFeature(0)
- # Check that only the two expected fields are there.
+ # Check that only the expected fields are there.
+ output_fields = ['rnf_rt_idx', 'rnf_rt_m3', 'flood_vol']
+ output_fields += input_fields
self.assertEqual(
- set(('rnf_rt_idx', 'rnf_rt_m3', 'flood_vol')),
+ set(output_fields),
set(field.GetName() for field in result_layer.schema))
for fieldname, expected_value in (
@@ -218,7 +237,6 @@ def test_ufrm_explicit_zeros_in_table(self):
except ValueError:
self.fail('unexpected ValueError when testing curve number row with all zeros')
-
def test_ufrm_string_damage_to_infrastructure(self):
"""UFRM: handle str(int) structure indices.
diff --git a/tests/test_ui_server.py b/tests/test_ui_server.py
index 426dc57005..78cc86cd01 100644
--- a/tests/test_ui_server.py
+++ b/tests/test_ui_server.py
@@ -122,14 +122,41 @@ def test_write_parameter_set_file(self):
}),
'relativePaths': True,
}
- _ = test_client.post(
+ response = test_client.post(
f'{ROUTE_PREFIX}/write_parameter_set_file', json=payload)
+ self.assertEqual(
+ response.json,
+ {'message': 'Parameter set saved', 'error': False})
with open(filepath, 'r') as file:
actual_data = json.loads(file.read())
self.assertEqual(
set(actual_data),
{'args', 'invest_version', 'model_name'})
+ def test_write_parameter_set_file_error_handling(self):
+ """UI server: write_parameter_set_file endpoint
+ should catch a ValueError and return an error message.
+ """
+ test_client = ui_server.app.test_client()
+ self.workspace_dir = tempfile.mkdtemp()
+ filepath = os.path.join(self.workspace_dir, 'datastack.json')
+ payload = {
+ 'filepath': filepath,
+ 'moduleName': 'natcap.invest.carbon',
+ 'args': json.dumps({
+ 'workspace_dir': 'foo'
+ }),
+ 'relativePaths': True,
+ }
+ error_message = 'Error saving datastack'
+ with patch('natcap.invest.datastack.build_parameter_set',
+ side_effect=ValueError(error_message)):
+ response = test_client.post(
+ f'{ROUTE_PREFIX}/write_parameter_set_file', json=payload)
+ self.assertEqual(
+ response.json,
+ {'message': error_message, 'error': True})
+
def test_save_to_python(self):
"""UI server: save_to_python endpoint."""
test_client = ui_server.app.test_client()
@@ -163,11 +190,42 @@ def test_build_datastack_archive(self):
'carbon_pools_path': data_path
}),
}
- _ = test_client.post(
+ response = test_client.post(
f'{ROUTE_PREFIX}/build_datastack_archive', json=payload)
+ self.assertEqual(
+ response.json,
+ {'message': 'Datastack archive created', 'error': False})
# test_datastack.py asserts the actual archiving functionality
self.assertTrue(os.path.exists(target_filepath))
+ def test_build_datastack_archive_error_handling(self):
+ """UI server: build_datastack_archive endpoint
+ should catch a ValueError and return an error message.
+ """
+ test_client = ui_server.app.test_client()
+ self.workspace_dir = tempfile.mkdtemp()
+ target_filepath = os.path.join(self.workspace_dir, 'data.tgz')
+ data_path = os.path.join(self.workspace_dir, 'data.csv')
+ with open(data_path, 'w') as file:
+ file.write('hello')
+
+ payload = {
+ 'filepath': target_filepath,
+ 'moduleName': 'natcap.invest.carbon',
+ 'args': json.dumps({
+ 'workspace_dir': 'foo',
+ 'carbon_pools_path': data_path
+ }),
+ }
+ error_message = 'Error saving datastack'
+ with patch('natcap.invest.datastack.build_datastack_archive',
+ side_effect=ValueError(error_message)):
+ response = test_client.post(
+ f'{ROUTE_PREFIX}/build_datastack_archive', json=payload)
+ self.assertEqual(
+ response.json,
+ {'message': error_message, 'error': True})
+
@patch('natcap.invest.ui_server.usage.requests.post')
@patch('natcap.invest.ui_server.usage.requests.get')
def test_log_model_start(self, mock_get, mock_post):
diff --git a/tests/test_urban_nature_access.py b/tests/test_urban_nature_access.py
index 0e4858a832..a3d3d7b43c 100644
--- a/tests/test_urban_nature_access.py
+++ b/tests/test_urban_nature_access.py
@@ -358,6 +358,21 @@ def test_core_model(self):
self.assertAlmostEqual(numpy.min(valid_pixels), 1171.7352294921875)
self.assertAlmostEqual(numpy.max(valid_pixels), 11898.0712890625)
+ def test_no_lulc_nodata(self):
+ """UNA: verify behavior when the LULC has no nodata value."""
+ from natcap.invest import urban_nature_access
+
+ args = _build_model_args(self.workspace_dir)
+ args['search_radius_mode'] = urban_nature_access.RADIUS_OPT_UNIFORM
+ args['search_radius'] = 100
+
+ raster = gdal.OpenEx(args['lulc_raster_path'], gdal.OF_RASTER)
+ band = raster.GetRasterBand(1)
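+ # Remove the nodata value to simulate a LULC raster without one.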
+ band.DeleteNoDataValue()
+ band = None
+ raster = None
+ urban_nature_access.execute(args)
+
def test_split_urban_nature(self):
from natcap.invest import urban_nature_access
@@ -397,7 +412,7 @@ def test_split_urban_nature(self):
)
for fieldname, expected_value in expected_values.items():
numpy.testing.assert_allclose(
- admin_feature.GetField(fieldname), expected_value)
+ admin_feature.GetField(fieldname), expected_value, rtol=1e-6)
# The sum of the under-and-oversupplied populations should be equal
# to the total population count.
@@ -588,8 +603,8 @@ def test_radii_by_pop_group(self):
set(defn.GetName() for defn in summary_layer.schema),
set(expected_field_values.keys()))
for fieldname, expected_value in expected_field_values.items():
- self.assertAlmostEqual(
- expected_value, summary_feature.GetField(fieldname))
+ numpy.testing.assert_allclose(
+ expected_value, summary_feature.GetField(fieldname), rtol=1e-6)
output_dir = os.path.join(args['workspace_dir'], 'output')
self._assert_urban_nature(os.path.join(
@@ -664,8 +679,8 @@ def test_radii_by_pop_group_exponential_kernal(self):
set(defn.GetName() for defn in summary_layer.schema),
set(expected_field_values.keys()))
for fieldname, expected_value in expected_field_values.items():
- self.assertAlmostEqual(
- expected_value, summary_feature.GetField(fieldname))
+ numpy.testing.assert_allclose(
+ expected_value, summary_feature.GetField(fieldname), rtol=1e-6)
output_dir = os.path.join(args['workspace_dir'], 'output')
self._assert_urban_nature(os.path.join(
diff --git a/tests/test_validation.py b/tests/test_validation.py
index b87e5e6663..f5ad094f50 100644
--- a/tests/test_validation.py
+++ b/tests/test_validation.py
@@ -1,11 +1,10 @@
"""Testing module for validation."""
-import codecs
-import collections
import functools
import os
-import platform
import shutil
+import stat
import string
+import sys
import tempfile
import textwrap
import time
@@ -330,6 +329,91 @@ def test_workspace_not_exists(self):
new_dir, must_exist=False, permissions='rwx'))
+@unittest.skipIf(
+ sys.platform.startswith('win'),
+ 'requires support for os.chmod(), which is unreliable on Windows')
+class DirectoryValidationNotWindows(unittest.TestCase):
+ """Test Directory Permissions Validation."""
+
+ def test_invalid_permissions_r(self):
+ """Validation: when a folder must have read/write/execute
+ permissions but is missing write and execute permissions."""
+ from natcap.invest import validation
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ os.chmod(tempdir, stat.S_IREAD)
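+ # Only read permission is granted; check_directory tests x before
+ # w, so 'execute' is the first missing permission reported.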
+ validation_warning = validation.check_directory(tempdir,
+ permissions='rwx')
+ self.assertEqual(
+ validation_warning,
+ validation.MESSAGES['NEED_PERMISSION_DIRECTORY'].format(permission='execute'))
+
+ def test_invalid_permissions_w(self):
+ """Validation: when a folder must have read/write/execute
+ permissions but is missing read and execute permissions."""
+ from natcap.invest import validation
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ os.chmod(tempdir, stat.S_IWRITE)
+ validation_warning = validation.check_directory(tempdir,
+ permissions='rwx')
+ self.assertEqual(
+ validation_warning,
+ validation.MESSAGES['NEED_PERMISSION_DIRECTORY'].format(permission='read'))
+
+ def test_invalid_permissions_x(self):
+ """Validation: when a folder must have read/write/execute
+ permissions but is missing read and write permissions."""
+ from natcap.invest import validation
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ os.chmod(tempdir, stat.S_IEXEC)
+ validation_warning = validation.check_directory(tempdir,
+ permissions='rwx')
+ self.assertEqual(
+ validation_warning,
+ validation.MESSAGES['NEED_PERMISSION_DIRECTORY'].format(permission='read'))
+
+ def test_invalid_permissions_rw(self):
+ """Validation: when a folder must have read/write/execute
+ permissions but is missing execute permission."""
+ from natcap.invest import validation
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ os.chmod(tempdir, stat.S_IREAD | stat.S_IWRITE)
+ validation_warning = validation.check_directory(tempdir,
+ permissions='rwx')
+ self.assertEqual(
+ validation_warning,
+ validation.MESSAGES['NEED_PERMISSION_DIRECTORY'].format(permission='execute'))
+
+ def test_invalid_permissions_rx(self):
+ """Validation: when a folder must have read/write/execute
+ permissions but is missing write permission."""
+ from natcap.invest import validation
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ os.chmod(tempdir, stat.S_IREAD | stat.S_IEXEC)
+ validation_warning = validation.check_directory(tempdir,
+ permissions='rwx')
+ self.assertEqual(
+ validation_warning,
+ validation.MESSAGES['NEED_PERMISSION_DIRECTORY'].format(permission='write'))
+
+ def test_invalid_permissions_wx(self):
+ """Validation: when a folder must have read/write/execute
+ permissions but is missing read permission."""
+ from natcap.invest import validation
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ os.chmod(tempdir, stat.S_IWRITE | stat.S_IEXEC)
+ validation_warning = validation.check_directory(tempdir,
+ permissions='rwx')
+ self.assertEqual(
+ validation_warning,
+ validation.MESSAGES['NEED_PERMISSION_DIRECTORY'].format(permission='read'))
+
+
class FileValidation(unittest.TestCase):
"""Test File Validator."""
@@ -539,7 +623,6 @@ def test_vector_projected_in_m(self):
def test_wrong_geom_type(self):
"""Validation: checks that the vector's geometry type is correct."""
- from natcap.invest import spec_utils
from natcap.invest import validation
driver = gdal.GetDriverByName('GPKG')
filepath = os.path.join(self.workspace_dir, 'vector.gpkg')
@@ -1369,7 +1452,6 @@ def test_csv_raster_validation_missing_file(self):
})
self.assertIn('File not found', str(cm.exception))
-
def test_csv_raster_validation_not_projected(self):
"""validation: validate unprojected raster within csv column"""
from natcap.invest import validation
diff --git a/workbench/src/main/ipcMainChannels.js b/workbench/src/main/ipcMainChannels.js
index de7ca13c19..f908d7c33b 100644
--- a/workbench/src/main/ipcMainChannels.js
+++ b/workbench/src/main/ipcMainChannels.js
@@ -14,6 +14,7 @@ export const ipcMainChannels = {
IS_FIRST_RUN: 'is-first-run',
LOGGER: 'logger',
OPEN_EXTERNAL_URL: 'open-external-url',
OPEN_LOCAL_HTML: 'open-local-html',
+ OPEN_PATH: 'open-path',
SET_SETTING: 'set-setting',
SHOW_ITEM_IN_FOLDER: 'show-item-in-folder',
diff --git a/workbench/src/main/setupDialogs.js b/workbench/src/main/setupDialogs.js
index cecc003a90..3e79df45ba 100644
--- a/workbench/src/main/setupDialogs.js
+++ b/workbench/src/main/setupDialogs.js
@@ -26,4 +26,10 @@ export default function setupDialogs() {
shell.showItemInFolder(filepath);
}
);
+
+ ipcMain.handle(
+ ipcMainChannels.OPEN_PATH, async (event, dirpath) => {
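+ // shell.openPath resolves to '' on success, or an error message string.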
+ return await shell.openPath(dirpath);
+ }
+ );
}
diff --git a/workbench/src/renderer/components/InvestTab/index.jsx b/workbench/src/renderer/components/InvestTab/index.jsx
index 0c250a4e3a..7e26369c8b 100644
--- a/workbench/src/renderer/components/InvestTab/index.jsx
+++ b/workbench/src/renderer/components/InvestTab/index.jsx
@@ -7,6 +7,8 @@ import TabContainer from 'react-bootstrap/TabContainer';
import Nav from 'react-bootstrap/Nav';
import Row from 'react-bootstrap/Row';
import Col from 'react-bootstrap/Col';
+import Modal from 'react-bootstrap/Modal';
+import Button from 'react-bootstrap/Button';
import {
MdKeyboardArrowRight,
} from 'react-icons/md';
@@ -45,10 +47,6 @@ async function investGetSpec(modelName) {
return undefined;
}
-function handleOpenWorkspace(logfile) {
- ipcRenderer.send(ipcMainChannels.SHOW_ITEM_IN_FOLDER, logfile);
-}
-
/**
* Render an invest model setup form, log display, etc.
* Manage launching of an invest model in a child process.
@@ -64,6 +62,7 @@ class InvestTab extends React.Component {
uiSpec: null,
userTerminated: false,
executeClicked: false,
+ showErrorModal: false,
};
this.investExecute = this.investExecute.bind(this);
@@ -71,6 +70,8 @@ class InvestTab extends React.Component {
this.terminateInvestProcess = this.terminateInvestProcess.bind(this);
this.investLogfileCallback = this.investLogfileCallback.bind(this);
this.investExitCallback = this.investExitCallback.bind(this);
+ this.handleOpenWorkspace = this.handleOpenWorkspace.bind(this);
+ this.showErrorModal = this.showErrorModal.bind(this);
}
async componentDidMount() {
@@ -154,6 +155,7 @@ class InvestTab extends React.Component {
updateJobProperties(tabID, {
argsValues: args,
status: undefined, // in case of re-run, clear an old status
+ logfile: undefined, // in case of re-run where logfile may no longer exist, clear old logfile path
});
ipcRenderer.send(
@@ -186,6 +188,22 @@ class InvestTab extends React.Component {
);
}
+ async handleOpenWorkspace(workspace_dir) {
+ if (workspace_dir) {
+ const error = await ipcRenderer.invoke(ipcMainChannels.OPEN_PATH, workspace_dir);
+ if (error) {
+ logger.error(`Error opening workspace (${workspace_dir}). ${error}`);
+ this.showErrorModal(true);
+ }
+ }
+ }
+
+ showErrorModal(shouldShow) {
+ this.setState({
+ showErrorModal: shouldShow,
+ });
+ }
+
render() {
const {
activeTab,
@@ -193,6 +211,7 @@ class InvestTab extends React.Component {
argsSpec,
uiSpec,
executeClicked,
+ showErrorModal,
} = this.state;
const {
status,
@@ -213,88 +232,99 @@ class InvestTab extends React.Component {
const sidebarFooterElementId = `sidebar-footer-${tabID}`;
return (
-
-
-