From 5d51615ec6a62053e6a6ff057fb8b62f1691a94f Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Thu, 30 Jul 2020 18:53:09 +0200 Subject: [PATCH 001/109] Fixed README link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0b720647..d7390d2d 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ See also the [changelog](CHANGELOG.md) for the changes between versions and the This repository contains a set of files formally describing the openEO Processes: -* The [`*.json`](examples/) files provide the process specifications as defined by the openEO API. +* The `*.json` files provide the process specifications as defined by the openEO API. * [subtype-schemas.json](meta/subtype-schemas.json) in the `meta` folder defines common data types (`subtype`s) for JSON Schema used in openEO processes. * The [`examples`](examples/) folder contains some useful examples that the processes link to. All of these are non-binding additions. * The [`tests`](tests/) folder can be used to test the process specification for validity and consistent "style". It also allows to render the processes in a web browser. From f317bb9fa03bc347bfe2bdde326a10102027635a Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 4 Aug 2020 11:44:20 +0200 Subject: [PATCH 002/109] Clarify overlap resolver #184 --- CHANGELOG.md | 2 ++ merge_cubes.json | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 167c081f..de862028 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased / Draft +### Fixed +- Clarify how the parameters passed to the overlap resolver correspond to the data cubes. [#184](https://github.com/Open-EO/openeo-processes/issues/184) ## 1.0.0 - 2020-07-31 diff --git a/merge_cubes.json b/merge_cubes.json index 7aee9a3c..7ec9473e 100644 --- a/merge_cubes.json +++ b/merge_cubes.json @@ -31,14 +31,14 @@ "parameters": [ { "name": "x", - "description": "The first value.", + "description": "The overlapping value from the first data cube `cube1`.", "schema": { "description": "Any data type." } }, { "name": "y", - "description": "The second value.", + "description": "The overlapping value from the second data cube `cube2`.", "schema": { "description": "Any data type." } From 48aa3ddce24aa645d37e3291c69ea05e368d68cb Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Thu, 3 Sep 2020 19:46:49 +0200 Subject: [PATCH 003/109] Fixed invalid examples --- CHANGELOG.md | 1 + examples/array_contains_nodata.json | 6 ++++-- examples/array_find_nodata.json | 6 ++++-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index de862028..67aa16da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed - Clarify how the parameters passed to the overlap resolver correspond to the data cubes.
[#184](https://github.com/Open-EO/openeo-processes/issues/184) +- Examples `array_contains_nodata` and `array_find_nodata` ## 1.0.0 - 2020-07-31 diff --git a/examples/array_contains_nodata.json b/examples/array_contains_nodata.json index 96c84999..755d23fa 100644 --- a/examples/array_contains_nodata.json +++ b/examples/array_contains_nodata.json @@ -31,11 +31,13 @@ "from_parameter": "data" }, "process": { - "process-graph": { + "process_graph": { "is_null": { "process_id": "is_nodata", "arguments": { - "from_parameter": "x" + "x": { + "from_parameter": "x" + } }, "result": true } diff --git a/examples/array_find_nodata.json b/examples/array_find_nodata.json index b2cd3f7c..a3bef55e 100644 --- a/examples/array_find_nodata.json +++ b/examples/array_find_nodata.json @@ -37,11 +37,13 @@ "from_parameter": "data" }, "process": { - "process-graph": { + "process_graph": { "is_null": { "process_id": "is_nodata", "arguments": { - "from_parameter": "x" + "x": { + "from_parameter": "x" + } }, "result": true } From f8e06ec1358e8814628f654e9dbc53039b9c4713 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Thu, 3 Sep 2020 19:47:16 +0200 Subject: [PATCH 004/109] Updated test dependencies --- tests/package.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/package.json b/tests/package.json index d9cab1cb..7c2be337 100644 --- a/tests/package.json +++ b/tests/package.json @@ -19,12 +19,12 @@ }, "devDependencies": { "@apidevtools/json-schema-ref-parser": "^9.0.6", - "@openeo/js-processgraphs": "^1.0.0-beta.3", - "ajv": "^6.10.2", + "@openeo/js-processgraphs": "^1.0.0-beta.4", + "ajv": "^6.12.4", "concat-json-files": "^1.1.0", "glob": "^7.1.6", "http-server": "^0.12.3", - "jest": "^24.9.0", + "jest": "^26.4.2", "markdown-spellcheck": "^1.3.1", "markdownlint": "^0.18.0" }, From 3c52968b722fd4a4f30fb382dc5d08b1f17f1525 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 16 Sep 2020 12:00:56 +0200 Subject: [PATCH 005/109] Fix issue 189 (#190) * Clarify specifications for `is_nan`, `is_nodata`, `is_valid`. #189 Added `is_infinity` and `nan`. --- CHANGELOG.md | 6 ++++++ is_infinite.json | 50 ++++++++++++++++++++++++++++++++++++++++++++++++ is_nan.json | 20 +++++++++++++++---- is_nodata.json | 12 +++++++++--- is_valid.json | 8 +++++++- nan.json | 27 ++++++++++++++++++++++++++ 6 files changed, 115 insertions(+), 8 deletions(-) create mode 100644 is_infinite.json create mode 100644 nan.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 67aa16da..bc333cc3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,8 +6,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased / Draft +### Added +- Processes: + - `is_infinity` + - `nan` + ### Fixed - Clarify how the parameters passed to the overlap resolver correspond to the data cubes. [#184](https://github.com/Open-EO/openeo-processes/issues/184) +- Improve and clarify specifications for `is_nan`, `is_nodata`, `is_valid`. [#189](https://github.com/Open-EO/openeo-processes/issues/189) - Examples `array_contains_nodata` and `array_find_nodata` ## 1.0.0 - 2020-07-31 diff --git a/is_infinite.json b/is_infinite.json new file mode 100644 index 00000000..0501b8ef --- /dev/null +++ b/is_infinite.json @@ -0,0 +1,50 @@ +{ + "id": "is_infinite", + "summary": "Value is an infinite number", + "description": "Checks whether the specified value `x` is an infinite number. The definition of infinite numbers follows the [IEEE Standard 754](https://ieeexplore.ieee.org/document/4610935). 
The special numerical value `NaN` (not a number) as defined by the [IEEE Standard 754](https://ieeexplore.ieee.org/document/4610935) is not an infinite number and must return `false`.", "categories": [ "comparison" ], "parameters": [ { "name": "x", "description": "The data to check.", "schema": { "description": "Any data type is allowed." } } ], "returns": { "description": "`true` if the data is an infinite number, otherwise `false`.", "schema": { "type": "boolean" } }, "examples": [ { "arguments": { "x": 1 }, "returns": false }, { "arguments": { "x": "Test" }, "returns": false }, { "arguments": { "x": null }, "returns": false } ], "links": [ { "rel": "about", "href": "https://ieeexplore.ieee.org/document/4610935", "title": "IEEE Standard 754-2008 for Floating-Point Arithmetic" } ] } \ No newline at end of file diff --git a/is_nan.json b/is_nan.json index 37d64157..25650261 100644 --- a/is_nan.json +++ b/is_nan.json @@ -1,9 +1,10 @@ { "id": "is_nan", "summary": "Value is not a number", - "description": "Checks whether the specified value `x` is not a number (often abbreviated as `NaN`). The definition of `NaN` follows the [IEEE Standard 754](https://ieeexplore.ieee.org/document/4610935). All non-numeric data types MUST also return `true`.", + "description": "Checks whether the specified value `x` is not a number. Returns `false` for numeric values (integers and floating point numbers), except for the special value `NaN` as defined by the [IEEE Standard 754](https://ieeexplore.ieee.org/document/4610935). All non-numeric data types MUST also return `true`, including arrays that contain `NaN` values.", "categories": [ "comparison", "math > constants" ], "parameters": [ { "name": "x", "description": "The data to check.", "schema": { "description": "Any data type is allowed." } } ], "returns": { - "description": "`true` if the data is not a number, otherwise `false`", + "description": "`true` if the data is not a number, otherwise `false`.", "schema": { "type": "boolean" } }, @@ -32,6 +33,12 @@ "x": "Test" }, "returns": true + }, + { + "arguments": { + "x": null + }, + "returns": true } ], "links": [ @@ -39,6 +46,11 @@ "rel": "about", "href": "https://ieeexplore.ieee.org/document/4610935", "title": "IEEE Standard 754-2008 for Floating-Point Arithmetic" + }, + { + "rel": "about", + "href": "http://mathworld.wolfram.com/NaN.html", + "title": "NaN explained by Wolfram MathWorld" + } ] -} \ No newline at end of file +} diff --git a/is_nodata.json b/is_nodata.json index f2ab4d5e..1aaef124 100644 --- a/is_nodata.json +++ b/is_nodata.json @@ -1,7 +1,7 @@ { "id": "is_nodata", "summary": "Value is not a no-data value", - "description": "Checks whether the specified data is a missing data, i.e. equals to any of the no-data values / `null`.", + "description": "Checks whether the specified data is a missing data, i.e. equals to `null` or any of the no-data values specified in the metadata.
The special numerical value `NaN` (not a number) as defined by the [IEEE Standard 754](https://ieeexplore.ieee.org/document/4610935) is not considered no-data and must return `false`.", "categories": [ "comparison" ], @@ -15,7 +15,7 @@ } ], "returns": { - "description": "`true` if the data is a no-data value, otherwise `false`", + "description": "`true` if the data is a no-data value, otherwise `false`.", "schema": { "type": "boolean" } @@ -38,6 +38,12 @@ "x": null }, "returns": true + }, + { + "arguments": { + "x": [null, null] + }, + "returns": false } ] -} \ No newline at end of file +} diff --git a/is_valid.json b/is_valid.json index 84f67ce8..56c57110 100644 --- a/is_valid.json +++ b/is_valid.json @@ -1,7 +1,7 @@ { "id": "is_valid", "summary": "Value is valid data", - "description": "Checks whether the specified value `x` is valid. A value is considered valid if it is\n\n1. not a no-data value (`null`) and\n2. a finite number (only if `x` is a number). The definition of finite and infinite numbers follows the [IEEE Standard 754](https://ieeexplore.ieee.org/document/4610935).", + "description": "Checks whether the specified value `x` is valid. The following values are considered valid:\n\n* Any finite numerical value (integers and floating point numbers). The definition of finite numbers follows the [IEEE Standard 754](https://ieeexplore.ieee.org/document/4610935) and excludes the special value `NaN` (not a number).\n* Any other value that is not a no-data value according to ``is_nodata()``. All arrays and objects are valid.", "categories": [ "comparison" ], @@ -38,6 +38,12 @@ "x": null }, "returns": false + }, + { + "arguments": { + "x": [null, null] + }, + "returns": true } ], "links": [ diff --git a/nan.json b/nan.json new file mode 100644 index 00000000..0ed781af --- /dev/null +++ b/nan.json @@ -0,0 +1,27 @@ +{ + "id": "nan", + "summary": "Not a Number (NaN)", + "description": "NaN (not a number) is a symbolic floating-point representation which is neither a signed infinity nor a finite number.", + "categories": [ + "math > constants" + ], + "parameters": [], + "returns": { + "description": "Returns NaN.", + "schema": { + "description": "Returns NaN.\n\nJSON Schema can't represent NaN, thus a schema can't be specified." 
+ } + }, + "links": [ + { + "rel": "about", + "href": "https://ieeexplore.ieee.org/document/4610935", + "title": "IEEE Standard 754-2008 for Floating-Point Arithmetic" + }, + { + "rel": "about", + "href": "http://mathworld.wolfram.com/NaN.html", + "title": "NaN explained by Wolfram MathWorld" + } + ] +} \ No newline at end of file From 5c90a90604b3a00c7b031f828211e786bdcdbc25 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 16 Sep 2020 12:10:20 +0200 Subject: [PATCH 006/109] Remove strange examples etc #189 #190 --- is_infinite.json | 20 -------------------- is_nodata.json | 5 ++++- is_valid.json | 7 +++++-- 3 files changed, 9 insertions(+), 23 deletions(-) diff --git a/is_infinite.json b/is_infinite.json index 0501b8ef..9de215b0 100644 --- a/is_infinite.json +++ b/is_infinite.json @@ -20,26 +20,6 @@ "type": "boolean" } }, - "examples": [ - { - "arguments": { - "x": 1 - }, - "returns": false - }, - { - "arguments": { - "x": "Test" - }, - "returns": false - }, - { - "arguments": { - "x": null - }, - "returns": false - } - ], "links": [ { "rel": "about", diff --git a/is_nodata.json b/is_nodata.json index 1aaef124..0b8b38d3 100644 --- a/is_nodata.json +++ b/is_nodata.json @@ -41,7 +41,10 @@ }, { "arguments": { - "x": [null, null] + "x": [ + null, + null + ] }, "returns": false } diff --git a/is_valid.json b/is_valid.json index 56c57110..82bfe485 100644 --- a/is_valid.json +++ b/is_valid.json @@ -1,7 +1,7 @@ { "id": "is_valid", "summary": "Value is valid data", - "description": "Checks whether the specified value `x` is valid. The following values are considered valid:\n\n* Any finite numerical value (integers and floating point numbers). The definition of finite numbers follows the [IEEE Standard 754](https://ieeexplore.ieee.org/document/4610935) and excludes the special value `NaN` (not a number).\n* Any other value that is not a no-data value according to ``is_nodata()``. All arrays and objects are valid.", + "description": "Checks whether the specified value `x` is valid. The following values are considered valid:\n\n* Any finite numerical value (integers and floating point numbers). The definition of finite numbers follows the [IEEE Standard 754](https://ieeexplore.ieee.org/document/4610935) and excludes the special value `NaN` (not a number).\n* Any other value that is not a no-data value according to ``is_nodata()`. Thus all arrays, objects and strings are valid, regardless of their content.", "categories": [ "comparison" ], @@ -41,7 +41,10 @@ }, { "arguments": { - "x": [null, null] + "x": [ + null, + null + ] }, "returns": true } From 66c5f782f3a1f492d66191ad58a8c3b810ddfc20 Mon Sep 17 00:00:00 2001 From: clausmichele <31700619+clausmichele@users.noreply.github.com> Date: Tue, 22 Sep 2020 12:12:39 +0200 Subject: [PATCH 007/109] Add aggregate_spatial_window (#192) --- aggregate_spatial_window.json | 98 +++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100644 aggregate_spatial_window.json diff --git a/aggregate_spatial_window.json b/aggregate_spatial_window.json new file mode 100644 index 00000000..994e4114 --- /dev/null +++ b/aggregate_spatial_window.json @@ -0,0 +1,98 @@ +{ + "id": "aggregate_spatial_window", + "summary": "Zonal statistics for rectangular windows", + "description": "Aggregates statistics over the horizontal spatial dimensions (axes `x` and `y`) of the data cube.\n\nThe pixel grid for the axes `x` and `y` is divided into non-overlapping windows with the size specified in the parameter `size`. 
If the number of values for the axes `x` and `y` is not a multiple of the window size, the behaviour specified in `boundary` parameter is applied.\nFor each of these windows, the reducer process computes the result.", "categories": [ "cubes", "aggregate & resample" ], "parameters": [ { "name": "data", "description": "A raster data cube.", "schema": { "type": "object", "subtype": "raster-cube" } }, { "name": "reducer", "description": "A reducer to be applied on all values of each geometry. A reducer is a single process such as ``mean()`` or a set of processes, which computes a single value for a list of values, see the category 'reducer' for such processes.", "schema": { "type": "object", "subtype": "process-graph", "parameters": [ { "name": "data", "description": "An array with elements of any type.", "schema": { "type": "array", "items": { "description": "Any data type." } } }, { "name": "context", "description": "Additional data passed by the user.", "schema": { "description": "Any data type." }, "optional": true, "default": null } ] } }, { "name": "size", "description": "Window sizes in pixels along the horizontal spatial dimensions.\n\nThe first value corresponds to the `x` axis, the second value corresponds to the `y` axis.", "schema": { "type": "array", "items": { "type": "object", "subtype": "chunk-size", "required": [ "value" ], "properties": { "value": { "type": "number", "minimum": 1, "description": "The unit the values are given in pixels (`px`)" } } } } }, { "name": "boundary", "description": "Behavior to apply if the number of values for the axes `x` and `y` is not a multiple of the corresponding value in the `size` parameter. Options are:\n\n- `pad` (default): pad the data cube with `NaN` (not a number) values to fit the required window size.\n\n- `trim`: trim the data cube to fit the required window size.", "schema": { "type": "string", "enum": [ "pad", "trim" ] }, "optional": true, "default": "pad" }, { "name": "context", "description": "Additional data to be passed to the reducer.", "schema": { "description": "Any data type." }, "optional": true, "default": null } ], "returns": { "description": "A data cube with the newly computed values and the same dimensions.\n\nThe resolution will change, depending on the chosen parameters. It decreases for the dimensions which have the corresponding parameter `size` = [`x_size`,`y_size`] set to values greater than 1.\n\nThe dimension labels will be set to the center of the window.
The other dimension properties (name, type and reference system) remain unchanged.", "schema": { "type": "object", "subtype": "raster-cube" } } } From 42ae8ff7195c0b39610d483f5062516948c93494 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 22 Sep 2020 14:53:21 +0200 Subject: [PATCH 008/109] Further tweaks for aggregate_spatial_window --- aggregate_spatial_window.json | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/aggregate_spatial_window.json b/aggregate_spatial_window.json index 994e4114..73d081bf 100644 --- a/aggregate_spatial_window.json +++ b/aggregate_spatial_window.json @@ -1,11 +1,12 @@ { "id": "aggregate_spatial_window", "summary": "Zonal statistics for rectangular windows", - "description": "Aggregates statistics over the horizontal spatial dimensions (axes `x` and `y`) of the data cube.\n\nThe pixel grid for the axes `x` and `y` is divided into non-overlapping windows with the size specified in the parameter `size`. If the number of values for the axes `x` and `y` is not a multiple of the window size, the behaviour specified in `boundary` parameter is applied.\nFor each of these windows, the reducer process computes the result.", + "description": "Aggregates statistics over the horizontal spatial dimensions (axes `x` and `y`) of the data cube.\n\nThe pixel grid for the axes `x` and `y` is divided into non-overlapping windows with the size specified in the parameter `size`. If the number of values for the axes `x` and `y` is not a multiple of the corresponding window size, the behaviour specified in `boundary` parameter is applied.\nFor each of these windows, the reducer process computes the result.", "categories": [ "cubes", "aggregate & resample" ], + "experimental": true, "parameters": [ { "name": "data", @@ -17,7 +18,7 @@ }, { "name": "reducer", - "description": "A reducer to be applied on all values of each geometry. A reducer is a single process such as ``mean()`` or a set of processes, which computes a single value for a list of values, see the category 'reducer' for such processes.", + "description": "A reducer to be applied on the list of values, which contain all pixels covered by the window. A reducer is a single process such as ``mean()`` or a set of processes, which computes a single value for a list of values, see the category 'reducer' for such processes.", "schema": { "type": "object", "subtype": "process-graph", "parameters": [ @@ -49,25 +50,17 @@ }, { "name": "size", "description": "Window sizes in pixels along the horizontal spatial dimensions.\n\nThe first value corresponds to the `x` axis, the second value corresponds to the `y` axis.", "schema": { "type": "array", + "minItems": 2, + "maxItems": 2, "items": { - "type": "object", - "subtype": "chunk-size", - "required": [ - "value" - ], - "properties": { - "value": { - "type": "number", - "minimum": 1, - "description": "The unit the values are given in pixels (`px`)" - } - } + "type": "integer", + "minimum": 1 } } }, { "name": "boundary", - "description": "Behavior to apply if the number of values for the axes `x` and `y` is not a multiple of the corresponding value in the `size` parameter. Options are:\n\n- `pad` (default): pad the data cube with `NaN` (not a number) values to fit the required window size.\n\n- `trim`: trim the data cube to fit the required window size.", + "description": "Behaviour to apply if the number of values for the axes `x` and `y` is not a multiple of the corresponding value in the `size` parameter.
Options are:\n\n- `pad` (default): pad the data cube with the no-data value `null` to fit the required window size.\n\n- `trim`: trim the data cube to fit the required window size.", "schema": { "type": "string", "enum": [ @@ -89,10 +82,10 @@ } ], "returns": { - "description": "A data cube with the newly computed values and the same dimensions.\n\nThe resolution will change, depending on the chosen parameters. It decreases for the dimensions which have the corresponding parameter `size` = [`x_size`,`y_size`] set to values greater than 1.\n\nThe dimension labels will be set to the center of the window. The other dimension properties (name, type and reference system) remain unchanged.", + "description": "A data cube with the newly computed values and the same dimensions.\n\nThe resolution will change depending on the chosen values for the `size` and `boundary` parameter. It usually decreases for the dimensions which have the corresponding parameter `size` set to values greater than 1.\n\nThe dimension labels will be set to the coordinate at the center of the window. The other dimension properties (name, type and reference system) remain unchanged.", "schema": { "type": "object", "subtype": "raster-cube" } } -} +} \ No newline at end of file From 5059f50d404e3fa91484e10c1a91cdd3fa6672b3 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Thu, 24 Sep 2020 11:40:56 +0200 Subject: [PATCH 009/109] Establish proposal procedure #196 (#197) --- CHANGELOG.md | 4 ++ README.md | 7 +++- .../aggregate_spatial_binary.json | 0 .../reduce_dimension_binary.json | 0 .../run_udf_externally.json | 0 tests/package.json | 2 +- tests/processes.test.js | 37 ++++++++++++------- tests/testHelpers.js | 2 +- 8 files changed, 35 insertions(+), 17 deletions(-) rename aggregate_spatial_binary.json => proposals/aggregate_spatial_binary.json (100%) rename reduce_dimension_binary.json => proposals/reduce_dimension_binary.json (100%) rename run_udf_externally.json => proposals/run_udf_externally.json (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index bc333cc3..478aa3bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `is_infinity` - `nan` +### Changed +- Added `proposals` folder for experimental processes. Experimental processes are not covered by the CHANGELOG! + - Moved the experimental processes `aggregate_spatial_binary`, `reduce_dimension_binary` and `run_udf_externally` to the proposals. + ### Fixed - Clarify how the parameters passed to the overlap resolver correspond to the data cubes. [#184](https://github.com/Open-EO/openeo-processes/issues/184) - Improve and clarify specifications for `is_nan`, `is_nodata`, `is_valid`. [#189](https://github.com/Open-EO/openeo-processes/issues/189) diff --git a/README.md b/README.md index d7390d2d..27563b33 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,9 @@ openEO develops interoperable processes for big Earth observation cloud processi ## Versions / Branches -The [master branch](https://github.com/Open-EO/openeo-processes/tree/master) is the 'stable' version of the openEO processes specification. The latest release is version **1.0.0**. The [draft branch](https://github.com/Open-EO/openeo-processes/tree/draft) is where active development takes place. +The [master branch](https://github.com/Open-EO/openeo-processes/tree/master) is the 'stable' version of the openEO processes specification. 
Exception is the [`proposals`](proposals/) folder, which provides experimental new processes currently under discussion. They may still change, but everyone is encouraged to implement them and give feedback. + +The latest release is version **1.0.0**. The [draft branch](https://github.com/Open-EO/openeo-processes/tree/draft) is where active development takes place. PRs should be made against the draft branch. | Version / Branch | Status | openEO API versions | | ------------------------------------------------------------ | ------------------------- | ------------------- | @@ -25,7 +25,8 @@ See also the [changelog](CHANGELOG.md) for the changes between versions and the This repository contains a set of files formally describing the openEO Processes: -* The `*.json` files provide the process specifications as defined by the openEO API. +* The `*.json` files provide the stable process specifications as defined by openEO. New processes need at least two implementations or consensus from the openEO PSC. +* The `*.json` files in the [`proposals`](proposals/) folder provide proposed new process specifications that are still experimental and subject to change. Ideally, each specification is backed by an implementation. Everyone is encouraged to base their work on the proposals and give feedback so that eventually the processes evolve into stable process specifications. * [subtype-schemas.json](meta/subtype-schemas.json) in the `meta` folder defines common data types (`subtype`s) for JSON Schema used in openEO processes. * The [`examples`](examples/) folder contains some useful examples that the processes link to. All of these are non-binding additions. * The [`tests`](tests/) folder can be used to test the process specification for validity and consistent "style". It also allows to render the processes in a web browser.
diff --git a/aggregate_spatial_binary.json b/proposals/aggregate_spatial_binary.json similarity index 100% rename from aggregate_spatial_binary.json rename to proposals/aggregate_spatial_binary.json diff --git a/reduce_dimension_binary.json b/proposals/reduce_dimension_binary.json similarity index 100% rename from reduce_dimension_binary.json rename to proposals/reduce_dimension_binary.json diff --git a/run_udf_externally.json b/proposals/run_udf_externally.json similarity index 100% rename from run_udf_externally.json rename to proposals/run_udf_externally.json diff --git a/tests/package.json b/tests/package.json index 7c2be337..75df7949 100644 --- a/tests/package.json +++ b/tests/package.json @@ -30,7 +30,7 @@ }, "scripts": { "test": "jest", - "generate": "concat-json-files \"../*.json\" -t \"processes.json\"", + "generate": "concat-json-files \"../{*,proposals/*}.json\" -t \"processes.json\"", "render": "npm run generate && http-server -p 9876 -o docs.html -c-1" } } diff --git a/tests/processes.test.js b/tests/processes.test.js index 40a60cf7..3d81952c 100644 --- a/tests/processes.test.js +++ b/tests/processes.test.js @@ -13,24 +13,30 @@ beforeAll(async () => { jsv = await getAjv(); }); -var processes = []; -const files = glob.sync("../*.json", {realpath: true}); -files.forEach(file => { +var loader = (file, proposal = false) => { try { var fileContent = fs.readFileSync(file); // Check JSON structure for faults var p = JSON.parse(fileContent); // Prepare for tests - processes.push([file, p, fileContent.toString()]); + processes.push([file, p, fileContent.toString(), proposal]); } catch(err) { - processes.push([file, {}, ""]); + processes.push([file, {}, "", proposal]); console.error(err); expect(err).toBeUndefined(); } -}); +}; + +var processes = []; + +const files = glob.sync("../*.json", {realpath: true}); +files.forEach(file => loader(file)); + +const proposals = glob.sync("../proposals/*.json", {realpath: true}); +proposals.forEach(file => loader(file, true)); -describe.each(processes)("%s", (file, p, fileContent) => { +describe.each(processes)("%s", (file, p, fileContent, proposal) => { test("File / JSON", () => { const ext = path.extname(file); @@ -77,7 +83,7 @@ describe.each(processes)("%s", (file, p, fileContent) => { }); test("Flags", () => { - checkFlags(p); + checkFlags(p, proposal); }); test("Parameters", () => { @@ -204,15 +210,20 @@ describe.each(processes)("%s", (file, p, fileContent) => { } }); -function checkFlags(p) { +function checkFlags(p, proposal = false) { // deprecated expect(typeof p.deprecated === 'undefined' || typeof p.deprecated === 'boolean').toBeTruthy(); // lint: don't specify defaults expect(typeof p.deprecated === 'undefined' || p.deprecated === true).toBeTruthy(); - // experimental - expect(typeof p.experimental === 'undefined' || typeof p.experimental === 'boolean').toBeTruthy(); - // lint: don't specify defaults - expect(typeof p.experimental === 'undefined' || p.experimental === true).toBeTruthy(); + if (proposal) { + // experimental must be true for proposals + expect(p.experimental).toBe(true); + } + else { + // experimental must not be false for stable + // lint: don't specify defaults, so false should not be set explicitly + expect(p.experimental).toBeUndefined(); + } } function checkParam(param, p, checkCbParams = true) { diff --git a/tests/testHelpers.js b/tests/testHelpers.js index ef57ca47..385d0449 100644 --- a/tests/testHelpers.js +++ b/tests/testHelpers.js @@ -27,7 +27,7 @@ for(let i in words) { spellcheck.spellcheck.addWord(words[i]); } 
// Add the process IDs to the word list -const files = glob.sync("../*.json", {realpath: true}); +const files = glob.sync("../{*,proposals/*}.json", {realpath: true}); for(let i in files) { spellcheck.spellcheck.addWord(path.basename(files[i], path.extname(files[i]))); } From 638b2683b312cb8cf1567166b5a794dee8d76ce9 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 25 Sep 2020 11:43:16 +0200 Subject: [PATCH 010/109] Update dependencies --- tests/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/package.json b/tests/package.json index 75df7949..a0763162 100644 --- a/tests/package.json +++ b/tests/package.json @@ -19,7 +19,7 @@ }, "devDependencies": { "@apidevtools/json-schema-ref-parser": "^9.0.6", - "@openeo/js-processgraphs": "^1.0.0-beta.4", + "@openeo/js-processgraphs": "^1.0.0-beta.5", "ajv": "^6.12.4", "concat-json-files": "^1.1.0", "glob": "^7.1.6", "http-server": "^0.12.3", "jest": "^26.4.2", "markdown-spellcheck": "^1.3.1", "markdownlint": "^0.18.0" }, From d91097f0c8136f90acb8b250eee46ec53b1b4918 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 26 Oct 2020 16:14:05 +0100 Subject: [PATCH 011/109] Clarify all and any #199 (#200) --- CHANGELOG.md | 1 + all.json | 2 +- any.json | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 478aa3bf..eeb2c79f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed - Clarify how the parameters passed to the overlap resolver correspond to the data cubes. [#184](https://github.com/Open-EO/openeo-processes/issues/184) - Improve and clarify specifications for `is_nan`, `is_nodata`, `is_valid`. [#189](https://github.com/Open-EO/openeo-processes/issues/189) +- Improve and clarify specifications for `all` and `any`. [#199](https://github.com/Open-EO/openeo-processes/issues/199) - Examples `array_contains_nodata` and `array_find_nodata` ## 1.0.0 - 2020-07-31 diff --git a/all.json b/all.json index 7d8b5ebb..b12059de 100644 --- a/all.json +++ b/all.json @@ -1,7 +1,7 @@ { "id": "all", "summary": "Are all of the values true?", - "description": "Checks if **all** of the values in `data` are true. Evaluates all values from the first to the last element and stops once the outcome is unambiguous.\n\nIf only one value is given, the process evaluates to the given value. If no value is given (i.e. the array is empty) the process returns `null`.\n\nBy default all no-data values are ignored so that the process returns `null` if all values are no-data, `true` if all other values are true and `false` otherwise. Setting the `ignore_nodata` flag to `false` considers no-data values so that `null` is a valid logical object. If a component is `null`, the result will be `null` if the outcome is ambiguous. See the following truth table:\n\n```\n || null | false | true\n----- || ----- | ----- | -----\nnull || null | false | null\nfalse || false | false | false\ntrue || null | false | true\n```", + "description": "Checks if **all** of the values in `data` are true. If no value is given (i.e. the array is empty) the process returns `null`.\n\nBy default all no-data values are ignored so that the process returns `null` if all values are no-data, `true` if all values are true and `false` otherwise.
Setting the `ignore_nodata` flag to `false` takes no-data values into account and the array values are reduced pairwise according to the following truth table:\n\n```\n || null | false | true\n----- || ----- | ----- | -----\nnull || null | false | null\nfalse || false | false | false\ntrue || null | false | true\n```\n\n**Remark:** The process evaluates all values from the first to the last element and stops once the outcome is unambiguous. A result is ambiguous unless a value is `false` or all values have been taken into account.", "categories": [ "logic", "reducer" diff --git a/any.json b/any.json index b99453b9..545c1669 100644 --- a/any.json +++ b/any.json @@ -1,7 +1,7 @@ { "id": "any", "summary": "Is at least one value true?", - "description": "Checks if **any** (i.e. at least one) value in `data` is `true`. Evaluates all values from the first to the last element and stops once the outcome is unambiguous.\n\nIf only one value is given, the process evaluates to the given value. If no value is given (i.e. the array is empty) the process returns `null`.\n\nBy default all no-data values are ignored so that the process returns `null` if all values are no-data, `true` if at least one of the other values is true and `false` otherwise. Setting the `ignore_nodata` flag to `false` considers no-data values so that `null` is a valid logical object. If a component is `null`, the result will be `null` if the outcome is ambiguous. See the following truth table:\n\n```\n || null | false | true\n----- || ---- | ----- | ----\nnull || null | null | true\nfalse || null | false | true\ntrue || true | true | true\n```", + "description": "Checks if **any** (i.e. at least one) value in `data` is `true`. If no value is given (i.e. the array is empty) the process returns `null`.\n\nBy default all no-data values are ignored so that the process returns `null` if all values are no-data, `true` if at least one value is true and `false` otherwise. Setting the `ignore_nodata` flag to `false` takes no-data values into account and the array values are reduced pairwise according to the following truth table:\n\n```\n || null | false | true\n----- || ---- | ----- | ----\nnull || null | null | true\nfalse || null | false | true\ntrue || true | true | true\n```\n\n**Remark:** The process evaluates all values from the first to the last element and stops once the outcome is unambiguous. A result is ambiguous unless a value is `true`.", "categories": [ "logic", "reducer" From 52b94fb1bcfc308aa2f69b4b68cbe893e34a4aba Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 28 Oct 2020 16:14:14 +0100 Subject: [PATCH 012/109] Clarify contradicting statements in filter_temporal (#206) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Clarify contradicting statements in filter_temporal for the default value of the dimension parameter. By default all temporal dimensions are affected by the process. #203 --- CHANGELOG.md | 1 + filter_temporal.json | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eeb2c79f..bf91b2bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Moved the experimental processes `aggregate_spatial_binary`, `reduce_dimension_binary` and `run_udf_externally` to the proposals. ### Fixed +- Clarify contradicting statements in `filter_temporal` for the default value of the `dimension` parameter. 
By default *all* temporal dimensions are affected by the process. [#203](https://github.com/Open-EO/openeo-processes/issues/203) - Clarify how the parameters passed to the overlap resolver correspond to the data cubes. [#184](https://github.com/Open-EO/openeo-processes/issues/184) - Improve and clarify specifications for `is_nan`, `is_nodata`, `is_valid`. [#189](https://github.com/Open-EO/openeo-processes/issues/189) - Improve and clarify specifications for `all` and `any`. [#199](https://github.com/Open-EO/openeo-processes/issues/199) - Examples `array_contains_nodata` and `array_find_nodata` ## 1.0.0 - 2020-07-31 diff --git a/filter_temporal.json b/filter_temporal.json index dbb3f201..5e42cb0f 100644 --- a/filter_temporal.json +++ b/filter_temporal.json @@ -1,7 +1,7 @@ { "id": "filter_temporal", "summary": "Temporal filter for temporal intervals", - "description": "Limits the data cube to the specified interval of dates and/or times.\n\nMore precisely, the filter checks whether the temporal dimension label is greater than or equal to the lower boundary (start date/time) and the temporal dimension label is less than the value of the upper boundary (end date/time). This corresponds to a left-closed interval, which contains the lower boundary but not the upper boundary.\n\nIf the dimension is set to `null` (it's the default value), the data cube is expected to only have one temporal dimension.", + "description": "Limits the data cube to the specified interval of dates and/or times.\n\nMore precisely, the filter checks whether each of the temporal dimension labels is greater than or equal to the lower boundary (start date/time) and less than the value of the upper boundary (end date/time). This corresponds to a left-closed interval, which contains the lower boundary but not the upper boundary.", "categories": [ "cubes", "filter" @@ -61,7 +61,7 @@ }, { "name": "dimension", - "description": "The name of the temporal dimension to filter on. If the dimension is not set or is set to `null`, the filter applies to all temporal dimensions. Fails with a `DimensionNotAvailable` error if the specified dimension does not exist.", + "description": "The name of the temporal dimension to filter on. If no specific dimension is specified or it is set to `null`, the filter applies to all temporal dimensions. Fails with a `DimensionNotAvailable` error if the specified dimension does not exist.", "schema": { "type": [ "string", "null" ], "default": null } } ], "returns": { - "description": "A data cube restricted to the specified temporal extent. The dimensions and dimension properties (name, type, labels, reference system and resolution) remain unchanged, except that the given temporal dimension(s) have less (or the same) dimension labels.", + "description": "A data cube restricted to the specified temporal extent. The dimensions and dimension properties (name, type, labels, reference system and resolution) remain unchanged, except that the temporal dimensions (determined by `dimensions` parameter) may have less dimension labels.", "schema": { "type": "object", "subtype": "raster-cube" } }, "exceptions": { "DimensionNotAvailable": { "message": "A dimension with the specified name does not exist."
} } -} \ No newline at end of file +} From 37f1ac8a2050e31b5b87ed34aac217a2c514de06 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 6 Oct 2020 15:56:35 +0200 Subject: [PATCH 013/109] Update dependencies --- tests/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/package.json b/tests/package.json index a0763162..ad1ff9c9 100644 --- a/tests/package.json +++ b/tests/package.json @@ -19,7 +19,7 @@ }, "devDependencies": { "@apidevtools/json-schema-ref-parser": "^9.0.6", - "@openeo/js-processgraphs": "^1.0.0-beta.5", + "@openeo/js-processgraphs": "^1.0.0-beta.6", "ajv": "^6.12.4", "concat-json-files": "^1.1.0", "glob": "^7.1.6", From 88c2a92a362aab06ba0580deb71e3bcc8e87b1d5 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 2 Nov 2020 15:38:25 +0100 Subject: [PATCH 014/109] Move process to proposals --- .../aggregate_spatial_window.json | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename aggregate_spatial_window.json => proposals/aggregate_spatial_window.json (100%) diff --git a/aggregate_spatial_window.json b/proposals/aggregate_spatial_window.json similarity index 100% rename from aggregate_spatial_window.json rename to proposals/aggregate_spatial_window.json From 1233d070e9150e64fdeb9a13b374638d57c433b3 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 2 Nov 2020 16:07:05 +0100 Subject: [PATCH 015/109] Add `align` parameter --- proposals/aggregate_spatial_window.json | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/proposals/aggregate_spatial_window.json b/proposals/aggregate_spatial_window.json index 73d081bf..98c68fba 100644 --- a/proposals/aggregate_spatial_window.json +++ b/proposals/aggregate_spatial_window.json @@ -1,7 +1,7 @@ { "id": "aggregate_spatial_window", "summary": "Zonal statistics for rectangular windows", - "description": "Aggregates statistics over the horizontal spatial dimensions (axes `x` and `y`) of the data cube.\n\nThe pixel grid for the axes `x` and `y` is divided into non-overlapping windows with the size specified in the parameter `size`. If the number of values for the axes `x` and `y` is not a multiple of the corresponding window size, the behaviour specified in `boundary` parameter is applied.\nFor each of these windows, the reducer process computes the result.", + "description": "Aggregates statistics over the horizontal spatial dimensions (axes `x` and `y`) of the data cube.\n\nThe pixel grid for the axes `x` and `y` is divided into non-overlapping windows with the size specified in the parameter `size`. If the number of values for the axes `x` and `y` is not a multiple of the corresponding window size, the behaviour specified in the parameters `boundary` and `align` is applied.\nFor each of these windows, the reducer process computes the result.", "categories": [ "cubes", "aggregate & resample" @@ -60,7 +60,7 @@ }, { "name": "boundary", - "description": "Behaviour to apply if the number of values for the axes `x` and `y` is not a multiple of the corresponding value in the `size` parameter. Options are:\n\n- `pad` (default): pad the data cube with the no-data value `null` to fit the required window size.\n\n- `trim`: trim the data cube to fit the required window size.", + "description": "Behaviour to apply if the number of values for the axes `x` and `y` is not a multiple of the corresponding value in the `size` parameter. 
Options are:\n\n- `pad` (default): pad the data cube with the no-data value `null` to fit the required window size.\n\n- `trim`: trim the data cube to fit the required window size.\n\nSet the parameter `align` to specify to which corner the data is aligned to.", "schema": { "type": "string", "enum": [ "pad", "trim" ] }, "optional": true, "default": "pad" }, + { + "name": "align", + "description": "If the data requires padding or trimming (see parameter `boundary`), specifies to which corner of the spatial extent the data is aligned to. For example, if the data is aligned to the upper left, the process pads/trims at the lower-right.", + "schema": { + "type": "string", + "enum": [ + "lower-left", + "upper-left", + "lower-right", + "upper-right" + ] + }, + "default": "upper-left", + "optional": true + }, { "name": "context", "description": "Additional data to be passed to the reducer.", "schema": { "description": "Any data type." }, "optional": true, "default": null } ], From 08c600495452d32a2c34265304fc51066c98309a Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 2 Nov 2020 16:36:51 +0100 Subject: [PATCH 016/109] Clarify data cube constraints. --- proposals/aggregate_spatial_window.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proposals/aggregate_spatial_window.json b/proposals/aggregate_spatial_window.json index 98c68fba..df4bd4e6 100644 --- a/proposals/aggregate_spatial_window.json +++ b/proposals/aggregate_spatial_window.json @@ -10,7 +10,7 @@ "parameters": [ { "name": "data", - "description": "A raster data cube.", + "description": "A raster data cube with exactly two horizontal spatial dimensions and an arbitrary number of additional dimensions. The process is applied to all additional dimensions individually.", "schema": { "type": "object", "subtype": "raster-cube" } }, From 69d8552a1c36b7127a9e63557d4997b7e3334562 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 21 Dec 2020 19:54:54 +0100 Subject: [PATCH 017/109] Adds return value schemas for child processes https://github.com/Open-EO/openeo-api/issues/350 --- CHANGELOG.md | 2 ++ aggregate_spatial.json | 8 +++++++- aggregate_temporal.json | 8 +++++++- aggregate_temporal_period.json | 8 +++++++- apply.json | 8 +++++++- apply_dimension.json | 8 +++++++- apply_neighborhood.json | 12 +++++++++++- array_apply.json | 8 +++++++- array_filter.json | 8 +++++++- count.json | 8 +++++++- filter_labels.json | 8 +++++++- load_collection.json | 8 +++++++- merge_cubes.json | 8 +++++++- meta/subtype-schemas.json | 8 +++++++- proposals/aggregate_spatial_binary.json | 8 +++++++- proposals/aggregate_spatial_window.json | 8 +++++++- proposals/reduce_dimension_binary.json | 8 +++++++- reduce_dimension.json | 8 +++++++- resample_cube_temporal.json | 8 +++++++- 19 files changed, 132 insertions(+), 18 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bf91b2bf..9560ee5d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Processes: - `is_infinity` - `nan` +- Added return value details (property `returns`) for the schemas with the subtype `process-graph`. [API#350](https://github.com/Open-EO/openeo-api/issues/350) +- `apply_neighborhood`: Clarify behavior for data cubes returned by the child processes and for that add the exception `DataCubePropertiesImmutable`. ### Changed - Added `proposals` folder for experimental processes. Experimental processes are not covered by the CHANGELOG!
diff --git a/aggregate_spatial.json b/aggregate_spatial.json index 1cd4533b..560d9cdf 100644 --- a/aggregate_spatial.json +++ b/aggregate_spatial.json @@ -49,7 +49,13 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return the value that should be stored in the data cube.", + "schema": { + "description": "Any data type." + } + } } }, { diff --git a/aggregate_temporal.json b/aggregate_temporal.json index 51cefbca..aead006a 100644 --- a/aggregate_temporal.json +++ b/aggregate_temporal.json @@ -112,7 +112,13 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return the value that should be stored in the data cube.", + "schema": { + "description": "Any data type." + } + } } }, { diff --git a/aggregate_temporal_period.json b/aggregate_temporal_period.json index b5b13a56..205c7117 100644 --- a/aggregate_temporal_period.json +++ b/aggregate_temporal_period.json @@ -62,7 +62,13 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return the value that should be stored in the data cube.", + "schema": { + "description": "Any data type." + } + } } }, { diff --git a/apply.json b/apply.json index 430c960f..61c1ec5a 100644 --- a/apply.json +++ b/apply.json @@ -37,7 +37,13 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return the value that should be stored in the data cube.", + "schema": { + "description": "Any data type." + } + } } }, { diff --git a/apply_dimension.json b/apply_dimension.json index 873f098f..381c4492 100644 --- a/apply_dimension.json +++ b/apply_dimension.json @@ -41,7 +41,13 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return the value that should be stored in the data cube.", + "schema": { + "description": "Any data type." + } + } } }, { diff --git a/apply_neighborhood.json b/apply_neighborhood.json index 28b27366..5c6d59c9 100644 --- a/apply_neighborhood.json +++ b/apply_neighborhood.json @@ -38,7 +38,14 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return the data cube with the newly computed values and the same dimensions. The dimension properties (name, type, labels, reference system and resolution) must remain unchanged, otherwise a `DataCubePropertiesImmutable` exception will be thrown.", + "schema": { + "type": "object", + "subtype": "raster-cube" + } + } } }, { @@ -214,6 +221,9 @@ "exceptions": { "DimensionNotAvailable": { "message": "A dimension with the specified name does not exist." + }, + "DataCubePropertiesImmutable": { + "message": "The dimension properties (name, type, labels, reference system and resolution) must remain unchanged." } } } \ No newline at end of file diff --git a/array_apply.json b/array_apply.json index 22046b3a..d5732574 100644 --- a/array_apply.json +++ b/array_apply.json @@ -63,7 +63,13 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return the value that should be stored in the data cube.", + "schema": { + "description": "Any data type." 
+ } + } } }, { diff --git a/array_filter.json b/array_filter.json index e9bc5d14..cdf1d86f 100644 --- a/array_filter.json +++ b/array_filter.json @@ -64,7 +64,13 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return `true` if the value should be kept in the array, otherwise return `false`.", + "schema": { + "type": "boolean" + } + } } }, { diff --git a/count.json b/count.json index 0a54a927..32707910 100644 --- a/count.json +++ b/count.json @@ -43,7 +43,13 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return `true` if the element should increase the counter, otherwise return `false`.", + "schema": { + "type": "boolean" + } + } }, { "title": "All elements", diff --git a/filter_labels.json b/filter_labels.json index 54aefcbd..8b7b61c2 100644 --- a/filter_labels.json +++ b/filter_labels.json @@ -43,7 +43,13 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return `true` if the dimension label should be kept in the data cube, otherwise return `false`.", + "schema": { + "type": "boolean" + } + } } }, { diff --git a/load_collection.json b/load_collection.json index b76ca004..d69c85ac 100644 --- a/load_collection.json +++ b/load_collection.json @@ -194,7 +194,13 @@ "description": "Any data type." } } - ] + ], + "returns": { + "description": "Return `true` if the data should be loaded into the data cube, otherwise return `false`.", + "schema": { + "type": "boolean" + } + } } }, { diff --git a/merge_cubes.json b/merge_cubes.json index 7ec9473e..752a4859 100644 --- a/merge_cubes.json +++ b/merge_cubes.json @@ -52,7 +52,13 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return the value that should be stored in the data cube.", + "schema": { + "description": "Any data type." + } + } }, "default": null, "optional": true diff --git a/meta/subtype-schemas.json b/meta/subtype-schemas.json index c1e8841a..cc4b7b82 100644 --- a/meta/subtype-schemas.json +++ b/meta/subtype-schemas.json @@ -222,7 +222,13 @@ "description": "Any data type." } } - ] + ], + "returns": { + "description": "Return `true` if the data should be used, otherwise return `false`.", + "schema": { + "type": "boolean" + } + } } }, "output-format": { diff --git a/proposals/aggregate_spatial_binary.json b/proposals/aggregate_spatial_binary.json index 04fec738..f37eae36 100644 --- a/proposals/aggregate_spatial_binary.json +++ b/proposals/aggregate_spatial_binary.json @@ -54,7 +54,13 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return the value that should be stored in the data cube.", + "schema": { + "description": "Any data type." + } + } } }, { diff --git a/proposals/aggregate_spatial_window.json b/proposals/aggregate_spatial_window.json index df4bd4e6..6af11b8f 100644 --- a/proposals/aggregate_spatial_window.json +++ b/proposals/aggregate_spatial_window.json @@ -42,7 +42,13 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return the value that should be stored in the data cube.", + "schema": { + "description": "Any data type." 
+ } + } } }, { diff --git a/proposals/reduce_dimension_binary.json b/proposals/reduce_dimension_binary.json index 31b5c583..58659613 100644 --- a/proposals/reduce_dimension_binary.json +++ b/proposals/reduce_dimension_binary.json @@ -46,7 +46,13 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return the value that should be stored in the data cube.", + "schema": { + "description": "Any data type." + } + } } }, { diff --git a/reduce_dimension.json b/reduce_dimension.json index f8e79e81..3f6bb79e 100644 --- a/reduce_dimension.json +++ b/reduce_dimension.json @@ -42,7 +42,13 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return the value that should be stored in the data cube.", + "schema": { + "description": "Any data type." + } + } } }, { diff --git a/resample_cube_temporal.json b/resample_cube_temporal.json index d31e31c9..208539b1 100644 --- a/resample_cube_temporal.json +++ b/resample_cube_temporal.json @@ -50,7 +50,13 @@ "optional": true, "default": null } - ] + ], + "returns": { + "description": "Return the value that should be stored in the data cube.", + "schema": { + "description": "Any data type." + } + } } }, { From e6db2db47c150167c9a2d631428209d5d5e1cb3c Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 22 Dec 2020 13:19:40 +0100 Subject: [PATCH 018/109] Proposals section changes, moved nan and is_infinite to proposals, update changelog, other minor changes --- CHANGELOG.md | 4 ++-- README.md | 4 ++-- is_infinite.json => proposals/is_infinite.json | 1 + nan.json => proposals/nan.json | 3 ++- 4 files changed, 7 insertions(+), 5 deletions(-) rename is_infinite.json => proposals/is_infinite.json (97%) rename nan.json => proposals/nan.json (84%) diff --git a/CHANGELOG.md b/CHANGELOG.md index bf91b2bf..eaabe103 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,8 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased / Draft ### Added -- Processes: - - `is_infinity` +- New processes in proposal state + - `is_infinite` - `nan` ### Changed diff --git a/README.md b/README.md index 27563b33..4fcece55 100644 --- a/README.md +++ b/README.md @@ -27,8 +27,8 @@ See also the [changelog](CHANGELOG.md) for the changes between versions and the This repository contains a set of files formally describing the openEO Processes: -* The `*.json` files provide the stable process specifications as defined by openEO. New processes need at least two implementations or consensus from the openEO PSC. -* The `*.json` files in the [`proposals`](proposals/) folder provide proposed new process specifications that are still experimental and subject to change. Ideally, each specification is backed by an implementation. Everyone is encouraged to base their work on the proposals and give feedback so that eventually the processes evolve into stable process specifications. +* The `*.json` files provide the stable process specifications as defined by openEO. Stable processes need at least two implementations and a use-case example added to the [`examples`](examples/) folder *or* consensus from the openEO PSC. +* The `*.json` files in the [`proposals`](proposals/) folder provide proposed new process specifications that are still experimental and subject to change, including breaking changes. Everyone is encouraged to base their work on the proposals and give feedback so that eventually the processes evolve into stable process specifications. 
* [subtype-schemas.json](meta/subtype-schemas.json) in the `meta` folder defines common data types (`subtype`s) for JSON Schema used in openEO processes. * The [`examples`](examples/) folder contains some useful examples that the processes link to. All of these are non-binding additions. * The [`tests`](tests/) folder can be used to test the process specification for validity and consistent "style". It also allows to render the processes in a web browser. diff --git a/is_infinite.json b/proposals/is_infinite.json similarity index 97% rename from is_infinite.json rename to proposals/is_infinite.json index 9de215b0..b6a5acca 100644 --- a/is_infinite.json +++ b/proposals/is_infinite.json @@ -5,6 +5,7 @@ "categories": [ "comparison" ], + "experimental": true, "parameters": [ { "name": "x", diff --git a/nan.json b/proposals/nan.json similarity index 84% rename from nan.json rename to proposals/nan.json index 0ed781af..6a3cc01c 100644 --- a/nan.json +++ b/proposals/nan.json @@ -5,11 +5,12 @@ "categories": [ "math > constants" ], + "experimental": true, "parameters": [], "returns": { "description": "Returns NaN.", "schema": { - "description": "Returns NaN.\n\nJSON Schema can't represent NaN, thus a schema can't be specified." + "description": "Returns NaN.\n\n*JSON Schema can't represent NaN, thus a schema can't be specified.*" } }, "links": [ From 9de388b16988ec330b3fe17148027ebec4eb5ea4 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 22 Dec 2020 16:28:45 +0100 Subject: [PATCH 019/109] Clarify array_filter condition scope --- CHANGELOG.md | 1 + array_filter.json | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eaabe103..ea0cbdd3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Moved the experimental processes `aggregate_spatial_binary`, `reduce_dimension_binary` and `run_udf_externally` to the proposals. ### Fixed +- Clarify that the `condition` parameter for `array_filter` works also on indices and labels. - Clarify contradicting statements in `filter_temporal` for the default value of the `dimension` parameter. By default *all* temporal dimensions are affected by the process. [#203](https://github.com/Open-EO/openeo-processes/issues/203) - Clarify how the parameters passed to the overlap resolver correspond to the data cubes. [#184](https://github.com/Open-EO/openeo-processes/issues/184) - Improve and clarify specifications for `is_nan`, `is_nodata`, `is_valid`. [#189](https://github.com/Open-EO/openeo-processes/issues/189) - Improve and clarify specifications for `all` and `any`. [#189](https://github.com/Open-EO/openeo-processes/issues/199) diff --git a/array_filter.json b/array_filter.json index e9bc5d14..b99669ae 100644 --- a/array_filter.json +++ b/array_filter.json @@ -1,7 +1,7 @@ { "id": "array_filter", "summary": "Filter an array based on a condition", - "description": "Filters the array elements based on a logical expression so that afterwards an array is returned that only contains the values conforming to the condition.", + "description": "Filters the array elements based on a logical expression so that afterwards an array is returned that only contains the values, indices and/or labels conforming to the condition.", "categories": [ "arrays", "filter" @@ -19,7 +19,7 @@ }, { "name": "condition", - "description": "A condition that is evaluated against each value in the array.
Only the array elements where the condition returns `true` are preserved.", + "description": "A condition that is evaluated against each value, index and/or label in the array. Only the array elements for which the condition returns `true` are preserved.", "schema": { "type": "object", "subtype": "process-graph", From bb78fa1c17c6c75cc39627b8a0d7bd8df1b7dfec Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 22 Dec 2020 16:50:12 +0100 Subject: [PATCH 020/109] Fixed tenses in summaries --- anomaly.json | 2 +- array_apply.json | 2 +- climatological_normal.json | 2 +- merge_cubes.json | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/anomaly.json b/anomaly.json index 06e9d9da..e0e1a3a0 100644 --- a/anomaly.json +++ b/anomaly.json @@ -1,6 +1,6 @@ { "id": "anomaly", - "summary": "Computes anomalies", + "summary": "Compute anomalies", "description": "Computes anomalies based on normals for temporal periods. It compares the data for each label in the temporal dimension with the corresponding data in the normals data cube by subtracting the normal from the data.", "categories": [ "climatology", diff --git a/array_apply.json b/array_apply.json index 22046b3a..0d5d55f8 100644 --- a/array_apply.json +++ b/array_apply.json @@ -1,6 +1,6 @@ { "id": "array_apply", - "summary": "Applies a unary process to each array element", + "summary": "Apply a unary process to each array element", "description": "Applies a **unary** process which takes a single value such as `abs` or `sqrt` to each value in the array. This is basically what other languages call either a `for each` loop or a `map` function.", "categories": [ "arrays" diff --git a/climatological_normal.json b/climatological_normal.json index a2ac7ae9..57ce30db 100644 --- a/climatological_normal.json +++ b/climatological_normal.json @@ -1,6 +1,6 @@ { "id": "climatological_normal", - "summary": "Computes climatology normals", + "summary": "Compute climatology normals", "description": "Climatological normal period is a usually 30 year average of a weather variable. Climatological normals are used as an average or baseline to evaluate climate events and provide context for yearly, monthly, daily or seasonal variability. The default climatology period is from 1981 until 2010 (both inclusive).", "categories": [ "climatology" diff --git a/merge_cubes.json b/merge_cubes.json index 7ec9473e..36d802c7 100644 --- a/merge_cubes.json +++ b/merge_cubes.json @@ -1,6 +1,6 @@ { "id": "merge_cubes", - "summary": "Merging two data cubes", + "summary": "Merge two data cubes", "description": "The data cubes have to be compatible. A merge operation without overlap should be reversible with (a set of) filter operations for each of the two cubes. The process performs the join on overlapping dimensions, with the same name and type.\n\nAn overlapping dimension has the same name, type, reference system and resolution in both dimensions, but can have different labels. One of the dimensions can have different labels, for all other dimensions the labels must be equal. If data overlaps, the parameter `overlap_resolver` must be specified to resolve the overlap.\n\n**Examples for merging two data cubes:**\n\n1. Data cubes with the dimensions `x`, `y`, `t` and `bands` have the same dimension labels in `x`,`y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first cube and `B3` and `B4`. An overlap resolver is *not needed*. 
The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has four dimension labels: `B1`, `B2`, `B3`, `B4`.\n2. Data cubes with the dimensions `x`, `y`, `t` and `bands` have the same dimension labels in `x`,`y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first data cube and `B2` and `B3` for the second. An overlap resolver is *required* to resolve overlap in band `B2`. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has three dimension labels: `B1`, `B2`, `B3`.\n3. Data cubes with the dimensions `x`, `y` and `t` have the same dimension labels in `x`,`y` and `t`. There are two options:\n 1. Keep the overlapping values separately in the merged data cube: An overlap resolver is *not needed*, but for each data cube you need to add a new dimension using ``add_dimension()``. The new dimensions must be equal, except that the labels for the new dimensions must differ by name. The merged data cube has the same dimensions and labels as the original data cubes, plus the dimension added with ``add_dimension()``, which has the two dimension labels after the merge.\n 2. Combine the overlapping values into a single value: An overlap resolver is *required* to resolve the overlap for all pixels. The merged data cube has the same dimensions and labels as the original data cubes, but all pixel values have been processed by the overlap resolver.\n4. Merging a data cube with dimensions `x`, `y`, `t` with another cube with dimensions `x`, `y` will join on the `x`, `y` dimension, so the lower dimension cube is merged with each time step in the higher dimensional cube. This can for instance be used to apply a digital elevation model to a spatiotemporal data cube.", "categories": [ "cubes" From e67754b9f8c7a303fbf4a0dc84e2786faa88459e Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 22 Dec 2020 17:26:17 +0100 Subject: [PATCH 021/109] Added the `minimum: 0` constraint to all schemas describing zero-based indices (parameter `index`). --- CHANGELOG.md | 1 + array_apply.json | 3 ++- array_element.json | 3 ++- array_filter.json | 3 ++- 4 files changed, 7 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ea0cbdd3..16e3b746 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Clarify how the parameters passed to the overlap resolver correspond to the data cubes. [#184](https://github.com/Open-EO/openeo-processes/issues/184) - Improve and clarify specifications for `is_nan`, `is_nodata`, `is_valid`. [#189](https://github.com/Open-EO/openeo-processes/issues/189) - Improve and clarify specifications for `all` and `any`. [#189](https://github.com/Open-EO/openeo-processes/issues/199) +- `array_apply`, `array_element`, `array_filter`: Added the `minimum: 0` constraint to all schemas describing zero-based indices (parameter `index`). 
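The overlap-resolver contract spelled out in the `merge_cubes` description above is easier to see in code than in prose. The following is a minimal, hedged Python sketch of the label-join behaviour — not the normative algorithm; `merge_labeled` and its dict-based stand-in for one labeled dimension are invented for illustration. As the parameter descriptions in this series state, `x` is the overlapping value from `cube1` and `y` the one from `cube2`.

```python
def merge_labeled(cube1, cube2, overlap_resolver=None):
    """Toy model of ``merge_cubes`` joining on one labeled dimension.

    ``cube1``/``cube2`` map dimension labels (e.g. band names) to values.
    The resolver is only consulted for labels present in both cubes.
    """
    merged = dict(cube1)
    for label, y in cube2.items():
        if label not in merged:
            merged[label] = y  # disjoint labels are simply appended
        elif overlap_resolver is None:
            # The spec treats overlapping data without a resolver as an error.
            raise ValueError("Overlapping data cannot be resolved without an overlap resolver.")
        else:
            # x comes from cube1, y from cube2.
            merged[label] = overlap_resolver(merged[label], y)
    return merged

# Example 2 from the description above: bands B1/B2 merged with B2/B3,
# resolving the overlap in B2 by taking the maximum.
print(merge_labeled({"B1": 1, "B2": 2}, {"B2": 5, "B3": 3}, max))
# {'B1': 1, 'B2': 5, 'B3': 3}
```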
- Examples `array_contains_nodata` and `array_find_nodata` ## 1.0.0 - 2020-07-31 diff --git a/array_apply.json b/array_apply.json index 0d5d55f8..b13d17d3 100644 --- a/array_apply.json +++ b/array_apply.json @@ -34,7 +34,8 @@ "name": "index", "description": "The zero-based index of the current element being processed.", "schema": { - "type": "integer" + "type": "integer", + "minimum": 0 } }, { diff --git a/array_element.json b/array_element.json index 389030bf..a3ad9e26 100644 --- a/array_element.json +++ b/array_element.json @@ -21,7 +21,8 @@ "name": "index", "description": "The zero-based index of the element to retrieve.", "schema": { - "type": "integer" + "type": "integer", + "minimum": 0 }, "optional": true }, diff --git a/array_filter.json b/array_filter.json index b99669ae..e024f4c8 100644 --- a/array_filter.json +++ b/array_filter.json @@ -35,7 +35,8 @@ "name": "index", "description": "The zero-based index of the current element being processed.", "schema": { - "type": "integer" + "type": "integer", + "minimum": 0 } }, { From f3625d70ec04545a16fa3fccec398772bca6d7b3 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 22 Dec 2020 17:28:15 +0100 Subject: [PATCH 022/109] Add array_create and array_merge to proposals --- CHANGELOG.md | 2 + proposals/array_create.json | 65 ++++++++++++++++++++++++++++++++ proposals/array_merge.json | 75 +++++++++++++++++++++++++++++++++++++ 3 files changed, 142 insertions(+) create mode 100644 proposals/array_create.json create mode 100644 proposals/array_merge.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 16e3b746..aa65e377 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - New processes in proposal state + - `array_create` + - `array_merge` - `is_infinite` - `nan` diff --git a/proposals/array_create.json b/proposals/array_create.json new file mode 100644 index 00000000..f0bc5016 --- /dev/null +++ b/proposals/array_create.json @@ -0,0 +1,65 @@ +{ + "id": "array_create", + "summary": "Create an array", + "description": "Creates a new array, which by default is empty.\n\nBy providing the parameter `length`, the array can be pre-filled with the given number of elements. By default each element is set to `null` unless another value is specified through the parameter `value`.", + "categories": [ + "arrays" + ], + "experimental": true, + "parameters": [ + { + "name": "length", + "description": "The number of elements to fill the array with. Default to `0`.", + "optional": true, + "default": 0, + "schema": { + "type": "integer", + "minimum": 0 + } + }, + { + "name": "value", + "description": "The value to fill the array with in case `length` is greater than 0. Defaults to `null` (no data).", + "optional": true, + "default": null, + "schema": { + "description": "Any data type is allowed." + } + } + ], + "returns": { + "description": "The newly created array.", + "schema": { + "type": "array", + "items": { + "description": "Any data type is allowed." 
+ } + } + }, + "examples": [ + { + "arguments": {}, + "returns": [] + }, + { + "arguments": { + "length": 3 + }, + "returns": [ + null, + null, + null + ] + }, + { + "arguments": { + "length": 2, + "value": 1 + }, + "returns": [ + 1, + 1 + ] + } + ] +} \ No newline at end of file diff --git a/proposals/array_merge.json b/proposals/array_merge.json new file mode 100644 index 00000000..f407991d --- /dev/null +++ b/proposals/array_merge.json @@ -0,0 +1,75 @@ +{ + "id": "array_merge", + "summary": "Merge two arrays", + "description": "Merges two arrays into a single array by appending the second array to the first array. Array labels get discarded.", + "categories": [ + "arrays" + ], + "experimental": true, + "parameters": [ + { + "name": "array1", + "description": "The first array.", + "schema": { + "type": "array", + "items": { + "description": "Any data type is allowed." + } + } + }, + { + "name": "array2", + "description": "The second array.", + "schema": { + "type": "array", + "items": { + "description": "Any data type is allowed." + } + } + } + ], + "returns": { + "description": "The merged array.", + "schema": { + "type": "array", + "items": { + "description": "Any data type is allowed." + } + } + }, + "examples": [ + { + "description": "Merges two arrays containing different data type.", + "arguments": { + "array1": [ + "a", + "b" + ], + "array2": [ + 1, + 2 + ] + }, + "returns": [ + "a", + "b", + 1, + 2 + ] + }, + { + "description": "Duplicates an array by merging it with an empty array. Labels will be removed so this could also be used to just remove array labels.", + "arguments": { + "array1": [ + null, + 1.23 + ], + "array2": [] + }, + "returns": [ + null, + 1.23 + ] + } + ] +} \ No newline at end of file From 7e0e949a52d4e61c7a1f03513e3edc60eaaa9b11 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 4 Jan 2021 15:24:31 +0100 Subject: [PATCH 023/109] Align return value description terminology --- aggregate_spatial.json | 2 +- aggregate_temporal.json | 2 +- aggregate_temporal_period.json | 2 +- apply.json | 2 +- apply_dimension.json | 2 +- apply_neighborhood.json | 2 +- array_apply.json | 2 +- array_contains.json | 2 +- array_element.json | 2 +- array_filter.json | 2 +- array_find.json | 2 +- array_labels.json | 2 +- count.json | 2 +- debug.json | 2 +- dimension_labels.json | 2 +- eq.json | 2 +- examples/array_contains_nodata.json | 2 +- examples/array_find_nodata.json | 2 +- filter_labels.json | 2 +- load_collection.json | 2 +- merge_cubes.json | 2 +- meta/subtype-schemas.json | 2 +- neq.json | 2 +- proposals/aggregate_spatial_binary.json | 2 +- proposals/aggregate_spatial_window.json | 2 +- proposals/reduce_dimension_binary.json | 2 +- proposals/run_udf_externally.json | 2 +- reduce_dimension.json | 2 +- resample_cube_temporal.json | 2 +- run_udf.json | 2 +- text_merge.json | 2 +- 31 files changed, 31 insertions(+), 31 deletions(-) diff --git a/aggregate_spatial.json b/aggregate_spatial.json index 560d9cdf..df0d1654 100644 --- a/aggregate_spatial.json +++ b/aggregate_spatial.json @@ -51,7 +51,7 @@ } ], "returns": { - "description": "Return the value that should be stored in the data cube.", + "description": "The value to be stored in the data cube.", "schema": { "description": "Any data type." 
} diff --git a/aggregate_temporal.json b/aggregate_temporal.json index aead006a..4f333238 100644 --- a/aggregate_temporal.json +++ b/aggregate_temporal.json @@ -114,7 +114,7 @@ } ], "returns": { - "description": "Return the value that should be stored in the data cube.", + "description": "The value to be stored in the data cube.", "schema": { "description": "Any data type." } diff --git a/aggregate_temporal_period.json b/aggregate_temporal_period.json index 205c7117..03454b5d 100644 --- a/aggregate_temporal_period.json +++ b/aggregate_temporal_period.json @@ -64,7 +64,7 @@ } ], "returns": { - "description": "Return the value that should be stored in the data cube.", + "description": "The value to be stored in the data cube.", "schema": { "description": "Any data type." } diff --git a/apply.json b/apply.json index 61c1ec5a..878d6d6a 100644 --- a/apply.json +++ b/apply.json @@ -39,7 +39,7 @@ } ], "returns": { - "description": "Return the value that should be stored in the data cube.", + "description": "The value to be stored in the data cube.", "schema": { "description": "Any data type." } diff --git a/apply_dimension.json b/apply_dimension.json index 381c4492..512924ba 100644 --- a/apply_dimension.json +++ b/apply_dimension.json @@ -43,7 +43,7 @@ } ], "returns": { - "description": "Return the value that should be stored in the data cube.", + "description": "The value to be stored in the data cube.", "schema": { "description": "Any data type." } diff --git a/apply_neighborhood.json b/apply_neighborhood.json index 5c6d59c9..624002b6 100644 --- a/apply_neighborhood.json +++ b/apply_neighborhood.json @@ -40,7 +40,7 @@ } ], "returns": { - "description": "Return the data cube with the newly computed values and the same dimensions. The dimension properties (name, type, labels, reference system and resolution) must remain unchanged, otherwise a `DataCubePropertiesImmutable` exception will be thrown.", + "description": "The data cube with the newly computed values and the same dimensions. The dimension properties (name, type, labels, reference system and resolution) must remain unchanged, otherwise a `DataCubePropertiesImmutable` exception will be thrown.", "schema": { "type": "object", "subtype": "raster-cube" diff --git a/array_apply.json b/array_apply.json index d5732574..b3c3bc4e 100644 --- a/array_apply.json +++ b/array_apply.json @@ -65,7 +65,7 @@ } ], "returns": { - "description": "Return the value that should be stored in the data cube.", + "description": "The value to be stored in the array.", "schema": { "description": "Any data type." } diff --git a/array_contains.json b/array_contains.json index 6722c566..5a1732be 100644 --- a/array_contains.json +++ b/array_contains.json @@ -27,7 +27,7 @@ } ], "returns": { - "description": "Returns `true` if the list contains the value, false` otherwise.", + "description": "`true` if the list contains the value, false` otherwise.", "schema": { "type": "boolean" } diff --git a/array_element.json b/array_element.json index 389030bf..bc3a9dbc 100644 --- a/array_element.json +++ b/array_element.json @@ -1,7 +1,7 @@ { "id": "array_element", "summary": "Get an element from an array", - "description": "Returns the element with the specified index or label from the array.\n\nEither the parameter `index` or `label` must be specified, otherwise the `ArrayElementParameterMissing` exception is thrown. 
If both parameters are set the `ArrayElementParameterConflict` exception is thrown.", + "description": "Gives the element with the specified index or label from the array.\n\nEither the parameter `index` or `label` must be specified, otherwise the `ArrayElementParameterMissing` exception is thrown. If both parameters are set the `ArrayElementParameterConflict` exception is thrown.", "categories": [ "arrays", "reducer" diff --git a/array_filter.json b/array_filter.json index cdf1d86f..bcb6dc16 100644 --- a/array_filter.json +++ b/array_filter.json @@ -66,7 +66,7 @@ } ], "returns": { - "description": "Return `true` if the value should be kept in the array, otherwise return `false`.", + "description": "`true` if the value should be kept in the array, otherwise `false`.", "schema": { "type": "boolean" } diff --git a/array_find.json b/array_find.json index d4268d6d..276fdd66 100644 --- a/array_find.json +++ b/array_find.json @@ -26,7 +26,7 @@ } ], "returns": { - "description": "Returns the index of the first element with the specified value. If no element was found, `null` is returned.", + "description": "The index of the first element with the specified value. If no element was found, `null` is a.", "schema": [ { "type": "null" diff --git a/array_labels.json b/array_labels.json index 2e26c62b..3a34d3a5 100644 --- a/array_labels.json +++ b/array_labels.json @@ -1,7 +1,7 @@ { "id": "array_labels", "summary": "Get the labels for an array", - "description": "Returns all labels for a labeled array in the data cube. The labels have the same order as in the array.", + "description": "Gives all labels for a labeled array in the data cube. The labels have the same order as in the array.", "categories": [ "arrays" ], diff --git a/count.json b/count.json index 32707910..1f044b16 100644 --- a/count.json +++ b/count.json @@ -45,7 +45,7 @@ } ], "returns": { - "description": "Return `true` if the element should increase the counter, otherwise return `false`.", + "description": "`true` if the element should increase the counter, otherwise `false`.", "schema": { "type": "boolean" } diff --git a/debug.json b/debug.json index a6726a0c..a073a3a9 100644 --- a/debug.json +++ b/debug.json @@ -48,7 +48,7 @@ } ], "returns": { - "description": "Returns the data as passed to the `data` parameter.", + "description": "The data as passed to the `data` parameter without any modification.", "schema": { "description": "Any data type is allowed." } diff --git a/dimension_labels.json b/dimension_labels.json index 6e3bc205..78716e95 100644 --- a/dimension_labels.json +++ b/dimension_labels.json @@ -1,7 +1,7 @@ { "id": "dimension_labels", "summary": "Get the dimension labels", - "description": "Returns all labels for a dimension in the data cube. The labels have the same order as in the data cube.", + "description": "Gives all labels for a dimension in the data cube. 
The labels have the same order as in the data cube.", "categories": [ "cubes" ], diff --git a/eq.json b/eq.json index 15f02968..f81c58b4 100644 --- a/eq.json +++ b/eq.json @@ -44,7 +44,7 @@ } ], "returns": { - "description": "Returns `true` if `x` is equal to `y`, `null` if any operand is `null`, otherwise `false`.", + "description": "`true` if `x` is equal to `y`, `null` if any operand is `null`, otherwise `false`.", "schema": { "type": [ "boolean", diff --git a/examples/array_contains_nodata.json b/examples/array_contains_nodata.json index 755d23fa..3b1fe6f0 100644 --- a/examples/array_contains_nodata.json +++ b/examples/array_contains_nodata.json @@ -18,7 +18,7 @@ } ], "returns": { - "description": "Returns `true` if the list contains a no-data value, false` otherwise.", + "description": "`true` if the list contains a no-data value, false` otherwise.", "schema": { "type": "boolean" } diff --git a/examples/array_find_nodata.json b/examples/array_find_nodata.json index a3bef55e..b57fc35b 100644 --- a/examples/array_find_nodata.json +++ b/examples/array_find_nodata.json @@ -18,7 +18,7 @@ } ], "returns": { - "description": "Returns the index of the first element with a no-data value. If only data values are available, `null` is returned.", + "description": "The index of the first element with a no-data value. If only data values are available, `null` is returned.", "schema": [ { "type": "null" diff --git a/filter_labels.json b/filter_labels.json index 8b7b61c2..2e5ea88c 100644 --- a/filter_labels.json +++ b/filter_labels.json @@ -45,7 +45,7 @@ } ], "returns": { - "description": "Return `true` if the dimension label should be kept in the data cube, otherwise return `false`.", + "description": "`true` if the dimension label should be kept in the data cube, otherwise `false`.", "schema": { "type": "boolean" } diff --git a/load_collection.json b/load_collection.json index d69c85ac..89f30daf 100644 --- a/load_collection.json +++ b/load_collection.json @@ -196,7 +196,7 @@ } ], "returns": { - "description": "Return `true` if the data should be loaded into the data cube, otherwise return `false`.", + "description": "`true` if the data should be loaded into the data cube, otherwise `false`.", "schema": { "type": "boolean" } diff --git a/merge_cubes.json b/merge_cubes.json index 752a4859..e362a57b 100644 --- a/merge_cubes.json +++ b/merge_cubes.json @@ -54,7 +54,7 @@ } ], "returns": { - "description": "Return the value that should be stored in the data cube.", + "description": "The value to be stored in the data cube.", "schema": { "description": "Any data type." 
} diff --git a/meta/subtype-schemas.json b/meta/subtype-schemas.json index cc4b7b82..d94ed806 100644 --- a/meta/subtype-schemas.json +++ b/meta/subtype-schemas.json @@ -224,7 +224,7 @@ } ], "returns": { - "description": "Return `true` if the data should be used, otherwise return `false`.", + "description": "`true` if the data should be used, otherwise `false`.", "schema": { "type": "boolean" } diff --git a/neq.json b/neq.json index b19bf50b..76ab5e70 100644 --- a/neq.json +++ b/neq.json @@ -44,7 +44,7 @@ } ], "returns": { - "description": "Returns `true` if `x` is *not* equal to `y`, `null` if any operand is `null`, otherwise `false`.", + "description": "`true` if `x` is *not* equal to `y`, `null` if any operand is `null`, otherwise `false`.", "schema": { "type": [ "boolean", diff --git a/proposals/aggregate_spatial_binary.json b/proposals/aggregate_spatial_binary.json index f37eae36..5bbb74f0 100644 --- a/proposals/aggregate_spatial_binary.json +++ b/proposals/aggregate_spatial_binary.json @@ -56,7 +56,7 @@ } ], "returns": { - "description": "Return the value that should be stored in the data cube.", + "description": "The value to be stored in the data cube.", "schema": { "description": "Any data type." } diff --git a/proposals/aggregate_spatial_window.json b/proposals/aggregate_spatial_window.json index 6af11b8f..fa82286b 100644 --- a/proposals/aggregate_spatial_window.json +++ b/proposals/aggregate_spatial_window.json @@ -44,7 +44,7 @@ } ], "returns": { - "description": "Return the value that should be stored in the data cube.", + "description": "The value to be stored in the data cube.", "schema": { "description": "Any data type." } diff --git a/proposals/reduce_dimension_binary.json b/proposals/reduce_dimension_binary.json index 58659613..c576117e 100644 --- a/proposals/reduce_dimension_binary.json +++ b/proposals/reduce_dimension_binary.json @@ -48,7 +48,7 @@ } ], "returns": { - "description": "Return the value that should be stored in the data cube.", + "description": "The value to be stored in the data cube.", "schema": { "description": "Any data type." } diff --git a/proposals/run_udf_externally.json b/proposals/run_udf_externally.json index f52efcc1..d9bec53f 100644 --- a/proposals/run_udf_externally.json +++ b/proposals/run_udf_externally.json @@ -43,7 +43,7 @@ }, { "name": "context", - "description": "Additional data such as configuration options that should be passed to the UDF.", + "description": "Additional data such as configuration options to be passed to the UDF.", "schema": { "type": "object" }, diff --git a/reduce_dimension.json b/reduce_dimension.json index 3f6bb79e..2b622a1b 100644 --- a/reduce_dimension.json +++ b/reduce_dimension.json @@ -44,7 +44,7 @@ } ], "returns": { - "description": "Return the value that should be stored in the data cube.", + "description": "The value to be stored in the data cube.", "schema": { "description": "Any data type." } diff --git a/resample_cube_temporal.json b/resample_cube_temporal.json index 208539b1..fd7ff10c 100644 --- a/resample_cube_temporal.json +++ b/resample_cube_temporal.json @@ -52,7 +52,7 @@ } ], "returns": { - "description": "Return the value that should be stored in the data cube.", + "description": "The value to be stored in the data cube.", "schema": { "description": "Any data type." 
} diff --git a/run_udf.json b/run_udf.json index 43a933f8..7a5b071d 100644 --- a/run_udf.json +++ b/run_udf.json @@ -79,7 +79,7 @@ }, { "name": "context", - "description": "Additional data such as configuration options that should be passed to the UDF.", + "description": "Additional data such as configuration options to be passed to the UDF.", "schema": { "type": "object" }, diff --git a/text_merge.json b/text_merge.json index e65f49c7..13405728 100644 --- a/text_merge.json +++ b/text_merge.json @@ -37,7 +37,7 @@ } ], "returns": { - "description": "Returns a string containing a string representation of all the array elements in the same order, with the separator between each element.", + "description": "A string containing a string representation of all the array elements in the same order, with the separator between each element.", "schema": { "type": "string" } From ec5f84585bdf8c61df66806d5e8548c04aa6509b Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 4 Jan 2021 16:13:37 +0100 Subject: [PATCH 024/109] Align return value descriptions regarding where values are "stored". --- aggregate_spatial.json | 2 +- aggregate_temporal.json | 4 ++-- aggregate_temporal_period.json | 4 ++-- apply.json | 2 +- apply_dimension.json | 2 +- array_apply.json | 2 +- merge_cubes.json | 2 +- proposals/aggregate_spatial_binary.json | 2 +- proposals/aggregate_spatial_window.json | 2 +- proposals/reduce_dimension_binary.json | 2 +- reduce_dimension.json | 2 +- resample_cube_temporal.json | 2 +- 12 files changed, 14 insertions(+), 14 deletions(-) diff --git a/aggregate_spatial.json b/aggregate_spatial.json index df0d1654..9954fef6 100644 --- a/aggregate_spatial.json +++ b/aggregate_spatial.json @@ -51,7 +51,7 @@ } ], "returns": { - "description": "The value to be stored in the data cube.", + "description": "The value to be stored in the vector data cube.", "schema": { "description": "Any data type." } diff --git a/aggregate_temporal.json b/aggregate_temporal.json index 4f333238..f168268c 100644 --- a/aggregate_temporal.json +++ b/aggregate_temporal.json @@ -114,7 +114,7 @@ } ], "returns": { - "description": "The value to be stored in the data cube.", + "description": "The value to be stored in the new data cube.", "schema": { "description": "Any data type." } @@ -163,7 +163,7 @@ } ], "returns": { - "description": "A data cube with the same dimensions. The dimension properties (name, type, labels, reference system and resolution) remain unchanged, except for the resolution and dimension labels of the given temporal dimension.", + "description": "A new data cube with the same dimensions. The dimension properties (name, type, labels, reference system and resolution) remain unchanged, except for the resolution and dimension labels of the given temporal dimension.", "schema": { "type": "object", "subtype": "raster-cube" diff --git a/aggregate_temporal_period.json b/aggregate_temporal_period.json index 03454b5d..5edf583b 100644 --- a/aggregate_temporal_period.json +++ b/aggregate_temporal_period.json @@ -64,7 +64,7 @@ } ], "returns": { - "description": "The value to be stored in the data cube.", + "description": "The value to be stored in the new data cube.", "schema": { "description": "Any data type." } @@ -94,7 +94,7 @@ } ], "returns": { - "description": "A data cube with the same dimensions. The dimension properties (name, type, labels, reference system and resolution) remain unchanged, except for the resolution and dimension labels of the given temporal dimension. 
The specified temporal dimension has the following dimension labels (`YYYY` = four-digit year, `MM` = two-digit month, `DD` two-digit day of month):\n\n* `hour`: `YYYY-MM-DD-00` - `YYYY-MM-DD-23`\n* `day`: `YYYY-001` - `YYYY-365`\n* `week`: `YYYY-01` - `YYYY-52`\n* `dekad`: `YYYY-00` - `YYYY-36`\n* `month`: `YYYY-01` - `YYYY-12`\n* `season`: `YYYY-djf` (December - February), `YYYY-mam` (March - May), `YYYY-jja` (June - August), `YYYY-son` (September - November).\n* `tropical-season`: `YYYY-ndjfma` (November - April), `YYYY-mjjaso` (May - October).\n* `year`: `YYYY`\n* `decade`: `YYY0`\n* `decade-ad`: `YYY1`", + "description": "A new data cube with the same dimensions. The dimension properties (name, type, labels, reference system and resolution) remain unchanged, except for the resolution and dimension labels of the given temporal dimension. The specified temporal dimension has the following dimension labels (`YYYY` = four-digit year, `MM` = two-digit month, `DD` two-digit day of month):\n\n* `hour`: `YYYY-MM-DD-00` - `YYYY-MM-DD-23`\n* `day`: `YYYY-001` - `YYYY-365`\n* `week`: `YYYY-01` - `YYYY-52`\n* `dekad`: `YYYY-00` - `YYYY-36`\n* `month`: `YYYY-01` - `YYYY-12`\n* `season`: `YYYY-djf` (December - February), `YYYY-mam` (March - May), `YYYY-jja` (June - August), `YYYY-son` (September - November).\n* `tropical-season`: `YYYY-ndjfma` (November - April), `YYYY-mjjaso` (May - October).\n* `year`: `YYYY`\n* `decade`: `YYY0`\n* `decade-ad`: `YYY1`", "schema": { "type": "object", "subtype": "raster-cube" diff --git a/apply.json b/apply.json index 878d6d6a..61d23400 100644 --- a/apply.json +++ b/apply.json @@ -39,7 +39,7 @@ } ], "returns": { - "description": "The value to be stored in the data cube.", + "description": "The value to be stored in the new data cube.", "schema": { "description": "Any data type." } diff --git a/apply_dimension.json b/apply_dimension.json index 512924ba..139fea6b 100644 --- a/apply_dimension.json +++ b/apply_dimension.json @@ -43,7 +43,7 @@ } ], "returns": { - "description": "The value to be stored in the data cube.", + "description": "The value to be stored in the new data cube.", "schema": { "description": "Any data type." } diff --git a/array_apply.json b/array_apply.json index b3c3bc4e..c75d8a64 100644 --- a/array_apply.json +++ b/array_apply.json @@ -65,7 +65,7 @@ } ], "returns": { - "description": "The value to be stored in the array.", + "description": "The value to be stored in the new array.", "schema": { "description": "Any data type." } diff --git a/merge_cubes.json b/merge_cubes.json index e362a57b..603771d4 100644 --- a/merge_cubes.json +++ b/merge_cubes.json @@ -54,7 +54,7 @@ } ], "returns": { - "description": "The value to be stored in the data cube.", + "description": "The value to be stored in the merged data cube.", "schema": { "description": "Any data type." } diff --git a/proposals/aggregate_spatial_binary.json b/proposals/aggregate_spatial_binary.json index 5bbb74f0..80cd9780 100644 --- a/proposals/aggregate_spatial_binary.json +++ b/proposals/aggregate_spatial_binary.json @@ -56,7 +56,7 @@ } ], "returns": { - "description": "The value to be stored in the data cube.", + "description": "The value to be stored in the vector data cube.", "schema": { "description": "Any data type." 
} diff --git a/proposals/aggregate_spatial_window.json b/proposals/aggregate_spatial_window.json index fa82286b..657ccdca 100644 --- a/proposals/aggregate_spatial_window.json +++ b/proposals/aggregate_spatial_window.json @@ -44,7 +44,7 @@ } ], "returns": { - "description": "The value to be stored in the data cube.", + "description": "The value to be stored in the new data cube.", "schema": { "description": "Any data type." } diff --git a/proposals/reduce_dimension_binary.json b/proposals/reduce_dimension_binary.json index c576117e..d1665abf 100644 --- a/proposals/reduce_dimension_binary.json +++ b/proposals/reduce_dimension_binary.json @@ -48,7 +48,7 @@ } ], "returns": { - "description": "The value to be stored in the data cube.", + "description": "The value to be stored in the new data cube.", "schema": { "description": "Any data type." } diff --git a/reduce_dimension.json b/reduce_dimension.json index 2b622a1b..44b4774d 100644 --- a/reduce_dimension.json +++ b/reduce_dimension.json @@ -44,7 +44,7 @@ } ], "returns": { - "description": "The value to be stored in the data cube.", + "description": "The value to be stored in the new data cube.", "schema": { "description": "Any data type." } diff --git a/resample_cube_temporal.json b/resample_cube_temporal.json index fd7ff10c..6fdb78e7 100644 --- a/resample_cube_temporal.json +++ b/resample_cube_temporal.json @@ -52,7 +52,7 @@ } ], "returns": { - "description": "The value to be stored in the data cube.", + "description": "The value to be stored in the resampled data cube.", "schema": { "description": "Any data type." } From d8bee57293b982a6d41a93b88292f2452eed117e Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 6 Jan 2021 14:50:28 +0100 Subject: [PATCH 025/109] Moved some processes to proposals #207 --- CHANGELOG.md | 3 ++- README.md | 9 +++++++++ cummax.json => proposals/cummax.json | 1 + cummin.json => proposals/cummin.json | 1 + cumproduct.json => proposals/cumproduct.json | 1 + cumsum.json => proposals/cumsum.json | 1 + debug.json => proposals/debug.json | 1 + filter_labels.json => proposals/filter_labels.json | 1 + load_result.json => proposals/load_result.json | 1 + .../load_uploaded_files.json | 1 + .../resample_cube_temporal.json | 1 + 11 files changed, 20 insertions(+), 1 deletion(-) rename cummax.json => proposals/cummax.json (99%) rename cummin.json => proposals/cummin.json (99%) rename cumproduct.json => proposals/cumproduct.json (99%) rename cumsum.json => proposals/cumsum.json (99%) rename debug.json => proposals/debug.json (98%) rename filter_labels.json => proposals/filter_labels.json (99%) rename load_result.json => proposals/load_result.json (96%) rename load_uploaded_files.json => proposals/load_uploaded_files.json (98%) rename resample_cube_temporal.json => proposals/resample_cube_temporal.json (99%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 16e3b746..2d3b2087 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,8 +12,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `nan` ### Changed -- Added `proposals` folder for experimental processes. Experimental processes are not covered by the CHANGELOG! +- Added `proposals` folder for experimental processes. Experimental processes are not covered by the CHANGELOG and MAY include breaking changes! 
[#196](https://github.com/Open-EO/openeo-processes/issues/196), [#207](https://github.com/Open-EO/openeo-processes/issues/207), [PSC#8](https://github.com/Open-EO/PSC/issues/8)
 - Moved the experimental processes `aggregate_spatial_binary`, `reduce_dimension_binary` and `run_udf_externally` to the proposals.
+  - Moved the rarely used and implemented processes `cummax`, `cummin`, `cumproduct`, `cumsum`, `debug`, `filter_labels`, `load_result`, `load_uploaded_files`, `resample_cube_temporal` to the proposals.

 ### Fixed
 - Clarify that the `condition` parameter for `array_filter` works also on indices and labels.

diff --git a/README.md b/README.md
index 4fcece55..bfd2fa88 100644
--- a/README.md
+++ b/README.md
@@ -32,3 +32,12 @@ This repository contains a set of files formally describing the openEO Processes
 * [subtype-schemas.json](meta/subtype-schemas.json) in the `meta` folder defines common data types (`subtype`s) for JSON Schema used in openEO processes.
 * The [`examples`](examples/) folder contains some useful examples that the processes link to. All of these are non-binding additions.
 * The [`tests`](tests/) folder can be used to test the process specification for validity and and consistent "style". It also allows to render the processes in a web browser.
+
+
+## Process
+
+* All new processes must be added to the [`proposals`](proposals/) folder.
+* Processes will only be moved from proposals to the stable process specifications once there are at least two implementations and an example process graph in the [`examples`](examples/) folder showing it in a use case. This doesn't require a PSC vote individually as it's not a breaking change, just an addition.
+* The [`proposals`](proposals/) folder allows breaking changes without a PSC vote and without increasing the major version number (i.e. a breaking change in the proposals doesn't require us to make the next version number 2.0.0).
+* The proposals are released as experimental processes with the other processes.
+* Each release and all breaking changes in the stable process specifications must go through PSC vote.
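The stable/proposals split codified in this commit is straightforward to consume in tooling: stable specifications sit as `*.json` at the repository root, while everything under `proposals/` additionally carries the `"experimental": true` flag added throughout this series. The stdlib-only Python sketch below is a hypothetical helper (not part of the repository's test suite) and assumes it is run from a checkout of the repository root.

```python
import json
from pathlib import Path

def partition_processes(repo_root="."):
    """Split process specs into stable (root *.json) and proposals
    (proposals/*.json), mirroring the layout described in the README."""
    root = Path(repo_root)
    stable = {p.stem: json.loads(p.read_text(encoding="utf-8"))
              for p in root.glob("*.json")}
    proposals = {p.stem: json.loads(p.read_text(encoding="utf-8"))
                 for p in root.glob("proposals/*.json")}
    return stable, proposals

if __name__ == "__main__":
    stable, proposals = partition_processes()
    print(len(stable), "stable process(es),", len(proposals), "proposal(s)")
    # Proposals should all carry the experimental flag:
    print(all(spec.get("experimental", False) for spec in proposals.values()))
```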
\ No newline at end of file diff --git a/cummax.json b/proposals/cummax.json similarity index 99% rename from cummax.json rename to proposals/cummax.json index 18a37abd..050eea11 100644 --- a/cummax.json +++ b/proposals/cummax.json @@ -5,6 +5,7 @@ "categories": [ "math > cumulative" ], + "experimental": true, "parameters": [ { "name": "data", diff --git a/cummin.json b/proposals/cummin.json similarity index 99% rename from cummin.json rename to proposals/cummin.json index fd20a92b..a8df7407 100644 --- a/cummin.json +++ b/proposals/cummin.json @@ -5,6 +5,7 @@ "categories": [ "math > cumulative" ], + "experimental": true, "parameters": [ { "name": "data", diff --git a/cumproduct.json b/proposals/cumproduct.json similarity index 99% rename from cumproduct.json rename to proposals/cumproduct.json index cf135e2f..c639ea98 100644 --- a/cumproduct.json +++ b/proposals/cumproduct.json @@ -5,6 +5,7 @@ "categories": [ "math > cumulative" ], + "experimental": true, "parameters": [ { "name": "data", diff --git a/cumsum.json b/proposals/cumsum.json similarity index 99% rename from cumsum.json rename to proposals/cumsum.json index 6ce6b10c..f9ce26f6 100644 --- a/cumsum.json +++ b/proposals/cumsum.json @@ -5,6 +5,7 @@ "categories": [ "math > cumulative" ], + "experimental": true, "parameters": [ { "name": "data", diff --git a/debug.json b/proposals/debug.json similarity index 98% rename from debug.json rename to proposals/debug.json index a6726a0c..a32f27d7 100644 --- a/debug.json +++ b/proposals/debug.json @@ -5,6 +5,7 @@ "categories": [ "development" ], + "experimental": true, "parameters": [ { "name": "data", diff --git a/filter_labels.json b/proposals/filter_labels.json similarity index 99% rename from filter_labels.json rename to proposals/filter_labels.json index 54aefcbd..5cadb7bc 100644 --- a/filter_labels.json +++ b/proposals/filter_labels.json @@ -6,6 +6,7 @@ "cubes", "filter" ], + "experimental": true, "parameters": [ { "name": "data", diff --git a/load_result.json b/proposals/load_result.json similarity index 96% rename from load_result.json rename to proposals/load_result.json index c0377abe..31505511 100644 --- a/load_result.json +++ b/proposals/load_result.json @@ -6,6 +6,7 @@ "cubes", "import" ], + "experimental": true, "parameters": [ { "name": "id", diff --git a/load_uploaded_files.json b/proposals/load_uploaded_files.json similarity index 98% rename from load_uploaded_files.json rename to proposals/load_uploaded_files.json index bcd11c1d..cf6735c5 100644 --- a/load_uploaded_files.json +++ b/proposals/load_uploaded_files.json @@ -6,6 +6,7 @@ "cubes", "import" ], + "experimental": true, "parameters": [ { "name": "paths", diff --git a/resample_cube_temporal.json b/proposals/resample_cube_temporal.json similarity index 99% rename from resample_cube_temporal.json rename to proposals/resample_cube_temporal.json index d31e31c9..6e7c97d5 100644 --- a/resample_cube_temporal.json +++ b/proposals/resample_cube_temporal.json @@ -6,6 +6,7 @@ "cubes", "aggregate & resample" ], + "experimental": true, "parameters": [ { "name": "data", From 8be67ff27b1b29933d4df683ffec97735ade052e Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 6 Jan 2021 15:14:56 +0100 Subject: [PATCH 026/109] changed the term 'stored' to set/added/used... 
--- aggregate_spatial.json | 4 ++-- aggregate_temporal.json | 2 +- aggregate_temporal_period.json | 2 +- apply.json | 2 +- apply_dimension.json | 2 +- array_apply.json | 2 +- merge_cubes.json | 2 +- proposals/aggregate_spatial_binary.json | 4 ++-- proposals/aggregate_spatial_window.json | 2 +- proposals/filter_labels.json | 2 +- proposals/reduce_dimension_binary.json | 2 +- proposals/resample_cube_temporal.json | 2 +- reduce_dimension.json | 2 +- 13 files changed, 15 insertions(+), 15 deletions(-) diff --git a/aggregate_spatial.json b/aggregate_spatial.json index 9954fef6..4f238064 100644 --- a/aggregate_spatial.json +++ b/aggregate_spatial.json @@ -51,7 +51,7 @@ } ], "returns": { - "description": "The value to be stored in the vector data cube.", + "description": "The value to be set in the vector data cube.", "schema": { "description": "Any data type." } @@ -78,7 +78,7 @@ } ], "returns": { - "description": "A vector data cube with the computed results and restricted to the bounds of the geometries.\n\nThe computed value is stored in dimension with the name that was specified in the parameter `target_dimension`.\n\nThe computation also stores information about the total count of pixels (valid + invalid pixels) and the number of valid pixels (see ``is_valid()``) for each geometry. These values are stored as new dimension with a dimension name derived from `target_dimension` by adding the suffix `_meta`. The new dimension has the dimension labels `total_count` and `valid_count`.", + "description": "A vector data cube with the computed results and restricted to the bounds of the geometries.\n\nThe computed value is used for the dimension with the name that was specified in the parameter `target_dimension`.\n\nThe computation also stores information about the total count of pixels (valid + invalid pixels) and the number of valid pixels (see ``is_valid()``) for each geometry. These values are added as new dimension with a dimension name derived from `target_dimension` by adding the suffix `_meta`. The new dimension has the dimension labels `total_count` and `valid_count`.", "schema": { "type": "object", "subtype": "vector-cube" diff --git a/aggregate_temporal.json b/aggregate_temporal.json index f168268c..56cbf39d 100644 --- a/aggregate_temporal.json +++ b/aggregate_temporal.json @@ -114,7 +114,7 @@ } ], "returns": { - "description": "The value to be stored in the new data cube.", + "description": "The value to be set in the new data cube.", "schema": { "description": "Any data type." } diff --git a/aggregate_temporal_period.json b/aggregate_temporal_period.json index 5edf583b..2f21a020 100644 --- a/aggregate_temporal_period.json +++ b/aggregate_temporal_period.json @@ -64,7 +64,7 @@ } ], "returns": { - "description": "The value to be stored in the new data cube.", + "description": "The value to be set in the new data cube.", "schema": { "description": "Any data type." } diff --git a/apply.json b/apply.json index 61d23400..7d0342de 100644 --- a/apply.json +++ b/apply.json @@ -39,7 +39,7 @@ } ], "returns": { - "description": "The value to be stored in the new data cube.", + "description": "The value to be set in the new data cube.", "schema": { "description": "Any data type." 
} diff --git a/apply_dimension.json b/apply_dimension.json index 139fea6b..07c235e3 100644 --- a/apply_dimension.json +++ b/apply_dimension.json @@ -43,7 +43,7 @@ } ], "returns": { - "description": "The value to be stored in the new data cube.", + "description": "The value to be set in the new data cube.", "schema": { "description": "Any data type." } diff --git a/array_apply.json b/array_apply.json index 25e9b000..61ec1d3e 100644 --- a/array_apply.json +++ b/array_apply.json @@ -66,7 +66,7 @@ } ], "returns": { - "description": "The value to be stored in the new array.", + "description": "The value to be set in the new array.", "schema": { "description": "Any data type." } diff --git a/merge_cubes.json b/merge_cubes.json index dd1e8ad7..b26960c6 100644 --- a/merge_cubes.json +++ b/merge_cubes.json @@ -54,7 +54,7 @@ } ], "returns": { - "description": "The value to be stored in the merged data cube.", + "description": "The value to be set in the merged data cube.", "schema": { "description": "Any data type." } diff --git a/proposals/aggregate_spatial_binary.json b/proposals/aggregate_spatial_binary.json index 80cd9780..3cac1ae9 100644 --- a/proposals/aggregate_spatial_binary.json +++ b/proposals/aggregate_spatial_binary.json @@ -56,7 +56,7 @@ } ], "returns": { - "description": "The value to be stored in the vector data cube.", + "description": "The value to be set in the vector data cube.", "schema": { "description": "Any data type." } @@ -83,7 +83,7 @@ } ], "returns": { - "description": "A vector data cube with the computed results and restricted to the bounds of the geometries.\n\nThe computed value is stored in dimension with the name that was specified in the parameter `target_dimension`.\n\nThe computation also stores information about the total count of pixels (valid + invalid pixels) and the number of valid pixels (see ``is_valid()``) for each geometry. These values are stored as new dimension with a dimension name derived from `target_dimension` by adding the suffix `_meta`. The new dimension has the dimension labels `total_count` and `valid_count`.", + "description": "A vector data cube with the computed results and restricted to the bounds of the geometries.\n\nThe computed value is used for the dimension with the name that was specified in the parameter `target_dimension`.\n\nThe computation also stores information about the total count of pixels (valid + invalid pixels) and the number of valid pixels (see ``is_valid()``) for each geometry. These values are added as new dimension with a dimension name derived from `target_dimension` by adding the suffix `_meta`. The new dimension has the dimension labels `total_count` and `valid_count`.", "schema": { "type": "object", "subtype": "vector-cube" diff --git a/proposals/aggregate_spatial_window.json b/proposals/aggregate_spatial_window.json index 657ccdca..6c2a27e0 100644 --- a/proposals/aggregate_spatial_window.json +++ b/proposals/aggregate_spatial_window.json @@ -44,7 +44,7 @@ } ], "returns": { - "description": "The value to be stored in the new data cube.", + "description": "The value to be set in the new data cube.", "schema": { "description": "Any data type." } diff --git a/proposals/filter_labels.json b/proposals/filter_labels.json index 39772b04..ebbae261 100644 --- a/proposals/filter_labels.json +++ b/proposals/filter_labels.json @@ -25,7 +25,7 @@ "parameters": [ { "name": "value", - "description": "A single dimension label to compare against. 
The data type of the parameter depends on the dimension labels stored for the dimension.", + "description": "A single dimension label to compare against. The data type of the parameter depends on the dimension labels set for the dimension.", "schema": [ { "type": "number" diff --git a/proposals/reduce_dimension_binary.json b/proposals/reduce_dimension_binary.json index d1665abf..7a70d9e0 100644 --- a/proposals/reduce_dimension_binary.json +++ b/proposals/reduce_dimension_binary.json @@ -48,7 +48,7 @@ } ], "returns": { - "description": "The value to be stored in the new data cube.", + "description": "The value to be set in the new data cube.", "schema": { "description": "Any data type." } diff --git a/proposals/resample_cube_temporal.json b/proposals/resample_cube_temporal.json index 1423eb43..c7e95124 100644 --- a/proposals/resample_cube_temporal.json +++ b/proposals/resample_cube_temporal.json @@ -53,7 +53,7 @@ } ], "returns": { - "description": "The value to be stored in the resampled data cube.", + "description": "The value to be set in the resampled data cube.", "schema": { "description": "Any data type." } diff --git a/reduce_dimension.json b/reduce_dimension.json index 44b4774d..67e1f84b 100644 --- a/reduce_dimension.json +++ b/reduce_dimension.json @@ -44,7 +44,7 @@ } ], "returns": { - "description": "The value to be stored in the new data cube.", + "description": "The value to be set in the new data cube.", "schema": { "description": "Any data type." } From d3fac34b5f269e407bb9bb9b4f949b7e639a88ad Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 6 Jan 2021 15:54:48 +0100 Subject: [PATCH 027/109] Fixed "typo" --- array_find.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/array_find.json b/array_find.json index 276fdd66..a0d456cc 100644 --- a/array_find.json +++ b/array_find.json @@ -26,7 +26,7 @@ } ], "returns": { - "description": "The index of the first element with the specified value. If no element was found, `null` is a.", + "description": "The index of the first element with the specified value. If no element was found, `null` is returned.", "schema": [ { "type": "null" From 62b59bf3d3ac1cfe5dbeceec522160616acc5437 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 6 Jan 2021 17:57:08 +0100 Subject: [PATCH 028/109] `array_element`: Clarify that `ArrayNotLabeled` exception is thrown when parameter `label` is specified and the given array is not labeled. --- CHANGELOG.md | 1 + array_element.json | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 020ac98c..9c2c462c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Clarify how the parameters passed to the overlap resolver correspond to the data cubes. [#184](https://github.com/Open-EO/openeo-processes/issues/184) - Improve and clarify specifications for `is_nan`, `is_nodata`, `is_valid`. [#189](https://github.com/Open-EO/openeo-processes/issues/189) - Improve and clarify specifications for `all` and `any`. [#189](https://github.com/Open-EO/openeo-processes/issues/199) +- `array_element`: Clarify that `ArrayNotLabeled` exception is thrown when parameter `label` is specified and the given array is not labeled. - `array_apply`, `array_element`, `array_filter`: Added the `minimum: 0` constraint to all schemas describing zero-based indices (parameter `index`). 
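To keep the `array_element` rules from this commit straight — exactly one of `index` or `label` must be given, and `label` only works on labeled arrays — here is a small illustrative Python model. The function signature, the `labels` stand-in for the array's label metadata, and the mapping onto `ValueError` are assumptions made for the sketch; only the exception names themselves come from the spec text in this series.

```python
def array_element(data, index=None, label=None, labels=None):
    """Illustrative model of ``array_element``; ``labels`` stands in
    for the label metadata of a labeled array (``None`` = unlabeled)."""
    if index is None and label is None:
        raise ValueError("ArrayElementParameterMissing")   # neither parameter set
    if index is not None and label is not None:
        raise ValueError("ArrayElementParameterConflict")  # both parameters set
    if label is not None:
        if labels is None:
            raise ValueError("ArrayNotLabeled")            # label lookup on a plain array
        index = labels.index(label)                        # resolve label to an index
    return data[index]

print(array_element([9, 8, 7], index=2))                                # 7
print(array_element([9, 8, 7], label="B2", labels=["B1", "B2", "B3"]))  # 8
```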
- Examples `array_contains_nodata` and `array_find_nodata` diff --git a/array_element.json b/array_element.json index d00c0499..2b8735b5 100644 --- a/array_element.json +++ b/array_element.json @@ -28,7 +28,7 @@ }, { "name": "label", - "description": "The label of the element to retrieve.", + "description": "The label of the element to retrieve. Throws a `ArrayNotLabeled` exception, if the given array is not a labeled array and this parameter is set.", "schema": [ { "type": "number" @@ -64,6 +64,9 @@ }, "ArrayElementParameterConflict": { "message": "The process 'array_element' only allows that either the 'index' or the 'labels' parameter is set." + }, + "ArrayNotLabeled": { + "message": "The array is not a labeled array, but the `label` parameter is set. Use the `index` instead." } }, "examples": [ From 87cd5d69a3df317c9d7604f43869a38037b53b19 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 6 Jan 2021 17:57:54 +0100 Subject: [PATCH 029/109] Fix return value for process in apply_dimension --- apply_dimension.json | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/apply_dimension.json b/apply_dimension.json index 07c235e3..9d751154 100644 --- a/apply_dimension.json +++ b/apply_dimension.json @@ -45,7 +45,10 @@ "returns": { "description": "The value to be set in the new data cube.", "schema": { - "description": "Any data type." + "type": "array", + "items": { + "description": "Any data type." + } } } } From 314e9d7eec17b80291c27e83f906dee5b54f9098 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 6 Jan 2021 18:14:13 +0100 Subject: [PATCH 030/109] `array_labels`: Clarified the accepted data type for array elements passed to the parameter `data`. --- CHANGELOG.md | 1 + array_labels.json | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9c2c462c..c72db647 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Improve and clarify specifications for `all` and `any`. [#189](https://github.com/Open-EO/openeo-processes/issues/199) - `array_element`: Clarify that `ArrayNotLabeled` exception is thrown when parameter `label` is specified and the given array is not labeled. - `array_apply`, `array_element`, `array_filter`: Added the `minimum: 0` constraint to all schemas describing zero-based indices (parameter `index`). +- `array_labels`: Clarified the accepted data type for array elements passed to the parameter `data`. - Examples `array_contains_nodata` and `array_find_nodata` ## 1.0.0 - 2020-07-31 diff --git a/array_labels.json b/array_labels.json index 3a34d3a5..5058d286 100644 --- a/array_labels.json +++ b/array_labels.json @@ -11,7 +11,10 @@ "description": "An array with labels.", "schema": { "type": "array", - "subtype": "labeled-array" + "subtype": "labeled-array", + "items": { + "description": "Any data type." 
+ } } } ], From 091ced2946a3c68d5936b896fc4671e1be9ca43c Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 6 Jan 2021 18:25:40 +0100 Subject: [PATCH 031/109] Some clean-up around schema types --- extrema.json | 8 ++------ mask_polygon.json | 22 ++++++++++++++-------- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/extrema.json b/extrema.json index d4a4c0d2..e2503f04 100644 --- a/extrema.json +++ b/extrema.json @@ -37,9 +37,7 @@ "minItems": 2, "maxItems": 2, "items": { - "type": [ - "number" - ] + "type": "number" } }, { @@ -47,9 +45,7 @@ "minItems": 2, "maxItems": 2, "items": { - "type": [ - "null" - ] + "type": "null" } } ] diff --git a/mask_polygon.json b/mask_polygon.json index adf15e61..20affdfe 100644 --- a/mask_polygon.json +++ b/mask_polygon.json @@ -26,14 +26,20 @@ { "name": "replacement", "description": "The value used to replace masked values with.", - "schema": { - "type": [ - "number", - "boolean", - "string", - "null" - ] - }, + "schema": [ + { + "type": "number" + }, + { + "type": "boolean" + }, + { + "type": "string" + }, + { + "type": "null" + } + ], "default": null, "optional": true }, From df99ee505ac908cf132638fb6b003654405787be Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 6 Jan 2021 18:29:04 +0100 Subject: [PATCH 032/109] Add array_find_label so that we don't need index/label parameter "switches" everywhere, don't use labeled-array for parameter inputs so that we avoid type casting issues, clarifications. --- CHANGELOG.md | 2 ++ array_labels.json | 7 +++-- proposals/array_create.json | 2 +- proposals/array_find_label.json | 46 +++++++++++++++++++++++++++++++++ proposals/array_merge.json | 2 +- 5 files changed, 53 insertions(+), 6 deletions(-) create mode 100644 proposals/array_find_label.json diff --git a/CHANGELOG.md b/CHANGELOG.md index b6a2ab26..ad5007fc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,10 +9,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - New processes in proposal state - `array_create` + - `array_find_label` - `array_merge` - `is_infinite` - `nan` - Added return value details (property `returns`) for the schemas with the subtype `process-graph`. [API#350](https://github.com/Open-EO/openeo-api/issues/350) +- `apply_labels`: Also accept arrays without labels and return an empty array then. - `apply_neighborhood`: Clarify behavior for data cubes returned by the child processes and for that add the exception `DataCubePropertiesImmutable`. ### Changed diff --git a/array_labels.json b/array_labels.json index 3a34d3a5..bd36a254 100644 --- a/array_labels.json +++ b/array_labels.json @@ -1,17 +1,16 @@ { "id": "array_labels", "summary": "Get the labels for an array", - "description": "Gives all labels for a labeled array in the data cube. The labels have the same order as in the array.", + "description": "Gives all labels for a labeled array in the data cube. The labels have the same order as in the array.\n\nIf the array is not labeled, an empty array is returned.", "categories": [ "arrays" ], "parameters": [ { "name": "data", - "description": "An array with labels.", + "description": "An array.", "schema": { - "type": "array", - "subtype": "labeled-array" + "type": "array" } } ], diff --git a/proposals/array_create.json b/proposals/array_create.json index f0bc5016..099ea855 100644 --- a/proposals/array_create.json +++ b/proposals/array_create.json @@ -9,7 +9,7 @@ "parameters": [ { "name": "length", - "description": "The number of elements to fill the array with. 
Default to `0`.", + "description": "The number of elements to fill the array with. Defaults to `0`.", "optional": true, "default": 0, "schema": { diff --git a/proposals/array_find_label.json b/proposals/array_find_label.json new file mode 100644 index 00000000..7f501871 --- /dev/null +++ b/proposals/array_find_label.json @@ -0,0 +1,46 @@ +{ + "id": "array_find_label", + "summary": "Get the index for a label in a labeled array", + "description": "Checks whether the labeled array specified for `data` has the label specified in `label` and returns the zero-based index for it. If there's no match as either the label doesn't exist or the array is not labeled, `null` is returned.", + "categories": [ + "arrays", + "reducer" + ], + "experimental": true, + "parameters": [ + { + "name": "data", + "description": "List to find the label in.", + "schema": { + "type": "array", + "items": { + "description": "Any data type is allowed." + } + } + }, + { + "name": "label", + "description": "Label to find in `data`.", + "schema": [ + { + "type": "number" + }, + { + "type": "string" + } + ] + } + ], + "returns": { + "description": "The index of the element with the specified label assigned. If no such label was found, `null` is returned.", + "schema": [ + { + "type": "null" + }, + { + "type": "integer", + "minimum": 0 + } + ] + } +} \ No newline at end of file diff --git a/proposals/array_merge.json b/proposals/array_merge.json index f407991d..07e00858 100644 --- a/proposals/array_merge.json +++ b/proposals/array_merge.json @@ -1,7 +1,7 @@ { "id": "array_merge", "summary": "Merge two arrays", - "description": "Merges two arrays into a single array by appending the second array to the first array. Array labels get discarded.", + "description": "Merges two arrays into a single array by appending the second array to the first array. Array labels get discarded from both arrays before merging.", "categories": [ "arrays" ], From fbea7a240823aff6bc3b99580e057321f2c4d1e1 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 6 Jan 2021 19:17:59 +0100 Subject: [PATCH 033/109] Add array_drop --- CHANGELOG.md | 1 + proposals/array_drop.json | 79 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+) create mode 100644 proposals/array_drop.json diff --git a/CHANGELOG.md b/CHANGELOG.md index ad5007fc..ebb6a912 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - New processes in proposal state - `array_create` + - `array_drop` - `array_find_label` - `array_merge` - `is_infinite` diff --git a/proposals/array_drop.json b/proposals/array_drop.json new file mode 100644 index 00000000..6ce52761 --- /dev/null +++ b/proposals/array_drop.json @@ -0,0 +1,79 @@ +{ + "id": "array_drop", + "summary": "Remove array elements", + "description": "Removes the number of array elements given in the parameter `length`, starting from the element at the index given in the parameter `index`.\n\nAll elements after the given index get a new index assigned so that the indices are always sequence of numbers with the step size of 1 and starting at 0. Labels assigned to the individual values are not affected.", + "categories": [ + "arrays" + ], + "experimental": true, + "parameters": [ + { + "name": "data", + "description": "An array.", + "schema": { + "type": "array", + "items": { + "description": "Any data type is allowed." 
+ } + } + }, + { + "name": "index", + "description": "The zero-based index of the first element to remove.", + "schema": { + "type": "integer", + "minimum": 0 + } + }, + { + "name": "length", + "description": "The number of elements to remove from the array. Defaults to `1`.", + "optional": true, + "default": 1, + "schema": { + "type": "integer", + "minimum": 1 + } + } + ], + "returns": { + "description": "An array with values being removed. The number of elements get reduced by the number given in the parameter `length`.", + "schema": { + "type": "array", + "items": { + "description": "Any data type is allowed." + } + } + }, + "examples": [ + { + "arguments": { + "data": [ + "a", + "b", + "c" + ], + "index": 1 + }, + "returns": [ + "a", + "c" + ] + }, + { + "arguments": { + "data": [ + 100, + 50, + 88, + 32 + ], + "index": 0, + "length": 3 + }, + "returns": [ + 32 + ] + } + ] +} \ No newline at end of file From ff95b538d66935ca6d218ad91fbf0da7a0f96356 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Thu, 7 Jan 2021 13:14:02 +0100 Subject: [PATCH 034/109] Remove array_drop, add array_set --- CHANGELOG.md | 2 +- proposals/array_drop.json | 79 -------------- proposals/array_set.json | 215 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 216 insertions(+), 80 deletions(-) delete mode 100644 proposals/array_drop.json create mode 100644 proposals/array_set.json diff --git a/CHANGELOG.md b/CHANGELOG.md index ebb6a912..b813efd5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,9 +9,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - New processes in proposal state - `array_create` - - `array_drop` - `array_find_label` - `array_merge` + - `array_set` - `is_infinite` - `nan` - Added return value details (property `returns`) for the schemas with the subtype `process-graph`. [API#350](https://github.com/Open-EO/openeo-api/issues/350) diff --git a/proposals/array_drop.json b/proposals/array_drop.json deleted file mode 100644 index 6ce52761..00000000 --- a/proposals/array_drop.json +++ /dev/null @@ -1,79 +0,0 @@ -{ - "id": "array_drop", - "summary": "Remove array elements", - "description": "Removes the number of array elements given in the parameter `length`, starting from the element at the index given in the parameter `index`.\n\nAll elements after the given index get a new index assigned so that the indices are always sequence of numbers with the step size of 1 and starting at 0. Labels assigned to the individual values are not affected.", - "categories": [ - "arrays" - ], - "experimental": true, - "parameters": [ - { - "name": "data", - "description": "An array.", - "schema": { - "type": "array", - "items": { - "description": "Any data type is allowed." - } - } - }, - { - "name": "index", - "description": "The zero-based index of the first element to remove.", - "schema": { - "type": "integer", - "minimum": 0 - } - }, - { - "name": "length", - "description": "The number of elements to remove from the array. Defaults to `1`.", - "optional": true, - "default": 1, - "schema": { - "type": "integer", - "minimum": 1 - } - } - ], - "returns": { - "description": "An array with values being removed. The number of elements get reduced by the number given in the parameter `length`.", - "schema": { - "type": "array", - "items": { - "description": "Any data type is allowed." 
-                }
-            }
-        },
-        {
-            "name": "index",
-            "description": "The zero-based index of the first element to remove.",
-            "schema": {
-                "type": "integer",
-                "minimum": 0
-            }
-        },
-        {
-            "name": "length",
-            "description": "The number of elements to remove from the array. Defaults to `1`.",
-            "optional": true,
-            "default": 1,
-            "schema": {
-                "type": "integer",
-                "minimum": 1
-            }
-        }
-    ],
-    "returns": {
-        "description": "An array with values being removed. The number of elements get reduced by the number given in the parameter `length`.",
-        "schema": {
-            "type": "array",
-            "items": {
-                "description": "Any data type is allowed."
-            }
-        }
-    },
-    "examples": [
-        {
-            "arguments": {
-                "data": [
-                    "a",
-                    "b",
-                    "c"
-                ],
-                "index": 1
-            },
-            "returns": [
-                "a",
-                "c"
-            ]
-        },
-        {
-            "arguments": {
-                "data": [
-                    100,
-                    50,
-                    88,
-                    32
-                ],
-                "index": 0,
-                "length": 3
-            },
-            "returns": [
-                32
-            ]
-        }
-    ]
-}
\ No newline at end of file
diff --git a/proposals/array_set.json b/proposals/array_set.json
new file mode 100644
index 00000000..f8cb8176
--- /dev/null
+++ b/proposals/array_set.json
@@ -0,0 +1,215 @@
+{
+    "id": "array_set",
+    "summary": "Change the content of an array (insert, remove, update)",
+    "description": "Allows inserting into, removing from or updating an array.\n\nAll labels get discarded and the array indices are always a sequence of numbers with the step size of 1 and starting at 0.",
+    "categories": [
+        "arrays"
+    ],
+    "experimental": true,
+    "parameters": [
+        {
+            "name": "data",
+            "description": "An array.",
+            "schema": {
+                "type": "array",
+                "items": {
+                    "description": "Any data type is allowed."
+                }
+            }
+        },
+        {
+            "name": "values",
+            "description": "The values to fill the array with.",
+            "schema": {
+                "type": "array",
+                "items": {
+                    "description": "Any data type is allowed."
+                }
+            }
+        },
+        {
+            "name": "index",
+            "description": "The index of the element to insert the value(s) before. To insert after the last element, specify the number of elements in the array. If the index is greater than the number of elements, the array is filled with `null` (no-data) values up to the given index and the values are added starting at that index. The number of elements can be retrieved with the process ``count()`` having the parameter `condition` set to `true`.",
+            "optional": true,
+            "default": 0,
+            "schema": {
+                "type": "integer"
+            }
+        },
+        {
+            "name": "length",
+            "description": "The number of elements to replace. This parameter has no effect if the given `index` does not exist in the given array.",
+            "optional": true,
+            "default": 1,
+            "schema": {
+                "type": "integer",
+                "minimum": 0
+            }
+        }
+    ],
+    "returns": {
+        "description": "An array with values added, updated or removed.",
+        "schema": {
+            "type": "array",
+            "items": {
+                "description": "Any data type is allowed."
+            }
+        }
+    },
+    "examples": [
+        {
+            "description": "With the default values for the optional parameters, values are added at the beginning of the array.",
+            "arguments": {
+                "data": [
+                    "b",
+                    "c"
+                ],
+                "values": [
+                    "a"
+                ]
+            },
+            "returns": [
+                "a",
+                "b",
+                "c"
+            ]
+        },
+        {
+            "description": "Add a value at the end of the array.",
+            "arguments": {
+                "data": [
+                    "a",
+                    "b"
+                ],
+                "values": [
+                    "c"
+                ],
+                "index": 2
+            },
+            "returns": [
+                "a",
+                "b",
+                "c"
+            ]
+        },
+        {
+            "description": "Add a value at a specific non-existing position after the array, filling missing elements with `null`.",
+            "arguments": {
+                "data": [
+                    "a",
+                    "b"
+                ],
+                "values": [
+                    "e"
+                ],
+                "index": 4
+            },
+            "returns": [
+                "a",
+                "b",
+                null,
+                null,
+                "e"
+            ]
+        },
+        {
+            "description": "Update a single value in the array.",
+            "arguments": {
+                "data": [
+                    "a",
+                    "d",
+                    "c"
+                ],
+                "values": [
+                    "b"
+                ],
+                "index": 1
+            },
+            "returns": [
+                "a",
+                "b",
+                "c"
+            ]
+        },
+        {
+            "description": "Update multiple values in the array.",
+            "arguments": {
+                "data": [
+                    "a",
+                    "b",
+                    3,
+                    4
+                ],
+                "values": [
+                    1,
+                    2
+                ],
+                "index": 0,
+                "length": 2
+            },
+            "returns": [
+                1,
+                2,
+                3,
+                4
+            ]
+        },
+        {
+            "description": "Replace a single value with two values in the array.",
+            "arguments": {
+                "data": [
+                    "a",
+                    null,
+                    "d"
+                ],
+                "values": [
+                    "b",
+                    "c"
+                ],
+                "index": 1
+            },
+            "returns": [
+                "a",
+                "b",
+                "c",
+                "d"
+            ]
+        },
+        {
+            "description": "Remove a single value from the array.",
+            "arguments": {
+                "data": [
+                    "a",
+                    "b",
+                    null,
+                    "c"
+                ],
+                "values": [],
+                "index": 2
+            },
+            "returns": [
+                "a",
+                "b",
+                "c"
+            ]
+        },
+        {
+            "description": "Remove multiple values from the array.",
+            "arguments": {
+                "data": [
+                    null,
+                    null,
+                    "a",
+                    "b",
+                    "c"
+                ],
+                "values": [],
+                "length": 2
+            },
+            "returns": [
+                "a",
+                "b",
+                "c"
+            ]
+        }
+    ]
+}
\ No newline at end of file

From cd7a43d84918fa2ba867537906cdc5ce02bb2a45 Mon Sep 17 00:00:00 2001
From: Matthias Mohr
Date: Fri, 8 Jan 2021 13:22:42 +0100
Subject: [PATCH 035/109] Better spelling and grammar

---
 CHANGELOG.md                            |  7 ++++---
 README.md                               |  8 ++++----
 add_dimension.json                      |  2 +-
 aggregate_spatial.json                  |  4 ++--
 aggregate_temporal.json                 |  4 ++--
 aggregate_temporal_period.json          |  2 +-
 apply_dimension.json                    |  6 +++---
 apply_kernel.json                       |  2 +-
 apply_neighborhood.json                 | 14 +++++++-------
 arctan2.json                            |  4 ++--
 array_contains.json                     |  2 +-
 array_element.json                      |  2 +-
 array_find.json                         |  2 +-
 array_labels.json                       |  2 +-
 between.json                            |  2 +-
 climatological_normal.json              |  2 +-
 clip.json                               |  2 +-
 count.json                              |  2 +-
 dimension_labels.json                   |  2 +-
 divide.json                             |  2 +-
 drop_dimension.json                     |  2 +-
 eq.json                                 |  4 ++--
 filter_bands.json                       |  8 ++++----
 filter_temporal.json                    |  2 +-
 is_nan.json                             |  2 +-
 is_valid.json                           |  2 +-
 ln.json                                 |  2 +-
 load_collection.json                    |  4 ++--
 log.json                                |  2 +-
 mask.json                               |  4 ++--
 mask_polygon.json                       |  2 +-
 max.json                                |  2 +-
 median.json                             |  2 +-
 meta/subtype-schemas.json               |  8 ++++----
 min.json                                |  2 +-
 mod.json                                |  6 +++---
 ndvi.json                               | 12 ++++++------
 neq.json                                |  4 ++--
 order.json                              |  4 ++--
 proposals/aggregate_spatial_binary.json |  4 ++--
 proposals/aggregate_spatial_window.json |  6 +++---
 proposals/cummax.json                   |  4 ++--
 proposals/cummin.json                   |  4 ++--
 proposals/cumproduct.json               |  4 ++--
 proposals/cumsum.json                   |  4 ++--
 proposals/filter_labels.json            |  2 +-
 proposals/load_result.json              |  2 +-
 proposals/load_uploaded_files.json      |  4 ++--
 proposals/reduce_dimension_binary.json  |  2 +-
 proposals/resample_cube_temporal.json   |  4 ++--
 proposals/run_udf_externally.json       |  6 +++---
 quantiles.json                          |  8 ++++----
 reduce_dimension.json                   |  2 +-
 rename_dimension.json                   |  4 
++-- rename_labels.json | 6 +++--- resample_spatial.json | 2 +- round.json | 2 +- run_udf.json | 14 +++++++------- save_result.json | 2 +- sort.json | 4 ++-- text_merge.json | 4 ++-- 61 files changed, 121 insertions(+), 120 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c72db647..f3d86630 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `array_element`: Clarify that `ArrayNotLabeled` exception is thrown when parameter `label` is specified and the given array is not labeled. - `array_apply`, `array_element`, `array_filter`: Added the `minimum: 0` constraint to all schemas describing zero-based indices (parameter `index`). - `array_labels`: Clarified the accepted data type for array elements passed to the parameter `data`. +- Fixed typos, grammar issues and other spelling-related issues in many of the processes. - Examples `array_contains_nodata` and `array_find_nodata` ## 1.0.0 - 2020-07-31 @@ -63,9 +64,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `aggregate_temporal`: Fixed outdated message for exception `TooManyDimensions`. - `clip`: Fixed examples. - `linear_scale_range`: Clarify that the process implicitly clips the values. [#159](https://github.com/Open-EO/openeo-processes/issues/159) -- `mean`: Clarify behaviour for arrays with `null`-values only. -- `mod`: Clarified behaviour. [#168](https://github.com/Open-EO/openeo-processes/issues/168) -- `resample_*`: Clarified behaviour. +- `mean`: Clarify behavior for arrays with `null`-values only. +- `mod`: Clarified behavior. [#168](https://github.com/Open-EO/openeo-processes/issues/168) +- `resample_*`: Clarified behavior. - `first`, `last`, `max`, `mean`, `median`, `min`, `sd`, `variance`: Clarify behavior for arrays with `null`-values only. - Clarified (and fixed if necessary) for all processes in the "cubes" category the descriptions for the returned data cube. [#149](https://github.com/Open-EO/openeo-processes/issues/149) diff --git a/README.md b/README.md index bfd2fa88..1a5afa5a 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ openEO develops interoperable processes for big Earth observation cloud processi ## Versions / Branches -The [master branch](https://github.com/Open-EO/openeo-processes/tree/master) is the 'stable' version of the openEO processes specification. Exception is the [`proposals`](proposals/) folder, which provides experimental new processes currently under discussion. They may still change, but everyone is encouraged to implement them and give feedback. +The [master branch](https://github.com/Open-EO/openeo-processes/tree/master) is the 'stable' version of the openEO processes specification. An exception is the [`proposals`](proposals/) folder, which provides experimental new processes currently under discussion. They may still change, but everyone is encouraged to implement them and give feedback. The latest release is version **1.0.0**. The [draft branch](https://github.com/Open-EO/openeo-processes/tree/draft) is where active development takes place. PRs should be made against the draft branch. @@ -27,17 +27,17 @@ See also the [changelog](CHANGELOG.md) for the changes between versions and the This repository contains a set of files formally describing the openEO Processes: -* The `*.json` files provide the stable process specifications as defined by openEO. 
Stable processes need at least two implementations and a use-case example added to the [`examples`](examples/) folder *or* consensus from the openEO PSC. +* The `*.json` files provide stable process specifications as defined by openEO. Stable processes need at least two implementations and a use-case example added to the [`examples`](examples/) folder *or* consensus from the openEO PSC. * The `*.json` files in the [`proposals`](proposals/) folder provide proposed new process specifications that are still experimental and subject to change, including breaking changes. Everyone is encouraged to base their work on the proposals and give feedback so that eventually the processes evolve into stable process specifications. * [subtype-schemas.json](meta/subtype-schemas.json) in the `meta` folder defines common data types (`subtype`s) for JSON Schema used in openEO processes. * The [`examples`](examples/) folder contains some useful examples that the processes link to. All of these are non-binding additions. -* The [`tests`](tests/) folder can be used to test the process specification for validity and and consistent "style". It also allows to render the processes in a web browser. +* The [`tests`](tests/) folder can be used to test the process specification for validity and consistent "style". It also allows rendering the processes in a web browser. ## Process * All new processes must be added to the [`proposals`](proposals/) folder. -* Processes will only be moved from proposals to the stable process specifications once there are at least two implementations and ann example process graph in the [`examples`](examples/) folder showing it in a use case. This doesn't require a PSC vote individually as it's not a breaking change, just an addition. +* Processes will only be moved from proposals to the stable process specifications once there are at least two implementations and an example process in the [`examples`](examples/) folder showing it in a use case. This doesn't require a PSC vote individually as it's not a breaking change, just an addition. * The [`proposals`](proposals/) folder allows breaking changes without a PSC vote and without increasing the major version number (i.e. a breaking change in the proposals doesn't require us to make the next version number 2.0.0). * The proposals are released as experimental processes with the other processes. * Each release and all breaking changes in the stable process specifications must go through PSC vote. \ No newline at end of file diff --git a/add_dimension.json b/add_dimension.json index 727456b2..a3b07075 100644 --- a/add_dimension.json +++ b/add_dimension.json @@ -1,7 +1,7 @@ { "id": "add_dimension", "summary": "Add a new dimension", - "description": "Adds a new named dimension to the data cube.\n\nAfterwards, the dimension can be referenced with the specified `name`. If a dimension with the specified name exists, the process fails with a `DimensionExists` error. The dimension label of the dimension is set to the specified `label`.", + "description": "Adds a new named dimension to the data cube.\n\nAfterwards, the dimension can be referred to with the specified `name`. If a dimension with the specified name exists, the process fails with a `DimensionExists` exception. 
The dimension label of the dimension is set to the specified `label`.", "categories": [ "cubes" ], diff --git a/aggregate_spatial.json b/aggregate_spatial.json index 4f238064..b80a19cc 100644 --- a/aggregate_spatial.json +++ b/aggregate_spatial.json @@ -1,7 +1,7 @@ { "id": "aggregate_spatial", "summary": "Zonal statistics for geometries", - "description": "Aggregates statistics for one or more geometries (e.g. zonal statistics for polygons) over the spatial dimensions. This process passes a list of values to the reducer. In contrast, ``aggregate_spatial_binary()`` passes two values, which may be better suited especially for UDFs in case the number of values gets too large to be processed at once.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.\n\nThe data cube must have been reduced to only contain two spatial dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a time series. Otherwise this process fails with the `TooManyDimensions` error.\n\nThe number of total and valid pixels is returned together with the calculated values.", + "description": "Aggregates statistics for one or more geometries (e.g. zonal statistics for polygons) over the spatial dimensions. This process passes a list of values to the reducer. In contrast, ``aggregate_spatial_binary()`` passes two values, which may be better suited especially for UDFs in case the number of values gets too large to be processed at once.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.\n\nThe data cube must have been reduced to only contain two spatial dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a time series. Otherwise, this process fails with the `TooManyDimensions` exception.\n\nThe number of total and valid pixels is returned together with the calculated values.", "categories": [ "cubes", "aggregate & resample" @@ -78,7 +78,7 @@ } ], "returns": { - "description": "A vector data cube with the computed results and restricted to the bounds of the geometries.\n\nThe computed value is used for the dimension with the name that was specified in the parameter `target_dimension`.\n\nThe computation also stores information about the total count of pixels (valid + invalid pixels) and the number of valid pixels (see ``is_valid()``) for each geometry. These values are added as new dimension with a dimension name derived from `target_dimension` by adding the suffix `_meta`. 
The new dimension has the dimension labels `total_count` and `valid_count`.", + "description": "A vector data cube with the computed results and restricted to the bounds of the geometries.\n\nThe computed value is used for the dimension with the name that was specified in the parameter `target_dimension`.\n\nThe computation also stores information about the total count of pixels (valid + invalid pixels) and the number of valid pixels (see ``is_valid()``) for each geometry. These values are added as a new dimension with a dimension name derived from `target_dimension` by adding the suffix `_meta`. The new dimension has the dimension labels `total_count` and `valid_count`.", "schema": { "type": "object", "subtype": "vector-cube" diff --git a/aggregate_temporal.json b/aggregate_temporal.json index 56cbf39d..f5a26187 100644 --- a/aggregate_temporal.json +++ b/aggregate_temporal.json @@ -1,7 +1,7 @@ { "id": "aggregate_temporal", "summary": "Temporal aggregations", - "description": "Computes a temporal aggregation based on an array of temporal intervals.\n\nFor common regular calendar hierarchies such as year, month, week or seasons ``aggregate_temporal_period()`` can be used. Other calendar hierarchies must be transformed into specific intervals by the clients.\n\nFor each interval, all data along the dimension will be passed through the reducer.\n\nThe computed values will be projected to the labels. If no labels are specified, the start of the temporal interval will be used as label for the corresponding values. In case of a conflict (i.e. the user-specified values for the start times of the temporal intervals are not distinct), the user-defined labels must be specified in the parameter `labels` as otherwise a `DistinctDimensionLabelsRequired` error would be thrown. The number of user-defined labels and the number of intervals need to be equal.\n\nIf the dimension is not set or is set to `null`, the data cube is expected to only have one temporal dimension.", + "description": "Computes a temporal aggregation based on an array of temporal intervals.\n\nFor common regular calendar hierarchies such as year, month, week or seasons ``aggregate_temporal_period()`` can be used. Other calendar hierarchies must be transformed into specific intervals by the clients.\n\nFor each interval, all data along the dimension will be passed through the reducer.\n\nThe computed values will be projected to the labels. If no labels are specified, the start of the temporal interval will be used as label for the corresponding values. In case of a conflict (i.e. the user-specified values for the start times of the temporal intervals are not distinct), the user-defined labels must be specified in the parameter `labels` as otherwise a `DistinctDimensionLabelsRequired` exception would be thrown. The number of user-defined labels and the number of intervals need to be equal.\n\nIf the dimension is not set or is set to `null`, the data cube is expected to only have one temporal dimension.", "categories": [ "cubes", "aggregate & resample" @@ -142,7 +142,7 @@ }, { "name": "dimension", - "description": "The name of the temporal dimension for aggregation. All data along the dimension is passed through the specified reducer. If the dimension is not set or set to `null`, the data cube is expected to only have one temporal dimension. Fails with a `TooManyDimensions` error if it has more dimensions. 
Fails with a `DimensionNotAvailable` error if the specified dimension does not exist.", + "description": "The name of the temporal dimension for aggregation. All data along the dimension is passed through the specified reducer. If the dimension is not set or set to `null`, the data cube is expected to only have one temporal dimension. Fails with a `TooManyDimensions` exception if it has more dimensions. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.", "schema": { "type": [ "string", diff --git a/aggregate_temporal_period.json b/aggregate_temporal_period.json index 2f21a020..bbe52cb9 100644 --- a/aggregate_temporal_period.json +++ b/aggregate_temporal_period.json @@ -73,7 +73,7 @@ }, { "name": "dimension", - "description": "The name of the temporal dimension for aggregation. All data along the dimension is passed through the specified reducer. If the dimension is not set or set to `null`, the data cube is expected to only have one temporal dimension. Fails with a `TooManyDimensions` error if it has more dimensions. Fails with a `DimensionNotAvailable` error if the specified dimension does not exist.", + "description": "The name of the temporal dimension for aggregation. All data along the dimension is passed through the specified reducer. If the dimension is not set or set to `null`, the data cube is expected to only have one temporal dimension. Fails with a `TooManyDimensions` exception if it has more dimensions. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.", "schema": { "type": [ "string", diff --git a/apply_dimension.json b/apply_dimension.json index 9d751154..1ecb4b73 100644 --- a/apply_dimension.json +++ b/apply_dimension.json @@ -16,7 +16,7 @@ }, { "name": "process", - "description": "Process to be applied on all pixel values. The specified process needs to accept an array as parameter and must return an array with least one element. A process may consist of multiple sub-processes.", + "description": "Process to be applied on all pixel values. The specified process needs to accept an array and must return an array with at least one element. A process may consist of multiple sub-processes.", "schema": { "type": "object", "subtype": "process-graph", @@ -55,14 +55,14 @@ }, { "name": "dimension", - "description": "The name of the source dimension to apply the process on. Fails with a `DimensionNotAvailable` error if the specified dimension does not exist.", + "description": "The name of the source dimension to apply the process on. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.", "schema": { "type": "string" } }, { "name": "target_dimension", - "description": "The name of the target dimension or `null` (the default) to use the source dimension specified in the parameter `dimension`.\n\nBy specifying a target dimension, the source dimension is removed. The target dimension with the specified name and the type `other` (see ``add_dimension()``) is created, if if doesn't exist yet.", + "description": "The name of the target dimension or `null` (the default) to use the source dimension specified in the parameter `dimension`.\n\nBy specifying a target dimension, the source dimension is removed. 
The target dimension with the specified name and the type `other` (see ``add_dimension()``) is created, if it doesn't exist yet.",
             "schema": {
                 "type": [
                     "string",
diff --git a/apply_kernel.json b/apply_kernel.json
index dddacd5f..f6512d45 100644
--- a/apply_kernel.json
+++ b/apply_kernel.json
@@ -17,7 +17,7 @@
         },
         {
             "name": "kernel",
-            "description": "Kernel as a two-dimensional array of weights. The inner level of the nested array aligns with the `x` axis and the outer level aligns with the `y` axis. Each level of the kernel must have an uneven number of elements, otherwise the process throws a `KernelDimensionsUneven` exception.",
             "schema": {
                 "description": "A two-dimensional array of numbers.",
                 "type": "array",
diff --git a/apply_neighborhood.json b/apply_neighborhood.json
index 624002b6..f7ad3615 100644
--- a/apply_neighborhood.json
+++ b/apply_neighborhood.json
@@ -1,7 +1,7 @@
 {
     "id": "apply_neighborhood",
-    "summary": "Apply a process to pixels in a n-dimensional neighbourhood",
+    "summary": "Apply a process to pixels in an n-dimensional neighborhood",
-    "description": "Applies a focal process to a data cube.\n\nA focal process is a process that works on a 'neighborhood' of pixels. The neighborhood can extend into multiple dimensions; this extent is specified by the `size` argument. It is not only (part of) the size of the input window, but also the size of the output for a given position of the sliding window. The sliding window moves with multiples of `size`.\n\nAn overlap can be specified so that neighborhoods can have overlapping boundaries. This allows for continuity of the output. The values included in the data cube as overlap can't be modified by the given `process`. 
The missing overlap at the borders of the original data cube is made available as no-data (`null`) in the sub data cubes.\n\nThe neighborhood size should be kept small enough to avoid running beyond computational resources, but too small a size will result in a larger number of process invocations, which may slow down processing. Window sizes for spatial dimensions typically are in the range of 64 to 512 pixels, while overlaps of 8 to 32 pixels are common.\n\nThe process must not add new dimensions, or remove entire dimensions, but the result can have different dimension labels.\n\nFor the special case of 2D convolution, it is recommended to use ``apply_kernel()``.",
     "categories": [
         "cubes"
     ],
@@ -16,7 +16,7 @@
         },
         {
             "name": "process",
-            "description": "Process to be applied on all neighborhoods.",
             "schema": {
                 "type": "object",
                 "subtype": "process-graph",
@@ -50,7 +50,7 @@
         },
         {
             "name": "size",
-            "description": "Neighborhood sizes along each dimension.\n\nThis object maps dimension names to either a physical measure (e.g. 100 m, 10 days) or pixels (e.g. 32 pixels). For dimensions not specified, the default is to provide all values. Be aware that all values from overly large dimensions may not be processable at once.",
             "schema": {
                 "type": "array",
                 "items": {
@@ -86,7 +86,7 @@
                     },
                     "unit": {
                         "type": "string",
-                        "description": "The unit the values are given in, either in meters (`m`) or pixels (`px`). If no unit is given, uses the unit specified for the dimension or otherwise the default unit of the reference system.",
                         "enum": [
                             "px",
                             "m"
@@ -98,7 +98,7 @@
         },
         {
             "name": "overlap",
-            "description": "Overlap of neighborhoods along each dimension to avoid border effects.\n\nFor instance, a temporal dimension can add 1 month before and after a neighborhood. In the spatial dimensions, this is often a number of pixels. The overlap specified is added before and after, so an overlap of 8 pixels will add 8 pixels on both sides of the window, so 16 in total.\n\nBe aware that large overlaps increase the need for computational resources and modifying overlapping data in subsequent operations has no effect.",
             "optional": true,
             "schema": {
                 "type": "array",
                 "items": {
@@ -135,7 +135,7 @@
                     },
                     "unit": {
                         "type": "string",
-                        "description": "The unit the values are given in, either in meters (`m`) or pixels (`px`). 
If no unit is given, uses the unit specified for the dimension or otherwise the default unit of the reference system.", + "description": "The unit the values are given in, either in meters (`m`) or pixels (`px`). If no unit is given, uses the unit specified for the dimension or otherwise the default unit of the reference system.", "enum": [ "px", "m" diff --git a/arctan2.json b/arctan2.json index a4d91b2a..4e7e3311 100644 --- a/arctan2.json +++ b/arctan2.json @@ -8,7 +8,7 @@ "parameters": [ { "name": "y", - "description": "A number to be used as dividend.", + "description": "A number to be used as the dividend.", "schema": { "type": [ "number", @@ -18,7 +18,7 @@ }, { "name": "x", - "description": "A number to be used as divisor.", + "description": "A number to be used as the divisor.", "schema": { "type": [ "number", diff --git a/array_contains.json b/array_contains.json index 5a1732be..b2842ae0 100644 --- a/array_contains.json +++ b/array_contains.json @@ -1,7 +1,7 @@ { "id": "array_contains", "summary": "Check whether the array contains a given value", - "description": "Checks whether the array specified for `data` contains the value specified in `value`. Returns `true` if there's a match, otherwise `false`.\n\n**Remarks:**\n\n* To get the index or the label of the value found, use ``array_find()``.\n* All definitions for the process ``eq()`` regarding the comparison of values apply here as well. A `null` return value from ``eq()`` is handled exactly as `false` (no match).\n* Data types MUST be checked strictly, for example a string with the content *1* is not equal to the number *1*.\n* An integer *1* is equal to a floating point number *1.0* as `integer` is a sub-type of `number`. Still, this process may return unexpectedly `false` when comparing floating point numbers due to floating point inaccuracy in machine-based computation.\n* Temporal strings are treated as normal strings and MUST NOT be interpreted.\n* If the specified value is an array, object or null, the process always returns `false`. See the examples for one to check for `null` values.", + "description": "Checks whether the array specified for `data` contains the value specified in `value`. Returns `true` if there's a match, otherwise `false`.\n\n**Remarks:**\n\n* To get the index or the label of the value found, use ``array_find()``.\n* All definitions for the process ``eq()`` regarding the comparison of values apply here as well. A `null` return value from ``eq()`` is handled exactly as `false` (no match).\n* Data types MUST be checked strictly. For example, a string with the content *1* is not equal to the number *1*.\n* An integer *1* is equal to a floating-point number *1.0* as `integer` is a sub-type of `number`. Still, this process may return unexpectedly `false` when comparing floating-point numbers due to floating-point inaccuracy in machine-based computation.\n* Temporal strings are treated as normal strings and MUST NOT be interpreted.\n* If the specified value is an array, object or null, the process always returns `false`. See the examples for one to check for `null` values.", "categories": [ "arrays", "comparison", diff --git a/array_element.json b/array_element.json index 2b8735b5..495d19c3 100644 --- a/array_element.json +++ b/array_element.json @@ -28,7 +28,7 @@ }, { "name": "label", - "description": "The label of the element to retrieve. Throws a `ArrayNotLabeled` exception, if the given array is not a labeled array and this parameter is set.", + "description": "The label of the element to retrieve. 
Throws an `ArrayNotLabeled` exception, if the given array is not a labeled array and this parameter is set.", "schema": [ { "type": "number" diff --git a/array_find.json b/array_find.json index a0d456cc..872e0765 100644 --- a/array_find.json +++ b/array_find.json @@ -1,7 +1,7 @@ { "id": "array_find", "summary": "Get the index for a value in an array", - "description": "Checks whether the array specified for `data` contains the value specified in `value` and returns the zero-based index for the first match. If there's no match, `null` is returned.\n\n**Remarks:**\n\n* To get a boolean value returned use ``array_contains()``.\n* All definitions for the process ``eq()`` regarding the comparison of values apply here as well. A `null` return value from ``eq()`` is handled exactly as `false` (no match).\n* Data types MUST be checked strictly, for example a string with the content *1* is not equal to the number *1*.\n* An integer *1* is equal to a floating point number *1.0* as `integer` is a sub-type of `number`. Still, this process may return unexpectedly `false` when comparing floating point numbers due to floating point inaccuracy in machine-based computation.\n* Temporal strings are treated as normal strings and MUST NOT be interpreted.\n* If the specified value is an array, object or null, the process always returns `null`. See the examples for one to find `null` values.", + "description": "Checks whether the array specified for `data` contains the value specified in `value` and returns the zero-based index for the first match. If there's no match, `null` is returned.\n\n**Remarks:**\n\n* To get a boolean value returned use ``array_contains()``.\n* All definitions for the process ``eq()`` regarding the comparison of values apply here as well. A `null` return value from ``eq()`` is handled exactly as `false` (no match).\n* Data types MUST be checked strictly. For example, a string with the content *1* is not equal to the number *1*.\n* An integer *1* is equal to a floating-point number *1.0* as `integer` is a sub-type of `number`. Still, this process may return unexpectedly `false` when comparing floating-point numbers due to floating-point inaccuracy in machine-based computation.\n* Temporal strings are treated as normal strings and MUST NOT be interpreted.\n* If the specified value is an array, object or null, the process always returns `null`. See the examples for one to find `null` values.", "categories": [ "arrays", "reducer" diff --git a/array_labels.json b/array_labels.json index 5058d286..419d3c20 100644 --- a/array_labels.json +++ b/array_labels.json @@ -19,7 +19,7 @@ } ], "returns": { - "description": "The labels as array.", + "description": "The labels as an array.", "schema": { "type": "array", "items": { diff --git a/between.json b/between.json index dcd1fa92..7d8f14df 100644 --- a/between.json +++ b/between.json @@ -1,7 +1,7 @@ { "id": "between", "summary": "Between comparison", - "description": "By default this process checks whether `x` is greater than or equal to `min` and lower than or equal to `max`, which is the same as computing `and(gte(x, min), lte(x, max))`. Therefore, all definitions from ``and()``, ``gte()`` and ``lte()`` apply here as well.\n\nIf `exclude_max` is set to `true` the upper bound is excluded so that the process checks whether `x` is greater than or equal to `min` and lower than `max`. In this case the process works the same as computing `and(gte(x, min), lt(x, max))`.\n\nLower and upper bounds are not allowed to be swapped. 
So `min` MUST be lower than or equal to `max` or otherwise the process always returns `false`.", + "description": "By default, this process checks whether `x` is greater than or equal to `min` and lower than or equal to `max`, which is the same as computing `and(gte(x, min), lte(x, max))`. Therefore, all definitions from ``and()``, ``gte()`` and ``lte()`` apply here as well.\n\nIf `exclude_max` is set to `true` the upper bound is excluded so that the process checks whether `x` is greater than or equal to `min` and lower than `max`. In this case, the process works the same as computing `and(gte(x, min), lt(x, max))`.\n\nLower and upper bounds are not allowed to be swapped. So `min` MUST be lower than or equal to `max` or otherwise the process always returns `false`.", "categories": [ "comparison" ], diff --git a/climatological_normal.json b/climatological_normal.json index 57ce30db..efaedd74 100644 --- a/climatological_normal.json +++ b/climatological_normal.json @@ -30,7 +30,7 @@ }, { "name": "climatology_period", - "description": "The climatology period as closed temporal interval. The first element of the array is the first year to be fully included in the temporal interval. The second element is the last year to be fully included in the temporal interval. The default period is from 1981 until 2010 (both inclusive).", + "description": "The climatology period as a closed temporal interval. The first element of the array is the first year to be fully included in the temporal interval. The second element is the last year to be fully included in the temporal interval. The default period is from 1981 until 2010 (both inclusive).", "schema": { "type": "array", "subtype": "temporal-interval", diff --git a/clip.json b/clip.json index 9e5709dc..adbf7eaa 100644 --- a/clip.json +++ b/clip.json @@ -1,7 +1,7 @@ { "id": "clip", "summary": "Clip a value between a minimum and a maximum", - "description": "Clips a number between specified minimum and maximum values. A value larger than the maximal value will have the maximal value, a value lower than minimal value will have the minimal value.\n\nThe no-data value `null` is passed through and therefore gets propagated.", + "description": "Clips a number between specified minimum and maximum values. A value larger than the maximum value is set to the maximum value, a value lower than the minimum value is set to the minimum value.\n\nThe no-data value `null` is passed through and therefore gets propagated.", "categories": [ "math" ], diff --git a/count.json b/count.json index 1f044b16..ed1044bd 100644 --- a/count.json +++ b/count.json @@ -19,7 +19,7 @@ }, { "name": "condition", - "description": "A condition consists of one ore more processes, which in the end return a boolean value. It is evaluated against each element in the array. An element is counted only if the condition returns `true`. Defaults to count valid elements in a list (see ``is_valid()``). Setting this parameter to boolean `true` counts all elements in the list.", + "description": "A condition consists of one or more processes, which in the end return a boolean value. It is evaluated against each element in the array. An element is counted only if the condition returns `true`. Defaults to count valid elements in a list (see ``is_valid()``). 
Setting this parameter to boolean `true` counts all elements in the list.",
             "schema": [
                 {
                     "title": "Condition",
diff --git a/dimension_labels.json b/dimension_labels.json
index 78716e95..7597d491 100644
--- a/dimension_labels.json
+++ b/dimension_labels.json
@@ -23,7 +23,7 @@
         }
     ],
     "returns": {
-        "description": "The labels as an array.",
         "schema": {
             "type": "array",
             "items": {
diff --git a/divide.json b/divide.json
index d5cc39ac..064836ce 100644
--- a/divide.json
+++ b/divide.json
@@ -1,7 +1,7 @@
 {
     "id": "divide",
     "summary": "Division of two numbers",
-    "description": "Divides argument `x` by the argument `y` (*x / y*) and returns the computed result.\n\nNo-data values are taken into account so that `null` is returned if any element is such a value.\n\nThe computations follow [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229) whenever the processing environment supports it. Therefore, a division by zero results in ±infinity if the processing environment supports it. Otherwise, a `DivisionByZero` exception must be thrown.",
     "categories": [
         "math"
     ],
diff --git a/drop_dimension.json b/drop_dimension.json
index b946aee0..90212dd9 100644
--- a/drop_dimension.json
+++ b/drop_dimension.json
@@ -1,7 +1,7 @@
 {
     "id": "drop_dimension",
     "summary": "Remove a dimension",
-    "description": "Drops a dimension from the data cube.\n\nDropping a dimension only works on dimensions with a single dimension label left, otherwise the process fails with a `DimensionLabelCountMismatch` exception. Dimension values can be reduced to a single value with a filter such as ``filter_bands()`` or the ``reduce_dimension()`` process. If a dimension with the specified name does not exist, the process fails with a `DimensionNotAvailable` exception.",
     "categories": [
         "cubes"
     ],
diff --git a/eq.json b/eq.json
index f81c58b4..6550098e 100644
--- a/eq.json
+++ b/eq.json
@@ -1,7 +1,7 @@
 {
     "id": "eq",
     "summary": "Equal to comparison",
-    "description": "Compares whether `x` is strictly equal to `y`.\n\n**Remarks:**\n\n* Data types MUST be checked strictly. For example, a string with the content *1* is not equal to the number *1*. Nevertheless, an integer *1* is equal to a floating-point number *1.0* as `integer` is a sub-type of `number`.\n* If any operand is `null`, the return value is `null`. 
Therefore, `eq(null, null)` returns `null` instead of `true`.\n* If any operand is an array or object, the return value is `false`.\n* Strings are expected to be encoded in UTF-8 by default.\n* Temporal strings MUST be compared differently than other strings and MUST NOT be compared based on their string representation due to different possible representations. For example, the time zone representation `Z` (for UTC) has the same meaning as `+00:00`.",
     "categories": [
         "texts",
         "comparison"
@@ -23,7 +23,7 @@
         },
         {
             "name": "delta",
-            "description": "Only applicable for comparing two numbers. If this optional parameter is set to a positive non-zero number, the equality of two numbers is checked against a delta value. This is especially useful to circumvent problems with floating-point inaccuracy in machine-based computation.\n\nThis option is basically an alias for the following computation: `lte(abs(minus([x, y])), delta)`",
             "schema": {
                 "type": [
                     "number",
diff --git a/filter_bands.json b/filter_bands.json
index 94a9f448..0254ee7e 100644
--- a/filter_bands.json
+++ b/filter_bands.json
@@ -1,7 +1,7 @@
 {
     "id": "filter_bands",
-    "summary": "Filter the bands by names",
-    "description": "Filters the bands in the data cube so that bands that don't match any of the criteria are dropped from the data cube. The data cube is expected to have only one dimension of type `bands`. Fails with a `DimensionMissing` exception if no such dimension exists.\n\nThe following criteria can be used to select bands:\n\n* `bands`: band name or common band name (e.g. `B01`, `B8A`, `red` or `nir`)\n* `wavelengths`: ranges of wavelengths in micrometers (μm) (e.g. 0.5 - 0.6)\n\nAll this information is exposed in the band metadata of the collection. To keep algorithms interoperable, it is recommended to prefer the common band names or the wavelengths over band names that are specific to the collection and/or back-end.\n\nIf multiple criteria are specified, any of them must match and not all of them, i.e. they are combined with an OR-operation. If no criteria are specified, the `BandFilterParameterMissing` exception must be thrown.\n\n**Important:** The order of the specified array defines the order of the bands in the data cube, which can be important for subsequent processes. If multiple bands are matched by a single criterion (e.g. 
a range of wavelengths), they stay in the original order.", + "summary": "Filter the bands by names", + "description": "Filters the bands in the data cube so that bands that don't match any of the criteria are dropped from the data cube. The data cube is expected to have only one dimension of type `bands`. Fails with a `DimensionMissing` exception if no such dimension exists.\n\nThe following criteria can be used to select bands:\n\n* `bands`: band name or common band name (e.g. `B01`, `B8A`, `red` or `nir`)\n* `wavelengths`: ranges of wavelengths in micrometers (μm) (e.g. 0.5 - 0.6)\n\nAll these information are exposed in the band metadata of the collection. To keep algorithms interoperable it is recommended to prefer the common band names or the wavelengths over band names that are specific to the collection and/or back-end.\n\nIf multiple criteria are specified, any of them must match and not all of them, i.e. they are combined with an OR-operation. If no criteria are specified, the `BandFilterParameterMissing` exception must be thrown.\n\n**Important:** The order of the specified array defines the order of the bands in the data cube, which can be important for subsequent processes. If multiple bands are matched by a single criterion (e.g. a range of wavelengths), they stay in the original order.", "categories": [ "cubes", "filter" @@ -17,7 +17,7 @@ }, { "name": "bands", - "description": "A list of band names. Either the unique band name (metadata field `name` in bands) or one of the common band names (metadata field `common_name` in bands). If unique band name and common name conflict, the unique band name has higher priority.\n\nThe order of the specified array defines the order of the bands in the data cube. If multiple bands match a common name, all matched bands are included in the original order.", + "description": "A list of band names. Either the unique band name (metadata field `name` in bands) or one of the common band names (metadata field `common_name` in bands). If the unique band name and the common name conflict, the unique band name has a higher priority.\n\nThe order of the specified array defines the order of the bands in the data cube. If multiple bands match a common name, all matched bands are included in the original order.", "schema": { "type": "array", "items": { @@ -30,7 +30,7 @@ }, { "name": "wavelengths", - "description": "A list of sub-lists with each sub-list consisting of two elements. The first element is the minimum wavelength and the second element is the maximum wavelength. Wavelengths are specified in micrometres (μm).\n\nThe order of the specified array defines the order of the bands in the data cube. If multiple bands match the wavelengths, all matched bands are included in the original order.", + "description": "A list of sub-lists with each sub-list consisting of two elements. The first element is the minimum wavelength and the second element is the maximum wavelength. Wavelengths are specified in micrometers (μm).\n\nThe order of the specified array defines the order of the bands in the data cube. If multiple bands match the wavelengths, all matched bands are included in the original order.", "schema": { "type": "array", "items": { diff --git a/filter_temporal.json b/filter_temporal.json index 5e42cb0f..496a8792 100644 --- a/filter_temporal.json +++ b/filter_temporal.json @@ -61,7 +61,7 @@ }, { "name": "dimension", - "description": "The name of the temporal dimension to filter on. 
If no specific dimension is specified or it is set to `null`, the filter applies to all temporal dimensions. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.",
             "schema": {
                 "type": [
                     "string",
diff --git a/is_nan.json b/is_nan.json
index 25650261..91f4bcf9 100644
--- a/is_nan.json
+++ b/is_nan.json
@@ -1,7 +1,7 @@
 {
     "id": "is_nan",
     "summary": "Value is not a number",
-    "description": "Checks whether the specified value `x` is not a number. Returns `false` for numeric values (integers and floating-point numbers), except for the special value `NaN` as defined by the [IEEE Standard 754](https://ieeexplore.ieee.org/document/4610935). All non-numeric data types MUST also return `true`, including arrays that contain `NaN` values.",
     "categories": [
         "comparison",
         "math > constants"
diff --git a/is_valid.json b/is_valid.json
index 82bfe485..51924de4 100644
--- a/is_valid.json
+++ b/is_valid.json
@@ -1,7 +1,7 @@
 {
     "id": "is_valid",
     "summary": "Value is valid data",
-    "description": "Checks whether the specified value `x` is valid. The following values are considered valid:\n\n* Any finite numerical value (integers and floating-point numbers). The definition of finite numbers follows the [IEEE Standard 754](https://ieeexplore.ieee.org/document/4610935) and excludes the special value `NaN` (not a number).\n* Any other value that is not a no-data value according to ``is_nodata()``. Thus, all arrays, objects and strings are valid, regardless of their content.",
     "categories": [
         "comparison"
     ],
diff --git a/ln.json b/ln.json
index 0b9fa6c3..de31161d 100644
--- a/ln.json
+++ b/ln.json
@@ -1,7 +1,7 @@
 {
     "id": "ln",
     "summary": "Natural logarithm",
-    "description": "The natural logarithm is the logarithm to the base *e* of the number `x`, which equals to using the *log* process with the base set to *e*. 
The natural logarithm is the inverse function of taking *e* to the power x.\n\nThe no-data value `null` is passed through.\n\nThe computations follow [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229) whenever the processing environment supports it. Therefore, `ln(0)` results in ±infinity if the processing environment supports it or otherwise an exception is thrown.", "categories": [ "math > exponential & logarithmic" ], diff --git a/load_collection.json b/load_collection.json index 89f30daf..2abf003f 100644 --- a/load_collection.json +++ b/load_collection.json @@ -1,7 +1,7 @@ { "id": "load_collection", "summary": "Load a collection", - "description": "Loads a collection from the current back-end by its id and returns it as processable data cube. The data that is added to the data cube can be restricted with the additional `spatial_extent`, `temporal_extent`, `bands` and `properties`.\n\n**Remarks:**\n\n* The bands (and all dimensions that specify nominal dimension labels) are expected to be ordered as specified in the metadata if the `bands` parameter is set to `null`.\n* If no additional parameter is specified this would imply that the whole data set is expected to be loaded. Due to the large size of many data sets this is not recommended and may be optimized by back-ends to only load the data that is actually required after evaluating subsequent processes such as filters. This means that the pixel values should be processed only after the data has been limited to the required extents and as a consequence also to a manageable size.", + "description": "Loads a collection from the current back-end by its id and returns it as a processable data cube. The data that is added to the data cube can be restricted with the additional `spatial_extent`, `temporal_extent`, `bands` and `properties`.\n\n**Remarks:**\n\n* The bands (and all dimensions that specify nominal dimension labels) are expected to be ordered as specified in the metadata if the `bands` parameter is set to `null`.\n* If no additional parameter is specified this would imply that the whole data set is expected to be loaded. Due to the large size of many data sets, this is not recommended and may be optimized by back-ends to only load the data that is actually required after evaluating subsequent processes such as filters. This means that the pixel values should be processed only after the data has been limited to the required extent and as a consequence also to a manageable size.", "categories": [ "cubes", "import" @@ -156,7 +156,7 @@ }, { "name": "bands", - "description": "Only adds the specified bands into the data cube so that bands that don't match the list of band names are not available. Applies to all dimensions of type `bands`.\n\nEither the unique band name (metadata field `name` in bands) or one of the common band names (metadata field `common_name` in bands) can be specified. If unique band name and common name conflict, the unique band name has higher priority.\n\nThe order of the specified array defines the order of the bands in the data cube. f multiple bands match a common name, all matched bands are included in the original order.", + "description": "Only adds the specified bands into the data cube so that bands that don't match the list of band names are not available. Applies to all dimensions of type `bands`.\n\nEither the unique band name (metadata field `name` in bands) or one of the common band names (metadata field `common_name` in bands) can be specified. 
If the unique band name and the common name conflict, the unique band name has a higher priority.\n\nThe order of the specified array defines the order of the bands in the data cube. If multiple bands match a common name, all matched bands are included in the original order.",
         "schema": [
             {
                 "type": "array",
diff --git a/log.json b/log.json
index e964961d..89500837 100644
--- a/log.json
+++ b/log.json
@@ -1,7 +1,7 @@
 {
     "id": "log",
     "summary": "Logarithm to a base",
-    "description": "Logarithm to the base `base` of the number `x` is defined to be the inverse function of taking b to the power of x.\n\nThe no-data value `null` is passed through and therefore gets propagated if any of the arguments is `null`.\n\nThe computations follow [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229) whenever the processing environment supports it. Therefore, `log(0, 2)` results in ±infinity if the processing environment supports it or otherwise an error is thrown.",
+    "description": "Logarithm to the base `base` of the number `x` is defined to be the inverse function of taking `base` to the power of `x`.\n\nThe no-data value `null` is passed through and therefore gets propagated if any of the arguments is `null`.\n\nThe computations follow [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229) whenever the processing environment supports it. Therefore, `log(0, 2)` results in ±infinity if the processing environment supports it or otherwise an exception is thrown.",
     "categories": [
         "math > exponential & logarithmic"
     ],
diff --git a/mask.json b/mask.json
index 95da2b66..d15964ad 100644
--- a/mask.json
+++ b/mask.json
@@ -1,7 +1,7 @@
 {
     "id": "mask",
     "summary": "Apply a raster mask",
-    "description": "Applies a mask to a raster data cube. To apply a vector mask use ``mask_polygon()``.\n\nA mask is a raster data cube for which corresponding pixels among `data` and `mask` are compared and those pixels in `data` are replaced whose pixels in `mask` are non-zero (for numbers) or `true` (for boolean values). The pixel values are replaced with the value specified for `replacement`, which defaults to `null` (no data).\n\nThe data cubes have to be compatible so that each dimension in mask must also be available in the raster data cube with the same name, type, reference system, resolution and labels. Dimensions can be missing in the mask with the result that the mask is applied for each label of the missing dimension in the data cube. The process fails if there's an incompatibility found between the raster data cube and the mask.",
+    "description": "Applies a mask to a raster data cube. To apply a vector mask use ``mask_polygon()``.\n\nA mask is a raster data cube for which corresponding pixels among `data` and `mask` are compared and those pixels in `data` are replaced whose pixels in `mask` are non-zero (for numbers) or `true` (for boolean values). The pixel values are replaced with the value specified for `replacement`, which defaults to `null` (no data).\n\nThe data cubes have to be compatible so that each dimension in the mask must also be available in the raster data cube with the same name, type, reference system, resolution and labels. Dimensions can be missing in the mask with the result that the mask is applied for each label of the missing dimension in the data cube. The process fails if there's an incompatibility found between the raster data cube and the mask.",
     "categories": [
         "masks"
     ],
@@ -16,7 +16,7 @@
     },
     {
         "name": "mask",
-        "description": "A mask as raster data cube. 
Every pixel in `data` must have a corresponding element in `mask`.", + "description": "A mask as a raster data cube. Every pixel in `data` must have a corresponding element in `mask`.", "schema": { "type": "object", "subtype": "raster-cube" diff --git a/mask_polygon.json b/mask_polygon.json index 20affdfe..763ac80b 100644 --- a/mask_polygon.json +++ b/mask_polygon.json @@ -1,7 +1,7 @@ { "id": "mask_polygon", "summary": "Apply a polygon mask", - "description": "Applies a polygon mask to a raster data cube. To apply a raster mask use ``mask()``.\n\nAll pixels for which the point at the pixel center **does not** intersect with any polygon (as defined in the Simple Features standard by the OGC) are replaced. This behaviour can be inverted by setting the parameter `inside` to `true`.\n\nThe pixel values are replaced with the value specified for `replacement`, which defaults to `null` (no data). No data values in `data` will be left untouched by the masking operation.", + "description": "Applies a polygon mask to a raster data cube. To apply a raster mask use ``mask()``.\n\nAll pixels for which the point at the pixel center **does not** intersect with any polygon (as defined in the Simple Features standard by the OGC) are replaced. This behavior can be inverted by setting the parameter `inside` to `true`.\n\nThe pixel values are replaced with the value specified for `replacement`, which defaults to `null` (no data). No data values in `data` will be left untouched by the masking operation.", "categories": [ "cubes", "masks" diff --git a/max.json b/max.json index 71a13b61..5a5b7f71 100644 --- a/max.json +++ b/max.json @@ -1,7 +1,7 @@ { "id": "max", "summary": "Maximum value", - "description": "Computes the largest value of an array of numbers, which is is equal to the first element of a sorted (i.e., ordered) version the array.\n\nAn array without non-`null` elements resolves always with `null`.", + "description": "Computes the largest value of an array of numbers, which is equal to the first element of a sorted (i.e., ordered) version of the array.\n\nAn array without non-`null` elements resolves always with `null`.", "categories": [ "math", "reducer" diff --git a/median.json b/median.json index 2d5e49c1..3bc87d5a 100644 --- a/median.json +++ b/median.json @@ -1,7 +1,7 @@ { "id": "median", "summary": "Statistical median", - "description": "The statistical median of an array of numbers is the value separating the higher half from the lower half of the data.\n\nAn array without non-`null` elements resolves always with `null`.\n\n**Remarks:**\n\n* For a symmetric arrays, the result is equal to the ``mean()``.\n* The median can also be calculated by computing the ``quantiles()`` with a probability of *0.5*.", + "description": "The statistical median of an array of numbers is the value separating the higher half from the lower half of the data.\n\nAn array without non-`null` elements resolves always with `null`.\n\n**Remarks:**\n\n* For symmetric arrays, the result is equal to the ``mean()``.\n* The median can also be calculated by computing the ``quantiles()`` with a probability of *0.5*.", "categories": [ "math", "reducer" diff --git a/meta/subtype-schemas.json b/meta/subtype-schemas.json index d94ed806..fb9d6817 100644 --- a/meta/subtype-schemas.json +++ b/meta/subtype-schemas.json @@ -8,7 +8,7 @@ "type": "string", "subtype": "band-name", "title": "Band Name", - "description": "Either a unique band name (metadata field `name`) or a [common band 
name](https://github.com/radiantearth/stac-spec/tree/v0.9.0/extensions/eo#common-band-names) (metadata field `common_name`) available in the data cube. If unique band name and common name conflict, the unique band name has higher priority." + "description": "Either a unique band name (metadata field `name`) or a [common band name](https://github.com/radiantearth/stac-spec/tree/v0.9.0/extensions/eo#common-band-names) (metadata field `common_name`) available in the data cube. If the unique band name and the common name conflict, the unique band name has a higher priority." }, "bounding-box": { "type": "object", @@ -332,7 +332,7 @@ "type": "array", "subtype": "temporal-intervals", "title": "Multiple temporal intervals", - "description": "Left-closed temporal intervals, which are allowed to overlap. Formatted as array of two-element arrays, each being an array with subtype `temporal-interval`.", + "description": "Left-closed temporal intervals, which are allowed to overlap. Formatted as an array of two-element arrays, each being an array with subtype `temporal-interval`.", "items": { "$ref": "#/definitions/temporal-interval" }, @@ -387,13 +387,13 @@ "type": "string", "subtype": "udf-runtime", "title": "UDF runtime", - "description": "The name of an UDF runtime." + "description": "The name of a UDF runtime." }, "udf-runtime-version": { "type": "string", "subtype": "udf-runtime-version", "title": "UDF Runtime version", - "description": "The version of an UDF runtime." + "description": "The version of a UDF runtime." }, "uri": { "type": "string", diff --git a/min.json b/min.json index 9d55fb8a..ce161c95 100644 --- a/min.json +++ b/min.json @@ -1,7 +1,7 @@ { "id": "min", "summary": "Minimum value", - "description": "Computes the smallest value of an array of numbers, which is is equal to the last element of a sorted (i.e., ordered) version the array.\n\nAn array without non-`null` elements resolves always with `null`.", + "description": "Computes the smallest value of an array of numbers, which is equal to the last element of a sorted (i.e., ordered) version of the array.\n\nAn array without non-`null` elements resolves always with `null`.", "categories": [ "math", "reducer" diff --git a/mod.json b/mod.json index 5409df05..ca709386 100644 --- a/mod.json +++ b/mod.json @@ -1,14 +1,14 @@ { "id": "mod", "summary": "Modulo", - "description": "Remainder after division of `x` by `y` for both integers and floating-point numbers.\n\nThe result of a modulo operation has the sign of the divisor. The handling regarding the sign of the result [differs between programming languages](https://en.wikipedia.org/wiki/Modulo_operation#In_programming_languages) and needs careful consideration to avoid unexpected results.\n\nThe no-data value `null` is passed through and therefore gets propagated if any of the arguments is `null`. A modulo by zero results in ±infinity if the processing environment supports it. Otherwise a `DivisionByZero` error must the thrown.", + "description": "Remainder after a division of `x` by `y` for both integers and floating-point numbers.\n\nThe result of a modulo operation has the sign of the divisor. The handling regarding the sign of the result [differs between programming languages](https://en.wikipedia.org/wiki/Modulo_operation#In_programming_languages) and needs careful consideration to avoid unexpected results.\n\nThe no-data value `null` is passed through and therefore gets propagated if any of the arguments is `null`. 
A modulo by zero results in ±infinity if the processing environment supports it. Otherwise, a `DivisionByZero` exception must be thrown.",
     "categories": [
         "math"
     ],
     "parameters": [
         {
             "name": "x",
-            "description": "A number to be used as dividend.",
+            "description": "A number to be used as the dividend.",
             "schema": {
                 "type": [
                     "number",
@@ -18,7 +18,7 @@
         },
         {
             "name": "y",
-            "description": "A number to be used as divisor.",
+            "description": "A number to be used as the divisor.",
             "schema": {
                 "type": [
                     "number",
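The sign rule is a common pitfall, so a few evaluated calls may help; each line follows the `{"arguments": …, "returns": …}` notation used for examples throughout these specifications:

    {"arguments": {"x": 27, "y": 5}, "returns": 2}
    {"arguments": {"x": -27, "y": 5}, "returns": 3}
    {"arguments": {"x": 27, "y": -5}, "returns": -3}
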
diff --git a/ndvi.json b/ndvi.json
index 39a49f6b..b31634d5 100644
--- a/ndvi.json
+++ b/ndvi.json
@@ -1,7 +1,7 @@
 {
     "id": "ndvi",
     "summary": "Normalized Difference Vegetation Index",
-    "description": "Computes the Normalized Difference Vegetation Index (NDVI). The NDVI is computed as *(nir - red) / (nir + red)*.\n\nThe `data` parameter expects a raster data cube with a dimension of type `bands` or a `DimensionAmbiguous` error is thrown otherwise. By default, the dimension must have at least two bands with the common names `red` and `nir` assigned or the user need to specify the parameters `nir` and `red`. Otherwise either the error `NirBandAmbiguous` or `RedBandAmbiguous` is thrown. The common names for each band are specified in the collection's band metadata and are *not* equal to the band names.\n\nBy default, the dimension of type `bands` is dropped by this process. To keep the dimension specify a new band name in the parameter `target_band`. This adds a new dimension label with the specified name to the dimension, which can be used to access the computed values. If a band with the specified name exists, a `BandExists` is thrown.\n\nThis process is very similar to the process ``normalized_difference()``, but determines the bands automatically based on the common names (`red`/`nir`) specified in the metadata.",
+    "description": "Computes the Normalized Difference Vegetation Index (NDVI). The NDVI is computed as *(nir - red) / (nir + red)*.\n\nThe `data` parameter expects a raster data cube with a dimension of type `bands` or a `DimensionAmbiguous` exception is thrown otherwise. By default, the dimension must have at least two bands with the common names `red` and `nir` assigned. Otherwise, the user has to specify the parameters `nir` and `red`. If neither is the case, either the exception `NirBandAmbiguous` or `RedBandAmbiguous` is thrown. The common names for each band are specified in the collection's band metadata and are *not* equal to the band names.\n\nBy default, the dimension of type `bands` is dropped by this process. To keep the dimension specify a new band name in the parameter `target_band`. This adds a new dimension label with the specified name to the dimension, which can be used to access the computed values. If a band with the specified name exists, a `BandExists` exception is thrown.\n\nThis process is very similar to the process ``normalized_difference()``, but determines the bands automatically based on the common names (`red`/`nir`) specified in the metadata.",
     "categories": [
         "math > indices",
         "vegetation indices"
     ],
@@ -17,7 +17,7 @@
     },
     {
         "name": "nir",
-        "description": "The name of the NIR band. Defaults to the band that has the common name `nir` assigned.\n\nEither the unique band name (metadata field `name` in bands) or one of the common band names (metadata field `common_name` in bands) can be specified. If unique band name and common name conflict, the unique band name has higher priority.",
+        "description": "The name of the NIR band. Defaults to the band that has the common name `nir` assigned.\n\nEither the unique band name (metadata field `name` in bands) or one of the common band names (metadata field `common_name` in bands) can be specified. If the unique band name and the common name conflict, the unique band name has a higher priority.",
         "schema": {
             "type": "string",
             "subtype": "band-name"
@@ -27,7 +27,7 @@
     },
     {
         "name": "red",
-        "description": "The name of the red band. Defaults to the band that has the common name `red` assigned.\n\nEither the unique band name (metadata field `name` in bands) or one of the common band names (metadata field `common_name` in bands) can be specified. If unique band name and common name conflict, the unique band name has higher priority.",
+        "description": "The name of the red band. Defaults to the band that has the common name `red` assigned.\n\nEither the unique band name (metadata field `name` in bands) or one of the common band names (metadata field `common_name` in bands) can be specified. If the unique band name and the common name conflict, the unique band name has a higher priority.",
         "schema": {
             "type": "string",
             "subtype": "band-name"
@@ -52,7 +52,7 @@
         }
     ],
     "returns": {
-        "description": "A raster data cube containing the computed NDVI values. The structure of the data cube differs depending on the value passed to `target_band`:\n\n* `target_band` is `null`: The data cube does not contain the dimension of type `bands` any more, the number of dimensions decreases by one. The dimension properties (name, type, labels, reference system and resolution) for all other dimensions remain unchanged.\n* `target_band` is a string: The data cube keeps the same dimensions. The dimension properties remain unchanged, but the number of dimension labels for the dimension of type `bands` increases by one. The additional label is named as specified in `target_band`.",
+        "description": "A raster data cube containing the computed NDVI values. The structure of the data cube differs depending on the value passed to `target_band`:\n\n* `target_band` is `null`: The data cube does not contain the dimension of type `bands` anymore, the number of dimensions decreases by one. The dimension properties (name, type, labels, reference system and resolution) for all other dimensions remain unchanged.\n* `target_band` is a string: The data cube keeps the same dimensions. The dimension properties remain unchanged, but the number of dimension labels for the dimension of type `bands` increases by one. The additional label is named as specified in `target_band`.",
         "schema": {
             "type": "object",
             "subtype": "raster-cube"
         }
     },
     "exceptions": {
         "NirBandAmbiguous": {
-            "message": "The NIR band can't be resolved, please specify a band name."
+            "message": "The NIR band can't be resolved, please specify the specific NIR band name."
         },
         "RedBandAmbiguous": {
-            "message": "The red band can't be resolved, please specify a band name."
+            "message": "The red band can't be resolved, please specify the specific red band name."
         },
         "DimensionAmbiguous": {
             "message": "dimension of type `bands` is not available or is ambiguous."
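To illustrate the `target_band` behavior described above, an `ndvi` call that keeps the `bands` dimension could be sketched as follows; the `from_node` reference and the label `ndvi` are placeholders:

    {
        "process_id": "ndvi",
        "arguments": {
            "data": {"from_node": "load1"},
            "target_band": "ndvi"
        }
    }
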
diff --git a/neq.json b/neq.json
index 76ab5e70..ec3908e4 100644
--- a/neq.json
+++ b/neq.json
@@ -1,7 +1,7 @@
 {
     "id": "neq",
     "summary": "Not equal to comparison",
-    "description": "Compares whether `x` is *not* strictly equal to `y`.\n\n**Remarks:**\n\n* Data types MUST be checked strictly, for example a string with the content *1* is not equal to the number *1*. Nevertheless, an integer *1* is equal to a floating point number *1.0* as `integer` is a sub-type of `number`.\n* If any operand is `null`, the return value is `null`. Therefore, `neq(null, null)` returns `null` instead of `false`.\n* If any operand is an array or object, the return value is `false`.\n* Strings are expected to be encoded in UTF-8 by default.\n* Temporal strings MUST be compared differently than other strings and MUST NOT be compared based on their string representation due to different possible representations. For example, the UTC time zone representation `Z` has the same meaning as `+00:00`.",
+    "description": "Compares whether `x` is *not* strictly equal to `y`.\n\n**Remarks:**\n\n* Data types MUST be checked strictly. For example, a string with the content *1* is not equal to the number *1*. Nevertheless, an integer *1* is equal to a floating-point number *1.0* as `integer` is a sub-type of `number`.\n* If any operand is `null`, the return value is `null`. Therefore, `neq(null, null)` returns `null` instead of `false`.\n* If any operand is an array or object, the return value is `false`.\n* Strings are expected to be encoded in UTF-8 by default.\n* Temporal strings MUST be compared differently than other strings and MUST NOT be compared based on their string representation due to different possible representations. For example, the time zone representation `Z` (for UTC) has the same meaning as `+00:00`.",
     "categories": [
         "texts",
         "comparison"
@@ -23,7 +23,7 @@
     },
     {
         "name": "delta",
-        "description": "Only applicable for comparing two numbers. If this optional parameter is set to a positive non-zero number the non-equality of two numbers is checked against a delta value. This is especially useful to circumvent problems with floating point inaccuracy in machine-based computation.\n\nThis option is basically an alias for the following computation: `gt(abs(minus([x, y]), delta)`",
+        "description": "Only applicable for comparing two numbers. If this optional parameter is set to a positive non-zero number the non-equality of two numbers is checked against a delta value. This is especially useful to circumvent problems with floating-point inaccuracy in machine-based computation.\n\nThis option is basically an alias for the following computation: `gt(abs(minus([x, y])), delta)`",
         "schema": {
             "type": [
                 "number",
diff --git a/order.json b/order.json
index 72976dd1..0002d467 100644
--- a/order.json
+++ b/order.json
@@ -1,7 +1,7 @@
 {
     "id": "order",
     "summary": "Create a permutation",
-    "description": "Computes a permutation which allows rearranging the data into ascending or descending order. 
In other words, this process computes the ranked (sorted) element positions in the original list.\n\n**Remarks:**\n\n* The positions in the result are zero-based.\n* Ties will be left in their original ordering.\n* Temporal strings can *not* be compared based on their string representation due to the time zone/time-offset representations.", "categories": [ "arrays", "sorting" @@ -50,7 +50,7 @@ }, { "name": "nodata", - "description": "Controls the handling of no-data values (`null`). By default they are removed. If `true`, missing values in the data are put last; if `false`, they are put first.", + "description": "Controls the handling of no-data values (`null`). By default, they are removed. If set to `true`, missing values in the data are put last; if set to `false`, they are put first.", "schema": { "type": [ "boolean", diff --git a/proposals/aggregate_spatial_binary.json b/proposals/aggregate_spatial_binary.json index 3cac1ae9..02514030 100644 --- a/proposals/aggregate_spatial_binary.json +++ b/proposals/aggregate_spatial_binary.json @@ -1,7 +1,7 @@ { "id": "aggregate_spatial_binary", "summary": "Zonal statistics for geometries by binary aggregation", - "description": "Aggregates statistics for one or more geometries (e.g. zonal statistics for polygons) over the spatial dimensions. This process consecutively passes a pair of values to the reducer. This may be better suited especially for UDFs in case the number of values gets too large to be processed at once. In contrast, ``aggregate_spatial()`` passes a list of values.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThe data cube must have been reduced to only contain two raster dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a time series. Otherwise this process fails with the `TooManyDimensions` error.\n\nThe number of total and valid pixels is returned together with the calculated values.", + "description": "Aggregates statistics for one or more geometries (e.g. zonal statistics for polygons) over the spatial dimensions. This process consecutively passes a pair of values to the reducer. This may be better suited especially for UDFs in case the number of values gets too large to be processed at once. In contrast, ``aggregate_spatial()`` passes a list of values.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThe data cube must have been reduced to only contain two raster dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a time series. 
Otherwise, this process fails with the `TooManyDimensions` exception.\n\nThe number of total and valid pixels is returned together with the calculated values.", "categories": [ "cubes", "aggregate & resample" @@ -83,7 +83,7 @@ } ], "returns": { - "description": "A vector data cube with the computed results and restricted to the bounds of the geometries.\n\nThe computed value is used for the dimension with the name that was specified in the parameter `target_dimension`.\n\nThe computation also stores information about the total count of pixels (valid + invalid pixels) and the number of valid pixels (see ``is_valid()``) for each geometry. These values are added as new dimension with a dimension name derived from `target_dimension` by adding the suffix `_meta`. The new dimension has the dimension labels `total_count` and `valid_count`.", + "description": "A vector data cube with the computed results and restricted to the bounds of the geometries.\n\nThe computed value is used for the dimension with the name that was specified in the parameter `target_dimension`.\n\nThe computation also stores information about the total count of pixels (valid + invalid pixels) and the number of valid pixels (see ``is_valid()``) for each geometry. These values are added as a new dimension with a dimension name derived from `target_dimension` by adding the suffix `_meta`. The new dimension has the dimension labels `total_count` and `valid_count`.", "schema": { "type": "object", "subtype": "vector-cube" diff --git a/proposals/aggregate_spatial_window.json b/proposals/aggregate_spatial_window.json index 6c2a27e0..10bb011a 100644 --- a/proposals/aggregate_spatial_window.json +++ b/proposals/aggregate_spatial_window.json @@ -1,7 +1,7 @@ { "id": "aggregate_spatial_window", "summary": "Zonal statistics for rectangular windows", - "description": "Aggregates statistics over the horizontal spatial dimensions (axes `x` and `y`) of the data cube.\n\nThe pixel grid for the axes `x` and `y` is divided into non-overlapping windows with the size specified in the parameter `size`. If the number of values for the axes `x` and `y` is not a multiple of the corresponding window size, the behaviour specified in the parameters `boundary` and `align` is applied.\nFor each of these windows, the reducer process computes the result.", + "description": "Aggregates statistics over the horizontal spatial dimensions (axes `x` and `y`) of the data cube.\n\nThe pixel grid for the axes `x` and `y` is divided into non-overlapping windows with the size specified in the parameter `size`. If the number of values for the axes `x` and `y` is not a multiple of the corresponding window size, the behavior specified in the parameters `boundary` and `align` is applied.\nFor each of these windows, the reducer process computes the result.", "categories": [ "cubes", "aggregate & resample" @@ -53,7 +53,7 @@ }, { "name": "size", - "description": "Window sizes in pixels along the horizontal spatial dimensions.\n\nThe first value corresponds to the `x` axis, the second values corresponds to the `y` axis.", + "description": "Window size in pixels along the horizontal spatial dimensions.\n\nThe first value corresponds to the `x` axis, the second value corresponds to the `y` axis.", "schema": { "type": "array", "minItems": 2, @@ -66,7 +66,7 @@ }, { "name": "boundary", - "description": "Behaviour to apply if the number of values for the axes `x` and `y` is not a multiple of the corresponding value in the `size` parameter. 
Options are:\n\n- `pad` (default): pad the data cube with the no-data value `null` to fit the required window size.\n\n- `trim`: trim the data cube to fit the required window size.\n\nSet the parameter `align` to specifies to which corner the data is aligned to.",
+            "description": "Behavior to apply if the number of values for the axes `x` and `y` is not a multiple of the corresponding value in the `size` parameter. Options are:\n\n- `pad` (default): pad the data cube with the no-data value `null` to fit the required window size.\n\n- `trim`: trim the data cube to fit the required window size.\n\nSet the parameter `align` to specify to which corner the data is aligned.",
             "schema": {
                 "type": "string",
                 "enum": [
diff --git a/proposals/cummax.json b/proposals/cummax.json
index 050eea11..69580459 100644
--- a/proposals/cummax.json
+++ b/proposals/cummax.json
@@ -1,7 +1,7 @@
 {
     "id": "cummax",
     "summary": "Cumulative maxima",
-    "description": "Finds cumulative maxima of an array of numbers. Every computed element is equal to the bigger one between current element and the previously computed element. The returned array and the input array have always the same length.\n\nBy default, no-data values are skipped, but stay in the result. Setting the `ignore_nodata` flag to `true` makes that once a no-data value / `null` is reached all following elements are set to `null` in the result.",
+    "description": "Finds cumulative maxima of an array of numbers. Every computed element is equal to the bigger one between the current element and the previously computed element. The returned array and the input array have always the same length.\n\nBy default, no-data values are skipped, but stay in the result. Setting the `ignore_nodata` flag to `false` means that once a no-data value (`null`) is reached, all following elements are set to `null` in the result.",
     "categories": [
         "math > cumulative"
     ],
@@ -22,7 +22,7 @@
     },
     {
         "name": "ignore_nodata",
-        "description": "Indicates whether no-data values are ignored or not. Ignores them by default. Setting this flag to `false` considers no-data values so that `null` is set for all the following elements.",
+        "description": "Indicates whether no-data values are ignored or not and ignores them by default. Setting this flag to `false` considers no-data values so that `null` is set for all the following elements.",
         "schema": {
             "type": "boolean"
        },
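A short worked example for `cummax` and the no-data handling just described (the values are arbitrary): with the default `ignore_nodata = true`, no-data values are skipped but kept in place; with `false`, they propagate to all following elements:

    {"arguments": {"data": [1, 3, null, 2, 5]}, "returns": [1, 3, null, 3, 5]}
    {"arguments": {"data": [1, 3, null, 2, 5], "ignore_nodata": false}, "returns": [1, 3, null, null, null]}
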
diff --git a/proposals/cummin.json b/proposals/cummin.json
index a8df7407..d84612e3 100644
--- a/proposals/cummin.json
+++ b/proposals/cummin.json
@@ -1,7 +1,7 @@
 {
     "id": "cummin",
     "summary": "Cumulative minima",
-    "description": "Finds cumulative minima of an array of numbers. Every computed element is equal to the smaller one between current element and the previously computed element. The returned array and the input array have always the same length.\n\nBy default, no-data values are skipped, but stay in the result. Setting the `ignore_nodata` flag to `true` makes that once a no-data value / `null` is reached all following elements are set to `null` in the result.",
+    "description": "Finds cumulative minima of an array of numbers. Every computed element is equal to the smaller one between the current element and the previously computed element. The returned array and the input array have always the same length.\n\nBy default, no-data values are skipped, but stay in the result. Setting the `ignore_nodata` flag to `false` means that once a no-data value (`null`) is reached, all following elements are set to `null` in the result.",
     "categories": [
         "math > cumulative"
     ],
@@ -22,7 +22,7 @@
     },
     {
         "name": "ignore_nodata",
-        "description": "Indicates whether no-data values are ignored or not. Ignores them by default. Setting this flag to `false` considers no-data values so that `null` is set for all the following elements.",
+        "description": "Indicates whether no-data values are ignored or not and ignores them by default. Setting this flag to `false` considers no-data values so that `null` is set for all the following elements.",
         "schema": {
             "type": "boolean"
         },
diff --git a/proposals/cumproduct.json b/proposals/cumproduct.json
index c639ea98..f5e6ae1b 100644
--- a/proposals/cumproduct.json
+++ b/proposals/cumproduct.json
@@ -1,7 +1,7 @@
 {
     "id": "cumproduct",
     "summary": "Cumulative products",
-    "description": "Computes cumulative products of an array of numbers. Every computed element is equal to the product of current and all previous values. The returned array and the input array have always the same length.\n\nBy default, no-data values are skipped, but stay in the result. Setting the `ignore_nodata` flag to `true` makes that once a no-data value / `null` is reached all following elements are set to `null` in the result.",
+    "description": "Computes cumulative products of an array of numbers. Every computed element is equal to the product of the current and all previous values. The returned array and the input array have always the same length.\n\nBy default, no-data values are skipped, but stay in the result. Setting the `ignore_nodata` flag to `false` means that once a no-data value (`null`) is reached, all following elements are set to `null` in the result.",
     "categories": [
         "math > cumulative"
     ],
@@ -22,7 +22,7 @@
     },
     {
         "name": "ignore_nodata",
-        "description": "Indicates whether no-data values are ignored or not. Ignores them by default. Setting this flag to `false` considers no-data values so that `null` is set for all the following elements.",
+        "description": "Indicates whether no-data values are ignored or not and ignores them by default. Setting this flag to `false` considers no-data values so that `null` is set for all the following elements.",
         "schema": {
             "type": "boolean"
         },
diff --git a/proposals/cumsum.json b/proposals/cumsum.json
index f9ce26f6..717999e5 100644
--- a/proposals/cumsum.json
+++ b/proposals/cumsum.json
@@ -1,7 +1,7 @@
 {
     "id": "cumsum",
     "summary": "Cumulative sums",
-    "description": "Computes cumulative sums of an array of numbers. Every computed element is equal to the sum of current and all previous values. The returned array and the input array have always the same length.\n\nBy default, no-data values are skipped, but stay in the result. Setting the `ignore_nodata` flag to `true` makes that once a no-data value / `null` is reached all following elements are set to `null` in the result.",
+    "description": "Computes cumulative sums of an array of numbers. Every computed element is equal to the sum of the current and all previous values. The returned array and the input array have always the same length.\n\nBy default, no-data values are skipped, but stay in the result. 
Setting the `ignore_nodata` flag to `false` means that once a no-data value (`null`) is reached, all following elements are set to `null` in the result.",
     "categories": [
         "math > cumulative"
     ],
@@ -22,7 +22,7 @@
     },
     {
         "name": "ignore_nodata",
-        "description": "Indicates whether no-data values are ignored or not. Ignores them by default. Setting this flag to `false` considers no-data values so that `null` is set for all the following elements.",
+        "description": "Indicates whether no-data values are ignored or not and ignores them by default. Setting this flag to `false` considers no-data values so that `null` is set for all the following elements.",
         "schema": {
             "type": "boolean"
         },
diff --git a/proposals/filter_labels.json b/proposals/filter_labels.json
index ebbae261..5cfb0264 100644
--- a/proposals/filter_labels.json
+++ b/proposals/filter_labels.json
@@ -55,7 +55,7 @@
     },
     {
         "name": "dimension",
-        "description": "The name of the dimension to filter on. Fails with a `DimensionNotAvailable` error if the specified dimension does not exist.",
+        "description": "The name of the dimension to filter on. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.",
         "schema": {
             "type": "string"
         }
diff --git a/proposals/load_result.json b/proposals/load_result.json
index 31505511..6641ebdd 100644
--- a/proposals/load_result.json
+++ b/proposals/load_result.json
@@ -1,7 +1,7 @@
 {
     "id": "load_result",
     "summary": "Load batch job results",
-    "description": "Loads batch job results by job id from the local user workspace / data store. The job must have been stored by the authenticated user on the back-end currently connected to.",
+    "description": "Loads batch job results by job id from the local user workspace/datastore. The job must have been stored by the authenticated user on the back-end currently connected to.",
     "categories": [
         "cubes",
         "import"
diff --git a/proposals/load_uploaded_files.json b/proposals/load_uploaded_files.json
index cf6735c5..5b92f36b 100644
--- a/proposals/load_uploaded_files.json
+++ b/proposals/load_uploaded_files.json
@@ -1,7 +1,7 @@
 {
     "id": "load_uploaded_files",
     "summary": "Load files from the user workspace",
-    "description": "Loads one or more user-uploaded files from the local user workspace / data store and returns them as a single data cube. The files must have been stored by the authenticated user on the back-end currently connected to.",
+    "description": "Loads one or more user-uploaded files from the local user workspace/datastore and returns them as a single data cube. The files must have been stored by the authenticated user on the back-end currently connected to.",
     "categories": [
         "cubes",
         "import"
@@ -10,7 +10,7 @@
     "parameters": [
         {
             "name": "paths",
-            "description": "The files to read. Folders can't be specified, instead specify all files. An error is thrown if a file can't be read.",
+            "description": "The files to read. Folders can't be specified, specify all files instead. An exception is thrown if a file can't be read.",
             "schema": {
                 "type": "array",
                 "subtype": "file-paths",
diff --git a/proposals/reduce_dimension_binary.json b/proposals/reduce_dimension_binary.json
index 7a70d9e0..e64142ed 100644
--- a/proposals/reduce_dimension_binary.json
+++ b/proposals/reduce_dimension_binary.json
@@ -57,7 +57,7 @@
     },
     {
         "name": "dimension",
-        "description": "The name of the dimension over which to reduce. 
Fails with a `DimensionNotAvailable` error if the specified dimension does not exist.", + "description": "The name of the dimension over which to reduce. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.", "schema": { "type": "string" } diff --git a/proposals/resample_cube_temporal.json b/proposals/resample_cube_temporal.json index c7e95124..0a335feb 100644 --- a/proposals/resample_cube_temporal.json +++ b/proposals/resample_cube_temporal.json @@ -62,7 +62,7 @@ }, { "name": "dimension", - "description": "The name of the temporal dimension to resample, which must exist with this name in both data cubes. If the dimension is not set or is set to `null`, the data cube is expected to only have one temporal dimension. Fails with a `TooManyDimensions` error if it has more dimensions. Fails with a `DimensionNotAvailable` error if the specified dimension does not exist.", + "description": "The name of the temporal dimension to resample, which must exist with this name in both data cubes. If the dimension is not set or is set to `null`, the data cube is expected to only have one temporal dimension. Fails with a `TooManyDimensions` exception if it has more dimensions. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.", "schema": { "type": [ "string", @@ -83,7 +83,7 @@ } ], "returns": { - "description": "A raster data cube with the same dimensions and the same dimension properties (name, type, labels, reference system and resolution) for all non-temporal dimensions. For the temporal dimension the name and type remain unchanged, but the reference system changes and the labels and resolution may change.", + "description": "A raster data cube with the same dimensions and the same dimension properties (name, type, labels, reference system and resolution) for all non-temporal dimensions. For the temporal dimension, the name and type remain unchanged, but the reference system changes and the labels and resolution may change.", "schema": { "type": "object", "subtype": "raster-cube" diff --git a/proposals/run_udf_externally.json b/proposals/run_udf_externally.json index d9bec53f..99918408 100644 --- a/proposals/run_udf_externally.json +++ b/proposals/run_udf_externally.json @@ -1,7 +1,7 @@ { "id": "run_udf_externally", "summary": "Run an externally hosted UDF container", - "description": "Runs a compatible UDF container that is either externally hosted by a service provider or running on a local machine of the user. The UDF container must follow the [openEO UDF specification](https://openeo.org/documentation/1.0/udfs.html).\n\nThe referenced UDF service can be executed in several processes such as ``aggregate_spatial()``, ``apply()``, ``apply_dimension()`` and ``reduce_dimension()``. In this case an array is passed instead of a raster data cube. The user must ensure that the data is properly passed as an array so that the UDF can make sense of it.", + "description": "Runs a compatible UDF container that is either externally hosted by a service provider or running on a local machine of the user. The UDF container must follow the [openEO UDF specification](https://openeo.org/documentation/1.0/udfs.html).\n\nThe referenced UDF service can be executed in several processes such as ``aggregate_spatial()``, ``apply()``, ``apply_dimension()`` and ``reduce_dimension()``. In this case, an array is passed instead of a raster data cube. 
The user must ensure that the data is properly passed as an array so that the UDF can make sense of it.", "categories": [ "cubes", "import", @@ -11,7 +11,7 @@ "parameters": [ { "name": "data", - "description": "The data to be passed to the UDF as array or raster data cube.", + "description": "The data to be passed to the UDF as an array or raster data cube.", "schema": [ { "title": "Raster data cube", @@ -52,7 +52,7 @@ } ], "returns": { - "description": "The data processed by the UDF service.\n\n* Returns a raster data cube, if a raster data cube is passed for `data`. Details on the dimensions and dimension properties (name, type, labels, reference system and resolution) depend on the UDF.\n* If an array is passed for `data`, the returned value can be of any data type, but is exactly what the UDF returns.", + "description": "The data processed by the UDF service.\n\n* Returns a raster data cube if a raster data cube is passed for `data`. Details on the dimensions and dimension properties (name, type, labels, reference system and resolution) depend on the UDF.\n* If an array is passed for `data`, the returned value can be of any data type, but is exactly what the UDF returns.", "schema": [ { "title": "Raster data cube", diff --git a/quantiles.json b/quantiles.json index 91b8756d..b91b02a0 100644 --- a/quantiles.json +++ b/quantiles.json @@ -1,7 +1,7 @@ { "id": "quantiles", "summary": "Quantiles", - "description": "Calculates quantiles, which are cut points dividing the range of a probability distribution into either\n\n* intervals corresponding to the given `probabilities` or\n* (nearly) equal-sized intervals (q-quantiles based on the parameter `q`).\n\nEither the parameter `probabilites` or `q` must be specified, otherwise the `QuantilesParameterMissing` exception is thrown. If both parameters are set the `QuantilesParameterConflict` exception is thrown.", + "description": "Calculates quantiles, which are cut points dividing the range of a probability distribution into either\n\n* intervals corresponding to the given `probabilities` or\n* (nearly) equal-sized intervals (q-quantiles based on the parameter `q`).\n\nEither the parameter `probabilities` or `q` must be specified, otherwise the `QuantilesParameterMissing` exception is thrown. If both parameters are set the `QuantilesParameterConflict` exception is thrown.", "categories": [ "math" ], @@ -34,7 +34,7 @@ }, { "name": "q", - "description": "A number of intervals to calculate quantiles for. Calculates q-quantiles with (nearly) equal-sized intervals.", + "description": "Intervals to calculate quantiles for. Calculates q-quantiles with (nearly) equal-sized intervals.", "schema": { "type": "integer", "minimum": 2 @@ -52,7 +52,7 @@ } ], "returns": { - "description": "An array with the computed quantiles. The list has either\n\n* as many elements as the given list of `probabilities` had or\n* *`q`-1* elements.\n\nIf the input array is empty the resulting array is filled with as many `null` values as required according to the list above. For an example, see the 'Empty array example'.", + "description": "An array with the computed quantiles. The list has either\n\n* as many elements as the given list of `probabilities` had or\n* *`q`-1* elements.\n\nIf the input array is empty the resulting array is filled with as many `null` values as required according to the list above. 
See the 'Empty array' example below.",
         "schema": {
             "type": "array",
             "items": {
@@ -154,7 +154,7 @@
             ]
         },
         {
-            "title": "Empty array example",
+            "title": "Empty array",
             "arguments": {
                 "data": [],
                 "probabilities": [
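Two evaluated calls make the list-length rule concrete (the data values are arbitrary): `q` = 2 yields *q*-1 = 1 cut point, the median, and an empty input yields one `null` per requested probability:

    {"arguments": {"data": [2, 4, 4, 4, 4, 5, 5, 7, 9], "q": 2}, "returns": [4]}
    {"arguments": {"data": [], "probabilities": [0.1, 0.5]}, "returns": [null, null]}
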
diff --git a/reduce_dimension.json b/reduce_dimension.json
index 67e1f84b..9c8dcaed 100644
--- a/reduce_dimension.json
+++ b/reduce_dimension.json
@@ -53,7 +53,7 @@
     },
     {
         "name": "dimension",
-        "description": "The name of the dimension over which to reduce. Fails with a `DimensionNotAvailable` error if the specified dimension does not exist.",
+        "description": "The name of the dimension over which to reduce. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.",
         "schema": {
             "type": "string"
         }
diff --git a/rename_dimension.json b/rename_dimension.json
index 6de0e3c2..15c46410 100644
--- a/rename_dimension.json
+++ b/rename_dimension.json
@@ -16,14 +16,14 @@
     },
     {
         "name": "source",
-        "description": "The current name of the dimension. Fails with a `DimensionNotAvailable` error if the specified dimension does not exist.",
+        "description": "The current name of the dimension. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.",
         "schema": {
             "type": "string"
         }
     },
     {
         "name": "target",
-        "description": "A new Name for the dimension. Fails with a `DimensionExists` error if a dimension with the specified name exists.",
+        "description": "A new name for the dimension. Fails with a `DimensionExists` exception if a dimension with the specified name exists.",
         "schema": {
             "type": "string"
         }
diff --git a/rename_labels.json b/rename_labels.json
index 458a8a57..91610104 100644
--- a/rename_labels.json
+++ b/rename_labels.json
@@ -1,7 +1,7 @@
 {
     "id": "rename_labels",
     "summary": "Rename dimension labels",
-    "description": "Renames the labels of the specified dimension in the data cube from `source` to `target`.\n\nIf the array for the source labels is empty (the default), the dimension labels are expected to be enumerated with zero-based numbering (0,1,2,3,...) so that the dimension labels directly map to the indices of the array specified for the parameter `target`. If the dimension labels are not enumerated and the `target` parameter is not specified, a `LabelsNotEnumerated` is thrown. The number of source and target labels must be equal, otherwise the error `LabelMismatch` is thrown.\n\nThis process doesn't change the order of the labels and their corresponding data.",
+    "description": "Renames the labels of the specified dimension in the data cube from `source` to `target`.\n\nIf the array for the source labels is empty (the default), the dimension labels are expected to be enumerated with zero-based numbering (0,1,2,3,...) so that the dimension labels directly map to the indices of the array specified for the parameter `target`. If the dimension labels are not enumerated and the `target` parameter is not specified, the `LabelsNotEnumerated` exception is thrown. The number of the source and target labels must be equal. Otherwise, the exception `LabelMismatch` is thrown.\n\nThis process doesn't change the order of the labels and their corresponding data.",
     "categories": [
         "cubes"
     ],
@@ -23,7 +23,7 @@
     },
     {
         "name": "target",
-        "description": "The new names for the labels. The dimension labels in the data cube are expected to be enumerated, if the parameter `target` is not specified. If a target dimension label already exists in the data cube, a `LabelExists` error is thrown.",
+        "description": "The new names for the labels. The dimension labels in the data cube are expected to be enumerated if the parameter `target` is not specified. If a target dimension label already exists in the data cube, a `LabelExists` exception is thrown.",
         "schema": {
             "type": "array",
             "items": {
@@ -40,7 +40,7 @@
     },
     {
         "name": "source",
-        "description": "The names of the labels as they are currently in the data cube. The array defines an unsorted and potentially incomplete list of labels that should be renamed to the names available in the corresponding array elements in the parameter `target`. If one of the source dimension labels doesn't exist, a `LabelNotAvailable` error is thrown. By default, the array is empty so that the dimension labels in the data cube are expected to be enumerated.",
+        "description": "The names of the labels as they are currently in the data cube. The array defines an unsorted and potentially incomplete list of labels that should be renamed to the names available in the corresponding array elements in the parameter `target`. If one of the source dimension labels doesn't exist, the `LabelNotAvailable` exception is thrown. By default, the array is empty so that the dimension labels in the data cube are expected to be enumerated.",
         "schema": {
             "type": "array",
             "items": {
diff --git a/resample_spatial.json b/resample_spatial.json
index 29c53fd2..3a2bfed3 100644
--- a/resample_spatial.json
+++ b/resample_spatial.json
@@ -20,7 +20,7 @@
     "description": "Resamples the data cube to the target resolution, which can be specified either as separate values for x and y or as a single value for both axes. Specified in the units of the target projection. Doesn't change the resolution by default (`0`).",
     "schema": [
         {
-            "description": "A single number used as resolution for both x and y.",
+            "description": "A single number used as the resolution for both x and y.",
             "type": "number",
             "minimum": 0
         },
diff --git a/round.json b/round.json
index 1b95a444..dbdd1323 100644
--- a/round.json
+++ b/round.json
@@ -1,7 +1,7 @@
 {
     "id": "round",
     "summary": "Round to a specified precision",
-    "description": "Rounds a real number `x` to specified precision `p`.\n\nIf the fractional part of `x` is halfway between two integers, one of which is even and the other odd, then the even number is returned.\nThis behaviour follows [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229). This kind of rounding is also called \"round to nearest (even)\" or \"banker's rounding\". It minimizes rounding errors that result from consistently rounding a midpoint value in a single direction.\n\nThe no-data value `null` is passed through and therefore gets propagated.",
+    "description": "Rounds a real number `x` to a specified precision `p`.\n\nIf the fractional part of `x` is halfway between two integers, one of which is even and the other odd, then the even number is returned.\nThis behavior follows [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229). This kind of rounding is also called \"round to nearest (even)\" or \"banker's rounding\". 
It minimizes rounding errors that result from consistently rounding a midpoint value in a single direction.\n\nThe no-data value `null` is passed through and therefore gets propagated.",
     "categories": [
         "math > rounding"
     ],
diff --git a/run_udf.json b/run_udf.json
index 7a5b071d..82e10902 100644
--- a/run_udf.json
+++ b/run_udf.json
@@ -1,7 +1,7 @@
 {
     "id": "run_udf",
-    "summary": "Run an UDF",
-    "description": "Runs an UDF in one of the supported runtime environments.\n\nThe process can either:\n\n1. load and run a locally stored UDF from a file in the workspace of the authenticated user. The path to the UDF file must be relative to the root directory of the user's workspace.\n2. fetch and run a remotely stored and published UDF by absolute URI, for example from [openEO Hub](https://hub.openeo.org)).\n3. run the source code specified inline as string.\n\nThe loaded UDF can be executed in several processes such as ``aggregate_spatial()``, ``apply()``, ``apply_dimension()`` and ``reduce_dimension()``. In this case an array is passed instead of a raster data cube. The user must ensure that the data is properly passed as an array so that the UDF can make sense of it.",
+    "summary": "Run a UDF",
+    "description": "Runs a UDF in one of the supported runtime environments.\n\nThe process can either:\n\n1. load and run a locally stored UDF from a file in the workspace of the authenticated user. The path to the UDF file must be relative to the root directory of the user's workspace.\n2. fetch and run a remotely stored and published UDF by absolute URI, for example from [openEO Hub](https://hub.openeo.org).\n3. run the source code specified inline as a string.\n\nThe loaded UDF can be executed in several processes such as ``aggregate_spatial()``, ``apply()``, ``apply_dimension()`` and ``reduce_dimension()``. In this case, an array is passed instead of a raster data cube. The user must ensure that the data is properly passed as an array so that the UDF can make sense of it.",
     "categories": [
         "cubes",
         "import",
@@ -10,7 +10,7 @@
     "parameters": [
         {
             "name": "data",
-            "description": "The data to be passed to the UDF as array or raster data cube.",
+            "description": "The data to be passed to the UDF as an array or raster data cube.",
             "schema": [
                 {
                     "title": "Raster data cube",
@@ -33,16 +33,16 @@
         },
         {
             "name": "udf",
-            "description": "Either source code, an absolute URL or a path to an UDF script.",
+            "description": "Either source code, an absolute URL or a path to a UDF script.",
            "schema": [
                {
-                    "description": "URI to an UDF",
+                    "description": "URI to a UDF",
                     "type": "string",
                     "format": "uri",
                     "subtype": "uri"
                 },
                 {
-                    "description": "Path to an UDF uploaded to the server.",
+                    "description": "Path to a UDF uploaded to the server.",
                     "type": "string",
                     "subtype": "file-path"
                 },
@@ -55,7 +55,7 @@
         },
         {
             "name": "runtime",
-            "description": "An UDF runtime identifier available at the back-end.",
+            "description": "A UDF runtime identifier available at the back-end.",
             "schema": {
                 "type": "string",
                 "subtype": "udf-runtime"
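For reference, a `run_udf` node in a process graph could be sketched as follows; the URL and the runtime name are placeholders and depend on what the back-end offers:

    {
        "process_id": "run_udf",
        "arguments": {
            "data": {"from_parameter": "data"},
            "udf": "https://example.com/udf.py",
            "runtime": "Python"
        }
    }
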
STAC-compatible metadata should be stored with the processed data.\n\nCalling this process may be rejected by back-ends in the context of secondary web services.", + "description": "Saves processed data to the local user workspace/datastore of the authenticated user. This process aims to be compatible with GDAL/OGR formats and options. STAC-compatible metadata should be stored with the processed data.\n\nCalling this process may be rejected by back-ends in the context of secondary web services.", "categories": [ "cubes", "export" diff --git a/sort.json b/sort.json index ff510a90..14e56e9d 100644 --- a/sort.json +++ b/sort.json @@ -1,7 +1,7 @@ { "id": "sort", "summary": "Sort data", - "description": "Sorts an array into ascending (default) or descending order.\n\n**Remarks:**\n\n* Ties will be left in their original ordering.\n* Temporal strings can *not* be compared based on their string representation due to the time zone / time-offset representations.", + "description": "Sorts an array into ascending (default) or descending order.\n\n**Remarks:**\n\n* Ties will be left in their original ordering.\n* Temporal strings can *not* be compared based on their string representation due to the time zone/time-offset representations.", "categories": [ "arrays", "sorting" @@ -50,7 +50,7 @@ }, { "name": "nodata", - "description": "Controls the handling of no-data values (`null`). By default they are removed. If `true`, missing values in the data are put last; if `false`, they are put first.", + "description": "Controls the handling of no-data values (`null`). By default, they are removed. If set to `true`, missing values in the data are put last; if set to `false`, they are put first.", "schema": { "type": [ "boolean", diff --git a/text_merge.json b/text_merge.json index 13405728..ff6e07c2 100644 --- a/text_merge.json +++ b/text_merge.json @@ -1,7 +1,7 @@ { "id": "text_merge", - "summary": "Concatenate elements to a string", - "description": "Merges string representations of a set of elements together to a single string, with the separator between each element.", + "summary": "Concatenate elements to a single text", + "description": "Merges text representations (also known as *string*) of a set of elements to a single text, having the separator between each element.", "categories": [ "texts" ], From a9b900c757e510714246e51a773f9173684977c1 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 8 Jan 2021 14:36:21 +0100 Subject: [PATCH 036/109] Better spelling and grammar --- proposals/array_create.json | 4 ++-- proposals/array_set.json | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/proposals/array_create.json b/proposals/array_create.json index 099ea855..82911c73 100644 --- a/proposals/array_create.json +++ b/proposals/array_create.json @@ -1,7 +1,7 @@ { "id": "array_create", "summary": "Create an array", - "description": "Creates a new array, which by default is empty.\n\nBy providing the parameter `length`, the array can be pre-filled with the given number of elements. By default each element is set to `null` unless another value is specified through the parameter `value`.", + "description": "Creates a new array, which by default is empty.\n\nBy providing the parameter `length`, the array can be pre-filled with the given number of elements. 
By default, each element is set to `null` unless another value is specified through the parameter `value`.", "categories": [ "arrays" ], @@ -19,7 +19,7 @@ }, { "name": "value", - "description": "The value to fill the array with in case `length` is greater than 0. Defaults to `null` (no data).", + "description": "The value to fill the array with, in case `length` is greater than 0. Defaults to `null` (no data).", "optional": true, "default": null, "schema": { diff --git a/proposals/array_set.json b/proposals/array_set.json index f8cb8176..2d4bc378 100644 --- a/proposals/array_set.json +++ b/proposals/array_set.json @@ -29,7 +29,7 @@ }, { "name": "index", - "description": "The index of the element to insert the value(s) before. To insert after the last element, specify a number of elements in the array. If the index is greater than the number of elements, array is filled with `null` (no-data) values until the index and the values are added starting at the index given. The number of elements can be retrieved with the process ``count()`` having the parameter `condition` set to true.", + "description": "The index of the element to insert the value(s) before. To insert after the last element, specify the number of elements in the array. If the index is greater than the number of elements, the array is filled with `null` (no-data) values until the index is reached and then the values are added starting at the index given. The number of elements can be retrieved with the process ``count()`` having the parameter `condition` set to true.", "optional": true, "default": 0, "schema": { @@ -154,7 +154,7 @@ ] }, { - "description": "Replace a single values with two values in the array.", + "description": "Replace a single value with two values in the array.", "arguments": { "data": [ "a", From 361d060e502ae1c7cdda61b4ca944ab64aae0cac Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 11 Jan 2021 12:56:33 +0100 Subject: [PATCH 037/109] Fixed array_set examples --- proposals/array_set.json | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/proposals/array_set.json b/proposals/array_set.json index 2d4bc378..8f813ec7 100644 --- a/proposals/array_set.json +++ b/proposals/array_set.json @@ -58,9 +58,10 @@ }, "examples": [ { - "description": "With the default values for the optional parameters, values are added at the beginning of the array.", + "description": "With the default values for the optional parameters, the first element in the array gets replaced.", "arguments": { "data": [ + "z", "b", "c" ], @@ -74,6 +75,24 @@ "c" ] }, + { + "description": "Add a value at the beginning of the array.", + "arguments": { + "data": [ + "b", + "c" + ], + "values": [ + "a" + ], + "length": 0 + }, + "returns": [ + "a", + "b", + "c" + ] + }, { "description": "Add a value at the end of the array.", "arguments": { @@ -84,7 +103,8 @@ "values": [ "c" ], - "index": 2 + "index": 2, + "length": 0 }, "returns": [ "a", @@ -137,6 +157,7 @@ "data": [ "a", "b", + 3, 4 ], "values": [ @@ -184,7 +205,7 @@ "c" ], "values": [], - "index": 1 + "index": 2 }, "returns": [ "a", From 19206e6a2051b9fd742890cb3e6ac260f807134a Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 11 Jan 2021 16:01:11 +0100 Subject: [PATCH 038/109] array_merge -> array_concat, array_set -> array_modify --- CHANGELOG.md | 4 ++-- .../{array_merge.json => array_concat.json} | 20 +++---------------- .../{array_set.json => array_modify.json} | 2 +- 3 files changed, 6 insertions(+), 20 deletions(-) rename 
proposals/{array_merge.json => array_concat.json} (64%) rename proposals/{array_set.json => array_modify.json} (99%) diff --git a/CHANGELOG.md b/CHANGELOG.md index bc048253..f32f6c87 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,10 +8,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - New processes in proposal state + - `array_concat` - `array_create` - `array_find_label` - - `array_merge` - - `array_set` + - `array_modify` - `is_infinite` - `nan` - Added return value details (property `returns`) for the schemas with the subtype `process-graph`. [API#350](https://github.com/Open-EO/openeo-api/issues/350) diff --git a/proposals/array_merge.json b/proposals/array_concat.json similarity index 64% rename from proposals/array_merge.json rename to proposals/array_concat.json index 07e00858..d095973a 100644 --- a/proposals/array_merge.json +++ b/proposals/array_concat.json @@ -1,7 +1,7 @@ { - "id": "array_merge", + "id": "array_concat", "summary": "Merge two arrays", - "description": "Merges two arrays into a single array by appending the second array to the first array. Array labels get discarded from both arrays before merging.", + "description": "Concatenates two arrays into a single array by appending the second array to the first array. Array labels get discarded from both arrays before merging.", "categories": [ "arrays" ], @@ -39,7 +39,7 @@ }, "examples": [ { - "description": "Merges two arrays containing different data type.", + "description": "Concatenates two arrays containing different data types.", "arguments": { "array1": [ "a", "b" ], "array2": [ 1, 2 ] }, "returns": [ "a", "b", 1, 2 ] - }, - { - "description": "Duplicates an array by merging it with an empty array. Labels will be removed so this could also be used to just remove array labels.", - "arguments": { - "array1": [ null, 1.23 ], - "array2": [] - }, - "returns": [ null, 1.23 ] } ] } \ No newline at end of file diff --git a/proposals/array_set.json b/proposals/array_modify.json similarity index 99% rename from proposals/array_set.json rename to proposals/array_modify.json index 8f813ec7..9a06a663 100644 --- a/proposals/array_set.json +++ b/proposals/array_modify.json @@ -1,5 +1,5 @@ { - "id": "array_set", + "id": "array_modify", "summary": "Change the content of an array (insert, remove, update)", "description": "Allows to insert into, remove from or update an array.\n\nAll labels get discarded and the array indices are always a sequence of numbers with the step size of 1 and starting at 0.", "categories": [ 
The number of elements can be retrieved with the process ``count()`` having the parameter `condition` set to true.", - "optional": true, - "default": 0, "schema": { - "type": "integer" + "type": "integer", + "minimum": 0 } }, { @@ -58,16 +57,17 @@ }, "examples": [ { - "description": "With the default values for the optional parameters, the first element in the array gets replaced.", + "description": "Replace a single value in the array.", "arguments": { "data": [ - "z", - "b", + "a", + "d", "c" ], "values": [ - "a" - ] + "b" + ], + "index": 1 }, "returns": [ "a", @@ -76,34 +76,41 @@ ] }, { - "description": "Add a value at the beginning of the array.", + "description": "Replace multiple values in the array.", "arguments": { "data": [ + "a", "b", - "c" + 4, + 5 ], "values": [ - "a" + 1, + 2, + 3 ], - "length": 0 + "index": 0, + "length": 2 }, "returns": [ - "a", - "b", - "c" + 1, + 2, + 3, + 4, + 5 ] }, { - "description": "Add a value at the end of the array.", + "description": "Insert a value to the array at a given position.", "arguments": { "data": [ "a", - "b" + "c" ], "values": [ - "c" + "b" ], - "index": 2, + "index": 1, "length": 0 }, "returns": [ @@ -132,69 +139,6 @@ "e" ] }, - { - "description": "Update a single value in the array.", - "arguments": { - "data": [ - "a", - "d", - "c" - ], - "values": [ - "b" - ], - "index": 1 - }, - "returns": [ - "a", - "b", - "c" - ] - }, - { - "description": "Update multiple values in the array.", - "arguments": { - "data": [ - "a", - "b", - 3, - 4 - ], - "values": [ - 1, - 2 - ], - "index": 0, - "length": 2 - }, - "returns": [ - 1, - 2, - 3, - 4 - ] - }, - { - "description": "Replace a single value with two values in the array.", - "arguments": { - "data": [ - "a", - null, - "d" - ], - "values": [ - "b", - "c" - ], - "index": 1 - }, - "returns": [ - "a", - "b", - "c", - "d" - ] - }, { "description": "Remove a single value from the array.", "arguments": { @@ -224,6 +168,7 @@ "c" ], "values": [], + "index": 0, "length": 2 }, "returns": [ From 7db0a9b8bdc0828c0435d317ecd76fe65482833b Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 11 Jan 2021 17:40:06 +0100 Subject: [PATCH 040/109] merge_cubes: order of dimension labels after merge (#219) * `merge_cubes`: Clarified the dimension label order after the merge. #212 * Fix spelling (inherit -> inherent) --- CHANGELOG.md | 3 ++- merge_cubes.json | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3d86630..57b3763b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,8 +27,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `array_element`: Clarify that `ArrayNotLabeled` exception is thrown when parameter `label` is specified and the given array is not labeled. - `array_apply`, `array_element`, `array_filter`: Added the `minimum: 0` constraint to all schemas describing zero-based indices (parameter `index`). - `array_labels`: Clarified the accepted data type for array elements passed to the parameter `data`. +- `merge_cubes`: Clarified the dimension label order after the merge. [#212](https://github.com/Open-EO/openeo-processes/issues/212) - Fixed typos, grammar issues and other spelling-related issues in many of the processes. 
-- Examples `array_contains_nodata` and `array_find_nodata` +- Fixed the examples `array_contains_nodata` and `array_find_nodata` ## 1.0.0 - 2020-07-31 diff --git a/merge_cubes.json b/merge_cubes.json index b26960c6..1ec724d1 100644 --- a/merge_cubes.json +++ b/merge_cubes.json @@ -1,7 +1,7 @@ { "id": "merge_cubes", "summary": "Merge two data cubes", - "description": "The data cubes have to be compatible. A merge operation without overlap should be reversible with (a set of) filter operations for each of the two cubes. The process performs the join on overlapping dimensions, with the same name and type.\n\nAn overlapping dimension has the same name, type, reference system and resolution in both dimensions, but can have different labels. One of the dimensions can have different labels, for all other dimensions the labels must be equal. If data overlaps, the parameter `overlap_resolver` must be specified to resolve the overlap.\n\n**Examples for merging two data cubes:**\n\n1. Data cubes with the dimensions `x`, `y`, `t` and `bands` have the same dimension labels in `x`,`y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first cube and `B3` and `B4`. An overlap resolver is *not needed*. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has four dimension labels: `B1`, `B2`, `B3`, `B4`.\n2. Data cubes with the dimensions `x`, `y`, `t` and `bands` have the same dimension labels in `x`,`y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first data cube and `B2` and `B3` for the second. An overlap resolver is *required* to resolve overlap in band `B2`. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has three dimension labels: `B1`, `B2`, `B3`.\n3. Data cubes with the dimensions `x`, `y` and `t` have the same dimension labels in `x`,`y` and `t`. There are two options:\n 1. Keep the overlapping values separately in the merged data cube: An overlap resolver is *not needed*, but for each data cube you need to add a new dimension using ``add_dimension()``. The new dimensions must be equal, except that the labels for the new dimensions must differ by name. The merged data cube has the same dimensions and labels as the original data cubes, plus the dimension added with ``add_dimension()``, which has the two dimension labels after the merge.\n 2. Combine the overlapping values into a single value: An overlap resolver is *required* to resolve the overlap for all pixels. The merged data cube has the same dimensions and labels as the original data cubes, but all pixel values have been processed by the overlap resolver.\n4. Merging a data cube with dimensions `x`, `y`, `t` with another cube with dimensions `x`, `y` will join on the `x`, `y` dimension, so the lower dimension cube is merged with each time step in the higher dimensional cube. This can for instance be used to apply a digital elevation model to a spatiotemporal data cube.", + "description": "The data cubes have to be compatible. A merge operation without overlap should be reversible with (a set of) filter operations for each of the two cubes. The process performs the join on overlapping dimensions, with the same name and type.\n\nAn overlapping dimension has the same name, type, reference system and resolution in both dimensions, but can have different labels. One of the dimensions can have different labels, for all other dimensions the labels must be equal. 
If data overlaps, the parameter `overlap_resolver` must be specified to resolve the overlap.\n\n**Examples for merging two data cubes:**\n\n1. Data cubes with the dimensions `x`, `y`, `t` and `bands` have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first cube and `B3` and `B4`. An overlap resolver is *not needed*. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has four dimension labels: `B1`, `B2`, `B3`, `B4`.\n2. Data cubes with the dimensions `x`, `y`, `t` and `bands` have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first data cube and `B2` and `B3` for the second. An overlap resolver is *required* to resolve overlap in band `B2`. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has three dimension labels: `B1`, `B2`, `B3`.\n3. Data cubes with the dimensions `x`, `y` and `t` have the same dimension labels in `x`, `y` and `t`. There are two options:\n 1. Keep the overlapping values separately in the merged data cube: An overlap resolver is *not needed*, but for each data cube you need to add a new dimension using ``add_dimension()``. The new dimensions must be equal, except that the labels for the new dimensions must differ by name. The merged data cube has the same dimensions and labels as the original data cubes, plus the dimension added with ``add_dimension()``, which has the two dimension labels after the merge.\n 2. Combine the overlapping values into a single value: An overlap resolver is *required* to resolve the overlap for all pixels. The merged data cube has the same dimensions and labels as the original data cubes, but all pixel values have been processed by the overlap resolver.\n4. Merging a data cube with dimensions `x`, `y`, `t` with another cube with dimensions `x`, `y` will join on the `x`, `y` dimension, so the lower dimension cube is merged with each time step in the higher dimensional cube. This can for instance be used to apply a digital elevation model to a spatiotemporal data cube.\n\nAfter the merge, the dimensions with a natural/inherent label order (with a reference system this is each spatial and temporal dimensions) still have all dimension labels sorted. 
For other dimensions where there is no inherent order, including bands, the dimension labels keep the order in which they are present in the original data cubes and the dimension labels of `cube2` are appended to the dimension labels of `cube1`.", "categories": [ "cubes" ], From a9b900c757e510714246e51a773f9173684977c1 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 1 Feb 2021 13:35:50 +0100 Subject: [PATCH 041/109] Improvements wrt spell checking and formulas --- CHANGELOG.md | 1 + add.json | 2 +- aggregate_spatial.json | 2 +- aggregate_temporal_period.json | 2 +- anomaly.json | 2 +- arccos.json | 2 +- arcosh.json | 2 +- arcsin.json | 2 +- arctan.json | 2 +- arctan2.json | 2 +- array_element.json | 4 +- arsinh.json | 2 +- artanh.json | 2 +- divide.json | 2 +- e.json | 2 +- filter_bands.json | 2 +- linear_scale_range.json | 2 +- ln.json | 2 +- merge_cubes.json | 2 +- multiply.json | 2 +- ndvi.json | 4 +- normalized_difference.json | 2 +- proposals/aggregate_spatial_binary.json | 2 +- proposals/load_result.json | 2 +- proposals/load_uploaded_files.json | 2 +- proposals/resample_cube_temporal.json | 2 +- quantiles.json | 4 +- resample_cube_spatial.json | 2 +- resample_spatial.json | 2 +- save_result.json | 2 +- sqrt.json | 2 +- subtract.json | 2 +- tests/.words | 49 ++++++++++--------------- 33 files changed, 54 insertions(+), 64 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 57b3763b..997a76bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added `proposals` folder for experimental processes. Experimental processes are not covered by the CHANGELOG and MAY include breaking changes! [#196](https://github.com/Open-EO/openeo-processes/issues/196), [#207](https://github.com/Open-EO/openeo-processes/issues/207), [PSC#8](https://github.com/Open-EO/PSC/issues/8) - Moved the experimental processes `aggregate_spatial_binary`, `reduce_dimension_binary` and `run_udf_externally` to the proposals. - Moved the rarely used and implemented processes `cummax`, `cummin`, `cumproduct`, `cumsum`, `debug`, `filter_labels`, `load_result`, `load_uploaded_files`, `resample_cube_temporal` to the proposals. +- Exception messages have been aligned to always use ` instead of '. Tooling could render them with CommonMark. ### Fixed - Clarify that the `condition` parameter for `array_filter` works also on indices and labels. 
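(Editorial aside: a minimal sketch of the `merge_cubes` label-ordering rule clarified in PATCH 040 above. `merge_labels` and its arguments are illustrative helpers under that reading of the rule, not part of the openEO API.)

```python
# Dimensions with an inherent order (spatial/temporal) keep their labels
# sorted after the merge; for unordered dimensions such as bands, cube2's
# labels are appended to cube1's labels in their original order.
def merge_labels(labels1, labels2, has_inherent_order):
    merged = list(labels1) + [label for label in labels2 if label not in labels1]
    return sorted(merged) if has_inherent_order else merged

# Temporal dimension: the merged labels end up sorted.
assert merge_labels(["2020-02", "2020-03"], ["2020-01"], True) == ["2020-01", "2020-02", "2020-03"]
# Band dimension: cube2's labels are simply appended.
assert merge_labels(["B1", "B2"], ["B3", "B4"], False) == ["B1", "B2", "B3", "B4"]
```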
diff --git a/add.json b/add.json index 58db5f2e..ac65541c 100644 --- a/add.json +++ b/add.json @@ -1,7 +1,7 @@ { "id": "add", "summary": "Addition of two numbers", - "description": "Sums up the two numbers `x` and `y` (*x + y*) and returns the computed sum.\n\nNo-data values are taken into account so that `null` is returned if any element is such a value.\n\nThe computations follow [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229) whenever the processing environment supports it.", + "description": "Sums up the two numbers `x` and `y` (*`x + y`*) and returns the computed sum.\n\nNo-data values are taken into account so that `null` is returned if any element is such a value.\n\nThe computations follow [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229) whenever the processing environment supports it.", "categories": [ "math" ], diff --git a/aggregate_spatial.json b/aggregate_spatial.json index b80a19cc..0bd4c95c 100644 --- a/aggregate_spatial.json +++ b/aggregate_spatial.json @@ -86,7 +86,7 @@ }, "exceptions": { "TooManyDimensions": { - "message": "The number of dimensions must be reduced to three for 'aggregate_spatial'." + "message": "The number of dimensions must be reduced to three for `aggregate_spatial`." } }, "links": [ diff --git a/aggregate_temporal_period.json b/aggregate_temporal_period.json index bbe52cb9..4f4008b1 100644 --- a/aggregate_temporal_period.json +++ b/aggregate_temporal_period.json @@ -18,7 +18,7 @@ }, { "name": "period", - "description": "The time intervals to aggregate. The following pre-defined values are available:\n\n* `hour`: Hour of the day\n* `day`: Day of the year\n* `week`: Week of the year\n* `dekad`: Ten day periods, counted per year with three periods per month (day 1 - 10, 11 - 20 and 21 - end of month). The third dekad of the month can range from 8 to 11 days. For example, the fourth dekad is Feb, 1 - Feb, 10 each year.\n* `month`: Month of the year\n* `season`: Three month periods of the calendar seasons (December - February, March - May, June - August, September - November).\n* `tropical-season`: Six month periods of the tropical seasons (November - April, May - October).\n* `year`: Proleptic years\n* `decade`: Ten year periods ([0-to-9 decade](https://en.wikipedia.org/wiki/Decade#0-to-9_decade)), from a year ending in a 0 to the next year ending in a 9.\n* `decade-ad`: Ten year periods ([1-to-0 decade](https://en.wikipedia.org/wiki/Decade#1-to-0_decade)) better aligned with the Anno Domini (AD) calendar era, from a year ending in a 1 to the next year ending in a 0.", + "description": "The time intervals to aggregate. The following pre-defined values are available:\n\n* `hour`: Hour of the day\n* `day`: Day of the year\n* `week`: Week of the year\n* `dekad`: Ten day periods, counted per year with three periods per month (day 1 - 10, 11 - 20 and 21 - end of month). The third dekad of the month can range from 8 to 11 days. 
For example, the fourth dekad is Feb, 1 - Feb, 10 each year.\n* `month`: Month of the year\n* `season`: Three month periods of the calendar seasons (December - February, March - May, June - August, September - November).\n* `tropical-season`: Six month periods of the tropical seasons (November - April, May - October).\n* `year`: Proleptic years\n* `decade`: Ten year periods ([0-to-9 decade](https://en.wikipedia.org/wiki/Decade#0-to-9_decade)), from a year ending in a 0 to the next year ending in a 9.\n* `decade-ad`: Ten year periods ([1-to-0 decade](https://en.wikipedia.org/wiki/Decade#1-to-0_decade)) better aligned with the anno Domini (AD) calendar era, from a year ending in a 1 to the next year ending in a 0.", "schema": { "type": "string", "enum": [ diff --git a/anomaly.json b/anomaly.json index e0e1a3a0..8dee2b68 100644 --- a/anomaly.json +++ b/anomaly.json @@ -25,7 +25,7 @@ }, { "name": "period", - "description": "Specifies the time intervals available in the normals data cube. The following options are available:\n\n* `hour`: Hour of the day\n* `day`: Day of the year\n* `week`: Week of the year\n* `dekad`: Ten day periods, counted per year with three periods per month (day 1 - 10, 11 - 20 and 21 - end of month). The third dekad of the month can range from 8 to 11 days. For example, the fourth dekad is Feb, 1 - Feb, 10 each year.\n* `month`: Month of the year\n* `season`: Three month periods of the calendar seasons (December - February, March - May, June - August, September - November).\n* `tropical-season`: Six month periods of the tropical seasons (November - April, May - October).\n* `year`: Proleptic years\n* `decade`: Ten year periods ([0-to-9 decade](https://en.wikipedia.org/wiki/Decade#0-to-9_decade)), from a year ending in a 0 to the next year ending in a 9.\n* `decade-ad`: Ten year periods ([1-to-0 decade](https://en.wikipedia.org/wiki/Decade#1-to-0_decade)) better aligned with the Anno Domini (AD) calendar era, from a year ending in a 1 to the next year ending in a 0.\n* `single-period` / `climatology-period`: A single period of arbitrary length", + "description": "Specifies the time intervals available in the normals data cube. The following options are available:\n\n* `hour`: Hour of the day\n* `day`: Day of the year\n* `week`: Week of the year\n* `dekad`: Ten day periods, counted per year with three periods per month (day 1 - 10, 11 - 20 and 21 - end of month). The third dekad of the month can range from 8 to 11 days. For example, the fourth dekad is Feb, 1 - Feb, 10 each year.\n* `month`: Month of the year\n* `season`: Three month periods of the calendar seasons (December - February, March - May, June - August, September - November).\n* `tropical-season`: Six month periods of the tropical seasons (November - April, May - October).\n* `year`: Proleptic years\n* `decade`: Ten year periods ([0-to-9 decade](https://en.wikipedia.org/wiki/Decade#0-to-9_decade)), from a year ending in a 0 to the next year ending in a 9.\n* `decade-ad`: Ten year periods ([1-to-0 decade](https://en.wikipedia.org/wiki/Decade#1-to-0_decade)) better aligned with the anno Domini (AD) calendar era, from a year ending in a 1 to the next year ending in a 0.\n* `single-period` / `climatology-period`: A single period of arbitrary length", "schema": { "type": "string", "enum": [ diff --git a/arccos.json b/arccos.json index a75c7300..5ffbce35 100644 --- a/arccos.json +++ b/arccos.json @@ -1,7 +1,7 @@ { "id": "arccos", "summary": "Inverse cosine", - "description": "Computes the arc cosine of `x`. 
The arc cosine is the inverse function of the cosine so that *arccos(cos(x)) = x*.\n\nWorks on radians only.\nThe no-data value `null` is passed through and therefore gets propagated.", + "description": "Computes the arc cosine of `x`. The arc cosine is the inverse function of the cosine so that *`arccos(cos(x)) = x`*.\n\nWorks on radians only.\nThe no-data value `null` is passed through and therefore gets propagated.", "categories": [ "math > trigonometric" ], diff --git a/arcosh.json b/arcosh.json index 6b5dc948..6ed581fe 100644 --- a/arcosh.json +++ b/arcosh.json @@ -1,7 +1,7 @@ { "id": "arcosh", "summary": "Inverse hyperbolic cosine", - "description": "Computes the inverse hyperbolic cosine of `x`. It is the inverse function of the hyperbolic cosine so that *arcosh(cosh(x)) = x*.\n\nWorks on radians only.\nThe no-data value `null` is passed through and therefore gets propagated.", + "description": "Computes the inverse hyperbolic cosine of `x`. It is the inverse function of the hyperbolic cosine so that *`arcosh(cosh(x)) = x`*.\n\nWorks on radians only.\nThe no-data value `null` is passed through and therefore gets propagated.", "categories": [ "math > trigonometric" ], diff --git a/arcsin.json b/arcsin.json index b85e0eaf..e37eb2d3 100644 --- a/arcsin.json +++ b/arcsin.json @@ -1,7 +1,7 @@ { "id": "arcsin", "summary": "Inverse sine", - "description": "Computes the arc sine of `x`. The arc sine is the inverse function of the sine so that *arcsin(sin(x)) = x*.\n\nWorks on radians only.\nThe no-data value `null` is passed through and therefore gets propagated.", + "description": "Computes the arc sine of `x`. The arc sine is the inverse function of the sine so that *`arcsin(sin(x)) = x`*.\n\nWorks on radians only.\nThe no-data value `null` is passed through and therefore gets propagated.", "categories": [ "math > trigonometric" ], diff --git a/arctan.json b/arctan.json index 6c9e2be3..dc8d5a68 100644 --- a/arctan.json +++ b/arctan.json @@ -1,7 +1,7 @@ { "id": "arctan", "summary": "Inverse tangent", - "description": "Computes the arc tangent of `x`. The arc tangent is the inverse function of the tangent so that *arctan(tan(x)) = x*.\n\nWorks on radians only.\nThe no-data value `null` is passed through and therefore gets propagated.", + "description": "Computes the arc tangent of `x`. The arc tangent is the inverse function of the tangent so that *`arctan(tan(x)) = x`*.\n\nWorks on radians only.\nThe no-data value `null` is passed through and therefore gets propagated.", "categories": [ "math > trigonometric" ], diff --git a/arctan2.json b/arctan2.json index 4e7e3311..ca7d507f 100644 --- a/arctan2.json +++ b/arctan2.json @@ -1,7 +1,7 @@ { "id": "arctan2", "summary": "Inverse tangent of two numbers", - "description": "Computes the arc tangent of two numbers `x` and `y`. It is similar to calculating the arc tangent of *y / x*, except that the signs of both arguments are used to determine the quadrant of the result.\n\nWorks on radians only.\nThe no-data value `null` is passed through and therefore gets propagated if any of the arguments is `null`.", + "description": "Computes the arc tangent of two numbers `x` and `y`. 
It is similar to calculating the arc tangent of *`y / x`*, except that the signs of both arguments are used to determine the quadrant of the result.\n\nWorks on radians only.\nThe no-data value `null` is passed through and therefore gets propagated if any of the arguments is `null`.", "categories": [ "math > trigonometric" ], diff --git a/array_element.json b/array_element.json index 495d19c3..8b70a2e5 100644 --- a/array_element.json +++ b/array_element.json @@ -60,10 +60,10 @@ "message": "The array has no element with the specified index or label." }, "ArrayElementParameterMissing": { - "message": "The process 'array_element' requires either the 'index' or 'labels' parameter to be set." + "message": "The process `array_element` requires either the `index` or `label` parameter to be set." }, "ArrayElementParameterConflict": { - "message": "The process 'array_element' only allows that either the 'index' or the 'labels' parameter is set." + "message": "The process `array_element` only allows that either the `index` or the `label` parameter is set." }, "ArrayNotLabeled": { "message": "The array is not a labeled array, but the `label` parameter is set. Use the `index` instead." } }, diff --git a/arsinh.json b/arsinh.json index 1475772f..37384dcd 100644 --- a/arsinh.json +++ b/arsinh.json @@ -1,7 +1,7 @@ { "id": "arsinh", "summary": "Inverse hyperbolic sine", - "description": "Computes the inverse hyperbolic sine of `x`. It is the inverse function of the hyperbolic sine so that *arsinh(sinh(x)) = x*.\n\nWorks on radians only.\nThe no-data value `null` is passed through and therefore gets propagated.", + "description": "Computes the inverse hyperbolic sine of `x`. It is the inverse function of the hyperbolic sine so that *`arsinh(sinh(x)) = x`*.\n\nWorks on radians only.\nThe no-data value `null` is passed through and therefore gets propagated.", "categories": [ "math > trigonometric" ], diff --git a/artanh.json b/artanh.json index add380b0..926b48ea 100644 --- a/artanh.json +++ b/artanh.json @@ -1,7 +1,7 @@ { "id": "artanh", "summary": "Inverse hyperbolic tangent", - "description": "Computes the inverse hyperbolic tangent of `x`. It is the inverse function of the hyperbolic tangent so that *artanh(tanh(x)) = x*.\n\nWorks on radians only.\nThe no-data value `null` is passed through and therefore gets propagated.", + "description": "Computes the inverse hyperbolic tangent of `x`. It is the inverse function of the hyperbolic tangent so that *`artanh(tanh(x)) = x`*.\n\nWorks on radians only.\nThe no-data value `null` is passed through and therefore gets propagated.", "categories": [ "math > trigonometric" ], diff --git a/divide.json b/divide.json index 064836ce..5dd664f1 100644 --- a/divide.json +++ b/divide.json @@ -1,7 +1,7 @@ { "id": "divide", "summary": "Division of two numbers", - "description": "Divides argument `x` by the argument `y` (*x / y*) and returns the computed result.\n\nNo-data values are taken into account so that `null` is returned if any element is such a value.\n\nThe computations follow [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229) whenever the processing environment supports it. Therefore, a division by zero results in ±infinity if the processing environment supports it. 
Otherwise, a `DivisionByZero` exception must the thrown.", + "description": "Divides argument `x` by the argument `y` (*`x / y`*) and returns the computed result.\n\nNo-data values are taken into account so that `null` is returned if any element is such a value.\n\nThe computations follow [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229) whenever the processing environment supports it. Therefore, a division by zero results in ±infinity if the processing environment supports it. Otherwise, a `DivisionByZero` exception must be thrown.", "categories": [ "math" ], diff --git a/e.json b/e.json index 7c1c5ff6..79506861 100644 --- a/e.json +++ b/e.json @@ -1,7 +1,7 @@ { "id": "e", "summary": "Euler's number (e)", - "description": "The real number *e* is a mathematical constant that is the base of the natural logarithm such that *ln(e) = 1*. The numerical value is approximately *2.71828*.", + "description": "The real number *e* is a mathematical constant that is the base of the natural logarithm such that *`ln(e) = 1`*. The numerical value is approximately *2.71828*.", "categories": [ "math > constants", "math > exponential & logarithmic" ], diff --git a/filter_bands.json b/filter_bands.json index 0254ee7e..9d49e3cd 100644 --- a/filter_bands.json +++ b/filter_bands.json @@ -67,7 +67,7 @@ }, "exceptions": { "BandFilterParameterMissing": { - "message": "The process 'filter_bands' requires any of the parameters 'bands', 'common_names' or 'wavelengths' to be set." + "message": "The process `filter_bands` requires any of the parameters `bands`, `common_names` or `wavelengths` to be set." }, "DimensionMissing": { "message": "A band dimension is missing." }, diff --git a/linear_scale_range.json b/linear_scale_range.json index c29978fe..172027c9 100644 --- a/linear_scale_range.json +++ b/linear_scale_range.json @@ -1,7 +1,7 @@ { "id": "linear_scale_range", "summary": "Linear transformation between two ranges", - "description": "Performs a linear transformation between the input and output range.\n\nThe given number in `x` is clipped to the bounds specified in `inputMin` and `inputMax` so that the underlying formula *((x - inputMin) / (inputMax - inputMin)) * (outputMax - outputMin) + outputMin* never returns any value lower than `outputMin` or greater than `outputMax`.\n\nPotential use case include\n\n* scaling values to the 8-bit range (0 - 255) often used for numeric representation of values in one of the channels of the [RGB colour model](https://en.wikipedia.org/wiki/RGB_color_model#Numeric_representations) or\n* calculating percentages (0 - 100).\n\nThe no-data value `null` is passed through and therefore gets propagated.", + "description": "Performs a linear transformation between the input and output range.\n\nThe given number in `x` is clipped to the bounds specified in `inputMin` and `inputMax` so that the underlying formula *`((x - inputMin) / (inputMax - inputMin)) * (outputMax - outputMin) + outputMin`* never returns any value lower than `outputMin` or greater than `outputMax`.\n\nPotential use cases include\n\n* scaling values to the 8-bit range (0 - 255) often used for numeric representation of values in one of the channels of the [RGB colour model](https://en.wikipedia.org/wiki/RGB_color_model#Numeric_representations) or\n* calculating percentages (0 - 100).\n\nThe no-data value `null` is passed through and therefore gets propagated.", "categories": [ "math" ], diff --git a/ln.json b/ln.json index de31161d..e073c7a2 100644 --- a/ln.json +++ b/ln.json @@ -1,7 +1,7 @@ { "id": "ln", 
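(Editorial aside: a worked illustration of the `linear_scale_range` formula quoted above — a sketch under the assumption that clipping happens before the linear transformation, as the description states; the helper below is illustrative and not part of the specification.)

```python
# x is clipped to [input_min, input_max], then rescaled linearly.
def linear_scale_range(x, input_min, input_max, output_min=0.0, output_max=1.0):
    x = min(max(x, input_min), input_max)  # clip to the input range
    return ((x - input_min) / (input_max - input_min)) * (output_max - output_min) + output_min

assert linear_scale_range(1.0, 0, 2, 0, 255) == 127.5  # e.g. scaling to an 8-bit range
assert linear_scale_range(3.0, 0, 2) == 1.0            # values above inputMax are clipped
```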
"summary": "Natural logarithm", - "description": "The natural logarithm is the logarithm to the base *e* of the number `x`, which equals to using the *log* process with the base set to *e*. The natural logarithm is the inverse function of taking *e* to the power x.\n\nThe no-data value `null` is passed through.\n\nThe computations follow [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229) whenever the processing environment supports it. Therefore, `ln(0)` results in ±infinity if the processing environment supports it or otherwise an exception is thrown.", + "description": "The natural logarithm is the logarithm to the base *e* of the number `x`, which equals to using the *log* process with the base set to *e*. The natural logarithm is the inverse function of taking *e* to the power x.\n\nThe no-data value `null` is passed through.\n\nThe computations follow [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229) whenever the processing environment supports it. Therefore, *`ln(0)`* results in ±infinity if the processing environment supports it or otherwise an exception is thrown.", "categories": [ "math > exponential & logarithmic" ], diff --git a/merge_cubes.json b/merge_cubes.json index 1ec724d1..625d64e9 100644 --- a/merge_cubes.json +++ b/merge_cubes.json @@ -1,7 +1,7 @@ { "id": "merge_cubes", "summary": "Merge two data cubes", - "description": "The data cubes have to be compatible. A merge operation without overlap should be reversible with (a set of) filter operations for each of the two cubes. The process performs the join on overlapping dimensions, with the same name and type.\n\nAn overlapping dimension has the same name, type, reference system and resolution in both dimensions, but can have different labels. One of the dimensions can have different labels, for all other dimensions the labels must be equal. If data overlaps, the parameter `overlap_resolver` must be specified to resolve the overlap.\n\n**Examples for merging two data cubes:**\n\n1. Data cubes with the dimensions `x`, `y`, `t` and `bands` have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first cube and `B3` and `B4`. An overlap resolver is *not needed*. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has four dimension labels: `B1`, `B2`, `B3`, `B4`.\n2. Data cubes with the dimensions `x`, `y`, `t` and `bands` have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first data cube and `B2` and `B3` for the second. An overlap resolver is *required* to resolve overlap in band `B2`. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has three dimension labels: `B1`, `B2`, `B3`.\n3. Data cubes with the dimensions `x`, `y` and `t` have the same dimension labels in `x`, `y` and `t`. There are two options:\n 1. Keep the overlapping values separately in the merged data cube: An overlap resolver is *not needed*, but for each data cube you need to add a new dimension using ``add_dimension()``. The new dimensions must be equal, except that the labels for the new dimensions must differ by name. The merged data cube has the same dimensions and labels as the original data cubes, plus the dimension added with ``add_dimension()``, which has the two dimension labels after the merge.\n 2. 
Combine the overlapping values into a single value: An overlap resolver is *required* to resolve the overlap for all pixels. The merged data cube has the same dimensions and labels as the original data cubes, but all pixel values have been processed by the overlap resolver.\n4. Merging a data cube with dimensions `x`, `y`, `t` with another cube with dimensions `x`, `y` will join on the `x`, `y` dimension, so the lower dimension cube is merged with each time step in the higher dimensional cube. This can for instance be used to apply a digital elevation model to a spatiotemporal data cube.\n\nAfter the merge, the dimensions with a natural/inherent label order (with a reference system this is each spatial and temporal dimensions) still have all dimension labels sorted. For other dimensions where there is no inherent order, including bands, the dimension labels keep the order in which they are present in the original data cubes and the dimension labels of `cube2` are appended to the dimension labels of `cube1`.", + "description": "The data cubes have to be compatible. A merge operation without overlap should be reversible with (a set of) filter operations for each of the two cubes. The process performs the join on overlapping dimensions, with the same name and type.\n\nAn overlapping dimension has the same name, type, reference system and resolution in both dimensions, but can have different labels. One of the dimensions can have different labels, for all other dimensions the labels must be equal. If data overlaps, the parameter `overlap_resolver` must be specified to resolve the overlap.\n\n**Examples for merging two data cubes:**\n\n1. Data cubes with the dimensions `x`, `y`, `t` and `bands` have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first cube and `B3` and `B4`. An overlap resolver is *not needed*. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has four dimension labels: `B1`, `B2`, `B3`, `B4`.\n2. Data cubes with the dimensions `x`, `y`, `t` and `bands` have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first data cube and `B2` and `B3` for the second. An overlap resolver is *required* to resolve overlap in band `B2`. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has three dimension labels: `B1`, `B2`, `B3`.\n3. Data cubes with the dimensions `x`, `y` and `t` have the same dimension labels in `x`, `y` and `t`. There are two options:\n 1. Keep the overlapping values separately in the merged data cube: An overlap resolver is *not needed*, but for each data cube you need to add a new dimension using ``add_dimension()``. The new dimensions must be equal, except that the labels for the new dimensions must differ by name. The merged data cube has the same dimensions and labels as the original data cubes, plus the dimension added with ``add_dimension()``, which has the two dimension labels after the merge.\n 2. Combine the overlapping values into a single value: An overlap resolver is *required* to resolve the overlap for all pixels. The merged data cube has the same dimensions and labels as the original data cubes, but all pixel values have been processed by the overlap resolver.\n4. 
Merging a data cube with dimensions `x`, `y`, `t` with another cube with dimensions `x`, `y` will join on the `x`, `y` dimension, so the lower dimension cube is merged with each time step in the higher dimensional cube. This can for instance be used to apply a digital elevation model to a spatio-temporal data cube.\n\nAfter the merge, the dimensions with a natural/inherent label order (with a reference system this is each spatial and temporal dimensions) still have all dimension labels sorted. For other dimensions where there is no inherent order, including bands, the dimension labels keep the order in which they are present in the original data cubes and the dimension labels of `cube2` are appended to the dimension labels of `cube1`.", "categories": [ "cubes" ], diff --git a/multiply.json b/multiply.json index ef0d5cb3..afa88daa 100644 --- a/multiply.json +++ b/multiply.json @@ -1,7 +1,7 @@ { "id": "multiply", "summary": "Multiplication of two numbers", - "description": "Multiplies the two numbers `x` and `y` (*x * y*) and returns the computed product.\n\nNo-data values are taken into account so that `null` is returned if any element is such a value.\n\nThe computations follow [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229) whenever the processing environment supports it.", + "description": "Multiplies the two numbers `x` and `y` (*`x * y`*) and returns the computed product.\n\nNo-data values are taken into account so that `null` is returned if any element is such a value.\n\nThe computations follow [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229) whenever the processing environment supports it.", "categories": [ "math" ], diff --git a/ndvi.json b/ndvi.json index b31634d5..ba5e54fb 100644 --- a/ndvi.json +++ b/ndvi.json @@ -1,7 +1,7 @@ { "id": "ndvi", "summary": "Normalized Difference Vegetation Index", - "description": "Computes the Normalized Difference Vegetation Index (NDVI). The NDVI is computed as *(nir - red) / (nir + red)*.\n\nThe `data` parameter expects a raster data cube with a dimension of type `bands` or a `DimensionAmbiguous` exception is thrown otherwise. By default, the dimension must have at least two bands with the common names `red` and `nir` assigned. Otherwise, the user has to specify the parameters `nir` and `red`. If neither is the case, either the exception `NirBandAmbiguous` or `RedBandAmbiguous` is thrown. The common names for each band are specified in the collection's band metadata and are *not* equal to the band names.\n\nBy default, the dimension of type `bands` is dropped by this process. To keep the dimension specify a new band name in the parameter `target_band`. This adds a new dimension label with the specified name to the dimension, which can be used to access the computed values. If a band with the specified name exists, a `BandExists` is thrown.\n\nThis process is very similar to the process ``normalized_difference()``, but determines the bands automatically based on the common names (`red`/`nir`) specified in the metadata.", + "description": "Computes the Normalized Difference Vegetation Index (NDVI). The NDVI is computed as *`(nir - red) / (nir + red)`*.\n\nThe `data` parameter expects a raster data cube with a dimension of type `bands` or a `DimensionAmbiguous` exception is thrown otherwise. By default, the dimension must have at least two bands with the common names `red` and `nir` assigned. Otherwise, the user has to specify the parameters `nir` and `red`. 
If neither is the case, either the exception `NirBandAmbiguous` or `RedBandAmbiguous` is thrown. The common names for each band are specified in the collection's band metadata and are *not* equal to the band names.\n\nBy default, the dimension of type `bands` is dropped by this process. To keep the dimension specify a new band name in the parameter `target_band`. This adds a new dimension label with the specified name to the dimension, which can be used to access the computed values. If a band with the specified name exists, a `BandExists` is thrown.\n\nThis process is very similar to the process ``normalized_difference()``, but determines the bands automatically based on the common names (`red`/`nir`) specified in the metadata.", "categories": [ "math > indices", "vegetation indices" @@ -52,7 +52,7 @@ } ], "returns": { - "description": "A raster data cube containing the computed NDVI values. The structure of the data cube differs depending on the value passed to `target_band`:\n\n* `target_band` is `null`: The data cube does not contain the dimension of type `bands` anymore, the number of dimensions decreases by one. The dimension properties (name, type, labels, reference system and resolution) for all other dimensions remain unchanged.\n* `target_band` is a string: The data cube keeps the same dimensions. The dimension properties remain unchanged, but the number of dimension labels for the dimension of type `bands` increases by one. The additional label is named as specified in `target_band`.", + "description": "A raster data cube containing the computed NDVI values. The structure of the data cube differs depending on the value passed to `target_band`:\n\n* `target_band` is `null`: The data cube does not contain the dimension of type `bands`, the number of dimensions decreases by one. The dimension properties (name, type, labels, reference system and resolution) for all other dimensions remain unchanged.\n* `target_band` is a string: The data cube keeps the same dimensions. The dimension properties remain unchanged, but the number of dimension labels for the dimension of type `bands` increases by one. The additional label is named as specified in `target_band`.", "schema": { "type": "object", "subtype": "raster-cube" diff --git a/normalized_difference.json b/normalized_difference.json index 9d766d94..7050822c 100644 --- a/normalized_difference.json +++ b/normalized_difference.json @@ -1,7 +1,7 @@ { "id": "normalized_difference", "summary": "Normalized difference", - "description": "Computes the normalized difference for two bands. The normalized difference is computed as *(x - y) / (x + y)*.\n\nThis process could be used for a number of remote sensing indices such as:\n\n* [NDVI](https://eos.com/ndvi/): `x` = NIR band, `y` = red band\n* [NDWI](https://eos.com/ndwi/): `x` = NIR band, `y` = SWIR band\n* [NDSI](https://eos.com/ndsi/): `x` = green band, `y` = SWIR band\n\nSome back-ends may have native processes such as ``ndvi()`` available for convenience.", + "description": "Computes the normalized difference for two bands. 
The normalized difference is computed as *`(x - y) / (x + y)`*.\n\nThis process could be used for a number of remote sensing indices such as:\n\n* [NDVI](https://eos.com/ndvi/): `x` = NIR band, `y` = red band\n* [NDWI](https://eos.com/ndwi/): `x` = NIR band, `y` = SWIR band\n* [NDSI](https://eos.com/ndsi/): `x` = green band, `y` = SWIR band\n\nSome back-ends may have native processes such as ``ndvi()`` available for convenience.", "categories": [ "math > indices", "vegetation indices" diff --git a/proposals/aggregate_spatial_binary.json b/proposals/aggregate_spatial_binary.json index 02514030..214dae99 100644 --- a/proposals/aggregate_spatial_binary.json +++ b/proposals/aggregate_spatial_binary.json @@ -91,7 +91,7 @@ }, "exceptions": { "TooManyDimensions": { - "message": "The number of dimensions must be reduced to three for 'aggregate_spatial_binary'." + "message": "The number of dimensions must be reduced to three for `aggregate_spatial_binary`." } }, "links": [ diff --git a/proposals/load_result.json b/proposals/load_result.json index 6641ebdd..175b49ea 100644 --- a/proposals/load_result.json +++ b/proposals/load_result.json @@ -1,7 +1,7 @@ { "id": "load_result", "summary": "Load batch job results", - "description": "Loads batch job results by job id from the local user workspace/datastore. The job must have been stored by the authenticated user on the back-end currently connected to.", + "description": "Loads batch job results by job id from the local user workspace/data store. The job must have been stored by the authenticated user on the back-end currently connected to.", "categories": [ "cubes", "import" diff --git a/proposals/load_uploaded_files.json b/proposals/load_uploaded_files.json index 5b92f36b..fd7b37ed 100644 --- a/proposals/load_uploaded_files.json +++ b/proposals/load_uploaded_files.json @@ -1,7 +1,7 @@ { "id": "load_uploaded_files", "summary": "Load files from the user workspace", - "description": "Loads one or more user-uploaded files from the local user workspace/datastore and returns them as a single data cube. The files must have been stored by the authenticated user on the back-end currently connected to.", + "description": "Loads one or more user-uploaded files from the local user workspace/data store and returns them as a single data cube. The files must have been stored by the authenticated user on the back-end currently connected to.", "categories": [ "cubes", "import" diff --git a/proposals/resample_cube_temporal.json b/proposals/resample_cube_temporal.json index 0a335feb..12aec39f 100644 --- a/proposals/resample_cube_temporal.json +++ b/proposals/resample_cube_temporal.json @@ -91,7 +91,7 @@ }, "exceptions": { "TooManyDimensions": { - "message": "The number of temporal dimensions must be reduced to one for 'resample_cube_temporal'." + "message": "The number of temporal dimensions must be reduced to one for `resample_cube_temporal`." }, "DimensionNotAvailable": { "message": "A dimension with the specified name does not exist." diff --git a/quantiles.json b/quantiles.json index b91b02a0..35079b86 100644 --- a/quantiles.json +++ b/quantiles.json @@ -65,10 +65,10 @@ }, "exceptions": { "QuantilesParameterMissing": { - "message": "The process 'quantiles' requires either the 'probabilities' or 'q' parameter to be set." + "message": "The process `quantiles` requires either the `probabilities` or `q` parameter to be set." }, "QuantilesParameterConflict": { - "message": "The process 'quantiles' only allows that either the 'probabilities' or the 'q' parameter is set." 
+ "message": "The process `quantiles` only allows that either the `probabilities` or the `q` parameter is set." } }, "examples": [ diff --git a/resample_cube_spatial.json b/resample_cube_spatial.json index d35a127f..0f043007 100644 --- a/resample_cube_spatial.json +++ b/resample_cube_spatial.json @@ -25,7 +25,7 @@ }, { "name": "method", - "description": "Resampling method. Methods are inspired by GDAL, see [gdalwarp](https://www.gdal.org/gdalwarp.html) for more information.", + "description": "Resampling method. Methods are inspired by GDAL, see [`gdalwarp`](https://www.gdal.org/gdalwarp.html) for more information.", "schema": { "type": "string", "enum": [ diff --git a/resample_spatial.json b/resample_spatial.json index 3a2bfed3..03c832cf 100644 --- a/resample_spatial.json +++ b/resample_spatial.json @@ -72,7 +72,7 @@ }, { "name": "method", - "description": "Resampling method. Methods are inspired by GDAL, see [gdalwarp](https://www.gdal.org/gdalwarp.html) for more information.", + "description": "Resampling method. Methods are inspired by GDAL, see [`gdalwarp`](https://www.gdal.org/gdalwarp.html) for more information.", "schema": { "type": "string", "enum": [ diff --git a/save_result.json b/save_result.json index b0912d5f..d64d3dc4 100644 --- a/save_result.json +++ b/save_result.json @@ -1,7 +1,7 @@ { "id": "save_result", "summary": "Save processed data to storage", - "description": "Saves processed data to the local user workspace/datastore of the authenticated user. This process aims to be compatible with GDAL/OGR formats and options. STAC-compatible metadata should be stored with the processed data.\n\nCalling this process may be rejected by back-ends in the context of secondary web services.", + "description": "Saves processed data to the local user workspace/data store of the authenticated user. This process aims to be compatible with GDAL/OGR formats and options. STAC-compatible metadata should be stored with the processed data.\n\nCalling this process may be rejected by back-ends in the context of secondary web services.", "categories": [ "cubes", "export" diff --git a/sqrt.json b/sqrt.json index 99762bb3..bc1aeb6c 100644 --- a/sqrt.json +++ b/sqrt.json @@ -1,7 +1,7 @@ { "id": "sqrt", "summary": "Square root", - "description": "Computes the square root of a real number `x`, which is equal to calculating `x` to the power of *0.5*.\n\nA square root of x is a number a such that *a^2^ = x*. Therefore, the square root is the inverse function of a to the power of 2, but only for *a >= 0*.\n\nThe no-data value `null` is passed through and therefore gets propagated.", + "description": "Computes the square root of a real number `x`, which is equal to calculating `x` to the power of *0.5*.\n\nA square root of x is a number a such that *`a² = x`*. 
Therefore, the square root is the inverse function of a to the power of 2, but only for *a >= 0*.\n\nThe no-data value `null` is passed through and therefore gets propagated.", "categories": [ "math", "math > exponential & logarithmic" diff --git a/subtract.json b/subtract.json index 5f8f8d0c..2cf8aba7 100644 --- a/subtract.json +++ b/subtract.json @@ -1,7 +1,7 @@ { "id": "subtract", "summary": "Subtraction of two numbers", - "description": "Subtracts argument `y` from the argument `x` (*x - y*) and returns the computed result.\n\nNo-data values are taken into account so that `null` is returned if any element is such a value.\n\nThe computations follow [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229) whenever the processing environment supports it.", + "description": "Subtracts argument `y` from the argument `x` (*`x - y`*) and returns the computed result.\n\nNo-data values are taken into account so that `null` is returned if any element is such a value.\n\nThe computations follow [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229) whenever the processing environment supports it.", "categories": [ "math" ], diff --git a/tests/.words b/tests/.words index 67f19cb4..4b575076 100644 --- a/tests/.words +++ b/tests/.words @@ -1,39 +1,28 @@ +0-to-9 +1-to-0 +anno +behavior boolean -MathWorld -openEO -signum +center +centers +dekad +Domini GeoJSON +labeled +MathWorld n-ary -unary -STAC -band1 -band2 -Resample +neighborhood +neighborhoods +openEO resample -Resamples -resamples -Resampled resampled -Resampling +Resamples resampling -nir -common_names -gdalwarp -center -UDFs -summand -inputMin -inputMax -outputMin -outputMax Sentinel-2 Sentinel-2A Sentinel-2B -labeled -centers -spatiotemporal -0-to-9 -1-to-0 -Anno -Domini -dekad \ No newline at end of file +signum +STAC +summand +UDFs +unary \ No newline at end of file From 10fc2d5e7c9969dd48d64fee40a08ecb350411e7 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 15 Feb 2021 12:32:05 +0100 Subject: [PATCH 042/109] Update dependency --- tests/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/package.json b/tests/package.json index ad1ff9c9..c2137589 100644 --- a/tests/package.json +++ b/tests/package.json @@ -19,7 +19,7 @@ }, "devDependencies": { "@apidevtools/json-schema-ref-parser": "^9.0.6", - "@openeo/js-processgraphs": "^1.0.0-beta.6", + "@openeo/js-processgraphs": "^1.0.0", "ajv": "^6.12.4", "concat-json-files": "^1.1.0", "glob": "^7.1.6", From 538c20cdd71e296a4d3dae368549f88ca711d14f Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 15 Feb 2021 14:42:55 +0100 Subject: [PATCH 043/109] Migrate to GitHub Actions --- .github/workflows/docs.yml | 55 ++++++++++++++++++++++++++++++++++ .github/workflows/tests.yml | 16 ++++++++++ .travis.yml | 60 ------------------------------------- 3 files changed, 71 insertions(+), 60 deletions(-) create mode 100644 .github/workflows/docs.yml create mode 100644 .github/workflows/tests.yml delete mode 100644 .travis.yml diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 00000000..fdf2c439 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,55 @@ +name: Deploy Documentation +on: + release: + types: [published] + push: + branches: + - draft + - master +jobs: + deploy: + runs-on: ubuntu-latest + strategy: + matrix: + node-version: [14.x] + steps: + - name: Inject env variables + uses: rlespinasse/github-slug-action@v3.x + - uses: actions/setup-node@v1 + - uses: actions/checkout@v2 + - run: | + 
npm install + npm run generate + working-directory: tests + - name: clone gh-pages and clean-up + if: ${{ env.GITHUB_REF_SLUG == 'master' }} + run: | + git clone --branch gh-pages https://$GITHUB_TOKEN@github.com/Open-EO/openeo-processes.git gh-pages + find gh-pages -maxdepth 1 -type f -delete + rm -rf gh-pages/examples/ + rm -rf gh-pages/meta/ + rm -rf gh-pages/proposals/ + - name: create empty gh-pages folder + if: ${{ env.GITHUB_REF_SLUG != 'master' }} + run: mkdir gh-pages + - run: | + cp tests/docs.html index.html + cp tests/processes.json processes.json + rsync -vrm --include='*.json' --include='*.html' --include='examples/***' --include='meta/***' --include='proposals/***' --exclude='*' . gh-pages + - name: deploy to root (master) + uses: peaceiris/actions-gh-pages@v3 + if: ${{ env.GITHUB_REF_SLUG == 'master' }} + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: gh-pages + user_name: 'openEO CI' + user_email: openeo.ci@uni-muenster.de + - name: deploy to ${{ env.GITHUB_REF_SLUG }} + uses: peaceiris/actions-gh-pages@v3 + if: ${{ env.GITHUB_REF_SLUG != 'master' }} + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: gh-pages + destination_dir: ${{ env.GITHUB_REF_SLUG }} + user_name: 'openEO CI' + user_email: openeo.ci@uni-muenster.de \ No newline at end of file diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 00000000..a2efdf8c --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,16 @@ +name: Test Processes +on: [push, pull_request] +jobs: + deploy: + runs-on: ubuntu-latest + strategy: + matrix: + node-version: [14.x] + steps: + - uses: actions/setup-node@v1 + - uses: actions/checkout@v2 + - name: Run tests + run: | + npm install + npm run test + working-directory: tests \ No newline at end of file diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 82ce515a..00000000 --- a/.travis.yml +++ /dev/null @@ -1,60 +0,0 @@ -language: node_js - -node_js: - - stable - -sudo: false - -install: -- cd tests -- npm install - -script: -- npm run test - -before_deploy: -- npm run generate -- cd .. -- git clone --branch gh-pages https://$GITHUB_TOKEN@github.com/Open-EO/openeo-processes.git gh-pages -- cp tests/docs.html index.html -- cp tests/processes.json processes.json -- | - if [[ $TRAVIS_BRANCH == 'draft' ]] ; then - rm -rf gh-pages/draft - mkdir -p gh-pages/draft - rsync -vrm --include='*.json' --include='*.html' --include='examples/***' --include='meta/***' --exclude='*' . gh-pages/draft - fi -- | - if [[ $TRAVIS_BRANCH == 'master' ]] ; then - find gh-pages -maxdepth 1 -type f -delete - rm -rf gh-pages/examples/ - mkdir -p gh-pages - rsync -vrm --include='*.json' --include='*.html' --include='examples/***' --include='meta/***' --exclude='*' . gh-pages - fi -- | - if [ -n "$TRAVIS_TAG" ] ; then - rm -rf gh-pages/$TRAVIS_TAG - mkdir -p gh-pages/$TRAVIS_TAG - rsync -vrm --include='*.json' --include='*.html' --include='examples/***' --include='meta/***' --exclude='*' . 
gh-pages/$TRAVIS_TAG - fi - -deploy: - - provider: pages - skip-cleanup: true - github-token: $GITHUB_TOKEN - local-dir: gh-pages - keep-history: true - name: openEO CI - email: openeo.ci@uni-muenster.de - on: - tags: true - - provider: pages - skip-cleanup: true - github-token: $GITHUB_TOKEN - local-dir: gh-pages - keep-history: true - name: openEO CI - email: openeo.ci@uni-muenster.de - on: - all_branches: true - condition: $TRAVIS_BRANCH =~ ^(master|draft)$ \ No newline at end of file From 744c81c80cd092213fc6affcb53c0f13e0cf3b52 Mon Sep 17 00:00:00 2001 From: Jeroen Dries Date: Wed, 17 Feb 2021 15:52:09 +0100 Subject: [PATCH 044/109] atmospheric_correction, cloud_detection, sar_backscatter process (#210) * atmospheric correction proposal * add more ARD processes * add orthorectify parameter * Several improvements, make processes valid * Update proposals/sar_backscatter.json Co-authored-by: Matthias Mohr * Add words to test dict * ar_backscatter: Rename parameter backscatter_coefficient to coefficient * Some updates to the processes * Make cloud_detection more general and remove the ability to apply it directly. * Add parameters to sar_backscatter (first draft) * Templates for the ARD processes * sar_backscatter: scaling: Remode db, add description * Fill ard_normalized_radar_backscatter, some improvements to related processes * Further improvements for NRB * First incomplete draft for ard_surface_reflectance with the methods being removed from the dependent processes * Improvements from code review * Added specific band names * Final draft of the SAR NRB processes * Another draft for optical processes * It also needs apply, so remove for now. * Updates from code review * Fix processes / tests * Remove FORCE as cloud detection method again * Replaced parameters orthorectify and rtc with coefficient Co-authored-by: Matthias Mohr --- .../ard_normalized_radar_backscatter.json | 97 +++++++++++++ proposals/ard_surface_reflectance.json | 77 ++++++++++ proposals/atmospheric_correction.json | 72 ++++++++++ proposals/cloud_detection.json | 53 +++++++ proposals/sar_backscatter.json | 131 ++++++++++++++++++ tests/.words | 6 + tests/processes.test.js | 5 + 7 files changed, 441 insertions(+) create mode 100644 proposals/ard_normalized_radar_backscatter.json create mode 100644 proposals/ard_surface_reflectance.json create mode 100644 proposals/atmospheric_correction.json create mode 100644 proposals/cloud_detection.json create mode 100644 proposals/sar_backscatter.json diff --git a/proposals/ard_normalized_radar_backscatter.json b/proposals/ard_normalized_radar_backscatter.json new file mode 100644 index 00000000..a5776c2e --- /dev/null +++ b/proposals/ard_normalized_radar_backscatter.json @@ -0,0 +1,97 @@ +{ + "id": "ard_normalized_radar_backscatter", + "summary": "CARD4L compliant SAR NRB generation", + "description": "Computes CARD4L compliant backscatter (gamma0) from SAR input.\n\nNote that backscatter computation may require instrument specific metadata that is tightly coupled to the original SAR products. As a result, this process may only work in combination with loading data from specific collections, not with general data cubes.", + "categories": [ + "cubes", + "sar", + "ard" + ], + "experimental": true, + "parameters": [ + { + "name": "data", + "description": "The source data cube containing SAR input.", + "schema": { + "subtype": "raster-cube", + "type": "object" + } + }, + { + "name": "elevation_model", + "description": "The digital elevation model to use. 
Set to `null` (the default) to allow the back-end to choose, which will improve portability, but reduce reproducibility.", + "optional": true, + "default": null, + "schema": [ + { + "type": "string", + "subtype": "collection-id" + }, + { + "type": "null" + } + ] + }, + { + "name": "ellipsoid_incidence_angle", + "description": "If set to `true`, an ellipsoidal incidence angle band named `ellipsoid_incidence_angle` is added. The values are given in degrees.", + "optional": true, + "default": false, + "schema": { + "type": "boolean" + } + }, + { + "name": "noise_removal", + "description": "If set to `false`, no noise removal is applied. Defaults to `true`, which removes noise.", + "optional": true, + "default": true, + "schema": { + "type": "boolean" + } + } + ], + "returns": { + "description": "Backscatter values expressed as gamma0. The data returned is CARD4L compliant and contains metadata.\n\nBy default, the backscatter values are given in linear scale.", + "schema": { + "subtype": "raster-cube", + "type": "object" + } + }, + "exceptions": { + "DigitalElevationModelInvalid": { + "message": "The digital elevation model specified is either not a DEM or can't be used with the data cube given." + } + }, + "links": [ + { + "rel": "about", + "href": "http://ceos.org/ard/files/PFS/NRB/v5.0/CARD4L-PFS_Normalised_Radar_Backscatter-v5.0.pdf", + "title": "CEOS CARD4L specification" + } + ], + "process_graph": { + "nrb": { + "process_id": "sar_backscatter", + "arguments": { + "data": { + "from_parameter": "data" + }, + "coefficient": "gamma0-terrain", + "elevation_model": { + "from_parameter": "elevation_model" + }, + "mask": true, + "contributing_area": true, + "local_incidence_angle": true, + "ellipsoid_incidence_angle": { + "from_parameter": "ellipsoid_incidence_angle" + }, + "noise_removal": { + "from_parameter": "noise_removal" + } + }, + "result": true + } + } +} \ No newline at end of file diff --git a/proposals/ard_surface_reflectance.json b/proposals/ard_surface_reflectance.json new file mode 100644 index 00000000..c7ec2d41 --- /dev/null +++ b/proposals/ard_surface_reflectance.json @@ -0,0 +1,77 @@ +{ + "id": "ard_surface_reflectance", + "summary": "CARD4L compliant Surface Reflectance generation", + "description": "Computes CARD4L compliant surface reflectance values from optical input.", + "categories": [ + "cubes", + "sar", + "ard" + ], + "experimental": true, + "parameters": [ + { + "description": "The source data cube containing multi-spectral optical top of the atmosphere (TOA) reflectances. There must be a single dimension of type bands available.", + "name": "data", + "schema": { + "subtype": "raster-cube", + "type": "object" + } + }, + { + "description": "The atmospheric correction method to use.", + "name": "atmospheric_correction_method", + "schema": { + "type": [ + "string" + ], + "enum": [ + "FORCE", + "iCOR" + ] + } + }, + { + "description": "The cloud detection method to use.", + "name": "cloud_detection_method", + "schema": { + "type": [ + "string" + ], + "enum": [ + "Fmask", + "s2cloudless", + "Sen2Cor" + ] + } + }, + { + "description": "The digital elevation model to use, leave empty to allow the back-end to make a suitable choice.", + "name": "elevation_model", + "optional": true, + "default": null, + "schema": [ + { + "type": "string", + "subtype": "collection-id" + }, + { + "type": "null" + } + ] + } + ], + "returns": { + "description": "Data cube containing bottom of atmosphere reflectances with atmospheric disturbances like clouds and cloud shadows removed. 
The data returned is CARD4L compliant and contains metadata.", + "schema": { + "subtype": "raster-cube", + "type": "object" + } + }, + "links": [ + { + "rel": "about", + "href": "http://ceos.org/ard/files/PFS/SR/v5.0/CARD4L_Product_Family_Specification_Surface_Reflectance-v5.0.pdf", + "title": "CEOS CARD4L specification" + } + ] +} \ No newline at end of file diff --git a/proposals/atmospheric_correction.json b/proposals/atmospheric_correction.json new file mode 100644 index 00000000..0df98018 --- /dev/null +++ b/proposals/atmospheric_correction.json @@ -0,0 +1,72 @@ +{ + "id": "atmospheric_correction", + "summary": "Apply atmospheric correction", + "description": "Applies an atmospheric correction that converts top of atmosphere reflectance values into bottom of atmosphere/top of canopy reflectance values.", + "categories": [ + "cubes", + "optical" + ], + "experimental": true, + "parameters": [ + { + "description": "Data cube containing multi-spectral optical top of atmosphere reflectances to be corrected.", + "name": "data", + "schema": { + "subtype": "raster-cube", + "type": "object" + } + }, + { + "description": "The atmospheric correction method to use. To get reproducible results, you have to set a specific method.\n\nSet to `null` to allow the back-end to choose, which will improve portability, but reduce reproducibility as you *may* get different results if you run the processes multiple times.", + "name": "method", + "schema": [ + { + "type": [ + "string" + ], + "enum": [ + "FORCE", + "iCOR" + ] + }, + { + "type": "null" + } + ] + }, + { + "description": "The digital elevation model to use, leave empty to allow the back-end to make a suitable choice.", + "name": "elevation_model", + "optional": true, + "default": null, + "schema": [ + { + "type": "string", + "subtype": "collection-id" + }, + { + "type": "null" + } + ] + } + ], + "returns": { + "description": "Data cube containing bottom of atmosphere reflectances.", + "schema": { + "subtype": "raster-cube", + "type": "object" + } + }, + "exceptions": { + "DigitalElevationModelInvalid": { + "message": "The digital elevation model specified is either not a DEM or can't be used with the data cube given." + } + }, + "links": [ + { + "rel": "about", + "href": "https://bok.eo4geo.eu/IP1-7-1", + "title": "Atmospheric correction explained by EO4GEO body of knowledge." + } + ] +} \ No newline at end of file diff --git a/proposals/cloud_detection.json b/proposals/cloud_detection.json new file mode 100644 index 00000000..4699fcb5 --- /dev/null +++ b/proposals/cloud_detection.json @@ -0,0 +1,53 @@ +{ + "id": "cloud_detection", + "summary": "Create cloud masks", + "description": "Detects atmospheric disturbances such as clouds, cloud shadows, aerosols, haze, ozone and/or water vapour in optical imagery.\n\nIt creates a data cube with the spatial and temporal dimensions compatible to the source data cube and a dimension that contains a dimension label for each of the supported/considered atmospheric disturbances. 
The naming of the bands should follow the following pre-defined values:\n\n- `clouds`\n- `shadows`\n- `aerosol`\n- `haze`\n- `ozone`\n- `water_vapor`\n\nAll bands have values between 0 (clear) and 1, which describes the probability that it is an atmospheric disturbance.", + "categories": [ + "cubes", + "optical" + ], + "experimental": true, + "parameters": [ + { + "description": "The source data cube containing multi-spectral optical top of the atmosphere (TOA) reflectances on which to perform cloud detection.", + "name": "data", + "schema": { + "subtype": "raster-cube", + "type": "object" + } + }, + { + "description": "The cloud detection method to use. To get reproducible results, you have to set a specific method.\n\nSet to `null` to allow the back-end to choose, which will improve portability, but reduce reproducibility as you *may* get different results if you run the processes multiple times.", + "name": "method", + "schema": [ + { + "type": [ + "string" + ], + "enum": [ + "Fmask", + "s2cloudless", + "Sen2Cor" + ] + }, + { + "type": "null" + } + ] + } + ], + "returns": { + "description": "A data cube with bands for the atmospheric disturbances. Each of the masks contains values between 0 and 1. The data cube has the same spatial and temporal dimensions as the source data cube and a dimension that contains a dimension label for each of the supported/considered atmospheric disturbance.", + "schema": { + "subtype": "raster-cube", + "type": "object" + } + }, + "links": [ + { + "rel": "about", + "href": "https://bok.eo4geo.eu/TA14-2-2-1-3", + "title": "Cloud mask explained by EO4GEO body of knowledge." + } + ] +} \ No newline at end of file diff --git a/proposals/sar_backscatter.json b/proposals/sar_backscatter.json new file mode 100644 index 00000000..024e90c5 --- /dev/null +++ b/proposals/sar_backscatter.json @@ -0,0 +1,131 @@ +{ + "id": "sar_backscatter", + "summary": "Computes backscatter from SAR input", + "description": "Computes backscatter from SAR input.\n\nNote that backscatter computation may require instrument specific metadata that is tightly coupled to the original SAR products. As a result, this process may only work in combination with loading data from specific collections, not with general data cubes.", + "categories": [ + "cubes", + "sar" + ], + "experimental": true, + "parameters": [ + { + "name": "data", + "description": "The source data cube containing SAR input.", + "schema": { + "subtype": "raster-cube", + "type": "object" + } + }, + { + "name": "coefficient", + "description": "Select the radiometric correction coefficient. The following options are available:\n\n* `beta0`: radar brightness\n* `sigma0-ellipsoid`: ground area computed with ellipsoid earth model\n* `sigma0-terrain`: ground area computed with terrain earth model\n* `gamma0-ellipsoid`: ground area computed with ellipsoid earth model in sensor line of sight\n* `gamma0-terrain`: ground area computed with terrain earth model in sensor line of sight (default)\n* `null`: non-normalized backscatter", + "optional": true, + "default": "gamma0-terrain", + "schema": [ + { + "type": "string", + "enum": [ + "beta0", + "sigma0-ellipsoid", + "sigma0-terrain", + "gamma0-ellipsoid", + "gamma0-terrain" + ] + }, + { + "title": "Non-normalized backscatter", + "type": "null" + } + ] + }, + { + "name": "elevation_model", + "description": "The digital elevation model to use. 
Set to `null` (the default) to allow the back-end to choose, which will improve portability, but reduce reproducibility.", + "optional": true, + "default": null, + "schema": [ + { + "type": "string", + "subtype": "collection-id" + }, + { + "type": "null" + } + ] + }, + { + "name": "mask", + "description": "If set to `true`, a data mask is added to the bands with the name `mask`. It indicates which values are valid (1), invalid (0) or contain no-data (null).", + "optional": true, + "default": false, + "schema": { + "type": "boolean" + } + }, + { + "name": "contributing_area", + "description": "If set to `true`, a DEM-based local contributing area band named `contributing_area` is added. The values are given in square meters.", + "optional": true, + "default": false, + "schema": { + "type": "boolean" + } + }, + { + "name": "local_incidence_angle", + "description": "If set to `true`, a DEM-based local incidence angle band named `local_incidence_angle` is added. The values are given in degrees.", + "optional": true, + "default": false, + "schema": { + "type": "boolean" + } + }, + { + "name": "ellipsoid_incidence_angle", + "description": "If set to `true`, an ellipsoidal incidence angle band named `ellipsoid_incidence_angle` is added. The values are given in degrees.", + "optional": true, + "default": false, + "schema": { + "type": "boolean" + } + }, + { + "name": "noise_removal", + "description": "If set to `false`, no noise removal is applied. Defaults to `true`, which removes noise.", + "optional": true, + "default": true, + "schema": { + "type": "boolean" + } + } + ], + "returns": { + "description": "Backscatter values corresponding to the chosen parametrization.\n\nBy default, the backscatter values are given in linear scale.", + "schema": { + "subtype": "raster-cube", + "type": "object" + } + }, + "exceptions": { + "DigitalElevationModelInvalid": { + "message": "The digital elevation model specified is either not a DEM or can't be used with the data cube given." + } + }, + "links": [ + { + "rel": "about", + "href": "https://bok.eo4geo.eu/PP2-2-4-3", + "title": "Gamma nought (0) explained by EO4GEO body of knowledge." + }, + { + "rel": "about", + "href": "https://bok.eo4geo.eu/PP2-2-4-2", + "title": "Sigma nought (0) explained by EO4GEO body of knowledge." 
+ }, + { + "rel": "about", + "href": "https://www.geo.uzh.ch/microsite/rsl-documents/research/publications/peer-reviewed-articles/201108-TGRS-Small-tcGamma-3809999360/201108-TGRS-Small-tcGamma.pdf", + "title": "Flattening Gamma: Radiometric Terrain Correction for SAR Imagery" + } + ] +} diff --git a/tests/.words b/tests/.words index 4b575076..568fd814 100644 --- a/tests/.words +++ b/tests/.words @@ -6,7 +6,9 @@ boolean center centers dekad +DEM-based Domini +gamma0 GeoJSON labeled MathWorld @@ -14,6 +16,10 @@ n-ary neighborhood neighborhoods openEO +orthorectification +orthorectified +radiometrically +reflectances resample resampled Resamples diff --git a/tests/processes.test.js b/tests/processes.test.js index 3d81952c..089328f9 100644 --- a/tests/processes.test.js +++ b/tests/processes.test.js @@ -103,11 +103,14 @@ describe.each(processes)("%s", (file, p, fileContent, proposal) => { // return value description expect(typeof p.returns.description).toBe('string'); + // lint: Description should not be empty + expect(p.returns.description.length).toBeGreaterThan(0); checkDescription(p.returns.description, p); // return value schema expect(typeof p.returns.schema).toBe('object'); expect(p.returns.schema).not.toBeNull(); + // lint: Description should not be empty checkJsonSchema(jsv, p.returns.schema); }); @@ -233,6 +236,8 @@ function checkParam(param, p, checkCbParams = true) { // parameter description expect(typeof param.description).toBe('string'); + // lint: Description should not be empty + expect(param.description.length).toBeGreaterThan(0); checkDescription(param.description, p); // Parameter flags From 8abb2773b2b71a4d9f1e5d65db414ab4dd1406b5 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 19 Feb 2021 14:51:08 +0100 Subject: [PATCH 045/109] Fix invalid var in CI script --- .github/workflows/docs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index fdf2c439..b0057d30 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -24,7 +24,7 @@ jobs: - name: clone gh-pages and clean-up if: ${{ env.GITHUB_REF_SLUG == 'master' }} run: | - git clone --branch gh-pages https://$GITHUB_TOKEN@github.com/Open-EO/openeo-processes.git gh-pages + git clone --branch gh-pages https://github.com/Open-EO/openeo-processes.git gh-pages find gh-pages -maxdepth 1 -type f -delete rm -rf gh-pages/examples/ rm -rf gh-pages/meta/ From 24c920e2e669e717f80f1f1409fa2f1d3f3b618d Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 22 Feb 2021 12:14:03 +0100 Subject: [PATCH 046/109] Fix RFC links https://github.com/Open-EO/openeo-api/issues/361 --- aggregate_temporal.json | 2 +- filter_temporal.json | 2 +- load_collection.json | 2 +- meta/subtype-schemas.json | 14 +++++++------- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/aggregate_temporal.json b/aggregate_temporal.json index f5a26187..47d63d28 100644 --- a/aggregate_temporal.json +++ b/aggregate_temporal.json @@ -17,7 +17,7 @@ }, { "name": "intervals", - "description": "Left-closed temporal intervals, which are allowed to overlap. Each temporal interval in the array has exactly two elements:\n\n1. The first element is the start of the temporal interval. The specified instance in time is **included** in the interval.\n2. The second element is the end of the temporal interval. The specified instance in time is **excluded** from the interval.\n\nThe specified temporal strings follow [RFC 3339](https://tools.ietf.org/html/rfc3339). 
Although [RFC 3339 prohibits the hour to be '24'](https://tools.ietf.org/html/rfc3339#section-5.7), **this process allows the value '24' for the hour** of an end time in order to make it possible that left-closed time intervals can fully cover the day.", + "description": "Left-closed temporal intervals, which are allowed to overlap. Each temporal interval in the array has exactly two elements:\n\n1. The first element is the start of the temporal interval. The specified instance in time is **included** in the interval.\n2. The second element is the end of the temporal interval. The specified instance in time is **excluded** from the interval.\n\nThe specified temporal strings follow [RFC 3339](https://www.rfc-editor.org/rfc/rfc3339.html). Although [RFC 3339 prohibits the hour to be '24'](https://www.rfc-editor.org/rfc/rfc3339.html#section-5.7), **this process allows the value '24' for the hour** of an end time in order to make it possible that left-closed time intervals can fully cover the day.", "schema": { "type": "array", "subtype": "temporal-intervals", diff --git a/filter_temporal.json b/filter_temporal.json index 496a8792..81336123 100644 --- a/filter_temporal.json +++ b/filter_temporal.json @@ -17,7 +17,7 @@ }, { "name": "extent", - "description": "Left-closed temporal interval, i.e. an array with exactly two elements:\n\n1. The first element is the start of the temporal interval. The specified instance in time is **included** in the interval.\n2. The second element is the end of the temporal interval. The specified instance in time is **excluded** from the interval.\n\nThe specified temporal strings follow [RFC 3339](https://tools.ietf.org/html/rfc3339). Also supports open intervals by setting one of the boundaries to `null`, but never both.", + "description": "Left-closed temporal interval, i.e. an array with exactly two elements:\n\n1. The first element is the start of the temporal interval. The specified instance in time is **included** in the interval.\n2. The second element is the end of the temporal interval. The specified instance in time is **excluded** from the interval.\n\nThe specified temporal strings follow [RFC 3339](https://www.rfc-editor.org/rfc/rfc3339.html). Also supports open intervals by setting one of the boundaries to `null`, but never both.", "schema": { "type": "array", "subtype": "temporal-interval", diff --git a/load_collection.json b/load_collection.json index 2abf003f..2320cc9e 100644 --- a/load_collection.json +++ b/load_collection.json @@ -105,7 +105,7 @@ }, { "name": "temporal_extent", - "description": "Limits the data to load from the collection to the specified left-closed temporal interval. Applies to all temporal dimensions. The interval has to be specified as an array with exactly two elements:\n\n1. The first element is the start of the temporal interval. The specified instance in time is **included** in the interval.\n2. The second element is the end of the temporal interval. The specified instance in time is **excluded** from the interval.\n\nThe specified temporal strings follow [RFC 3339](https://tools.ietf.org/html/rfc3339). Also supports open intervals by setting one of the boundaries to `null`, but never both.\n\nSet this parameter to `null` to set no limit for the spatial extent. Be careful with this when loading large datasets!", + "description": "Limits the data to load from the collection to the specified left-closed temporal interval. Applies to all temporal dimensions. 
The interval has to be specified as an array with exactly two elements:\n\n1. The first element is the start of the temporal interval. The specified instance in time is **included** in the interval.\n2. The second element is the end of the temporal interval. The specified instance in time is **excluded** from the interval.\n\nThe specified temporal strings follow [RFC 3339](https://www.rfc-editor.org/rfc/rfc3339.html). Also supports open intervals by setting one of the boundaries to `null`, but never both.\n\nSet this parameter to `null` to set no limit for the spatial extent. Be careful with this when loading large datasets!", "schema": [ { "type": "array", diff --git a/meta/subtype-schemas.json b/meta/subtype-schemas.json index fb9d6817..160c8168 100644 --- a/meta/subtype-schemas.json +++ b/meta/subtype-schemas.json @@ -115,14 +115,14 @@ "subtype": "date", "format": "date", "title": "Date only", - "description": "Date only representation, as defined for `full-date` by [RFC 3339 in section 5.6](https://tools.ietf.org/html/rfc3339#section-5.6). The time zone is UTC." + "description": "Date only representation, as defined for `full-date` by [RFC 3339 in section 5.6](https://www.rfc-editor.org/rfc/rfc3339.html#section-5.6). The time zone is UTC." }, "date-time": { "type": "string", "subtype": "date-time", "format": "date-time", "title": "Date with Time", - "description": "Date and time representation, as defined for `date-time` by [RFC 3339 in section 5.6](https://tools.ietf.org/html/rfc3339#section-5.6)." + "description": "Date and time representation, as defined for `date-time` by [RFC 3339 in section 5.6](https://www.rfc-editor.org/rfc/rfc3339.html#section-5.6)." }, "duration": { "type": "string", @@ -159,7 +159,7 @@ "type": "object", "subtype": "geojson", "title": "GeoJSON", - "description": "GeoJSON as defined by [RFC 7946](https://tools.ietf.org/html/rfc7946).", + "description": "GeoJSON as defined by [RFC 7946](https://www.rfc-editor.org/rfc/rfc7946.html).", "allOf": [ { "$ref": "https://geojson.org/schema/GeoJSON.json" @@ -286,7 +286,7 @@ "type": "array", "subtype": "temporal-interval", "title": "Single temporal interval", - "description": "Left-closed temporal interval, represented as two-element array with the following elements:\n\n1. The first element is the start of the temporal interval. The specified instance in time is **included** in the interval.\n2. The second element is the end of the temporal interval. The specified instance in time is **excluded** from the interval.\n\nThe specified temporal strings follow [RFC 3339](https://tools.ietf.org/html/rfc3339). Although [RFC 3339 prohibits the hour to be '24'](https://tools.ietf.org/html/rfc3339#section-5.7), **this process allows the value '24' for the hour** of an end time in order to make it possible that left-closed time intervals can fully cover the day. `null` can be used to specify open intervals.", + "description": "Left-closed temporal interval, represented as two-element array with the following elements:\n\n1. The first element is the start of the temporal interval. The specified instance in time is **included** in the interval.\n2. The second element is the end of the temporal interval. The specified instance in time is **excluded** from the interval.\n\nThe specified temporal strings follow [RFC 3339](https://www.rfc-editor.org/rfc/rfc3339.html). 
Although [RFC 3339 prohibits the hour to be '24'](https://www.rfc-editor.org/rfc/rfc3339.html#section-5.7), **this process allows the value '24' for the hour** of an end time in order to make it possible that left-closed time intervals can fully cover the day. `null` can be used to specify open intervals.", "minItems": 2, "maxItems": 2, "items": { @@ -375,7 +375,7 @@ "subtype": "time", "format": "time", "title": "Time only", - "description": "Time only representation, as defined for `full-time` by [RFC 3339 in section 5.6](https://tools.ietf.org/html/rfc3339#section-5.6). Although [RFC 3339 prohibits the hour to be '24'](https://tools.ietf.org/html/rfc3339#section-5.7), this definition allows the value '24' for the hour as end time in an interval in order to make it possible that left-closed time intervals can fully cover the day." + "description": "Time only representation, as defined for `full-time` by [RFC 3339 in section 5.6](https://www.rfc-editor.org/rfc/rfc3339.html#section-5.6). Although [RFC 3339 prohibits the hour to be '24'](https://www.rfc-editor.org/rfc/rfc3339.html#section-5.7), this definition allows the value '24' for the hour as end time in an interval in order to make it possible that left-closed time intervals can fully cover the day." }, "udf-code": { "type": "string", @@ -400,7 +400,7 @@ "subtype": "uri", "format": "uri", "title": "URI", - "description": "A valid URI according to [RFC3986](https://tools.ietf.org/html/rfc3986)." + "description": "A valid URI according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986.html)." }, "vector-cube": { "type": "object", @@ -421,7 +421,7 @@ "maxLength": 4, "pattern": "^\\d{4}$", "title": "Year only", - "description": "Year representation, as defined for `date-fullyear` by [RFC 3339 in section 5.6](https://tools.ietf.org/html/rfc3339#section-5.6)." + "description": "Year representation, as defined for `date-fullyear` by [RFC 3339 in section 5.6](https://www.rfc-editor.org/rfc/rfc3339.html#section-5.6)." } } } \ No newline at end of file From 4a250f49f022474467fd487facb2b7e5aafeec7e Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 10 Mar 2021 14:19:52 +0100 Subject: [PATCH 047/109] ARD: Don't require contributing area by default #223 --- proposals/ard_normalized_radar_backscatter.json | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/proposals/ard_normalized_radar_backscatter.json b/proposals/ard_normalized_radar_backscatter.json index a5776c2e..0a44f840 100644 --- a/proposals/ard_normalized_radar_backscatter.json +++ b/proposals/ard_normalized_radar_backscatter.json @@ -32,6 +32,15 @@ } ] }, + { + "name": "contributing_area", + "description": "If set to `true`, a DEM-based local contributing area band named `contributing_area` is added. The values are given in square meters.", + "optional": true, + "default": false, + "schema": { + "type": "boolean" + } + }, { "name": "ellipsoid_incidence_angle", "description": "If set to `true`, an ellipsoidal incidence angle band named `ellipsoid_incidence_angle` is added. 
The values are given in degrees.", @@ -82,7 +91,9 @@ "from_parameter": "elevation_model" }, "mask": true, - "contributing_area": true, + "contributing_area": { + "from_parameter": "contributing_area" + }, "local_incidence_angle": true, "ellipsoid_incidence_angle": { "from_parameter": "ellipsoid_incidence_angle" From 7531b3933d648d7590ca83ce5cb0f2e5e8c77fff Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 2 Apr 2021 15:19:58 +0200 Subject: [PATCH 048/109] Clarify bands for ard_normalized_radar_backscatter #228 --- proposals/ard_normalized_radar_backscatter.json | 11 ++++++++--- proposals/sar_backscatter.json | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/proposals/ard_normalized_radar_backscatter.json b/proposals/ard_normalized_radar_backscatter.json index 0a44f840..fd88eb45 100644 --- a/proposals/ard_normalized_radar_backscatter.json +++ b/proposals/ard_normalized_radar_backscatter.json @@ -1,7 +1,7 @@ { "id": "ard_normalized_radar_backscatter", "summary": "CARD4L compliant SAR NRB generation", - "description": "Computes CARD4L compliant backscatter (gamma0) from SAR input.\n\nNote that backscatter computation may require instrument specific metadata that is tightly coupled to the original SAR products. As a result, this process may only work in combination with loading data from specific collections, not with general data cubes.", + "description": "Computes CARD4L compliant backscatter from SAR input. The radiometric correction coefficient is gamma0 (terrain), which is the ground area computed with terrain earth model in sensor line of sight.\n\nNote that backscatter computation may require instrument specific metadata that is tightly coupled to the original SAR products. As a result, this process may only work in combination with loading data from specific collections, not with general data cubes.", "categories": [ "cubes", "sar", @@ -61,7 +61,7 @@ } ], "returns": { - "description": "Backscatter values expressed as gamma0. The data returned is CARD4L compliant and contains metadata.\n\nBy default, the backscatter values are given in linear scale.", + "description": "Backscatter values expressed as gamma0 in linear scale.\n\nIn addition to the bands `contributing_area` and `ellipsoid_incidence_angle` that can optionally be added with corresponding parameters, the following bands are always added to the data cube:\n- `mask`: A data mask that indicates which values are valid (1), invalid (0) or contain no-data (null).\n- `local_incidence_angle`: A band with DEM-based local incidence angles in degrees.\n\nThe data returned is CARD4L compliant with corresponsing metadata.", "schema": { "subtype": "raster-cube", "type": "object" @@ -77,7 +77,12 @@ "rel": "about", "href": "http://ceos.org/ard/files/PFS/NRB/v5.0/CARD4L-PFS_Normalised_Radar_Backscatter-v5.0.pdf", "title": "CEOS CARD4L specification" - } + }, + { + "rel": "about", + "href": "https://bok.eo4geo.eu/PP2-2-4-3", + "title": "Gamma nought (0) explained by EO4GEO body of knowledge." + }, ], "process_graph": { "nrb": { diff --git a/proposals/sar_backscatter.json b/proposals/sar_backscatter.json index 024e90c5..ddfe8bb9 100644 --- a/proposals/sar_backscatter.json +++ b/proposals/sar_backscatter.json @@ -100,7 +100,7 @@ } ], "returns": { - "description": "Backscatter values corresponding to the chosen parametrization.\n\nBy default, the backscatter values are given in linear scale.", + "description": "Backscatter values corresponding to the chosen parametrization. 
The values are given in linear scale.", "schema": { "subtype": "raster-cube", "type": "object" From 5b9596f6eda7080a70ff41ff7c0d54dcbaac9aa1 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 2 Apr 2021 15:58:44 +0200 Subject: [PATCH 049/109] SR: Minor clarifications and changes for consistency #228 --- proposals/ard_surface_reflectance.json | 8 ++++---- proposals/atmospheric_correction.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/proposals/ard_surface_reflectance.json b/proposals/ard_surface_reflectance.json index c7ec2d41..676e2f4c 100644 --- a/proposals/ard_surface_reflectance.json +++ b/proposals/ard_surface_reflectance.json @@ -1,7 +1,7 @@ { "id": "ard_surface_reflectance", "summary": "CARD4L compliant Surface Reflectance generation", - "description": "Computes CARD4L compliant surface reflectance values from optical input.", + "description": "Computes CARD4L compliant surface (bottom of atmosphere/top of canopy) reflectance values from optical input.", "categories": [ "cubes", "sar", @@ -10,7 +10,7 @@ "experimental": true, "parameters": [ { - "description": "The source data cube containing multi-spectral optical top of the atmosphere (TOA) reflectances. There must be a single dimension of type bands available.", + "description": "The source data cube containing multi-spectral optical top of the atmosphere (TOA) reflectances. There must be a single dimension of type `bands` available.", "name": "data", "schema": { "subtype": "raster-cube", @@ -31,7 +31,7 @@ } }, { - "description": "The cloud detection method to use.", + "description": "The cloud detection method to use.\n\nEach method supprts detecting different atmospheric disturbances such as clouds, cloud shadows, aerosols, haze, ozone and/or water vapour in optical imagery.", "name": "cloud_detection_method", "schema": { "type": [ @@ -45,7 +45,7 @@ } }, { - "description": "The digital elevation model to use, leave empty to allow the back-end to make a suitable choice.", + "description": "The digital elevation model to use. Set to `null` (the default) to allow the back-end to choose, which will improve portability, but reduce reproducibility.", "name": "elevation_model", "optional": true, "default": null, diff --git a/proposals/atmospheric_correction.json b/proposals/atmospheric_correction.json index 0df98018..87c4aab3 100644 --- a/proposals/atmospheric_correction.json +++ b/proposals/atmospheric_correction.json @@ -35,7 +35,7 @@ ] }, { - "description": "The digital elevation model to use, leave empty to allow the back-end to make a suitable choice.", + "description": "The digital elevation model to use. 
Set to `null` (the default) to allow the back-end to choose, which will improve portability, but reduce reproducibility.", "name": "elevation_model", "optional": true, "default": null, From 9d1f4a0b127b5c1d7cf84f70c0253d3b1fceacad Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 2 Apr 2021 16:00:16 +0200 Subject: [PATCH 050/109] Fix issues --- proposals/ard_normalized_radar_backscatter.json | 4 ++-- proposals/ard_surface_reflectance.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/proposals/ard_normalized_radar_backscatter.json b/proposals/ard_normalized_radar_backscatter.json index fd88eb45..7e9f923b 100644 --- a/proposals/ard_normalized_radar_backscatter.json +++ b/proposals/ard_normalized_radar_backscatter.json @@ -61,7 +61,7 @@ } ], "returns": { - "description": "Backscatter values expressed as gamma0 in linear scale.\n\nIn addition to the bands `contributing_area` and `ellipsoid_incidence_angle` that can optionally be added with corresponding parameters, the following bands are always added to the data cube:\n- `mask`: A data mask that indicates which values are valid (1), invalid (0) or contain no-data (null).\n- `local_incidence_angle`: A band with DEM-based local incidence angles in degrees.\n\nThe data returned is CARD4L compliant with corresponsing metadata.", + "description": "Backscatter values expressed as gamma0 in linear scale.\n\nIn addition to the bands `contributing_area` and `ellipsoid_incidence_angle` that can optionally be added with corresponding parameters, the following bands are always added to the data cube:\n\n- `mask`: A data mask that indicates which values are valid (1), invalid (0) or contain no-data (null).\n- `local_incidence_angle`: A band with DEM-based local incidence angles in degrees.\n\nThe data returned is CARD4L compliant with corresponding metadata.", "schema": { "subtype": "raster-cube", "type": "object" @@ -82,7 +82,7 @@ "rel": "about", "href": "https://bok.eo4geo.eu/PP2-2-4-3", "title": "Gamma nought (0) explained by EO4GEO body of knowledge." - }, + } ], "process_graph": { "nrb": { diff --git a/proposals/ard_surface_reflectance.json b/proposals/ard_surface_reflectance.json index 676e2f4c..3159583c 100644 --- a/proposals/ard_surface_reflectance.json +++ b/proposals/ard_surface_reflectance.json @@ -31,7 +31,7 @@ } }, { - "description": "The cloud detection method to use.\n\nEach method supprts detecting different atmospheric disturbances such as clouds, cloud shadows, aerosols, haze, ozone and/or water vapour in optical imagery.", + "description": "The cloud detection method to use.\n\nEach method supports detecting different atmospheric disturbances such as clouds, cloud shadows, aerosols, haze, ozone and/or water vapour in optical imagery.", "name": "cloud_detection_method", "schema": { "type": [ From 636f9a8734e840a0af464472b0faaebd26d8a8ca Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 2 Apr 2021 16:42:44 +0200 Subject: [PATCH 051/109] Define band names and values for ard_surface_reflectance. 
Be more strict with band names in cloud_detection #228
---
 proposals/ard_surface_reflectance.json | 2 +-
 proposals/cloud_detection.json | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/proposals/ard_surface_reflectance.json b/proposals/ard_surface_reflectance.json
index 3159583c..f6da0375 100644
--- a/proposals/ard_surface_reflectance.json
+++ b/proposals/ard_surface_reflectance.json
@@ -61,7 +61,7 @@ } ], "returns": {
- "description": "Data cube containing bottom of atmosphere reflectances with atmospheric disturbances like clouds and cloud shadows removed. The data returned is CARD4L compliant and contains metadata.",
+ "description": "Data cube containing bottom of atmosphere reflectances for each spectral band in the source data cube, with atmospheric disturbances like clouds and cloud shadows removed. No-data values (null) are directly set in the bands. Depending on the methods used, several additional bands will be added to the data cube:\n\n- `date` (optional): Specifies per-pixel acquisition timestamps.\n- `incomplete-testing` (required): Identifies pixels for which the per-pixel tests have not all been successfully completed with a value of 1. Otherwise the value is 0.\n- `saturation` (required) / `saturation_{band}` (optional): Indicates where pixels in the input spectral bands are saturated (1) or not (0). If the saturation is given per band, the band names are `saturation_{band}` with `{band}` being the band name from the source data cube.\n- `cloud`, `shadow` (both required), `aerosol`, `haze`, `ozone`, `water_vapor` (all optional): Indicates the probability of pixels being atmospheric disturbances such as clouds. All bands have values between 0 (clear) and 1, which describes the probability that it is an atmospheric disturbance.\n- `snow-ice` (optional): Indicates whether a pixel is assessed as being snow/ice (1) or not (0). All values describe the probability and must be between 0 and 1.\n- `land-water` (optional): Indicates whether a pixel is assessed as being land (1) or water (0). All values describe the probability and must be between 0 and 1.\n- `incidence-angle` (optional): Specifies per-pixel incidence angles in degrees.\n- `azimuth` (optional): Specifies per-pixel azimuth angles in degrees.\n- `sun-azimuth` (optional): Specifies per-pixel sun azimuth angles in degrees.\n- `sun-elevation` (optional): Specifies per-pixel sun elevation angles in degrees.\n- `terrain-shadow` (optional): Indicates with a value of 1 whether a pixel is not directly illuminated due to terrain shadowing. Otherwise the value is 0.\n- `terrain-occlusion` (optional): Indicates with a value of 1 whether a pixel is not visible to the sensor due to terrain occlusion during off-nadir viewing.
Otherwise the value is 0.\n- `terrain-illumination` (optional): Coefficients used for terrain illumination correction are provided for each pixel.\n\nThe data returned is CARD4L compliant with corresponding metadata.",
 "schema": {
 "subtype": "raster-cube",
 "type": "object"
diff --git a/proposals/cloud_detection.json b/proposals/cloud_detection.json
index 4699fcb5..3c90a7f2 100644
--- a/proposals/cloud_detection.json
+++ b/proposals/cloud_detection.json
@@ -1,7 +1,7 @@ { "id": "cloud_detection", "summary": "Create cloud masks",
- "description": "Detects atmospheric disturbances such as clouds, cloud shadows, aerosols, haze, ozone and/or water vapour in optical imagery.\n\nIt creates a data cube with the spatial and temporal dimensions compatible to the source data cube and a dimension that contains a dimension label for each of the supported/considered atmospheric disturbances. The naming of the bands should follow the following pre-defined values:\n\n- `clouds`\n- `shadows`\n- `aerosol`\n- `haze`\n- `ozone`\n- `water_vapor`\n\nAll bands have values between 0 (clear) and 1, which describes the probability that it is an atmospheric disturbance.",
+ "description": "Detects atmospheric disturbances such as clouds, cloud shadows, aerosols, haze, ozone and/or water vapour in optical imagery.\n\nIt creates a data cube with the spatial and temporal dimensions compatible to the source data cube and a dimension that contains a dimension label for each of the supported/considered atmospheric disturbances. The naming of the bands follows these pre-defined values:\n\n- `cloud`\n- `shadow`\n- `aerosol`\n- `haze`\n- `ozone`\n- `water_vapor`\n\nAll bands have values between 0 (clear) and 1, which describes the probability that it is an atmospheric disturbance.",
 "categories": [ "cubes", "optical"
From 9d1f4a0b127b5c1d7cf84f70c0253d3b1fceacad Mon Sep 17 00:00:00 2001
From: Matthias Mohr
Date: Mon, 12 Apr 2021 11:21:26 +0200
Subject: [PATCH 052/109] Clarify that the user workspace is server-side. #225
---
 CHANGELOG.md | 1 +
 proposals/load_result.json | 2 +-
 proposals/load_uploaded_files.json | 2 +-
 run_udf.json | 2 +-
 save_result.json | 2 +-
 5 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 997a76bd..6df07c49 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Exception messages have been aligned always use ` instead of '. Tooling could render it with CommonMark. ### Fixed
+- Clarify that the user workspace is server-side. [#225](https://github.com/Open-EO/openeo-processes/issues/225)
 - Clarify that the `condition` parameter for `array_filter` works also on indices and labels.
 - Clarify contradicting statements in `filter_temporal` for the default value of the `dimension` parameter. By default *all* temporal dimensions are affected by the process. [#203](https://github.com/Open-EO/openeo-processes/issues/203)
 - Clarify how the parameters passed to the overlap resolver correspond to the data cubes. [#184](https://github.com/Open-EO/openeo-processes/issues/184)
diff --git a/proposals/load_result.json b/proposals/load_result.json
index 175b49ea..ebc81718 100644
--- a/proposals/load_result.json
+++ b/proposals/load_result.json
@@ -1,7 +1,7 @@ { "id": "load_result", "summary": "Load batch job results",
- "description": "Loads batch job results by job id from the local user workspace/data store.
The job must have been stored by the authenticated user on the back-end currently connected to.", + "description": "Loads batch job results by job id from the server-side user workspace. The job must have been stored by the authenticated user on the back-end currently connected to.", "categories": [ "cubes", "import" diff --git a/proposals/load_uploaded_files.json b/proposals/load_uploaded_files.json index fd7b37ed..da017227 100644 --- a/proposals/load_uploaded_files.json +++ b/proposals/load_uploaded_files.json @@ -1,7 +1,7 @@ { "id": "load_uploaded_files", "summary": "Load files from the user workspace", - "description": "Loads one or more user-uploaded files from the local user workspace/data store and returns them as a single data cube. The files must have been stored by the authenticated user on the back-end currently connected to.", + "description": "Loads one or more user-uploaded files from the server-side workspace of the authenticated user and returns them as a single data cube. The files must have been stored by the authenticated user on the back-end currently connected to.", "categories": [ "cubes", "import" diff --git a/run_udf.json b/run_udf.json index 82e10902..37509d65 100644 --- a/run_udf.json +++ b/run_udf.json @@ -1,7 +1,7 @@ { "id": "run_udf", "summary": "Run a UDF", - "description": "Runs a UDF in one of the supported runtime environments.\n\nThe process can either:\n\n1. load and run a locally stored UDF from a file in the workspace of the authenticated user. The path to the UDF file must be relative to the root directory of the user's workspace.\n2. fetch and run a remotely stored and published UDF by absolute URI, for example from [openEO Hub](https://hub.openeo.org)).\n3. run the source code specified inline as string.\n\nThe loaded UDF can be executed in several processes such as ``aggregate_spatial()``, ``apply()``, ``apply_dimension()`` and ``reduce_dimension()``. In this case, an array is passed instead of a raster data cube. The user must ensure that the data is properly passed as an array so that the UDF can make sense of it.", + "description": "Runs a UDF in one of the supported runtime environments.\n\nThe process can either:\n\n1. load and run a UDF stored in a file on the server-side workspace of the authenticated user. The path to the UDF file must be relative to the root directory of the user's workspace.\n2. fetch and run a remotely stored and published UDF by absolute URI.\n3. run the source code specified inline as string.\n\nThe loaded UDF can be executed in several processes such as ``aggregate_spatial()``, ``apply()``, ``apply_dimension()`` and ``reduce_dimension()``. In this case, an array is passed instead of a raster data cube. The user must ensure that the data is properly passed as an array so that the UDF can make sense of it.", "categories": [ "cubes", "import", diff --git a/save_result.json b/save_result.json index d64d3dc4..72ecfaae 100644 --- a/save_result.json +++ b/save_result.json @@ -1,7 +1,7 @@ { "id": "save_result", "summary": "Save processed data to storage", - "description": "Saves processed data to the local user workspace/data store of the authenticated user. This process aims to be compatible with GDAL/OGR formats and options. STAC-compatible metadata should be stored with the processed data.\n\nCalling this process may be rejected by back-ends in the context of secondary web services.", + "description": "Saves processed data to the server-side user workspace of the authenticated user. 
This process aims to be compatible with GDAL/OGR formats and options. STAC-compatible metadata should be stored with the processed data.\n\nCalling this process may be rejected by back-ends in the context of secondary web services.", "categories": [ "cubes", "export" From 1ad45ace970ae90d4e45c45944056d2f976899b8 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 12 Apr 2021 11:48:08 +0200 Subject: [PATCH 053/109] New definition for array_create --- proposals/array_create.json | 55 +++++++++++++++++++++++++++---------- 1 file changed, 41 insertions(+), 14 deletions(-) diff --git a/proposals/array_create.json b/proposals/array_create.json index 82911c73..2b05005f 100644 --- a/proposals/array_create.json +++ b/proposals/array_create.json @@ -1,29 +1,29 @@ { "id": "array_create", "summary": "Create an array", - "description": "Creates a new array, which by default is empty.\n\nBy providing the parameter `length`, the array can be pre-filled with the given number of elements. By default, each element is set to `null` unless another value is specified through the parameter `value`.", + "description": "Creates a new array, which by default is empty.\n\nThe second parameter `repeat` allows to add the given array multiple times to the new array.\n\nIn most cases you can simply pass a (native) array to processes directly, but this process is especially useful to create a new array that is getting returned by a child process, for example in ``apply_dimension()``.", "categories": [ "arrays" ], "experimental": true, "parameters": [ { - "name": "length", - "description": "The number of elements to fill the array with. Defaults to `0`.", + "name": "data", + "description": "A (native) array to fill the newly created array with. Defaults to an empty array.", "optional": true, - "default": 0, + "default": [], "schema": { - "type": "integer", - "minimum": 0 + "description": "Any data type is allowed." } }, { - "name": "value", - "description": "The value to fill the array with, in case `length` is greater than 0. Defaults to `null` (no data).", + "name": "repeat", + "description": "The number of times the (native) array specified in `data` is repeatedly added after each other to the new array being created. Defaults to `1`.", "optional": true, - "default": null, + "default": 1, "schema": { - "description": "Any data type is allowed." + "type": "integer", + "minimum": 1 } } ], @@ -43,7 +43,26 @@ }, { "arguments": { - "length": 3 + "data": [ + "this", + "is", + "a", + "test" + ] + }, + "returns": [ + "this", + "is", + "a", + "test" + ] + }, + { + "arguments": { + "data": [ + null + ], + "repeat": 3 }, "returns": [ null, @@ -53,12 +72,20 @@ }, { "arguments": { - "length": 2, - "value": 1 + "data": [ + 1, + 2, + 3 + ], + "repeat": 2 }, "returns": [ 1, - 1 + 2, + 3, + 1, + 2, + 3 ] } ] From a30b9140f629cbe4b51b439427d8267bb602f247 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 12 Apr 2021 18:50:38 +0200 Subject: [PATCH 054/109] Show notice about back-end variations https://github.com/Open-EO/openeo.org/issues/27 --- tests/docs.html | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/docs.html b/tests/docs.html index 3d51ae7d..162d28fb 100644 --- a/tests/docs.html +++ b/tests/docs.html @@ -114,7 +114,8 @@ document: 'processes.json', categorize: true, apiVersion: '1.0.0', - title: 'openEO processes (draft)' + title: 'openEO processes (draft)', + notice: '**Note:** This is the list of all processes specified by the openEO project. Back-ends implement a varying set of processes. 
Thus, the processes you can use at a specific back-end may deviate from the specification, may include non-standardized processes and may not implement all processes listed here. Please check each back-end individually for the processes they support. The client libraries usually have a function called `listProcesses` or `list_processes` for that.'
         }
       })
     });
From e8bfd29248d199e34320cb54f514dcab08b34a93 Mon Sep 17 00:00:00 2001
From: Matthias Mohr
Date: Tue, 13 Apr 2021 16:51:40 +0200
Subject: [PATCH 055/109] Added array_interpolate_linear proposal #173

---
 CHANGELOG.md                            |   1 +
 proposals/array_interpolate_linear.json | 101 ++++++++++++++++++++++++
 2 files changed, 102 insertions(+)
 create mode 100644 proposals/array_interpolate_linear.json

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6df07c49..893663e4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### Added
 - New processes in proposal state
+  - `array_interpolate_linear` [#173](https://github.com/Open-EO/openeo-processes/issues/173)
   - `is_infinite`
   - `nan`
 - Added return value details (property `returns`) for the schemas with the subtype `process-graph`. [API#350](https://github.com/Open-EO/openeo-api/issues/350)
diff --git a/proposals/array_interpolate_linear.json b/proposals/array_interpolate_linear.json
new file mode 100644
index 00000000..58741dc4
--- /dev/null
+++ b/proposals/array_interpolate_linear.json
@@ -0,0 +1,101 @@
+{
+  "id": "array_interpolate_linear",
+  "summary": "One-dimensional linear interpolation for arrays",
+  "description": "Performs a linear interpolation for each of the no-data values (`null`) in the array given, except for the first and last value. If one of those values is a no-data value, they can be replaced with the parameters `first` and `last`.",
+  "categories": [
+    "arrays",
+    "math",
+    "math > interpolation"
+  ],
+  "experimental": true,
+  "parameters": [
+    {
+      "name": "data",
+      "description": "An array of numbers and no-data values.\n\nIf the given array is a labeled array, the labels must have a natural/inherent label order and it expects the labels to be sorted accordingly. This is the default behavior in openEO for spatial and temporal dimensions.",
+      "schema": {
+        "type": "array",
+        "items": {
+          "type": [
+            "number",
+            "null"
+          ]
+        }
+      }
+    },
+    {
+      "name": "first",
+      "description": "A value to set for the first value, if the first value is a no-data value. By default (`null`), the value is not changed.",
+      "default": null,
+      "optional": true,
+      "schema": {
+        "type": [
+          "number",
+          "null"
+        ]
+      }
+    },
+    {
+      "name": "last",
+      "description": "A value to set for the last value, if the last value is a no-data value. 
By default (`null`), the value is not changed.", + "default": null, + "optional": true, + "schema": { + "type": [ + "number", + "null" + ] + } + } + ], + "returns": { + "description": "An array with no-data values being replaced with either interpolated values or the values given in the parameters `first` and `last`.", + "schema": { + "type": [ + "number", + "null" + ] + } + }, + "examples": [ + { + "arguments": { + "data": [ + null, + 1, + null, + 6, + null, + 5 + ] + }, + "returns": [ + null, + 1, + 3.5, + 6, + 5.5, + 5 + ] + }, + { + "arguments": { + "data": [ + null, + 1, + null, + -2, + null + ], + "first": 0, + "last": -1 + }, + "returns": [ + 0, + 1, + -0.5, + -2, + -1 + ] + } + ] +} \ No newline at end of file From 78cbb722bc8391f9e0874e25f8873122ab766d4b Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 13 Apr 2021 17:19:25 +0200 Subject: [PATCH 056/109] Simplify process --- proposals/array_interpolate_linear.json | 54 +++---------------------- 1 file changed, 5 insertions(+), 49 deletions(-) diff --git a/proposals/array_interpolate_linear.json b/proposals/array_interpolate_linear.json index 58741dc4..7e093f30 100644 --- a/proposals/array_interpolate_linear.json +++ b/proposals/array_interpolate_linear.json @@ -1,7 +1,7 @@ { "id": "array_interpolate_linear", "summary": "One-dimensional linear interpolation for arrays", - "description": "Performs a linear interpolation for each of the no-data values (`null`) in the array given, except for the first and last value. If one of those values is a no-data value, they can be replaced with the parameters `first` and `last`.", + "description": "Performs a linear interpolation for each of the no-data values (`null`) in the array given, except for the first and last value.", "categories": [ "arrays", "math", @@ -21,34 +21,10 @@ ] } } - }, - { - "name": "first", - "description": "A value to set for the first value, if the first value is a no-data value. By default (`null`), the value is not changed.", - "default": null, - "optional": true, - "schema": { - "type": [ - "number", - "null" - ] - } - }, - { - "name": "last", - "description": "A value to set for the last value, if the last value is a no-data value. By default (`null`), the value is not changed.", - "default": null, - "optional": true, - "schema": { - "type": [ - "number", - "null" - ] - } } ], "returns": { - "description": "An array with no-data values being replaced with either interpolated values or the values given in the parameters `first` and `last`.", + "description": "An array with no-data values being replaced with interpolated values. 
If not at least 2 numerical values are available in the array, the array stays the same.", "schema": { "type": [ "number", @@ -65,7 +41,7 @@ null, 6, null, - 5 + -8 ] }, "returns": [ @@ -73,28 +49,8 @@ 1, 3.5, 6, - 5.5, - 5 - ] - }, - { - "arguments": { - "data": [ - null, - 1, - null, - -2, - null - ], - "first": 0, - "last": -1 - }, - "returns": [ - 0, - 1, - -0.5, - -2, - -1 + -1, + -8 ] } ] From 7127bbe110b1faaf48c77c75020f178915a62768 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 14 Apr 2021 15:43:40 +0200 Subject: [PATCH 057/109] Incorporate code review feedback --- proposals/array_interpolate_linear.json | 9 ++++++++- tests/.words | 3 ++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/proposals/array_interpolate_linear.json b/proposals/array_interpolate_linear.json index 7e093f30..976c5f03 100644 --- a/proposals/array_interpolate_linear.json +++ b/proposals/array_interpolate_linear.json @@ -1,7 +1,7 @@ { "id": "array_interpolate_linear", "summary": "One-dimensional linear interpolation for arrays", - "description": "Performs a linear interpolation for each of the no-data values (`null`) in the array given, except for the first and last value.", + "description": "Performs a linear interpolation for each of the no-data values (`null`) in the array given, except for leading and trailing no-data values.\n\nThe linear interpolants are defined by the array indices or labels (x) and the values in the array (y).", "categories": [ "arrays", "math", @@ -53,5 +53,12 @@ -8 ] } + ], + "links": [ + { + "rel": "about", + "href": "https://en.wikipedia.org/wiki/Linear_interpolation", + "title": "Linear interpolation explained by Wikipedia" + } ] } \ No newline at end of file diff --git a/tests/.words b/tests/.words index 568fd814..db598908 100644 --- a/tests/.words +++ b/tests/.words @@ -31,4 +31,5 @@ signum STAC summand UDFs -unary \ No newline at end of file +unary +interpolants \ No newline at end of file From 96f3e8999ffed5d6e6a558827e893a4b49b20411 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 14 Apr 2021 16:01:24 +0200 Subject: [PATCH 058/109] Minor improvement --- proposals/array_interpolate_linear.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proposals/array_interpolate_linear.json b/proposals/array_interpolate_linear.json index 976c5f03..3a106c77 100644 --- a/proposals/array_interpolate_linear.json +++ b/proposals/array_interpolate_linear.json @@ -11,7 +11,7 @@ "parameters": [ { "name": "data", - "description": "An array of numbers and no-data values.\n\nIf the given array is a labeled array, the labels must have a natural/inherent label order and it expects the labels to be sorted accordingly. This is the default behavior in openEO for spatial and temporal dimensions.", + "description": "An array of numbers and no-data values.\n\nIf the given array is a labeled array, the labels must have a natural/inherent label order and the process expects the labels to be sorted accordingly. 
This is the default behavior in openEO for spatial and temporal dimensions.", "schema": { "type": "array", "items": { From 611eac3eb545fb437b6362412839c51bf3c04f0d Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 16 Apr 2021 13:29:55 +0200 Subject: [PATCH 059/109] Added array_append --- CHANGELOG.md | 1 + proposals/array_append.json | 53 +++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) create mode 100644 proposals/array_append.json diff --git a/CHANGELOG.md b/CHANGELOG.md index f32f6c87..76ead45a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - New processes in proposal state + - `array_append` - `array_concat` - `array_create` - `array_find_label` diff --git a/proposals/array_append.json b/proposals/array_append.json new file mode 100644 index 00000000..40d8c8e7 --- /dev/null +++ b/proposals/array_append.json @@ -0,0 +1,53 @@ +{ + "id": "array_append", + "summary": "Append a value to an array", + "description": "Appends a value to the end of the array. Array labels get discarded from the array.", + "categories": [ + "arrays" + ], + "experimental": true, + "parameters": [ + { + "name": "data", + "description": "An array.", + "schema": { + "type": "array", + "items": { + "description": "Any data type is allowed." + } + } + }, + { + "name": "value", + "description": "Value to append to the array.", + "schema": { + "description": "Any data type is allowed." + } + } + ], + "returns": { + "description": "The new array with the value being appended.", + "schema": { + "type": "array", + "items": { + "description": "Any data type is allowed." + } + } + }, + "examples": [ + { + "arguments": { + "data": [ + 1, + 2 + ], + "value": 3 + }, + "returns": [ + 1, + 2, + 3 + ] + } + ] +} \ No newline at end of file From 4116395754c07af807bb99751c31093a7381eb64 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 16 Apr 2021 13:41:31 +0200 Subject: [PATCH 060/109] Add process graph for array_append --- proposals/array_append.json | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/proposals/array_append.json b/proposals/array_append.json index 40d8c8e7..59d8b178 100644 --- a/proposals/array_append.json +++ b/proposals/array_append.json @@ -49,5 +49,21 @@ 3 ] } - ] + ], + "process_graph": { + "append": { + "process_id": "array_concat", + "arguments": { + "array1": { + "from_parameter": "data" + }, + "array2": [ + { + "from_parameter": "value" + } + ] + }, + "result": true + } + } } \ No newline at end of file From bbff1c6bfe1d8ea9067775a36189f8925cd80de3 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 21 Apr 2021 15:50:45 +0200 Subject: [PATCH 061/109] Added links to data cube description #216 --- CHANGELOG.md | 3 ++- aggregate_spatial.json | 4 ++-- aggregate_temporal.json | 4 ++-- aggregate_temporal_period.json | 4 ++-- apply.json | 9 ++++++++- apply_dimension.json | 9 ++++++++- apply_kernel.json | 5 +++++ apply_neighborhood.json | 9 ++++++++- create_raster_cube.json | 9 ++++++++- filter_bands.json | 5 +++++ filter_bbox.json | 5 +++++ filter_spatial.json | 5 +++++ filter_temporal.json | 9 ++++++++- load_collection.json | 5 +++++ proposals/aggregate_spatial_binary.json | 4 ++-- proposals/aggregate_spatial_window.json | 9 ++++++++- proposals/filter_labels.json | 7 +++++++ proposals/reduce_dimension_binary.json | 5 +++++ proposals/resample_cube_temporal.json | 4 ++-- reduce_dimension.json | 9 ++++++++- resample_cube_spatial.json | 4 
++-- resample_spatial.json | 5 +++++ 22 files changed, 112 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6df07c49..31fd6dea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,7 +31,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `array_labels`: Clarified the accepted data type for array elements passed to the parameter `data`. - `merge_cubes`: Clarified the dimension label order after the merge. [#212](https://github.com/Open-EO/openeo-processes/issues/212) - Fixed typos, grammar issues and other spelling-related issues in many of the processes. -- Fixed the examples `array_contains_nodata` and `array_find_nodata` +- Fixed the examples `array_contains_nodata` and `array_find_nodata`. +- Fixed links to openEO glossary and added links to data cube introduction. [#216](https://github.com/Open-EO/openeo-processes/issues/216) ## 1.0.0 - 2020-07-31 diff --git a/aggregate_spatial.json b/aggregate_spatial.json index 0bd4c95c..58303652 100644 --- a/aggregate_spatial.json +++ b/aggregate_spatial.json @@ -91,9 +91,9 @@ }, "links": [ { - "href": "https://openeo.org/documentation/1.0/glossary.html#aggregate-reducing-resolution", + "href": "https://openeo.org/documentation/1.0/datacubes.html#aggregate", "rel": "about", - "title": "Aggregation explained in the openEO glossary" + "title": "Aggregation explained in the openEO documentation" }, { "href": "http://www.opengeospatial.org/standards/sfa", diff --git a/aggregate_temporal.json b/aggregate_temporal.json index 47d63d28..7c146326 100644 --- a/aggregate_temporal.json +++ b/aggregate_temporal.json @@ -233,9 +233,9 @@ }, "links": [ { - "href": "https://openeo.org/documentation/1.0/glossary.html#aggregate-reducing-resolution", + "href": "https://openeo.org/documentation/1.0/datacubes.html#aggregate", "rel": "about", - "title": "Aggregation explained in the openEO glossary" + "title": "Aggregation explained in the openEO documentation" } ] } \ No newline at end of file diff --git a/aggregate_temporal_period.json b/aggregate_temporal_period.json index 4f4008b1..936410c1 100644 --- a/aggregate_temporal_period.json +++ b/aggregate_temporal_period.json @@ -113,9 +113,9 @@ }, "links": [ { - "href": "https://openeo.org/documentation/1.0/glossary.html#aggregate-reducing-resolution", + "href": "https://openeo.org/documentation/1.0/datacubes.html#aggregate", "rel": "about", - "title": "Aggregation explained in the openEO glossary" + "title": "Aggregation explained in the openEO documentation" } ] } \ No newline at end of file diff --git a/apply.json b/apply.json index 7d0342de..3a24080b 100644 --- a/apply.json +++ b/apply.json @@ -62,5 +62,12 @@ "type": "object", "subtype": "raster-cube" } - } + }, + "links": [ + { + "href": "https://openeo.org/documentation/1.0/datacubes.html#apply", + "rel": "about", + "title": "Apply explained in the openEO documentation" + } + ] } \ No newline at end of file diff --git a/apply_dimension.json b/apply_dimension.json index 1ecb4b73..d0b6d6da 100644 --- a/apply_dimension.json +++ b/apply_dimension.json @@ -93,5 +93,12 @@ "DimensionNotAvailable": { "message": "A dimension with the specified name does not exist." 
} - } + }, + "links": [ + { + "href": "https://openeo.org/documentation/1.0/datacubes.html#apply", + "rel": "about", + "title": "Apply explained in the openEO documentation" + } + ] } \ No newline at end of file diff --git a/apply_kernel.json b/apply_kernel.json index f6512d45..20d940c2 100644 --- a/apply_kernel.json +++ b/apply_kernel.json @@ -82,6 +82,11 @@ } }, "links": [ + { + "href": "https://openeo.org/documentation/1.0/datacubes.html#apply", + "rel": "about", + "title": "Apply explained in the openEO documentation" + }, { "rel": "about", "href": "http://www.songho.ca/dsp/convolution/convolution.html", diff --git a/apply_neighborhood.json b/apply_neighborhood.json index f7ad3615..d168c2c9 100644 --- a/apply_neighborhood.json +++ b/apply_neighborhood.json @@ -225,5 +225,12 @@ "DataCubePropertiesImmutable": { "message": "The dimension properties (name, type, labels, reference system and resolution) must remain unchanged." } - } + }, + "links": [ + { + "href": "https://openeo.org/documentation/1.0/datacubes.html#apply", + "rel": "about", + "title": "Apply explained in the openEO documentation" + } + ] } \ No newline at end of file diff --git a/create_raster_cube.json b/create_raster_cube.json index 8872931f..576728ee 100644 --- a/create_raster_cube.json +++ b/create_raster_cube.json @@ -12,5 +12,12 @@ "type": "object", "subtype": "raster-cube" } - } + }, + "links": [ + { + "href": "https://openeo.org/documentation/1.0/datacubes.html", + "rel": "about", + "title": "Data Cubes explained in the openEO documentation" + } + ] } \ No newline at end of file diff --git a/filter_bands.json b/filter_bands.json index 9d49e3cd..ee9c9aae 100644 --- a/filter_bands.json +++ b/filter_bands.json @@ -78,6 +78,11 @@ "rel": "about", "href": "https://github.com/radiantearth/stac-spec/tree/master/extensions/eo#common-band-names", "title": "List of common band names as specified by the STAC specification" + }, + { + "href": "https://openeo.org/documentation/1.0/datacubes.html#filter", + "rel": "about", + "title": "Filters explained in the openEO documentation" } ] } \ No newline at end of file diff --git a/filter_bbox.json b/filter_bbox.json index c1339111..8cc2103a 100644 --- a/filter_bbox.json +++ b/filter_bbox.json @@ -98,6 +98,11 @@ } }, "links": [ + { + "href": "https://openeo.org/documentation/1.0/datacubes.html#filter", + "rel": "about", + "title": "Filters explained in the openEO documentation" + }, { "rel": "about", "href": "https://proj.org/usage/projections.html", diff --git a/filter_spatial.json b/filter_spatial.json index 101469c5..b807b8df 100644 --- a/filter_spatial.json +++ b/filter_spatial.json @@ -32,6 +32,11 @@ } }, "links": [ + { + "href": "https://openeo.org/documentation/1.0/datacubes.html#filter", + "rel": "about", + "title": "Filters explained in the openEO documentation" + }, { "href": "http://www.opengeospatial.org/standards/sfa", "rel": "about", diff --git a/filter_temporal.json b/filter_temporal.json index 81336123..a94366a3 100644 --- a/filter_temporal.json +++ b/filter_temporal.json @@ -83,5 +83,12 @@ "DimensionNotAvailable": { "message": "A dimension with the specified name does not exist." 
} - } + }, + "links": [ + { + "href": "https://openeo.org/documentation/1.0/datacubes.html#filter", + "rel": "about", + "title": "Filters explained in the openEO documentation" + } + ] } diff --git a/load_collection.json b/load_collection.json index 2320cc9e..8248d388 100644 --- a/load_collection.json +++ b/load_collection.json @@ -271,6 +271,11 @@ } ], "links": [ + { + "href": "https://openeo.org/documentation/1.0/datacubes.html", + "rel": "about", + "title": "Data Cubes explained in the openEO documentation" + }, { "rel": "about", "href": "https://proj.org/usage/projections.html", diff --git a/proposals/aggregate_spatial_binary.json b/proposals/aggregate_spatial_binary.json index 214dae99..17632722 100644 --- a/proposals/aggregate_spatial_binary.json +++ b/proposals/aggregate_spatial_binary.json @@ -96,9 +96,9 @@ }, "links": [ { - "href": "https://openeo.org/documentation/1.0/glossary.html#aggregate-reducing-resolution", + "href": "https://openeo.org/documentation/1.0/datacubes.html#aggregate", "rel": "about", - "title": "Aggregation explained in the openEO glossary" + "title": "Aggregation explained in the openEO documentation" }, { "href": "http://www.opengeospatial.org/standards/sfa", diff --git a/proposals/aggregate_spatial_window.json b/proposals/aggregate_spatial_window.json index 10bb011a..77230275 100644 --- a/proposals/aggregate_spatial_window.json +++ b/proposals/aggregate_spatial_window.json @@ -108,5 +108,12 @@ "type": "object", "subtype": "raster-cube" } - } + }, + "links": [ + { + "href": "https://openeo.org/documentation/1.0/datacubes.html#aggregate", + "rel": "about", + "title": "Aggregation explained in the openEO documentation" + } + ] } \ No newline at end of file diff --git a/proposals/filter_labels.json b/proposals/filter_labels.json index 5cfb0264..01d77035 100644 --- a/proposals/filter_labels.json +++ b/proposals/filter_labels.json @@ -107,5 +107,12 @@ "dimension": "platform" } } + ], + "links": [ + { + "href": "https://openeo.org/documentation/1.0/datacubes.html#filter", + "rel": "about", + "title": "Filters explained in the openEO documentation" + } ] } \ No newline at end of file diff --git a/proposals/reduce_dimension_binary.json b/proposals/reduce_dimension_binary.json index e64142ed..3ca58341 100644 --- a/proposals/reduce_dimension_binary.json +++ b/proposals/reduce_dimension_binary.json @@ -85,6 +85,11 @@ } }, "links": [ + { + "href": "https://openeo.org/documentation/1.0/datacubes.html#reduce", + "rel": "about", + "title": "Reducers explained in the openEO documentation" + }, { "rel": "about", "href": "https://en.wikipedia.org/wiki/Reduction_Operator", diff --git a/proposals/resample_cube_temporal.json b/proposals/resample_cube_temporal.json index 12aec39f..ac8414a1 100644 --- a/proposals/resample_cube_temporal.json +++ b/proposals/resample_cube_temporal.json @@ -99,9 +99,9 @@ }, "links": [ { - "href": "https://openeo.org/documentation/1.0/glossary.html#resample-changing-data-cube-geometry", + "href": "https://openeo.org/documentation/1.0/datacubes.html#resample", "rel": "about", - "title": "Resampling explained in the openEO glossary" + "title": "Resampling explained in the openEO documentation" } ] } \ No newline at end of file diff --git a/reduce_dimension.json b/reduce_dimension.json index 9c8dcaed..e52eaf52 100644 --- a/reduce_dimension.json +++ b/reduce_dimension.json @@ -79,5 +79,12 @@ "DimensionNotAvailable": { "message": "A dimension with the specified name does not exist." 
} - } + }, + "links": [ + { + "href": "https://openeo.org/documentation/1.0/datacubes.html#reduce", + "rel": "about", + "title": "Reducers explained in the openEO documentation" + } + ] } \ No newline at end of file diff --git a/resample_cube_spatial.json b/resample_cube_spatial.json index 0f043007..a038b82d 100644 --- a/resample_cube_spatial.json +++ b/resample_cube_spatial.json @@ -56,9 +56,9 @@ }, "links": [ { - "href": "https://openeo.org/documentation/1.0/glossary.html#resample-changing-data-cube-geometry", + "href": "https://openeo.org/documentation/1.0/datacubes.html#resample", "rel": "about", - "title": "Resampling explained in the openEO glossary" + "title": "Resampling explained in the openEO documentation" } ] } diff --git a/resample_spatial.json b/resample_spatial.json index 03c832cf..5f8a1f4d 100644 --- a/resample_spatial.json +++ b/resample_spatial.json @@ -117,6 +117,11 @@ } }, "links": [ + { + "href": "https://openeo.org/documentation/1.0/datacubes.html#resample", + "rel": "about", + "title": "Resampling explained in the openEO documentation" + }, { "rel": "about", "href": "https://proj.org/usage/projections.html", From 3f29c0759e993364b5b20b2db928578164cc0f4f Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 21 Apr 2021 18:08:07 +0200 Subject: [PATCH 062/109] Make apply_dimension description more precise and fix how ref sys are handled #234 --- CHANGELOG.md | 1 + apply_dimension.json | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 31fd6dea..4a40f8a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fixed typos, grammar issues and other spelling-related issues in many of the processes. - Fixed the examples `array_contains_nodata` and `array_find_nodata`. - Fixed links to openEO glossary and added links to data cube introduction. [#216](https://github.com/Open-EO/openeo-processes/issues/216) +- Fixed description of `apply_dimension` with regards to reference systems. Made description easier to understand, too. [#234](https://github.com/Open-EO/openeo-processes/issues/234) ## 1.0.0 - 2020-07-31 diff --git a/apply_dimension.json b/apply_dimension.json index d0b6d6da..796393f4 100644 --- a/apply_dimension.json +++ b/apply_dimension.json @@ -83,7 +83,7 @@ } ], "returns": { - "description": "A data cube with the newly computed values. All dimensions stay the same, except for the dimensions specified in corresponding parameters. There are three cases how the data cube changes:\n\n1. The source dimension **is** the target dimension:\n * The (number of) dimensions remain unchanged.\n * The source dimension properties name, type and reference system remain unchanged.\n * The dimension labels and the resolution are preserved when the number of pixel values in the source dimension is equal to the number of values computed by the process. The other case is described below.\n2. The source dimension **is not** the target dimension and the latter **exists**:\n * The number of dimensions decreases by one as the source dimension is dropped.\n * The target dimension properties name, type and reference system remain unchanged.\n * The resolution changes, the number of dimension labels is equal to the number of values computed by the process and the dimension labels are incrementing integers starting from zero\n3. 
The source dimension **is not** the target dimension and the latter **does not exist**:\n * The number of dimensions remain unchanged, but the source dimension is replaced with the target dimension.\n * The target dimension has the specified name and the type other. The reference system is not changed.\n * The resolution changes, the number of dimension labels is equal to the number of values computed by the process and the dimension labels are incrementing integers starting from zero\n\nFor all three cases except for the exception in the first case, the resolution changes, the number of dimension labels is equal to the number of values computed by the process and the dimension labels are incrementing integers starting from zero.",
+    "description": "A data cube with the newly computed values.\n\nAll dimensions stay the same, except for the dimensions specified in corresponding parameters. There are three cases of how the dimensions can change:\n\n1. The source dimension is the target dimension:\n - The (number of) dimensions remain unchanged as the source dimension is the target dimension.\n - The source dimension properties name and type remain unchanged.\n - The dimension labels, the reference system and the resolution are preserved only if the number of pixel values in the source dimension is equal to the number of values computed by the process. Otherwise, all other dimension properties change as defined in the list below.\n2. The source dimension is not the target dimension and the latter exists:\n - The number of dimensions decreases by one as the source dimension is dropped.\n - The target dimension properties name and type remain unchanged. All other dimension properties change as defined in the list below.\n3. The source dimension is not the target dimension and the latter does not exist:\n - The number of dimensions remains unchanged, but the source dimension is replaced with the target dimension.\n - The target dimension has the specified name and the type `other`. All other dimension properties are set as defined in the list below.\n\nUnless otherwise stated above, for the given (target) dimension the following applies:\n\n- the number of dimension labels is equal to the number of values computed by the process,\n- the dimension labels are incrementing integers starting from zero,\n- the resolution changes, and\n- the reference system is undefined.",
     "schema": {
       "type": "object",
       "subtype": "raster-cube"
From 45d1bbea57c457a30f0bf504aa44bcced10471fb Mon Sep 17 00:00:00 2001
From: Matthias Mohr
Date: Mon, 26 Apr 2021 16:52:51 +0200
Subject: [PATCH 063/109] unify categories for mask and mask_polygon #238

---
 mask.json | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mask.json b/mask.json
index d15964ad..d7b591e1 100644
--- a/mask.json
+++ b/mask.json
@@ -3,6 +3,7 @@
   "summary": "Apply a raster mask",
   "description": "Applies a mask to a raster data cube. To apply a vector mask use ``mask_polygon()``.\n\nA mask is a raster data cube for which corresponding pixels among `data` and `mask` are compared and those pixels in `data` are replaced whose pixels in `mask` are non-zero (for numbers) or `true` (for boolean values). The pixel values are replaced with the value specified for `replacement`, which defaults to `null` (no data).\n\nThe data cubes have to be compatible so that each dimension in the mask must also be available in the raster data cube with the same name, type, reference system, resolution and labels. 
Dimensions can be missing in the mask with the result that the mask is applied for each label of the missing dimension in the data cube. The process fails if there's an incompatibility found between the raster data cube and the mask.", "categories": [ + "cubes", "masks" ], "parameters": [ From 767ce3e688cd3743dbd0a99d550d650659c1d903 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 27 Apr 2021 12:42:06 +0200 Subject: [PATCH 064/109] =?UTF-8?q?`mask=5Fpolygon`:=C2=A0Also=20support?= =?UTF-8?q?=20multi=20polygons=20instead=20of=20just=20polygons.=20#237?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CHANGELOG.md | 3 ++- mask_polygon.json | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 31fd6dea..0d7c3d9a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,11 +18,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Moved the experimental processes `aggregate_spatial_binary`, `reduce_dimension_binary` and `run_udf_externally` to the proposals. - Moved the rarely used and implemented processes `cummax`, `cummin`, `cumproduct`, `cumsum`, `debug`, `filter_labels`, `load_result`, `load_uploaded_files`, `resample_cube_temporal` to the proposals. - Exception messages have been aligned always use ` instead of '. Tooling could render it with CommonMark. +- `mask_polygon`: Also support multi polygons instead of just polygons. [#237](https://github.com/Open-EO/openeo-processes/issues/237) ### Fixed - Clarify that the user workspace is server-side. [#225](https://github.com/Open-EO/openeo-processes/issues/225) - Clarify that the `condition` parameter for `array_filter` works also on indices and labels. -- Clarify contradicting statements in `filter_temporal` for the default value of the `dimension` parameter. By default *all* temporal dimensions are affected by the process. [#203](https://github.com/Open-EO/openeo-processes/issues/203) +- Clarify contradicting statements in `filter_temporal` for the default value of the `dimension` parameter. By default *all* temporal dimensions are affected by the process. [#203](https://github.com/Open-EO/openeo-processes/issues/203) - Clarify how the parameters passed to the overlap resolver correspond to the data cubes. [#184](https://github.com/Open-EO/openeo-processes/issues/184) - Improve and clarify specifications for `is_nan`, `is_nodata`, `is_valid`. [#189](https://github.com/Open-EO/openeo-processes/issues/189) - Improve and clarify specifications for `all` and `any`. [#189](https://github.com/Open-EO/openeo-processes/issues/199) diff --git a/mask_polygon.json b/mask_polygon.json index 763ac80b..c7e7d4cf 100644 --- a/mask_polygon.json +++ b/mask_polygon.json @@ -1,7 +1,7 @@ { "id": "mask_polygon", "summary": "Apply a polygon mask", - "description": "Applies a polygon mask to a raster data cube. To apply a raster mask use ``mask()``.\n\nAll pixels for which the point at the pixel center **does not** intersect with any polygon (as defined in the Simple Features standard by the OGC) are replaced. This behavior can be inverted by setting the parameter `inside` to `true`.\n\nThe pixel values are replaced with the value specified for `replacement`, which defaults to `null` (no data). No data values in `data` will be left untouched by the masking operation.", + "description": "Applies a (multi) polygon mask to a raster data cube. 
To apply a raster mask use ``mask()``.\n\nAll pixels for which the point at the pixel center **does not** intersect with any polygon (as defined in the Simple Features standard by the OGC) are replaced. This behavior can be inverted by setting the parameter `inside` to `true`.\n\nThe pixel values are replaced with the value specified for `replacement`, which defaults to `null` (no data). No data values in `data` will be left untouched by the masking operation.", "categories": [ "cubes", "masks" @@ -17,7 +17,7 @@ }, { "name": "mask", - "description": "A GeoJSON object containing a polygon. The provided feature types can be one of the following:\n\n* A `Polygon` geometry,\n* a `GeometryCollection` containing Polygons,\n* a `Feature` with a `Polygon` geometry or\n* a `FeatureCollection` containing `Feature`s with a `Polygon` geometry.", + "description": "A GeoJSON object containing at least one polygon. The provided feature types can be one of the following:\n\n* A `Polygon` or `MultiPolygon` geometry,\n* a `GeometryCollection` containing `Polygon` or `MultiPolygon` geometries,\n* a `Feature` with a `Polygon` or `MultiPolygon` geometry, or\n* a `FeatureCollection` containing at least one `Feature` with `Polygon` or `MultiPolygon` geometries.", "schema": { "type": "object", "subtype": "geojson" From 767bfeb15cbec86fa0c461ef8e9c7e89c458c57b Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 27 Apr 2021 14:41:41 +0200 Subject: [PATCH 065/109] Revert array_label changes. --- CHANGELOG.md | 1 - array_labels.json | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 76ead45a..61436a2d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `is_infinite` - `nan` - Added return value details (property `returns`) for the schemas with the subtype `process-graph`. [API#350](https://github.com/Open-EO/openeo-api/issues/350) -- `apply_labels`: Also accept arrays without labels and return an empty array then. - `apply_neighborhood`: Clarify behavior for data cubes returned by the child processes and for that add the exception `DataCubePropertiesImmutable`. ### Changed diff --git a/array_labels.json b/array_labels.json index e18542af..461a2ed2 100644 --- a/array_labels.json +++ b/array_labels.json @@ -1,16 +1,17 @@ { "id": "array_labels", "summary": "Get the labels for an array", - "description": "Gives all labels for a labeled array in the data cube. The labels have the same order as in the array.\n\nIf the array is not labeled, an empty array is returned.", + "description": "Gives all labels for a labeled array. The labels have the same order as in the array.", "categories": [ "arrays" ], "parameters": [ { "name": "data", - "description": "An array.", + "description": "An array with labels.", "schema": { "type": "array", + "subtype": "labeled-array", "items": { "description": "Any data type." } From 9f78a5030dbe6c22e8b6df752f7321a565e31ec8 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 27 Apr 2021 14:51:01 +0200 Subject: [PATCH 066/109] Simplified array_modify --- proposals/array_modify.json | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/proposals/array_modify.json b/proposals/array_modify.json index 50e83a70..497016db 100644 --- a/proposals/array_modify.json +++ b/proposals/array_modify.json @@ -29,7 +29,7 @@ }, { "name": "index", - "description": "The index of the element to insert the value(s) before. 
To insert after the last element, specify the number of elements in the array. If the index is greater than the number of elements, the array is filled with `null` (no-data) values until the index is reached and then the values are added starting at the index given. The number of elements can be retrieved with the process ``count()`` having the parameter `condition` set to true.", + "description": "The index of the element to insert the value(s) before. If the index is greater than the number of elements, the process throws an `ArrayElementNotAvailable` exception.\n\nTo insert after the last element, there are two options:\n\n1. Use the simpler processes ``array_append()`` to append a single value or ``array_concat`` to append multiple values.\n2. Specify the number of elements in the array. You can retrieve the number of elements with the process ``count()``, having the parameter `condition` set to `true`.", "schema": { "type": "integer", "minimum": 0 @@ -55,6 +55,11 @@ } } }, + "exceptions": { + "ArrayElementNotAvailable": { + "message": "The array has no element with the specified index." + } + }, "examples": [ { "description": "Replace a single value in the array.", From ffda4104afed75bfbb395cfcfc4857eb72039fa2 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 28 Apr 2021 11:00:22 +0200 Subject: [PATCH 067/109] Make it easier to distinguish strings (paths/urls/code/...) --- CHANGELOG.md | 3 +++ meta/subtype-schemas.json | 6 ++++-- proposals/load_uploaded_files.json | 3 ++- proposals/run_udf_externally.json | 5 +++-- run_udf.json | 13 ++++++++----- 5 files changed, 20 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 31fd6dea..58bf3615 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Moved the experimental processes `aggregate_spatial_binary`, `reduce_dimension_binary` and `run_udf_externally` to the proposals. - Moved the rarely used and implemented processes `cummax`, `cummin`, `cumproduct`, `cumsum`, `debug`, `filter_labels`, `load_result`, `load_uploaded_files`, `resample_cube_temporal` to the proposals. - Exception messages have been aligned always use ` instead of '. Tooling could render it with CommonMark. +- `run_udf` and `run_udf_externally`: Specify specific (extensible) protocols for UDF URIs. ### Fixed - Clarify that the user workspace is server-side. [#225](https://github.com/Open-EO/openeo-processes/issues/225) @@ -33,6 +34,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fixed typos, grammar issues and other spelling-related issues in many of the processes. - Fixed the examples `array_contains_nodata` and `array_find_nodata`. - Fixed links to openEO glossary and added links to data cube introduction. [#216](https://github.com/Open-EO/openeo-processes/issues/216) +- Clarified disallowed characters in subtype `file-path`. +- Clarified that UDF source code must contain a newline/line-break (affects `run_udf`). ## 1.0.0 - 2020-07-31 diff --git a/meta/subtype-schemas.json b/meta/subtype-schemas.json index 160c8168..92c240bd 100644 --- a/meta/subtype-schemas.json +++ b/meta/subtype-schemas.json @@ -143,6 +143,7 @@ "file-path": { "type": "string", "subtype": "file-path", + "pattern": "^[^\r\n\\:'\"]+$", "title": "Single File path", "description": "A relative path to a user-uploaded file. Folders can't be specified." 
}, @@ -381,7 +382,8 @@ "type": "string", "subtype": "udf-code", "title": "UDF source code", - "description": "The (multi-line) source code of an user-defined function (UDF)." + "description": "The multi-line source code of a user-defined function (UDF), must contain a newline/line-break.", + "pattern": "(\r\n|\r|\n)" }, "udf-runtime": { "type": "string", @@ -400,7 +402,7 @@ "subtype": "uri", "format": "uri", "title": "URI", - "description": "A valid URI according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986.html)." + "description": "A valid URI according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986.html). Can be restricted using a regex pattern." }, "vector-cube": { "type": "object", diff --git a/proposals/load_uploaded_files.json b/proposals/load_uploaded_files.json index da017227..bf811b4e 100644 --- a/proposals/load_uploaded_files.json +++ b/proposals/load_uploaded_files.json @@ -16,7 +16,8 @@ "subtype": "file-paths", "items": { "type": "string", - "subtype": "file-path" + "subtype": "file-path", + "pattern": "^[^\r\n\\:'\"]+$" } } }, diff --git a/proposals/run_udf_externally.json b/proposals/run_udf_externally.json index 99918408..521f7bef 100644 --- a/proposals/run_udf_externally.json +++ b/proposals/run_udf_externally.json @@ -34,11 +34,12 @@ }, { "name": "url", - "description": "URL to a remote UDF service.", + "description": "Absolute URL to a remote UDF service.", "schema": { "type": "string", "format": "uri", - "subtype": "uri" + "subtype": "uri", + "pattern": "^(http|https)://" } }, { diff --git a/run_udf.json b/run_udf.json index 37509d65..3586f06c 100644 --- a/run_udf.json +++ b/run_udf.json @@ -36,20 +36,23 @@ "description": "Either source code, an absolute URL or a path to a UDF script.", "schema": [ { - "description": "URI to a UDF", + "description": "Absolute URL to a UDF", "type": "string", "format": "uri", - "subtype": "uri" + "subtype": "uri", + "pattern": "^(http|https)://" }, { "description": "Path to a UDF uploaded to the server.", "type": "string", - "subtype": "file-path" + "subtype": "file-path", + "pattern": "^[^\r\n\\:'\"]+$" }, { - "description": "Source code as string", + "description": "The multi-line source code of a UDF, must contain a newline/line-break.", "type": "string", - "subtype": "udf-code" + "subtype": "udf-code", + "pattern": "(\r\n|\r|\n)" } ] }, From ffe710e50cf52308d3fca910fb9839583a1a9859 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Thu, 29 Apr 2021 10:13:24 +0200 Subject: [PATCH 068/109] Improve description for ard_surface_reflectance --- proposals/ard_surface_reflectance.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proposals/ard_surface_reflectance.json b/proposals/ard_surface_reflectance.json index f6da0375..8bbea7ff 100644 --- a/proposals/ard_surface_reflectance.json +++ b/proposals/ard_surface_reflectance.json @@ -61,7 +61,7 @@ } ], "returns": { - "description": "Data cube containing bottom of atmosphere reflectances for each spectral band in the source data cube, with atmospheric disturbances like clouds and cloud shadows removed. No-data values (null) are directly set in the bands. Depending on the methods used, several additional bands will be added to the data cube:\n\nData cube containing bottom of atmosphere reflectances for each spectral band in the source data cube, with atmospheric disturbances like clouds and cloud shadows removed. 
Depending on the methods used, several additional bands will be added to the data cube:\n\n- `date` (optional): Specifies per-pixel acquisition timestamps.\n- `incomplete-testing` (required): Identifies pixels for which the per-pixel tests have not all been successfully completed with a value of 1. Otherwise the value is 0.\n- `saturation` (required) / `saturation_{band}` (optional): Indicates where pixels in the input spectral bands are saturated (1) or not (0). If the saturation is given per band, the band names are `saturation_{band}` with `{band}` being the band name from the source data cube.\n- `cloud`, `shadow` (both required),`aerosol`, `haze`, `ozone`, `water_vapor` (all optional): Indicates the probability of pixels being an atmospheric disturbances such as clouds. All bands have values between 0 (clear) and 1, which describes the probability that it is an atmospheric disturbance.\n- `snow-ice` (optional): Points to a file that indicates whether a pixel is assessed as being snow/ice (1) or not (0). All values describes the probability and must be between 0 and 1.\n- `land-water` (optional): Indicates whether a pixel is assessed as being land (1) or water (0). All values describes the probability and must be between 0 and 1.\n- `incidence-angle` (optional): Specifies per-pixel incidence angles in degrees.\n- `azimuth` (optional): Specifies per-pixel azimuth angles in degrees.\n- `sun-azimuth:` (optional): Specifies per-pixel sun azimuth angles in degrees.\n- `sun-elevation` (optional): Specifies per-pixel sun elevation angles in degrees.\n- `terrain-shadow` (optional): Indicates with a value of 1 whether a pixel is not directly illuminated due to terrain shadowing. Otherwise the value is 0.\n- `terrain-occlusion` (optional): Indicates with a value of 1 whether a pixel is not visible to the sensor due to terrain occlusion during off-nadir viewing. Otherwise the value is 0.\n- `terrain-illumination` (optional): Contains coefficients used for terrain illumination correction are provided for each pixel.\n\nThe data returned is CARD4L compliant and contains metadata.\n\nThe data returned is CARD4L compliant with corresponding metadata.", + "description": "Data cube containing bottom of atmosphere reflectances for each spectral band in the source data cube, with atmospheric disturbances like clouds and cloud shadows removed. No-data values (null) are directly set in the bands. Depending on the methods used, several additional bands will be added to the data cube:\n\nData cube containing bottom of atmosphere reflectances for each spectral band in the source data cube, with atmospheric disturbances like clouds and cloud shadows removed. Depending on the methods used, several additional bands will be added to the data cube:\n\n- `date` (optional): Specifies per-pixel acquisition timestamps.\n- `incomplete-testing` (required): Identifies pixels with a value of 1 for which the per-pixel tests (at least saturation, cloud and cloud shadows, see CARD4L specification for details) have not all been successfully completed. Otherwise, the value is 0.\n- `saturation` (required) / `saturation_{band}` (optional): Indicates where pixels in the input spectral bands are saturated (1) or not (0). 
If the saturation is given per band, the band names are `saturation_{band}` with `{band}` being the band name from the source data cube.\n- `cloud`, `shadow` (both required),`aerosol`, `haze`, `ozone`, `water_vapor` (all optional): Indicates the probability of pixels being an atmospheric disturbance such as clouds. All bands have values between 0 (clear) and 1, which describes the probability that it is an atmospheric disturbance.\n- `snow-ice` (optional): Points to a file that indicates whether a pixel is assessed as being snow/ice (1) or not (0). All values describe the probability and must be between 0 and 1.\n- `land-water` (optional): Indicates whether a pixel is assessed as being land (1) or water (0). All values describe the probability and must be between 0 and 1.\n- `incidence-angle` (optional): Specifies per-pixel incidence angles in degrees.\n- `azimuth` (optional): Specifies per-pixel azimuth angles in degrees.\n- `sun-azimuth:` (optional): Specifies per-pixel sun azimuth angles in degrees.\n- `sun-elevation` (optional): Specifies per-pixel sun elevation angles in degrees.\n- `terrain-shadow` (optional): Indicates with a value of 1 whether a pixel is not directly illuminated due to terrain shadowing. Otherwise, the value is 0.\n- `terrain-occlusion` (optional): Indicates with a value of 1 whether a pixel is not visible to the sensor due to terrain occlusion during off-nadir viewing. Otherwise, the value is 0.\n- `terrain-illumination` (optional): Contains coefficients used for terrain illumination correction are provided for each pixel.\n\nThe data returned is CARD4L compliant and contains metadata.\n\nThe data returned is CARD4L compliant with corresponding metadata.", "schema": { "subtype": "raster-cube", "type": "object" From 6680ecf108bb317bb757455ba8facdea84bb6936 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Thu, 29 Apr 2021 14:08:37 +0200 Subject: [PATCH 069/109] Add options parameters and add implementation guide for back-ends. --- README.md | 1 + meta/implementation.md | 76 ++++++++++++++++++++++++++ proposals/ard_surface_reflectance.json | 20 +++++++ proposals/atmospheric_correction.json | 10 ++++ proposals/cloud_detection.json | 10 ++++ 5 files changed, 117 insertions(+) create mode 100644 meta/implementation.md diff --git a/README.md b/README.md index 1a5afa5a..0b6f45cc 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,7 @@ This repository contains a set of files formally describing the openEO Processes * The `*.json` files provide stable process specifications as defined by openEO. Stable processes need at least two implementations and a use-case example added to the [`examples`](examples/) folder *or* consensus from the openEO PSC. * The `*.json` files in the [`proposals`](proposals/) folder provide proposed new process specifications that are still experimental and subject to change, including breaking changes. Everyone is encouraged to base their work on the proposals and give feedback so that eventually the processes evolve into stable process specifications. +* [implementation.md](meta/implementation.md) in the `meta` folder provide some additional implementation details for back-ends. For back-end implementors, it's highly recommended to read them. * [subtype-schemas.json](meta/subtype-schemas.json) in the `meta` folder defines common data types (`subtype`s) for JSON Schema used in openEO processes. * The [`examples`](examples/) folder contains some useful examples that the processes link to. All of these are non-binding additions. 
* The [`tests`](tests/) folder can be used to test the process specification for validity and consistent "style". It also allows rendering the processes in a web browser.
diff --git a/meta/implementation.md b/meta/implementation.md
new file mode 100644
index 00000000..7cef09c8
--- /dev/null
+++ b/meta/implementation.md
@@ -0,0 +1,111 @@
+# Implementation Guide for back-ends
+
+This file is meant to provide some additional implementation details for back-ends.
+
+## Enums for processing methods
+
+There are numerous processes that provide a predefined set of processing methods.
+For example:
+- `ard_surface_reflectance`: `atmospheric_correction_method` and `cloud_detection_method`
+- `atmospheric_correction`: `method`
+- `cloud_detection`: `method`
+- `resample_cube_spatial`: `method`
+- `resample_spatial`: `method`
+
+Those methods are meant to provide a common naming for well-known processing methods.
+Back-ends should check which methods they can implement and remove all the methods
+they can't implement. Similarly, you can add new methods. We'd likely ask you to
+open [a new issue](https://github.com/Open-EO/openeo-processes/issues) and provide
+us with your additions so that we can align implementations and eventually update the
+process specifications with all methods out there. Thanks in advance!
+
+Also make sure to update the textual descriptions accordingly.
+
+This applies similarly to other enums specified in parameter schemas, e.g. the
+`period` parameter in `aggregate_temporal_period`.
+
+## Proprietary options in `ard_surface_reflectance`, `atmospheric_correction` and `cloud_detection`
+
+The processes mentioned above all have at least one parameter for proprietary
+options that can be passed to the corresponding `methods`:
+- `ard_surface_reflectance`: `atmospheric_correction_options` and `cloud_detection_options`
+- `atmospheric_correction`: `options`
+- `cloud_detection`: `options`
+
+By default, the parameters don't allow any value except an empty object.
+Back-ends have to either remove the parameter or define a schema to give users
+details about the supported parameters per supported method.
+
+For example, if you support the methods `iCor` and `FORCE` in `atmospheric_correction`,
+you may define something like the following for the parameter:
+
+```json
+{
+    "description": "Proprietary options for the atmospheric correction method.",
+    "name": "options",
+    "optional": true,
+    "default": {},
+    "schema": [
+        {
+            "title": "FORCE options",
+            "type": "object",
+            "properties": {
+                "force_option1": {
+                    "type": "number",
+                    "description": "Description for option 1"
+                },
+                "force_option2": {
+                    "type": "boolean",
+                    "description": "Description for option 2"
+                }
+            }
+        },
+        {
+            "title": "iCor options",
+            "type": "object",
+            "properties": {
+                "icor_option1": {
+                    "type": "string",
+                    "description": "Description for option 1"
+                }
+            }
+        }
+
+    ]
+}
+```
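+
+## Sketch: validating methods and options
+
+To illustrate the two sections above: a back-end could first validate the `method`
+enum and the proprietary `options` object, and only then dispatch to its own
+implementation. The following Python sketch is non-normative; the helper names
+(`SUPPORTED_METHODS`, `run_force`) are hypothetical and not part of any openEO library:
+
+```python
+def run_force(data, **options):
+    # Placeholder for a real FORCE-based implementation.
+    return data
+
+# Method name -> (names of supported proprietary options, implementation).
+SUPPORTED_METHODS = {
+    "FORCE": ({"force_option1", "force_option2"}, run_force),
+}
+
+def atmospheric_correction(data, method=None, options=None):
+    options = options or {}
+    if method is None:
+        # `null` lets the back-end pick its default method.
+        method = "FORCE"
+    if method not in SUPPORTED_METHODS:
+        raise ValueError(f"Method `{method}` is not supported by this back-end.")
+    supported, impl = SUPPORTED_METHODS[method]
+    unknown = set(options) - supported
+    if unknown:
+        # Reject options that are not advertised in the parameter's schema.
+        raise ValueError(f"Unsupported options for `{method}`: {sorted(unknown)}")
+    return impl(data, **options)
+```
+
+Whether options are checked by hand as above or against the advertised JSON Schema
+is up to the back-end; the sketch only shows the dispatch structure.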
diff --git a/proposals/ard_surface_reflectance.json b/proposals/ard_surface_reflectance.json
index 8bbea7ff..9daec107 100644
--- a/proposals/ard_surface_reflectance.json
+++ b/proposals/ard_surface_reflectance.json
@@ -58,6 +58,26 @@
         "type": "null"
       }
     ]
+    },
+    {
+      "description": "Proprietary options for the atmospheric correction method. Specifying proprietary options will reduce portability.",
+      "name": "atmospheric_correction_options",
+      "optional": true,
+      "default": {},
+      "schema": {
+        "type": "object",
+        "additionalProperties": false
+      }
+    },
+    {
+      "description": "Proprietary options for the cloud detection method. Specifying proprietary options will reduce portability.",
+      "name": "cloud_detection_options",
+      "optional": true,
+      "default": {},
+      "schema": {
+        "type": "object",
+        "additionalProperties": false
+      }
     }
   ],
   "returns": {
diff --git a/proposals/atmospheric_correction.json b/proposals/atmospheric_correction.json
index 87c4aab3..9b537322 100644
--- a/proposals/atmospheric_correction.json
+++ b/proposals/atmospheric_correction.json
@@ -48,6 +48,16 @@
         "type": "null"
       }
     ]
+    },
+    {
+      "description": "Proprietary options for the atmospheric correction method. Specifying proprietary options will reduce portability.",
+      "name": "options",
+      "optional": true,
+      "default": {},
+      "schema": {
+        "type": "object",
+        "additionalProperties": false
+      }
     }
   ],
   "returns": {
diff --git a/proposals/cloud_detection.json b/proposals/cloud_detection.json
index 3c90a7f2..f9025c5b 100644
--- a/proposals/cloud_detection.json
+++ b/proposals/cloud_detection.json
@@ -34,6 +34,16 @@
         "type": "null"
      }
    ]
+    },
+    {
+      "description": "Proprietary options for the cloud detection method. Specifying proprietary options will reduce portability.",
+      "name": "options",
+      "optional": true,
+      "default": {},
+      "schema": {
+        "type": "object",
+        "additionalProperties": false
+      }
    }
  ],
  "returns": {
From 2e4b2daa5437e5184ba46dfe4d09d16e4a71be17 Mon Sep 17 00:00:00 2001
From: Matthias Mohr
Date: Fri, 7 May 2021 10:59:12 +0200
Subject: [PATCH 070/109] Fixed duplicate text in ard_surface_reflectance

---
 proposals/ard_surface_reflectance.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/proposals/ard_surface_reflectance.json b/proposals/ard_surface_reflectance.json
index 9daec107..3fee8b54 100644
--- a/proposals/ard_surface_reflectance.json
+++ b/proposals/ard_surface_reflectance.json
@@ -81,7 +81,7 @@
     }
   ],
   "returns": {
-    "description": "Data cube containing bottom of atmosphere reflectances for each spectral band in the source data cube, with atmospheric disturbances like clouds and cloud shadows removed. No-data values (null) are directly set in the bands. Depending on the methods used, several additional bands will be added to the data cube:\n\nData cube containing bottom of atmosphere reflectances for each spectral band in the source data cube, with atmospheric disturbances like clouds and cloud shadows removed. Depending on the methods used, several additional bands will be added to the data cube:\n\n- `date` (optional): Specifies per-pixel acquisition timestamps.\n- `incomplete-testing` (required): Identifies pixels with a value of 1 for which the per-pixel tests (at least saturation, cloud and cloud shadows, see CARD4L specification for details) have not all been successfully completed. Otherwise, the value is 0.\n- `saturation` (required) / `saturation_{band}` (optional): Indicates where pixels in the input spectral bands are saturated (1) or not (0). If the saturation is given per band, the band names are `saturation_{band}` with `{band}` being the band name from the source data cube.\n- `cloud`, `shadow` (both required),`aerosol`, `haze`, `ozone`, `water_vapor` (all optional): Indicates the probability of pixels being an atmospheric disturbance such as clouds. 
All bands have values between 0 (clear) and 1, which describes the probability that it is an atmospheric disturbance.\n- `snow-ice` (optional): Points to a file that indicates whether a pixel is assessed as being snow/ice (1) or not (0). All values describe the probability and must be between 0 and 1.\n- `land-water` (optional): Indicates whether a pixel is assessed as being land (1) or water (0). All values describe the probability and must be between 0 and 1.\n- `incidence-angle` (optional): Specifies per-pixel incidence angles in degrees.\n- `azimuth` (optional): Specifies per-pixel azimuth angles in degrees.\n- `sun-azimuth:` (optional): Specifies per-pixel sun azimuth angles in degrees.\n- `sun-elevation` (optional): Specifies per-pixel sun elevation angles in degrees.\n- `terrain-shadow` (optional): Indicates with a value of 1 whether a pixel is not directly illuminated due to terrain shadowing. Otherwise, the value is 0.\n- `terrain-occlusion` (optional): Indicates with a value of 1 whether a pixel is not visible to the sensor due to terrain occlusion during off-nadir viewing. Otherwise, the value is 0.\n- `terrain-illumination` (optional): Contains coefficients used for terrain illumination correction are provided for each pixel.\n\nThe data returned is CARD4L compliant and contains metadata.\n\nThe data returned is CARD4L compliant with corresponding metadata.",
+    "description": "Data cube containing bottom of atmosphere reflectances for each spectral band in the source data cube, with atmospheric disturbances like clouds and cloud shadows removed. No-data values (null) are directly set in the bands. Depending on the methods used, several additional bands will be added to the data cube:\n\n- `date` (optional): Specifies per-pixel acquisition timestamps.\n- `incomplete-testing` (required): Identifies pixels with a value of 1 for which the per-pixel tests (at least saturation, cloud and cloud shadows, see CARD4L specification for details) have not all been successfully completed. Otherwise, the value is 0.\n- `saturation` (required) / `saturation_{band}` (optional): Indicates where pixels in the input spectral bands are saturated (1) or not (0). If the saturation is given per band, the band names are `saturation_{band}` with `{band}` being the band name from the source data cube.\n- `cloud`, `shadow` (both required),`aerosol`, `haze`, `ozone`, `water_vapor` (all optional): Indicates the probability of pixels being an atmospheric disturbance such as clouds. All bands have values between 0 (clear) and 1, which describes the probability that it is an atmospheric disturbance.\n- `snow-ice` (optional): Points to a file that indicates whether a pixel is assessed as being snow/ice (1) or not (0). All values describe the probability and must be between 0 and 1.\n- `land-water` (optional): Indicates whether a pixel is assessed as being land (1) or water (0). All values describe the probability and must be between 0 and 1.\n- `incidence-angle` (optional): Specifies per-pixel incidence angles in degrees.\n- `azimuth` (optional): Specifies per-pixel azimuth angles in degrees.\n- `sun-azimuth:` (optional): Specifies per-pixel sun azimuth angles in degrees.\n- `sun-elevation` (optional): Specifies per-pixel sun elevation angles in degrees.\n- `terrain-shadow` (optional): Indicates with a value of 1 whether a pixel is not directly illuminated due to terrain shadowing. Otherwise, the value is 0.\n- `terrain-occlusion` (optional): Indicates with a value of 1 whether a pixel is not visible to the sensor due to terrain occlusion during off-nadir viewing. Otherwise, the value is 0.\n- `terrain-illumination` (optional): Contains coefficients used for terrain illumination correction are provided for each pixel.\n\nThe data returned is CARD4L compliant with corresponding metadata.",
      "schema": {
        "subtype": "raster-cube",
        "type": "object"
From 26d64fb287395e19617af5fed84797c3a1e31aa8 Mon Sep 17 00:00:00 2001
From: Matthias Mohr
Date: Tue, 11 May 2021 15:05:26 +0200
Subject: [PATCH 071/109] Proposal for date_shift #247

---
 CHANGELOG.md              |  1 +
 proposals/date_shfit.json | 86 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 87 insertions(+)
 create mode 100644 proposals/date_shfit.json

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 31fd6dea..7722bf33 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### Added
 - New processes in proposal state
+  - `date_shift`
   - `is_infinite`
   - `nan`
diff --git a/proposals/date_shfit.json b/proposals/date_shfit.json
new file mode 100644
index 00000000..fd148a65
--- /dev/null
+++ b/proposals/date_shfit.json
@@ -0,0 +1,86 @@
+{
+  "id": "date_shfit",
+  "summary": "Calculates and manipulates dates and times",
+  "description": "Allows to calculate a new date and time based on a given date and time by adding or subtracting a given temporal period.",
+  "categories": [
+    "date & time"
+  ],
+  "experimental": true,
+  "parameters": [
+    {
+      "name": "data",
+      "description": "The base date and time to manipulate.\n\nThe millisecond part of the date and time is optional and defaults to 0 if not given. The time zone never changes.",
+      "schema": {
+        "type": "string",
+        "format": "date-time",
+        "subtype": "date-time"
+      }
+    },
+    {
+      "name": "value",
+      "description": "The period of time in the unit given that is added (positive numbers) or subtracted (negative numbers).",
+      "schema": {
+        "type": "integer"
+      }
+    },
+    {
+      "name": "unit",
+      "description": "The unit for the value given. 
The following pre-defined units are available:\n\n- millisecond: Milliseconds\n- second: Seconds\n- minute: Minutes\n- hour: Hours\n- day: Days - changes only the the day part of a date (and potentially also the month and the year)\n- week: Weeks (equivalent to 7 days)\n- month: Months - changes only the month part of a date (and potentially also the year)\n- year: Years - changes only the year part of a date\n", + "schema": { + "type": "string", + "enum": [ + "millisecond", + "second", + "minute", + "hour", + "day", + "week", + "month", + "year" + ] + } + } + ], + "returns": { + "description": "The manipulated date and time.", + "schema": { + "type": "string", + "format": "date-time", + "subtype": "date-time" + } + }, + "examples": [ + { + "arguments": { + "data": "2020-02-01T17:22:45Z", + "value": 1, + "unit": "year" + }, + "returns": "2021-02-01T17:22:45Z" + }, + { + "arguments": { + "data": "2020-02-01T17:22:45Z", + "value": 6, + "unit": "month" + }, + "returns": "2020-08-01T17:22:45Z" + }, + { + "arguments": { + "data": "2021-03-31T00:00:00+02:00", + "value": -7, + "unit": "day" + }, + "returns": "2021-03-24T00:00:00+02:00" + }, + { + "arguments": { + "data": "2018-12-31T17:22:45Z", + "value": 1150, + "unit": "millisecond" + }, + "returns": "2018-12-31T17:22:46.150Z" + } + ] +} \ No newline at end of file From 3ad8a483c67fec447c56246499461033febf8b80 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 11 May 2021 16:52:26 +0200 Subject: [PATCH 072/109] Fix typo --- proposals/{date_shfit.json => date_shift.json} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename proposals/{date_shfit.json => date_shift.json} (99%) diff --git a/proposals/date_shfit.json b/proposals/date_shift.json similarity index 99% rename from proposals/date_shfit.json rename to proposals/date_shift.json index fd148a65..5464727b 100644 --- a/proposals/date_shfit.json +++ b/proposals/date_shift.json @@ -1,5 +1,5 @@ { - "id": "date_shfit", + "id": "date_shift", "summary": "Calculates and manipulates dates and times", "description": "Allows to calculate a new date and time based on a given date and time by adding or subtracting a given temporal period.", "categories": [ From 76782c69950e65c83b288c733d7c1437faca4a1d Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 12 May 2021 13:02:20 +0200 Subject: [PATCH 073/109] Allow dates to be computed, other improvements from review --- proposals/date_shift.json | 62 +++++++++++++++++++++++++++++---------- 1 file changed, 46 insertions(+), 16 deletions(-) diff --git a/proposals/date_shift.json b/proposals/date_shift.json index 5464727b..a9c74c68 100644 --- a/proposals/date_shift.json +++ b/proposals/date_shift.json @@ -1,7 +1,7 @@ { "id": "date_shift", "summary": "Calculates and manipulates dates and times", - "description": "Allows to calculate a new date and time based on a given date and time by adding or subtracting a given temporal period.", + "description": "Based on a given date (and optionally time), calculates a new date (and time if given) by adding or subtracting a given temporal period. If the given date doesn't include a time component, the returned values will also not include the time component.\n\nThis process doesn't change the time zone and also doesn't take daylight saving time (DST) into account.", "categories": [ "date & time" ], @@ -9,12 +9,19 @@ "parameters": [ { "name": "data", - "description": "The base date and time to manipulate.\n\nThe millisecond part of the date and time is optional and defaults to 0 if not given. 
The time zone never changes.", - "schema": { - "type": "string", - "format": "date-time", - "subtype": "date-time" - } + "description": "The date (and optionally time) to manipulate.\n\nIf the given date doesn't include the time, the process assumes that the time component is `00:00:00Z` (i.e. midnight, in UTC). The millisecond part of the time is optional and defaults to 0 if not given.", + "schema": [ + { + "type": "string", + "format": "date-time", + "subtype": "date-time" + }, + { + "type": "string", + "format": "date", + "subtype": "date" + } + ] }, { "name": "value", @@ -25,7 +32,7 @@ }, { "name": "unit", - "description": "The unit for the value given. The following pre-defined units are available:\n\n- millisecond: Milliseconds\n- second: Seconds\n- minute: Minutes\n- hour: Hours\n- day: Days - changes only the the day part of a date (and potentially also the month and the year)\n- week: Weeks (equivalent to 7 days)\n- month: Months - changes only the month part of a date (and potentially also the year)\n- year: Years - changes only the year part of a date\n", + "description": "The unit for the value given. The following pre-defined units are available:\n\n- millisecond: Milliseconds\n- second: Seconds\n- minute: Minutes\n- hour: Hours\n- day: Days - changes only the the day part of a date (and potentially also the month and the year)\n- week: Weeks (equivalent to 7 days)\n- month: Months - changes only the month part of a date (and potentially also the year)\n- year: Years - changes only the year part of a date\n\nIf any of the changes result in an invalid date, the corresponding part is rounded down to the next valid date. For example, adding a month to `2020-01-31` would result in `2020-02-29`.", "schema": { "type": "string", "enum": [ @@ -42,21 +49,28 @@ } ], "returns": { - "description": "The manipulated date and time.", - "schema": { - "type": "string", - "format": "date-time", - "subtype": "date-time" - } + "description": "The manipulated date (and time if a time component is given in the parameter `data`).", + "schema": [ + { + "type": "string", + "format": "date-time", + "subtype": "date-time" + }, + { + "type": "string", + "format": "date", + "subtype": "date" + } + ] }, "examples": [ { "arguments": { - "data": "2020-02-01T17:22:45Z", + "data": "2020-02-29T17:22:45Z", "value": 1, "unit": "year" }, - "returns": "2021-02-01T17:22:45Z" + "returns": "2021-02-28T17:22:45Z" }, { "arguments": { @@ -81,6 +95,22 @@ "unit": "millisecond" }, "returns": "2018-12-31T17:22:46.150Z" + }, + { + "arguments": { + "data": "2018-01-01", + "value": 25, + "unit": "hour" + }, + "returns": "2018-01-02" + }, + { + "arguments": { + "data": "2018-01-01", + "value": -1, + "unit": "hour" + }, + "returns": "2017-12-31" } ] } \ No newline at end of file From 1781e1644f226008eba414b689ab0085fda300f0 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 17 May 2021 11:41:36 +0200 Subject: [PATCH 074/109] Specify resampling method in sar backscatter processes to be bilinear #249 --- proposals/ard_normalized_radar_backscatter.json | 7 ++++++- proposals/sar_backscatter.json | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/proposals/ard_normalized_radar_backscatter.json b/proposals/ard_normalized_radar_backscatter.json index 7e9f923b..46f5f48a 100644 --- a/proposals/ard_normalized_radar_backscatter.json +++ b/proposals/ard_normalized_radar_backscatter.json @@ -1,7 +1,7 @@ { "id": "ard_normalized_radar_backscatter", "summary": "CARD4L compliant SAR NRB generation", - "description": 
"Computes CARD4L compliant backscatter from SAR input. The radiometric correction coefficient is gamma0 (terrain), which is the ground area computed with terrain earth model in sensor line of sight.\n\nNote that backscatter computation may require instrument specific metadata that is tightly coupled to the original SAR products. As a result, this process may only work in combination with loading data from specific collections, not with general data cubes.", + "description": "Computes CARD4L compliant backscatter from SAR input. The radiometric correction coefficient is gamma0 (terrain), which is the ground area computed with terrain earth model in sensor line of sight.\n\nNote that backscatter computation may require instrument specific metadata that is tightly coupled to the original SAR products. As a result, this process may only work in combination with loading data from specific collections, not with general data cubes.\n\nThis process uses bilinear interpolation, both for resampling the DEM and the backscatter.", "categories": [ "cubes", "sar", @@ -82,6 +82,11 @@ "rel": "about", "href": "https://bok.eo4geo.eu/PP2-2-4-3", "title": "Gamma nought (0) explained by EO4GEO body of knowledge." + }, + { + "rel": "about", + "href": "https://doi.org/10.3390/data4030093", + "title": "Reasoning behind the choice of bilinear resampling" } ], "process_graph": { diff --git a/proposals/sar_backscatter.json b/proposals/sar_backscatter.json index ddfe8bb9..07945438 100644 --- a/proposals/sar_backscatter.json +++ b/proposals/sar_backscatter.json @@ -1,7 +1,7 @@ { "id": "sar_backscatter", "summary": "Computes backscatter from SAR input", - "description": "Computes backscatter from SAR input.\n\nNote that backscatter computation may require instrument specific metadata that is tightly coupled to the original SAR products. As a result, this process may only work in combination with loading data from specific collections, not with general data cubes.", + "description": "Computes backscatter from SAR input.\n\nNote that backscatter computation may require instrument specific metadata that is tightly coupled to the original SAR products. As a result, this process may only work in combination with loading data from specific collections, not with general data cubes.\n\nThis process uses bilinear interpolation, both for resampling the DEM and the backscatter.", "categories": [ "cubes", "sar" @@ -126,6 +126,11 @@ "rel": "about", "href": "https://www.geo.uzh.ch/microsite/rsl-documents/research/publications/peer-reviewed-articles/201108-TGRS-Small-tcGamma-3809999360/201108-TGRS-Small-tcGamma.pdf", "title": "Flattening Gamma: Radiometric Terrain Correction for SAR Imagery" + }, + { + "rel": "about", + "href": "https://doi.org/10.3390/data4030093", + "title": "Reasoning behind the choice of bilinear resampling" } ] } From 0e240ede6726d10a81cace900e6b760f86e11953 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 17 May 2021 12:56:50 +0200 Subject: [PATCH 075/109] Better wording --- meta/subtype-schemas.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meta/subtype-schemas.json b/meta/subtype-schemas.json index 92c240bd..41deaaf2 100644 --- a/meta/subtype-schemas.json +++ b/meta/subtype-schemas.json @@ -402,7 +402,7 @@ "subtype": "uri", "format": "uri", "title": "URI", - "description": "A valid URI according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986.html). Can be restricted using a regex pattern." 
+ "description": "A valid URI according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986.html). Can be restricted using a regular expression, e.g. to (dis)allow certain protocols." }, "vector-cube": { "type": "object", From 90d7467e063817df9b3fc4739e2049438e739c61 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 18 May 2021 18:15:41 +0200 Subject: [PATCH 076/109] `aggregate_spatial`, `aggregate_spatial_binary`: Clarified that Multi* geometries are a single entity in computations. GeometryCollections are considered being multiple entities. #252 --- CHANGELOG.md | 1 + aggregate_spatial.json | 4 ++-- proposals/aggregate_spatial_binary.json | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 58bf3615..77b45741 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fixed links to openEO glossary and added links to data cube introduction. [#216](https://github.com/Open-EO/openeo-processes/issues/216) - Clarified disallowed characters in subtype `file-path`. - Clarified that UDF source code must contain a newline/line-break (affects `run_udf`). +- `aggregate_spatial`, `aggregate_spatial_binary`: Clarified that Multi* geometries are a single entity in computations. GeometryCollections are considered being multiple entities. [#252](https://github.com/Open-EO/openeo-processes/issues/252) ## 1.0.0 - 2020-07-31 diff --git a/aggregate_spatial.json b/aggregate_spatial.json index 58303652..de38115e 100644 --- a/aggregate_spatial.json +++ b/aggregate_spatial.json @@ -1,7 +1,7 @@ { "id": "aggregate_spatial", "summary": "Zonal statistics for geometries", - "description": "Aggregates statistics for one or more geometries (e.g. zonal statistics for polygons) over the spatial dimensions. This process passes a list of values to the reducer. In contrast, ``aggregate_spatial_binary()`` passes two values, which may be better suited especially for UDFs in case the number of values gets too large to be processed at once.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.\n\nThe data cube must have been reduced to only contain two spatial dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a time series. Otherwise, this process fails with the `TooManyDimensions` exception.\n\nThe number of total and valid pixels is returned together with the calculated values.", + "description": "Aggregates statistics for one or more geometries (e.g. zonal statistics for polygons) over the spatial dimensions. This process passes a list of values to the reducer. In contrast, ``aggregate_spatial_binary()`` passes two values, which may be better suited especially for UDFs in case the number of values gets too large to be processed at once.\n\nThe data cube must have been reduced to only contain two spatial dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a time series. 
Otherwise, this process fails with the `TooManyDimensions` exception.\n\nThe number of total and valid pixels is returned together with the calculated values.", "categories": [ "cubes", "aggregate & resample" @@ -17,7 +17,7 @@ }, { "name": "geometries", - "description": "Geometries as GeoJSON on which the aggregation will be based.", + "description": "Geometries as GeoJSON on which the aggregation will be based. One value will be computed per GeoJSON geometry, which means that, for example, a single value will be computed for a `MultiPolgon`, but two values will be computed for a `FeatureCollection` or `GeometryCollection` containing two polygons.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.", "schema": { "type": "object", "subtype": "geojson" diff --git a/proposals/aggregate_spatial_binary.json b/proposals/aggregate_spatial_binary.json index 17632722..6dc761bc 100644 --- a/proposals/aggregate_spatial_binary.json +++ b/proposals/aggregate_spatial_binary.json @@ -1,7 +1,7 @@ { "id": "aggregate_spatial_binary", "summary": "Zonal statistics for geometries by binary aggregation", - "description": "Aggregates statistics for one or more geometries (e.g. zonal statistics for polygons) over the spatial dimensions. This process consecutively passes a pair of values to the reducer. This may be better suited especially for UDFs in case the number of values gets too large to be processed at once. In contrast, ``aggregate_spatial()`` passes a list of values.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThe data cube must have been reduced to only contain two raster dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a time series. Otherwise, this process fails with the `TooManyDimensions` exception.\n\nThe number of total and valid pixels is returned together with the calculated values.", + "description": "Aggregates statistics for one or more geometries (e.g. zonal statistics for polygons) over the spatial dimensions. This process consecutively passes a pair of values to the reducer. This may be better suited especially for UDFs in case the number of values gets too large to be processed at once. In contrast, ``aggregate_spatial()`` passes a list of values.\n\nThe data cube must have been reduced to only contain two raster dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a time series. 
Otherwise, this process fails with the `TooManyDimensions` exception.\n\nThe number of total and valid pixels is returned together with the calculated values.", "categories": [ "cubes", "aggregate & resample" @@ -18,7 +18,7 @@ }, { "name": "geometries", - "description": "Geometries as GeoJSON on which the aggregation will be based.", + "description": "Geometries as GeoJSON on which the aggregation will be based. One value will be computed per GeoJSON geometry, which means that, for example, a single value will be computed for a `MultiPolgon`, but two values will be computed for a `FeatureCollection` or `GeometryCollection` containing two polygons.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.", "schema": { "type": "object", "subtype": "geojson" From 91eee13945a4b6a3ec19925773b443bdeca2ee1c Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 19 May 2021 10:46:41 +0200 Subject: [PATCH 077/109] Clarified GeometryCollections #252 --- CHANGELOG.md | 2 +- aggregate_spatial.json | 2 +- proposals/aggregate_spatial_binary.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 77b45741..847bc1ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,7 +36,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fixed links to openEO glossary and added links to data cube introduction. [#216](https://github.com/Open-EO/openeo-processes/issues/216) - Clarified disallowed characters in subtype `file-path`. - Clarified that UDF source code must contain a newline/line-break (affects `run_udf`). -- `aggregate_spatial`, `aggregate_spatial_binary`: Clarified that Multi* geometries are a single entity in computations. GeometryCollections are considered being multiple entities. [#252](https://github.com/Open-EO/openeo-processes/issues/252) +- `aggregate_spatial`, `aggregate_spatial_binary`: Clarified that Features, Geometries and GeometryCollections are a single entity in computations. Only FeatureCollections are multiple entities. [#252](https://github.com/Open-EO/openeo-processes/issues/252) ## 1.0.0 - 2020-07-31 diff --git a/aggregate_spatial.json b/aggregate_spatial.json index de38115e..81ced4e8 100644 --- a/aggregate_spatial.json +++ b/aggregate_spatial.json @@ -17,7 +17,7 @@ }, { "name": "geometries", - "description": "Geometries as GeoJSON on which the aggregation will be based. 
One value will be computed per GeoJSON geometry, which means that, for example, a single value will be computed for a `MultiPolgon`, but two values will be computed for a `FeatureCollection` or `GeometryCollection` containing two polygons.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.", + "description": "Geometries as GeoJSON on which the aggregation will be based.\n\nOne value will be computed per GeoJSON `Feature`, `Geometry` or `GeometryCollection`. For a `FeatureCollection` multiple values will be computed, one value per contained `Feature`. For example, a single value will be computed for a `MultiPolgon`, but two values will be computed for a `FeatureCollection` containing two polygons.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.", "schema": { "type": "object", "subtype": "geojson" diff --git a/proposals/aggregate_spatial_binary.json b/proposals/aggregate_spatial_binary.json index 6dc761bc..52c1516b 100644 --- a/proposals/aggregate_spatial_binary.json +++ b/proposals/aggregate_spatial_binary.json @@ -18,7 +18,7 @@ }, { "name": "geometries", - "description": "Geometries as GeoJSON on which the aggregation will be based. One value will be computed per GeoJSON geometry, which means that, for example, a single value will be computed for a `MultiPolgon`, but two values will be computed for a `FeatureCollection` or `GeometryCollection` containing two polygons.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.", + "description": "Geometries as GeoJSON on which the aggregation will be based.\n\nOne value will be computed per GeoJSON `Feature`, `Geometry` or `GeometryCollection`. For a `FeatureCollection` multiple values will be computed, one value per contained `Feature`. 
For example, a single value will be computed for a `MultiPolgon`, but two values will be computed for a `FeatureCollection` containing two polygons.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.", "schema": { "type": "object", "subtype": "geojson" From 5f9c557adb1eb5fc6abb38eafe8721120af8916c Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 19 May 2021 11:01:00 +0200 Subject: [PATCH 078/109] Fixed typo Co-authored-by: Jonathan Bahlmann <45635480+jonathom@users.noreply.github.com> --- aggregate_spatial.json | 4 ++-- proposals/aggregate_spatial_binary.json | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/aggregate_spatial.json b/aggregate_spatial.json index 81ced4e8..fbb176df 100644 --- a/aggregate_spatial.json +++ b/aggregate_spatial.json @@ -17,7 +17,7 @@ }, { "name": "geometries", - "description": "Geometries as GeoJSON on which the aggregation will be based.\n\nOne value will be computed per GeoJSON `Feature`, `Geometry` or `GeometryCollection`. For a `FeatureCollection` multiple values will be computed, one value per contained `Feature`. For example, a single value will be computed for a `MultiPolgon`, but two values will be computed for a `FeatureCollection` containing two polygons.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.", + "description": "Geometries as GeoJSON on which the aggregation will be based.\n\nOne value will be computed per GeoJSON `Feature`, `Geometry` or `GeometryCollection`. For a `FeatureCollection` multiple values will be computed, one value per contained `Feature`. 
For example, a single value will be computed for a `MultiPolygon`, but two values will be computed for a `FeatureCollection` containing two polygons.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.", "schema": { "type": "object", "subtype": "geojson" @@ -101,4 +101,4 @@ "title": "Simple Features standard by the OGC" } ] -} \ No newline at end of file +} diff --git a/proposals/aggregate_spatial_binary.json b/proposals/aggregate_spatial_binary.json index 52c1516b..150212f0 100644 --- a/proposals/aggregate_spatial_binary.json +++ b/proposals/aggregate_spatial_binary.json @@ -18,7 +18,7 @@ }, { "name": "geometries", - "description": "Geometries as GeoJSON on which the aggregation will be based.\n\nOne value will be computed per GeoJSON `Feature`, `Geometry` or `GeometryCollection`. For a `FeatureCollection` multiple values will be computed, one value per contained `Feature`. For example, a single value will be computed for a `MultiPolgon`, but two values will be computed for a `FeatureCollection` containing two polygons.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.", + "description": "Geometries as GeoJSON on which the aggregation will be based.\n\nOne value will be computed per GeoJSON `Feature`, `Geometry` or `GeometryCollection`. For a `FeatureCollection` multiple values will be computed, one value per contained `Feature`. For example, a single value will be computed for a `MultiPolygon`, but two values will be computed for a `FeatureCollection` containing two polygons.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.", "schema": { "type": "object", "subtype": "geojson" @@ -111,4 +111,4 @@ "title": "Background information on reduction operators (binary reducers) by Wikipedia" } ] -} \ No newline at end of file +} From f5edd72c7905eec10a5edd6e5329cfbeeb7b8852 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 19 May 2021 14:39:11 +0200 Subject: [PATCH 079/109] Improve date_shift and add implementation details, incl. 
list of supporting libraries --- meta/implementation.md | 27 +++++++++++++++++++++++- proposals/date_shift.json | 44 ++++++++++++++++++++++++++++----------- 2 files changed, 58 insertions(+), 13 deletions(-) diff --git a/meta/implementation.md b/meta/implementation.md index 7cef09c8..4873cce5 100644 --- a/meta/implementation.md +++ b/meta/implementation.md @@ -73,4 +73,29 @@ you may define something like the following for the parameter: ] } -``` \ No newline at end of file +``` + +## Date and Time manipulation + +Working with dates is a lot more complex than it seems to be at first sight. Issues arise especially with daylight saving times (DST), time zones, leap years and leap seconds. + +The date/time functions in openEO don't have any effect right now as only timestamps in UTC (with potential numerical time zone modifier) are supported. + +Leap years are implemented in a way that computations handle them gracefully. For example: + +- If you add a month to January 31st, it will result in February 29th (leap year) or 28th (other years). This means for invalid dates we round down (or "snap") to the next valid date. +- If you add a month to February 29th, it will result in March 29th. So the "snap" behavior doesn't work the other way round. + +Leap seconds are basically ignored in manipulations as they don't follow a regular pattern. So leap seconds may be passed into the processes, but will never be returned by date manipulation processes in openEO. 
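For illustration, this month-overflow "snap" behaviour can be reproduced with Python's [dateutil](https://dateutil.readthedocs.io/en/stable/index.html), one of the libraries listed under Language support below. A minimal sketch, not part of the specification:

```python
from datetime import date
from dateutil.relativedelta import relativedelta

# Adding a month to January 31st snaps down to the last valid day of February.
assert date(2020, 1, 31) + relativedelta(months=1) == date(2020, 2, 29)  # leap year
assert date(2019, 1, 31) + relativedelta(months=1) == date(2019, 2, 28)  # other years

# The "snap" doesn't work the other way round: February 29th plus one month is March 29th.
assert date(2020, 2, 29) + relativedelta(months=1) == date(2020, 3, 29)
```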
If the given date doesn't include a time component, the returned values will also not include the time component.\n\nThis process doesn't change the time zone and also doesn't take daylight saving time (DST) into account.", + "description": "Based on a given date (and optionally time), calculates a new date (and time if given) by adding or subtracting a given temporal period.\n\nSome specifics about dates and times need to be taken into account:\n\n* This process doesn't have any effect on the time zone.\n* It doesn't take daylight saving time (DST) into account as only dates and time in UTC (with potential numerical time zone modifier) are supported.\n* Leap years are implemented in a way that computations handle them gracefully (see parameter `unit` for details).\n* Leap seconds are mostly ignored in manipulations as they don't follow a regular pattern. Leap seconds can be passed to the process, but will never be returned.", "categories": [ "date & time" ], @@ -9,7 +9,7 @@ "parameters": [ { "name": "data", - "description": "The date (and optionally time) to manipulate.\n\nIf the given date doesn't include the time, the process assumes that the time component is `00:00:00Z` (i.e. midnight, in UTC). The millisecond part of the time is optional and defaults to 0 if not given.", + "description": "The date (and optionally time) to manipulate.\n\nIf the given date doesn't include the time, the process assumes that the time component is `00:00:00Z` (i.e. midnight, in UTC). The millisecond part of the time is optional and defaults to `0` if not given.", "schema": [ { "type": "string", @@ -25,14 +25,14 @@ }, { "name": "value", - "description": "The period of time in the unit given that is added (positive numbers) or subtracted (negative numbers).", + "description": "The period of time in the unit given that is added (positive numbers) or subtracted (negative numbers). The value `0` doesn't have any effect.", "schema": { "type": "integer" } }, { "name": "unit", - "description": "The unit for the value given. The following pre-defined units are available:\n\n- millisecond: Milliseconds\n- second: Seconds\n- minute: Minutes\n- hour: Hours\n- day: Days - changes only the the day part of a date (and potentially also the month and the year)\n- week: Weeks (equivalent to 7 days)\n- month: Months - changes only the month part of a date (and potentially also the year)\n- year: Years - changes only the year part of a date\n\nIf any of the changes result in an invalid date, the corresponding part is rounded down to the next valid date. For example, adding a month to `2020-01-31` would result in `2020-02-29`.", + "description": "The unit for the value given. The following pre-defined units are available:\n\n- millisecond: Milliseconds\n- second: Seconds - leap seconds are ignored in computations.\n- minute: Minutes\n- hour: Hours\n- day: Days - changes only the the day part of a date\n- week: Weeks (equivalent to 7 days)\n- month: Months\n- year: Years\n\nManipulations with the unit `year`, `month`, `week` or `day` do never change the time. If any of the manipulations result in an invalid date or time, the corresponding part is rounded down to the next valid date or time respectively. For example, adding a month to `2020-01-31` would result in `2020-02-29`.", "schema": { "type": "string", "enum": [ @@ -49,7 +49,7 @@ } ], "returns": { - "description": "The manipulated date (and time if a time component is given in the parameter `data`).", + "description": "The manipulated date. 
If a time component was given in the parameter `data`, the time component is returned with the date.", "schema": [ { "type": "string", @@ -65,6 +65,23 @@ }, "examples": [ { + "arguments": { + "data": "2020-02-01T17:22:45Z", + "value": 6, + "unit": "month" + }, + "returns": "2020-08-01T17:22:45Z" + }, + { + "arguments": { + "data": "2021-03-31T00:00:00+02:00", + "value": -7, + "unit": "day" + }, + "returns": "2021-03-24T00:00:00+02:00" + }, + { + "description": "Adding a year to February 29th in a leap year will result in February 28th in the next (non-leap) year.", "arguments": { "data": "2020-02-29T17:22:45Z", "value": 1, @@ -73,22 +90,25 @@ "returns": "2021-02-28T17:22:45Z" }, { + "description": "Adding a month to January 31th will result in February 29th in leap years.", "arguments": { - "data": "2020-02-01T17:22:45Z", - "value": 6, + "data": "2020-01-31", + "value": 1, "unit": "month" }, - "returns": "2020-08-01T17:22:45Z" + "returns": "2020-02-29" }, { + "description": "The process skips over the leap second `2016-12-31T23:59:60Z`.", "arguments": { - "data": "2021-03-31T00:00:00+02:00", - "value": -7, - "unit": "day" + "data": "2016-12-31T23:59:59Z", + "value": 1, + "unit": "second" }, - "returns": "2021-03-24T00:00:00+02:00" + "returns": "2017-01-01T00:00:00Z" }, { + "description": "Milliseconds can be added or subtracted. If not given, the default value is `0`.", "arguments": { "data": "2018-12-31T17:22:45Z", "value": 1150, From a47b678625c3737ed250639ea9aef491db4bbab8 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 19 May 2021 14:54:52 +0200 Subject: [PATCH 080/109] Add recommendation to not use special types of GeometryCollections --- aggregate_spatial.json | 2 +- proposals/aggregate_spatial_binary.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aggregate_spatial.json b/aggregate_spatial.json index fbb176df..45f36e9d 100644 --- a/aggregate_spatial.json +++ b/aggregate_spatial.json @@ -17,7 +17,7 @@ }, { "name": "geometries", - "description": "Geometries as GeoJSON on which the aggregation will be based.\n\nOne value will be computed per GeoJSON `Feature`, `Geometry` or `GeometryCollection`. For a `FeatureCollection` multiple values will be computed, one value per contained `Feature`. For example, a single value will be computed for a `MultiPolygon`, but two values will be computed for a `FeatureCollection` containing two polygons.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.", + "description": "Geometries as GeoJSON on which the aggregation will be based.\n\nOne value will be computed per GeoJSON `Feature`, `Geometry` or `GeometryCollection`. For a `FeatureCollection` multiple values will be computed, one value per contained `Feature`. 
For example, a single value will be computed for a `MultiPolygon`, but two values will be computed for a `FeatureCollection` containing two polygons.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.\n\nTo maximize interoperability, a nested `GeometryCollection` should be avoided. Furthermore, a `GeometryCollection` composed of a single type of geometries should be avoided in favour of the corresponding multi-part type (e.g. `MultiPolygon`).", "schema": { "type": "object", "subtype": "geojson" diff --git a/proposals/aggregate_spatial_binary.json b/proposals/aggregate_spatial_binary.json index 150212f0..f09909a7 100644 --- a/proposals/aggregate_spatial_binary.json +++ b/proposals/aggregate_spatial_binary.json @@ -18,7 +18,7 @@ }, { "name": "geometries", - "description": "Geometries as GeoJSON on which the aggregation will be based.\n\nOne value will be computed per GeoJSON `Feature`, `Geometry` or `GeometryCollection`. For a `FeatureCollection` multiple values will be computed, one value per contained `Feature`. For example, a single value will be computed for a `MultiPolygon`, but two values will be computed for a `FeatureCollection` containing two polygons.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.", + "description": "Geometries as GeoJSON on which the aggregation will be based.\n\nOne value will be computed per GeoJSON `Feature`, `Geometry` or `GeometryCollection`. For a `FeatureCollection` multiple values will be computed, one value per contained `Feature`. For example, a single value will be computed for a `MultiPolygon`, but two values will be computed for a `FeatureCollection` containing two polygons.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.\n\nTo maximize interoperability, a nested `GeometryCollection` should be avoided. Furthermore, a `GeometryCollection` composed of a single type of geometries should be avoided in favour of the corresponding multi-part type (e.g. 
`MultiPolygon`).", "schema": { "type": "object", "subtype": "geojson" } From ae6c0f7bda204a38b319002e2b85a2c364134262 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Thu, 20 May 2021 12:28:01 +0200 Subject: [PATCH 081/109] Apply suggestions from code review --- meta/implementation.md | 8 ++++---- proposals/date_shift.json | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/meta/implementation.md b/meta/implementation.md index 4873cce5..61730cd3 100644 --- a/meta/implementation.md +++ b/meta/implementation.md @@ -79,11 +79,11 @@ Working with dates is a lot more complex than it seems to be at first sight. Issues arise especially with daylight saving times (DST), time zones, leap years and leap seconds. -The date/time functions in openEO don't have any effect right now as only timestamps in UTC (with potential numerical time zone modifier) are supported. +The date/time functions in openEO don't have any effect on time zones right now as only dates and times in UTC (with potential numerical time zone modifier) are supported. -Leap years are implemented in a way that computations handle them gracefully. For example: +Month overflows, including the specific case of leap years, are implemented in a way that computations handle them gracefully. For example: -- If you add a month to January 31st, it will result in February 29th (leap year) or 28th (other years). This means for invalid dates we round down (or "snap") to the next valid date. +- If you add a month to January 31st, it will result in February 29th (leap year) or 28th (other years). This means for invalid dates due to month overflow we round down (or "snap") to the last valid date of the month. - If you add a month to February 29th, it will result in March 29th. So the "snap" behavior doesn't work the other way round. Leap seconds are basically ignored in manipulations as they don't follow a regular pattern. So leap seconds may be passed into the processes, but will never be returned by date manipulation processes in openEO. 
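The leap-second rule can be sketched with the Python standard library alone. The helper below is hypothetical (it is not part of openEO or of any library), assumes the fixed `YYYY-MM-DDThh:mm:ss` layout, and rounds a leap second down to second 59 before parsing, which reproduces the two examples that follow:

```python
from datetime import datetime, timedelta, timezone

def parse_rfc3339_utc(ts: str) -> datetime:
    # Python's datetime rejects second=60, so round a leap second down to :59.
    if ts[17:19] == "60":
        ts = ts[:17] + "59" + ts[19:]
    return datetime.fromisoformat(ts.replace("Z", "+00:00"))

t = parse_rfc3339_utc("2016-12-31T23:59:60Z")  # snapped to 2016-12-31T23:59:59Z
assert t + timedelta(minutes=1) == datetime(2017, 1, 1, 0, 0, 59, tzinfo=timezone.utc)
assert t + timedelta(seconds=1) == datetime(2017, 1, 1, 0, 0, 0, tzinfo=timezone.utc)
```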
See the examples for the leap second `2016-12-31T23:59:60Z`: @@ -98,4 +98,4 @@ To make `date_shift` easier to implement, we have found some libraries that foll - Java: [java.time](https://docs.oracle.com/javase/8/docs/api/java/time/package-summary.html) - JavaScript: [Moment.js](https://momentjs.com/) - Python: [dateutil](https://dateutil.readthedocs.io/en/stable/index.html) -- R: [lubridate](https://lubridate.tidyverse.org/) ([Cheatsheet](https://rawgit.com/rstudio/cheatsheets/master/lubridate.pdf)) \ No newline at end of file +- R: [lubridate](https://lubridate.tidyverse.org/) ([Cheatsheet](https://rawgit.com/rstudio/cheatsheets/master/lubridate.pdf)) diff --git a/proposals/date_shift.json b/proposals/date_shift.json index c4a687b2..18f762a9 100644 --- a/proposals/date_shift.json +++ b/proposals/date_shift.json @@ -1,6 +1,6 @@ { "id": "date_shift", - "summary": "Calculates and manipulates dates and times", + "summary": "Manipulates dates and times by addition or subtraction", "description": "Based on a given date (and optionally time), calculates a new date (and time if given) by adding or subtracting a given temporal period.\n\nSome specifics about dates and times need to be taken into account:\n\n* This process doesn't have any effect on the time zone.\n* It doesn't take daylight saving time (DST) into account as only dates and time in UTC (with potential numerical time zone modifier) are supported.\n* Leap years are implemented in a way that computations handle them gracefully (see parameter `unit` for details).\n* Leap seconds are mostly ignored in manipulations as they don't follow a regular pattern. Leap seconds can be passed to the process, but will never be returned.", "categories": [ "date & time" @@ -133,4 +133,4 @@ "returns": "2017-12-31" } ] -} \ No newline at end of file +} From 1ce0dd5e872353211dd791a0aa95348558ca489c Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Thu, 20 May 2021 14:34:14 +0200 Subject: [PATCH 082/109] data parameter renamed to date --- proposals/date_shift.json | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/proposals/date_shift.json b/proposals/date_shift.json index 18f762a9..e9b6226b 100644 --- a/proposals/date_shift.json +++ b/proposals/date_shift.json @@ -8,7 +8,7 @@ "experimental": true, "parameters": [ { - "name": "data", + "name": "date", "description": "The date (and optionally time) to manipulate.\n\nIf the given date doesn't include the time, the process assumes that the time component is `00:00:00Z` (i.e. midnight, in UTC). The millisecond part of the time is optional and defaults to `0` if not given.", "schema": [ { @@ -49,7 +49,7 @@ } ], "returns": { - "description": "The manipulated date. If a time component was given in the parameter `data`, the time component is returned with the date.", + "description": "The manipulated date. 
If a time component was given in the parameter `date`, the time component is returned with the date.", "schema": [ { "type": "string", @@ -66,7 +66,7 @@ "examples": [ { "arguments": { - "data": "2020-02-01T17:22:45Z", + "date": "2020-02-01T17:22:45Z", "value": 6, "unit": "month" }, @@ -74,7 +74,7 @@ }, { "arguments": { - "data": "2021-03-31T00:00:00+02:00", + "date": "2021-03-31T00:00:00+02:00", "value": -7, "unit": "day" }, @@ -83,7 +83,7 @@ { "description": "Adding a year to February 29th in a leap year will result in February 28th in the next (non-leap) year.", "arguments": { - "data": "2020-02-29T17:22:45Z", + "date": "2020-02-29T17:22:45Z", "value": 1, "unit": "year" }, @@ -92,7 +92,7 @@ { "description": "Adding a month to January 31th will result in February 29th in leap years.", "arguments": { - "data": "2020-01-31", + "date": "2020-01-31", "value": 1, "unit": "month" }, @@ -101,7 +101,7 @@ { "description": "The process skips over the leap second `2016-12-31T23:59:60Z`.", "arguments": { - "data": "2016-12-31T23:59:59Z", + "date": "2016-12-31T23:59:59Z", "value": 1, "unit": "second" }, @@ -110,7 +110,7 @@ { "description": "Milliseconds can be added or subtracted. If not given, the default value is `0`.", "arguments": { - "data": "2018-12-31T17:22:45Z", + "date": "2018-12-31T17:22:45Z", "value": 1150, "unit": "millisecond" }, @@ -118,7 +118,7 @@ }, { "arguments": { - "data": "2018-01-01", + "date": "2018-01-01", "value": 25, "unit": "hour" }, @@ -126,7 +126,7 @@ }, { "arguments": { - "data": "2018-01-01", + "date": "2018-01-01", "value": -1, "unit": "hour" }, From 33dbaedcee0fd4cf17095003ae92deab354df9e5 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 26 May 2021 12:15:48 +0200 Subject: [PATCH 083/109] Clarified how default values should be specified for additional options etc. --- meta/implementation.md | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/meta/implementation.md b/meta/implementation.md index 61730cd3..b01d219c 100644 --- a/meta/implementation.md +++ b/meta/implementation.md @@ -52,11 +52,13 @@ you may define something like the following for the parameter: "properties": { "force_option1": { "type": "number", - "description": "Description for option 1" + "description": "Description for option 1", + "default": 0 }, "force_option2": { "type": "boolean", - "description": "Description for option 1" + "description": "Description for option 1", + "default": true } } }, @@ -66,7 +68,8 @@ you may define something like the following for the parameter: "properties": { "icor_option1": { "type": "string", - "description": "Description for option 1" + "description": "Description for option 1", + "default": "example" } } } @@ -75,6 +78,10 @@ you may define something like the following for the parameter: } ``` +Default values should be specified for each of the additional options given in `properties`. +The top-level default value should always be an empty object `{}`. The default values for the empty object will be provided by the schema. +None of the additional options should be required for better interoperability. + ## Date and Time manipulation Working with dates is a lot more complex than it seems to be at first sight. Issues arise especially with daylight saving times (DST), time zones, leap years and leap seconds. 
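As a rough sketch of how the `date_shift` semantics can map onto the libraries listed under Language support below, assuming Python's dateutil (the function is a hypothetical illustration, not a normative implementation; `relativedelta` already snaps month and year overflows to the last valid day):

```python
from datetime import datetime
from dateutil.relativedelta import relativedelta

def date_shift(date: datetime, value: int, unit: str) -> datetime:
    # Hypothetical helper mirroring the process: dispatch on the unit name.
    if unit == "millisecond":
        return date + relativedelta(microseconds=value * 1000)
    return date + relativedelta(**{unit + "s": value})  # e.g. "month" -> months=value

d = datetime.fromisoformat("2020-02-29T17:22:45+00:00")
assert date_shift(d, 1, "year").isoformat() == "2021-02-28T17:22:45+00:00"
assert date_shift(d, 1150, "millisecond").microsecond == 150000
```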
From c10582f7dec5a2d810b02d25105f615667413222 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Thu, 27 May 2021 15:49:46 +0200 Subject: [PATCH 084/109] Fixed typo in load_collection: temporal instead of spatial extent Co-Authored By: @clausmichele --- load_collection.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/load_collection.json b/load_collection.json index 8248d388..58a4842e 100644 --- a/load_collection.json +++ b/load_collection.json @@ -105,7 +105,7 @@ }, { "name": "temporal_extent", - "description": "Limits the data to load from the collection to the specified left-closed temporal interval. Applies to all temporal dimensions. The interval has to be specified as an array with exactly two elements:\n\n1. The first element is the start of the temporal interval. The specified instance in time is **included** in the interval.\n2. The second element is the end of the temporal interval. The specified instance in time is **excluded** from the interval.\n\nThe specified temporal strings follow [RFC 3339](https://www.rfc-editor.org/rfc/rfc3339.html). Also supports open intervals by setting one of the boundaries to `null`, but never both.\n\nSet this parameter to `null` to set no limit for the spatial extent. Be careful with this when loading large datasets!", + "description": "Limits the data to load from the collection to the specified left-closed temporal interval. Applies to all temporal dimensions. The interval has to be specified as an array with exactly two elements:\n\n1. The first element is the start of the temporal interval. The specified instance in time is **included** in the interval.\n2. The second element is the end of the temporal interval. The specified instance in time is **excluded** from the interval.\n\nThe specified temporal strings follow [RFC 3339](https://www.rfc-editor.org/rfc/rfc3339.html). Also supports open intervals by setting one of the boundaries to `null`, but never both.\n\nSet this parameter to `null` to set no limit for the temporal extent. Be careful with this when loading large datasets!", "schema": [ { "type": "array", From 6d24fe869a89214463e06e439e2d0c59dd97ebb1 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 28 May 2021 15:42:49 +0200 Subject: [PATCH 085/109] Allow MultiPolygon in load_collection, discourage use of GeometryCollection --- CHANGELOG.md | 6 +++++- load_collection.json | 2 +- mask_polygon.json | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 601aaf82..6513322e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,9 +24,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Moved the experimental processes `aggregate_spatial_binary`, `reduce_dimension_binary` and `run_udf_externally` to the proposals. - Moved the rarely used and implemented processes `cummax`, `cummin`, `cumproduct`, `cumsum`, `debug`, `filter_labels`, `load_result`, `load_uploaded_files`, `resample_cube_temporal` to the proposals. - Exception messages have been aligned always use ` instead of '. Tooling could render it with CommonMark. -- `mask_polygon`: Also support multi polygons instead of just polygons. [#237](https://github.com/Open-EO/openeo-processes/issues/237) +- `load_collection` and `mask_polygon`: Also support multi polygons instead of just polygons. [#237](https://github.com/Open-EO/openeo-processes/issues/237) - `run_udf` and `run_udf_externally`: Specify specific (extensible) protocols for UDF URIs. 
+### Deprecated + +- `GeometryCollection`s are discouraged in all relevant processes. + ### Fixed - Clarify that the user workspace is server-side. [#225](https://github.com/Open-EO/openeo-processes/issues/225) - Clarify that the `condition` parameter for `array_filter` works also on indices and labels. diff --git a/load_collection.json b/load_collection.json index 58a4842e..3c12970e 100644 --- a/load_collection.json +++ b/load_collection.json @@ -18,7 +18,7 @@ }, { "name": "spatial_extent", - "description": "Limits the data to load from the collection to the specified bounding box or polygons.\n\nThe process puts a pixel into the data cube if the point at the pixel center intersects with the bounding box or any of the polygons (as defined in the Simple Features standard by the OGC).\n\nThe GeoJSON can be one of the following GeoJSON types:\n\n* A `Polygon` geometry,\n* a `GeometryCollection` containing Polygons,\n* a `Feature` with a `Polygon` geometry or\n* a `FeatureCollection` containing `Feature`s with a `Polygon` geometry.\n\nSet this parameter to `null` to set no limit for the spatial extent. Be careful with this when loading large datasets!", + "description": "Limits the data to load from the collection to the specified bounding box or polygons.\n\nThe process puts a pixel into the data cube if the point at the pixel center intersects with the bounding box or any of the polygons (as defined in the Simple Features standard by the OGC).\n\nThe GeoJSON can be one of the following feature types:\n\n* A `Polygon` or `MultiPolygon` geometry,\n* a `Feature` with a `Polygon` or `MultiPolygon` geometry,\n* a `FeatureCollection` containing at least one `Feature` with `Polygon` or `MultiPolygon` geometries, or\n* a `GeometryCollection` containing `Polygon` or `MultiPolygon` geometries. To maximize interoperability, `GeometryCollection` should be avoided in favour of one of the alternatives above.\n\nSet this parameter to `null` to set no limit for the spatial extent. Be careful with this when loading large datasets!", "schema": [ { "title": "Bounding Box", diff --git a/mask_polygon.json b/mask_polygon.json index c7e7d4cf..c1f59d4e 100644 --- a/mask_polygon.json +++ b/mask_polygon.json @@ -17,7 +17,7 @@ }, { "name": "mask", - "description": "A GeoJSON object containing at least one polygon. The provided feature types can be one of the following:\n\n* A `Polygon` or `MultiPolygon` geometry,\n* a `GeometryCollection` containing `Polygon` or `MultiPolygon` geometries,\n* a `Feature` with a `Polygon` or `MultiPolygon` geometry, or\n* a `FeatureCollection` containing at least one `Feature` with `Polygon` or `MultiPolygon` geometries.", + "description": "A GeoJSON object containing at least one polygon. The provided feature types can be one of the following:\n\n* A `Polygon` or `MultiPolygon` geometry,\n* a `Feature` with a `Polygon` or `MultiPolygon` geometry,\n* a `FeatureCollection` containing at least one `Feature` with `Polygon` or `MultiPolygon` geometries, or\n* a `GeometryCollection` containing `Polygon` or `MultiPolygon` geometries. 
To maximize interoperability, `GeometryCollection` should be avoided in favour of one of the alternatives above.", "schema": { "type": "object", "subtype": "geojson" From 58268036307454c80ffb0ea3d5cc0f8b0eab709f Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 28 May 2021 17:52:49 +0200 Subject: [PATCH 086/109] Clarifications for load_collection #256 --- CHANGELOG.md | 2 ++ load_collection.json | 7 ++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6513322e..9366d2ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Clarified disallowed characters in subtype `file-path`. - Clarified that UDF source code must contain a newline/line-break (affects `run_udf`). - `aggregate_spatial`, `aggregate_spatial_binary`: Clarified that Features, Geometries and GeometryCollections are a single entity in computations. Only FeatureCollections are multiple entities. [#252](https://github.com/Open-EO/openeo-processes/issues/252) +- `load_collection`, parameter `spatial_extent`: Clarified that all pixels that are inside the bounding box of the given polygons but do not intersect with any polygon have to be set to no-data (`null`). [#256](https://github.com/Open-EO/openeo-processes/issues/256) +- `load_collection`: Clarified that the parameters are recommended to be used in favor of `filter_*` processes. ## 1.0.0 - 2020-07-31 diff --git a/load_collection.json b/load_collection.json index 3c12970e..6a0a080b 100644 --- a/load_collection.json +++ b/load_collection.json @@ -18,7 +18,7 @@ }, { "name": "spatial_extent", - "description": "Limits the data to load from the collection to the specified bounding box or polygons.\n\nThe process puts a pixel into the data cube if the point at the pixel center intersects with the bounding box or any of the polygons (as defined in the Simple Features standard by the OGC).\n\nThe GeoJSON can be one of the following feature types:\n\n* A `Polygon` or `MultiPolygon` geometry,\n* a `Feature` with a `Polygon` or `MultiPolygon` geometry,\n* a `FeatureCollection` containing at least one `Feature` with `Polygon` or `MultiPolygon` geometries, or\n* a `GeometryCollection` containing `Polygon` or `MultiPolygon` geometries. To maximize interoperability, `GeometryCollection` should be avoided in favour of one of the alternatives above.\n\nSet this parameter to `null` to set no limit for the spatial extent. Be careful with this when loading large datasets!", + "description": "Limits the data to load from the collection to the specified bounding box or polygons.\n\nThe process puts a pixel into the data cube if the point at the pixel center intersects with the bounding box or any of the polygons (as defined in the Simple Features standard by the OGC).\n\nThe GeoJSON can be one of the following feature types:\n\n* A `Polygon` or `MultiPolygon` geometry,\n* a `Feature` with a `Polygon` or `MultiPolygon` geometry,\n* a `FeatureCollection` containing at least one `Feature` with `Polygon` or `MultiPolygon` geometries, or\n* a `GeometryCollection` containing `Polygon` or `MultiPolygon` geometries. To maximize interoperability, `GeometryCollection` should be avoided in favour of one of the alternatives above.\n\nSet this parameter to `null` to set no limit for the spatial extent. Be careful with this when loading large datasets! 
It is recommended to use this parameter instead of using ``filter_bbox()`` or ``filter_spatial()`` directly after loading unbounded data.", "schema": [ { "title": "Bounding Box", @@ -93,6 +93,7 @@ }, { "title": "GeoJSON", + "description": "Limits the data cube to the bounding box of the given geometry. All pixels inside the bounding box that do not intersect with any of the polygons will be set to no data (`null`).", "type": "object", "subtype": "geojson" }, @@ -105,7 +106,7 @@ }, { "name": "temporal_extent", - "description": "Limits the data to load from the collection to the specified left-closed temporal interval. Applies to all temporal dimensions. The interval has to be specified as an array with exactly two elements:\n\n1. The first element is the start of the temporal interval. The specified instance in time is **included** in the interval.\n2. The second element is the end of the temporal interval. The specified instance in time is **excluded** from the interval.\n\nThe specified temporal strings follow [RFC 3339](https://www.rfc-editor.org/rfc/rfc3339.html). Also supports open intervals by setting one of the boundaries to `null`, but never both.\n\nSet this parameter to `null` to set no limit for the temporal extent. Be careful with this when loading large datasets!", + "description": "Limits the data to load from the collection to the specified left-closed temporal interval. Applies to all temporal dimensions. The interval has to be specified as an array with exactly two elements:\n\n1. The first element is the start of the temporal interval. The specified instance in time is **included** in the interval.\n2. The second element is the end of the temporal interval. The specified instance in time is **excluded** from the interval.\n\nThe specified temporal strings follow [RFC 3339](https://www.rfc-editor.org/rfc/rfc3339.html). Also supports open intervals by setting one of the boundaries to `null`, but never both.\n\nSet this parameter to `null` to set no limit for the temporal extent. Be careful with this when loading large datasets! It is recommended to use this parameter instead of using ``filter_temporal()`` directly after loading unbounded data.", "schema": [ { "type": "array", @@ -156,7 +157,7 @@ }, { "name": "bands", - "description": "Only adds the specified bands into the data cube so that bands that don't match the list of band names are not available. Applies to all dimensions of type `bands`.\n\nEither the unique band name (metadata field `name` in bands) or one of the common band names (metadata field `common_name` in bands) can be specified. If the unique band name and the common name conflict, the unique band name has a higher priority.\n\nThe order of the specified array defines the order of the bands in the data cube. f multiple bands match a common name, all matched bands are included in the original order.", + "description": "Only adds the specified bands into the data cube so that bands that don't match the list of band names are not available. Applies to all dimensions of type `bands`.\n\nEither the unique band name (metadata field `name` in bands) or one of the common band names (metadata field `common_name` in bands) can be specified. If the unique band name and the common name conflict, the unique band name has a higher priority.\n\nThe order of the specified array defines the order of the bands in the data cube. 
If multiple bands match a common name, all matched bands are included in the original order.\n\nIt is recommended to use this parameter instead of using ``filter_bands()`` directly after loading unbounded data.", "schema": [ { "type": "array",
From 279293464fe9c86cfd5480c5aceeb80e217bb6ea Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 31 May 2021 14:05:57 +0200 Subject: [PATCH 087/109] branching behavior and short-circuit conditions #246 --- CHANGELOG.md | 1 + meta/implementation.md | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9366d2ed..ef230f6a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `nan` - Added return value details (property `returns`) for the schemas with the subtype `process-graph`. [API#350](https://github.com/Open-EO/openeo-api/issues/350) - `apply_neighborhood`: Clarify behavior for data cubes returned by the child processes and for that add the exception `DataCubePropertiesImmutable`. +- Added a guide for implementors that describes numerous implementation details for processes that could not be covered in the specifications themselves, for example a recommended implementation for the `if` process. [#246](https://github.com/Open-EO/openeo-processes/issues/246) ### Changed - Added `proposals` folder for experimental processes. Experimental processes are not covered by the CHANGELOG and MAY include breaking changes! [#196](https://github.com/Open-EO/openeo-processes/issues/196), [#207](https://github.com/Open-EO/openeo-processes/issues/207), [PSC#8](https://github.com/Open-EO/PSC/issues/8) diff --git a/meta/implementation.md b/meta/implementation.md index b01d219c..faf13f4f 100644 --- a/meta/implementation.md +++ b/meta/implementation.md @@ -2,6 +2,39 @@ This file is meant to provide some additional implementation details for back-ends.
+## Optimizations for conditions (e.g. `if`)
+
+None of the openEO processes per se is "special" and thus all are treated the same way by default.
+Nevertheless, there are some cases where a special treatment can make a huge difference.
+
+### Branching behavior
+
+The `if` process (and any process that is working on some kind of condition) are usually
+special control structures and not normal function. Those conditionals usually decide between
+one outcome or the other. Evaluating them in a naive way would compute both outcomes and depending
+on the condition use one outcome and discard the other.
+This can and should be optimized by "lazily" only computing the outcome that is actually used.
+This could have a huge impact on performance as some computation doesn't need to be executed at all.
+
+openEO doesn't require special handling for the `if` process, but it is **strongly recommended**
+that back-ends treat them specially and only compute the outcome that is actually needed.
+In the end, this is faster and cheaper for the user and thus users may prefer back-ends
+that offer this optimization. Fortunately, both ways still lead to the same results
+and comparability and reproducibility of the results are still given.
+
+### Short-circuit evaluation
+
+Similarly, back-ends **should** ["short-circuit"](https://en.wikipedia.org/wiki/Short-circuit_evaluation)
+the evaluation of conditions, which means that once a condition has reached an unambiguous result
+the evaluation should stop and provide the result directly.
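+As a sketch of what this means in a process graph (hypothetical node names, not part of the official specification), consider an `or` whose second operand is the more expensive one to compute:
+
+```json
+{
+  "cheap": {
+    "process_id": "gt",
+    "arguments": {"x": {"from_parameter": "x"}, "y": 0}
+  },
+  "expensive": {
+    "process_id": "lt",
+    "arguments": {"x": {"from_parameter": "x"}, "y": 0}
+  },
+  "either": {
+    "process_id": "or",
+    "arguments": {"x": {"from_node": "cheap"}, "y": {"from_node": "expensive"}},
+    "result": true
+  }
+}
+```
+
+A short-circuiting back-end can skip evaluating the `expensive` node entirely whenever `cheap` already evaluated to `true`.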
+
+For example, the condition `A > 0 or A < 0` should only execute `A < 0` if `A > 0` is false as
+otherwise the result is already unambiguous and will be `true` regardless of the rest of the
+condition.
+
+Implementing this behavior does not have any negative side-effects so that
+comparability and reproducibility of the results are still given.
+
 ## Enums for processing methods There are numerous processes that provide a predefined set of processing methods.
From 4a0fdf8762e9650d58a11f52860d7b77cce50c9c Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 31 May 2021 14:41:45 +0200 Subject: [PATCH 088/109] Remove binary reducers #258 --- CHANGELOG.md | 6 +- aggregate_spatial.json | 2 +- proposals/aggregate_spatial_binary.json | 114 ------------------------ proposals/reduce_dimension_binary.json | 99 -------------------- reduce_dimension.json | 2 +- 5 files changed, 7 insertions(+), 216 deletions(-) delete mode 100644 proposals/aggregate_spatial_binary.json delete mode 100644 proposals/reduce_dimension_binary.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 9366d2ed..2958d525 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,7 +21,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - Added `proposals` folder for experimental processes. Experimental processes are not covered by the CHANGELOG and MAY include breaking changes! [#196](https://github.com/Open-EO/openeo-processes/issues/196), [#207](https://github.com/Open-EO/openeo-processes/issues/207), [PSC#8](https://github.com/Open-EO/PSC/issues/8) - - Moved the experimental processes `aggregate_spatial_binary`, `reduce_dimension_binary` and `run_udf_externally` to the proposals. + - Moved the experimental process `run_udf_externally` to the proposals. - Moved the rarely used and implemented processes `cummax`, `cummin`, `cumproduct`, `cumsum`, `debug`, `filter_labels`, `load_result`, `load_uploaded_files`, `resample_cube_temporal` to the proposals. - Exception messages have been aligned to always use ` instead of '. Tooling could render it with CommonMark. - `load_collection` and `mask_polygon`: Also support multi polygons instead of just polygons. [#237](https://github.com/Open-EO/openeo-processes/issues/237) - `run_udf` and `run_udf_externally`: Specify specific (extensible) protocols for UDF URIs. ### Deprecated - `GeometryCollection`s are discouraged in all relevant processes. ### Removed + +- Removed the experimental processes `aggregate_spatial_binary` and `reduce_dimension_binary`. [#258](https://github.com/Open-EO/openeo-processes/issues/258) ### Fixed - Clarify that the user workspace is server-side. [#225](https://github.com/Open-EO/openeo-processes/issues/225) - Clarify that the `condition` parameter for `array_filter` works also on indices and labels. diff --git a/aggregate_spatial.json b/aggregate_spatial.json index 45f36e9d..419694f1 100644 --- a/aggregate_spatial.json +++ b/aggregate_spatial.json @@ -1,7 +1,7 @@ { "id": "aggregate_spatial", "summary": "Zonal statistics for geometries", - "description": "Aggregates statistics for one or more geometries (e.g. zonal statistics for polygons) over the spatial dimensions. This process passes a list of values to the reducer.
In contrast, ``aggregate_spatial_binary()`` passes two values, which may be better suited especially for UDFs in case the number of values gets too large to be processed at once.\n\nThe data cube must have been reduced to only contain two spatial dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a time series. Otherwise, this process fails with the `TooManyDimensions` exception.\n\nThe number of total and valid pixels is returned together with the calculated values.", + "description": "Aggregates statistics for one or more geometries (e.g. zonal statistics for polygons) over the spatial dimensions. This process passes a list of values to the reducer.\n\nThe data cube must have been reduced to only contain two spatial dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a time series. Otherwise, this process fails with the `TooManyDimensions` exception.\n\nThe number of total and valid pixels is returned together with the calculated values.", "categories": [ "cubes", "aggregate & resample" diff --git a/proposals/aggregate_spatial_binary.json b/proposals/aggregate_spatial_binary.json deleted file mode 100644 index f09909a7..00000000 --- a/proposals/aggregate_spatial_binary.json +++ /dev/null @@ -1,114 +0,0 @@ -{ - "id": "aggregate_spatial_binary", - "summary": "Zonal statistics for geometries by binary aggregation", - "description": "Aggregates statistics for one or more geometries (e.g. zonal statistics for polygons) over the spatial dimensions. This process consecutively passes a pair of values to the reducer. This may be better suited especially for UDFs in case the number of values gets too large to be processed at once. In contrast, ``aggregate_spatial()`` passes a list of values.\n\nThe data cube must have been reduced to only contain two raster dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a time series. Otherwise, this process fails with the `TooManyDimensions` exception.\n\nThe number of total and valid pixels is returned together with the calculated values.", - "categories": [ - "cubes", - "aggregate & resample" - ], - "experimental": true, - "parameters": [ - { - "name": "data", - "description": "A raster data cube. The data cube implicitly gets restricted to the bounds of the geometries as if ``filter_spatial()`` would have been used with the same values for the corresponding parameters immediately before this process.", - "schema": { - "type": "object", - "subtype": "raster-cube" - } - }, - { - "name": "geometries", - "description": "Geometries as GeoJSON on which the aggregation will be based.\n\nOne value will be computed per GeoJSON `Feature`, `Geometry` or `GeometryCollection`. For a `FeatureCollection` multiple values will be computed, one value per contained `Feature`. 
For example, a single value will be computed for a `MultiPolygon`, but two values will be computed for a `FeatureCollection` containing two polygons.\n\n- For **polygons**, the process considers all pixels for which the point at the pixel center intersects with the corresponding polygon (as defined in the Simple Features standard by the OGC).\n- For **points**, the process considers the closest pixel center.\n- For **lines** (line strings), the process considers all the pixels whose centers are closest to at least one point on the line.\n\nThus, pixels may be part of multiple geometries and be part of multiple aggregations.\n\nTo maximize interoperability, a nested `GeometryCollection` should be avoided. Furthermore, a `GeometryCollection` composed of a single type of geometries should be avoided in favour of the corresponding multi-part type (e.g. `MultiPolygon`).", - "schema": { - "type": "object", - "subtype": "geojson" - } - }, - { - "name": "reducer", - "description": "A reduction operator to be applied consecutively on tuples of values. It must be both associative and commutative as the execution may be executed in parallel and therefore the order of execution is arbitrary. The reduction operator may be a single process such as ``multiply()`` or consist of multiple sub-processes.", - "schema": { - "type": "object", - "subtype": "process-graph", - "parameters": [ - { - "name": "x", - "description": "The first value.", - "schema": { - "description": "Any data type." - } - }, - { - "name": "y", - "description": "The second value.", - "schema": { - "description": "Any data type." - } - }, - { - "name": "context", - "description": "Additional data passed by the user.", - "schema": { - "description": "Any data type." - }, - "optional": true, - "default": null - } - ], - "returns": { - "description": "The value to be set in the vector data cube.", - "schema": { - "description": "Any data type." - } - } - } - }, - { - "name": "target_dimension", - "description": "The new dimension name to be used for storing the results. Defaults to `result`.", - "schema": { - "type": "string" - }, - "default": "result", - "optional": true - }, - { - "name": "context", - "description": "Additional data to be passed to the reducer.", - "schema": { - "description": "Any data type." - }, - "optional": true, - "default": null - } - ], - "returns": { - "description": "A vector data cube with the computed results and restricted to the bounds of the geometries.\n\nThe computed value is used for the dimension with the name that was specified in the parameter `target_dimension`.\n\nThe computation also stores information about the total count of pixels (valid + invalid pixels) and the number of valid pixels (see ``is_valid()``) for each geometry. These values are added as a new dimension with a dimension name derived from `target_dimension` by adding the suffix `_meta`. The new dimension has the dimension labels `total_count` and `valid_count`.", - "schema": { - "type": "object", - "subtype": "vector-cube" - } - }, - "exceptions": { - "TooManyDimensions": { - "message": "The number of dimensions must be reduced to three for `aggregate_spatial_binary`." 
- } - }, - "links": [ - { - "href": "https://openeo.org/documentation/1.0/datacubes.html#aggregate", - "rel": "about", - "title": "Aggregation explained in the openEO documentation" - }, - { - "href": "http://www.opengeospatial.org/standards/sfa", - "rel": "about", - "title": "Simple Features standard by the OGC" - }, - { - "rel": "about", - "href": "https://en.wikipedia.org/wiki/Reduction_Operator", - "title": "Background information on reduction operators (binary reducers) by Wikipedia" - } - ] -} diff --git a/proposals/reduce_dimension_binary.json b/proposals/reduce_dimension_binary.json deleted file mode 100644 index 3ca58341..00000000 --- a/proposals/reduce_dimension_binary.json +++ /dev/null @@ -1,99 +0,0 @@ -{ - "id": "reduce_dimension_binary", - "summary": "Reduce dimensions using binary reduction", - "description": "Applies a binary reducer to a data cube dimension by collapsing all the pixel values along the specified dimension into an output value computed by the reducer. This process consecutively passes a pair of values to the reducer. This may be better suited especially for UDFs in case the number of values gets too large to be processed at once. In contrast, ``reduce_dimension()`` passes a list of values.\n\nThe dimension is dropped. To avoid this, use ``apply_dimension()`` instead.", - "categories": [ - "cubes", - "reducer" - ], - "experimental": true, - "parameters": [ - { - "name": "data", - "description": "A data cube.", - "schema": { - "type": "object", - "subtype": "raster-cube" - } - }, - { - "name": "reducer", - "description": "A reduction operator to be applied consecutively on pairs of values. It must be both associative and commutative as the execution may be executed in parallel and therefore the order of execution is arbitrary. The reduction operator may be a single process such as ``multiply()`` or consist of multiple sub-processes.", - "schema": { - "type": "object", - "subtype": "process-graph", - "parameters": [ - { - "name": "x", - "description": "The first value.", - "schema": { - "description": "Any data type." - } - }, - { - "name": "y", - "description": "The second value.", - "schema": { - "description": "Any data type." - } - }, - { - "name": "context", - "description": "Additional data passed by the user.", - "schema": { - "description": "Any data type." - }, - "optional": true, - "default": null - } - ], - "returns": { - "description": "The value to be set in the new data cube.", - "schema": { - "description": "Any data type." - } - } - } - }, - { - "name": "dimension", - "description": "The name of the dimension over which to reduce. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.", - "schema": { - "type": "string" - } - }, - { - "name": "context", - "description": "Additional data to be passed to the reducer.", - "schema": { - "description": "Any data type." - }, - "optional": true, - "default": null - } - ], - "returns": { - "description": "A data cube with the newly computed values. It is missing the given dimension, the number of dimensions decreases by one. The dimension properties (name, type, labels, reference system and resolution) for all other dimensions remain unchanged.", - "schema": { - "type": "object", - "subtype": "raster-cube" - } - }, - "exceptions": { - "DimensionNotAvailable": { - "message": "A dimension with the specified name does not exist." 
- } - }, - "links": [ - { - "href": "https://openeo.org/documentation/1.0/datacubes.html#reduce", - "rel": "about", - "title": "Reducers explained in the openEO documentation" - }, - { - "rel": "about", - "href": "https://en.wikipedia.org/wiki/Reduction_Operator", - "title": "Background information on reduction operators (binary reducers) by Wikipedia" - } - ] -} \ No newline at end of file diff --git a/reduce_dimension.json b/reduce_dimension.json index e52eaf52..1956f026 100644 --- a/reduce_dimension.json +++ b/reduce_dimension.json @@ -1,7 +1,7 @@ { "id": "reduce_dimension", "summary": "Reduce dimensions", - "description": "Applies a unary reducer to a data cube dimension by collapsing all the pixel values along the specified dimension into an output value computed by the reducer. This process passes a list of values to the reducer. In contrast, ``reduce_dimension_binary()`` passes two values, which may be better suited especially for UDFs in case the number of values gets too large to be processed at once.\n\nThe dimension is dropped. To avoid this, use ``apply_dimension()`` instead.", + "description": "Applies a unary reducer to a data cube dimension by collapsing all the pixel values along the specified dimension into an output value computed by the reducer. The dimension is dropped. To avoid this, use ``apply_dimension()`` instead.", "categories": [ "cubes", "reducer" From 185a4e9d3e510d1879fa80d14991d51c4639ebfa Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 31 May 2021 14:45:45 +0200 Subject: [PATCH 089/109] reduce_spatial #226 and clarifications #260 --- CHANGELOG.md | 3 +- aggregate_spatial.json | 2 +- proposals/aggregate_spatial_binary.json | 2 +- proposals/reduce_dimension_binary.json | 2 +- proposals/reduce_spatial.json | 79 +++++++++++++++++++++++++ reduce_dimension.json | 2 +- 6 files changed, 85 insertions(+), 5 deletions(-) create mode 100644 proposals/reduce_spatial.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 9366d2ed..6748ec82 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `date_shift` - `is_infinite` - `nan` + - `reduce_spatial` - Added return value details (property `returns`) for the schemas with the subtype `process-graph`. [API#350](https://github.com/Open-EO/openeo-api/issues/350) - `apply_neighborhood`: Clarify behavior for data cubes returned by the child processes and for that add the exception `DataCubePropertiesImmutable`. @@ -28,7 +29,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `run_udf` and `run_udf_externally`: Specify specific (extensible) protocols for UDF URIs. ### Deprecated - - `GeometryCollection`s are discouraged in all relevant processes. ### Fixed @@ -48,6 +48,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Clarified disallowed characters in subtype `file-path`. - Clarified that UDF source code must contain a newline/line-break (affects `run_udf`). - `aggregate_spatial`, `aggregate_spatial_binary`: Clarified that Features, Geometries and GeometryCollections are a single entity in computations. Only FeatureCollections are multiple entities. [#252](https://github.com/Open-EO/openeo-processes/issues/252) +- `aggregate_spatial`: Clarified that the values have no predefined order and reducers such as `first`, `last` and `median` return unpredictable results. 
[#260](https://github.com/Open-EO/openeo-processes/issues/260) - `load_collection`, parameter `spatial_extent`: Clarified that all pixels that are inside the bounding box of the given polygons but do not intersect with any polygon have to be set to no-data (`null`). [#256](https://github.com/Open-EO/openeo-processes/issues/256) - `load_collection`: Clarified that the parameters are recommended to be used in favor of `filter_*` processes. diff --git a/aggregate_spatial.json b/aggregate_spatial.json index 45f36e9d..1cded4e5 100644 --- a/aggregate_spatial.json +++ b/aggregate_spatial.json @@ -1,7 +1,7 @@ { "id": "aggregate_spatial", "summary": "Zonal statistics for geometries", - "description": "Aggregates statistics for one or more geometries (e.g. zonal statistics for polygons) over the spatial dimensions. This process passes a list of values to the reducer. In contrast, ``aggregate_spatial_binary()`` passes two values, which may be better suited especially for UDFs in case the number of values gets too large to be processed at once.\n\nThe data cube must have been reduced to only contain two spatial dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a time series. Otherwise, this process fails with the `TooManyDimensions` exception.\n\nThe number of total and valid pixels is returned together with the calculated values.", + "description": "Aggregates statistics for one or more geometries (e.g. zonal statistics for polygons) over the spatial dimensions.\n\nThis process passes a list of values to the reducer. The list of values has an undefined order, therefore processes such as ``last()`` and ``first()`` that depend on the order of the values will lead to unpredictable results. In contrast, ``aggregate_spatial_binary()`` passes two values, which may be better suited especially for UDFs in case the number of values gets too large to be processed at once. An 'unbounded' aggregation over the full extent of the horizontal spatial dimensions can be computed with the process ``reduce_spatial()``.\n\nThe data cube must have been reduced to only contain two spatial dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a time series. Otherwise, this process fails with the `TooManyDimensions` exception.\n\nThe number of total and valid pixels is returned together with the calculated values.", "categories": [ "cubes", "aggregate & resample" diff --git a/proposals/aggregate_spatial_binary.json b/proposals/aggregate_spatial_binary.json index f09909a7..d85f80a4 100644 --- a/proposals/aggregate_spatial_binary.json +++ b/proposals/aggregate_spatial_binary.json @@ -1,7 +1,7 @@ { "id": "aggregate_spatial_binary", "summary": "Zonal statistics for geometries by binary aggregation", - "description": "Aggregates statistics for one or more geometries (e.g. zonal statistics for polygons) over the spatial dimensions. This process consecutively passes a pair of values to the reducer. This may be better suited especially for UDFs in case the number of values gets too large to be processed at once. In contrast, ``aggregate_spatial()`` passes a list of values.\n\nThe data cube must have been reduced to only contain two raster dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a time series. 
Otherwise, this process fails with the `TooManyDimensions` exception.\n\nThe number of total and valid pixels is returned together with the calculated values.", + "description": "Aggregates statistics for one or more geometries (e.g. zonal statistics for polygons) over the spatial dimensions.\n\nThis process consecutively passes a pair of values to the reducer. This may be better suited especially for UDFs in case the number of values gets too large to be processed at once. In contrast, ``aggregate_spatial()`` passes a list of values. An 'unbounded' aggregation over the full extent of the horizontal spatial dimensions can be computed with the process ``reduce_spatial_binary()``.\n\nThe data cube must have been reduced to only contain two raster dimensions and a third dimension the values are aggregated for, for example the temporal dimension to get a time series. Otherwise, this process fails with the `TooManyDimensions` exception.\n\nThe number of total and valid pixels is returned together with the calculated values.", "categories": [ "cubes", "aggregate & resample" diff --git a/proposals/reduce_dimension_binary.json b/proposals/reduce_dimension_binary.json index 3ca58341..98e5477a 100644 --- a/proposals/reduce_dimension_binary.json +++ b/proposals/reduce_dimension_binary.json @@ -1,7 +1,7 @@ { "id": "reduce_dimension_binary", "summary": "Reduce dimensions using binary reduction", - "description": "Applies a binary reducer to a data cube dimension by collapsing all the pixel values along the specified dimension into an output value computed by the reducer. This process consecutively passes a pair of values to the reducer. This may be better suited especially for UDFs in case the number of values gets too large to be processed at once. In contrast, ``reduce_dimension()`` passes a list of values.\n\nThe dimension is dropped. To avoid this, use ``apply_dimension()`` instead.", + "description": "Applies a binary reducer to a data cube dimension by collapsing all the pixel values along the specified dimension into an output value computed by the reducer. The dimension is dropped.\n\nThis process consecutively passes a pair of values to the reducer. This may be better suited especially for UDFs in case the number of values gets too large to be processed at once. In contrast, ``reduce_dimension()`` passes a list of values. An aggregation over certain spatial areas can be computed with the process ``aggregate_spatial_binary()``.", "categories": [ "cubes", "reducer" diff --git a/proposals/reduce_spatial.json b/proposals/reduce_spatial.json new file mode 100644 index 00000000..8924ee41 --- /dev/null +++ b/proposals/reduce_spatial.json @@ -0,0 +1,79 @@ +{ + "id": "reduce_spatial", + "summary": "Reduce horizontal spatial dimensions", + "description": "Applies a unary reducer to a data cube dimension by collapsing all the pixel values along the horizontal spatial dimensions (i.e. axes `x` and `y`) into an output value computed by the reducer. The horizontal spatial dimensions are dropped.\n\nThis process passes a list of values to the reducer. The list of values has an undefined order, therefore processes such as ``last()`` and ``first()`` that depend on the order of the values will lead to unpredictable results.\n\nIn contrast to this process, ``reduce_spatial_binary()`` passes two values, which may be better suited especially for UDFs in case the number of values gets too large to be processed at once. 
An aggregation over certain spatial areas can be computed with the process ``aggregate_spatial()``.", + "categories": [ + "aggregate & resample", + "cubes", + "reducer" + ], + "experimental": true, + "parameters": [ + { + "name": "data", + "description": "A data cube.", + "schema": { + "type": "object", + "subtype": "raster-cube" + } + }, + { + "name": "reducer", + "description": "A reducer to apply on the horizontal spatial dimensions. A reducer is a single process such as ``mean()`` or a set of processes, which computes a single value for a list of values, see the category 'reducer' for such processes.", + "schema": { + "type": "object", + "subtype": "process-graph", + "parameters": [ + { + "name": "data", + "description": "An array with elements of any type.", + "schema": { + "type": "array", + "items": { + "description": "Any data type." + } + } + }, + { + "name": "context", + "description": "Additional data passed by the user.", + "schema": { + "description": "Any data type." + }, + "optional": true, + "default": null + } + ], + "returns": { + "description": "The value to be set in the new data cube.", + "schema": { + "description": "Any data type." + } + } + } + }, + { + "name": "context", + "description": "Additional data to be passed to the reducer.", + "schema": { + "description": "Any data type." + }, + "optional": true, + "default": null + } + ], + "returns": { + "description": "A data cube with the newly computed values. It is missing the horizontal spatial dimensions, the number of dimensions decreases by two. The dimension properties (name, type, labels, reference system and resolution) for all other dimensions remain unchanged.", + "schema": { + "type": "object", + "subtype": "raster-cube" + } + }, + "links": [ + { + "href": "https://openeo.org/documentation/1.0/datacubes.html#reduce", + "rel": "about", + "title": "Reducers explained in the openEO documentation" + } + ] +} \ No newline at end of file diff --git a/reduce_dimension.json b/reduce_dimension.json index e52eaf52..34ddc060 100644 --- a/reduce_dimension.json +++ b/reduce_dimension.json @@ -1,7 +1,7 @@ { "id": "reduce_dimension", "summary": "Reduce dimensions", - "description": "Applies a unary reducer to a data cube dimension by collapsing all the pixel values along the specified dimension into an output value computed by the reducer. This process passes a list of values to the reducer. In contrast, ``reduce_dimension_binary()`` passes two values, which may be better suited especially for UDFs in case the number of values gets too large to be processed at once.\n\nThe dimension is dropped. To avoid this, use ``apply_dimension()`` instead.", + "description": "Applies a unary reducer to a data cube dimension by collapsing all the pixel values along the specified dimension into an output value computed by the reducer. The dimension is dropped. To avoid this, use ``apply_dimension()`` instead.\n\nThis process passes a list of values to the reducer. 
In contrast, ``reduce_dimension_binary()`` passes two values, which may be better suited especially for UDFs in case the number of values gets too large to be processed at once.", "categories": [ "cubes", "reducer" From 1169a1c2103b6b60618ccb1017efcbda64fadd7a Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 1 Jun 2021 15:51:41 +0200 Subject: [PATCH 090/109] Incorporated feedback from review --- CHANGELOG.md | 2 +- proposals/reduce_spatial.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3517415d..5b1f1e38 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,7 +52,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Clarified disallowed characters in subtype `file-path`. - Clarified that UDF source code must contain a newline/line-break (affects `run_udf`). - `aggregate_spatial`, `aggregate_spatial_binary`: Clarified that Features, Geometries and GeometryCollections are a single entity in computations. Only FeatureCollections are multiple entities. [#252](https://github.com/Open-EO/openeo-processes/issues/252) -- `aggregate_spatial`: Clarified that the values have no predefined order and reducers such as `first`, `last` and `median` return unpredictable results. [#260](https://github.com/Open-EO/openeo-processes/issues/260) +- `aggregate_spatial`: Clarified that the values have no predefined order and reducers such as `first` and `last` return unpredictable results. [#260](https://github.com/Open-EO/openeo-processes/issues/260) - `load_collection`, parameter `spatial_extent`: Clarified that all pixels that are inside the bounding box of the given polygons but do not intersect with any polygon have to be set to no-data (`null`). [#256](https://github.com/Open-EO/openeo-processes/issues/256) - `load_collection`: Clarified that the parameters are recommended to be used in favor of `filter_*` processes. diff --git a/proposals/reduce_spatial.json b/proposals/reduce_spatial.json index 660d28f4..1564d036 100644 --- a/proposals/reduce_spatial.json +++ b/proposals/reduce_spatial.json @@ -1,7 +1,7 @@ { "id": "reduce_spatial", - "summary": "Reduce horizontal spatial dimensions", - "description": "Applies a unary reducer to a data cube dimension by collapsing all the pixel values along the horizontal spatial dimensions (i.e. axes `x` and `y`) into an output value computed by the reducer. The horizontal spatial dimensions are dropped.\n\nAn aggregation over certain spatial areas can be computed with the process ``aggregate_spatial()``.\n\nThis process passes a list of values to the reducer. The list of values has an undefined order, therefore processes such as ``last()`` and ``first()`` that depend on the order of the values will lead to unpredictable results.", + "summary": "Reduce spatial dimensions 'x' and 'y", + "description": "Applies a unary reducer to a data cube by collapsing all the pixel values along the horizontal spatial dimensions (i.e. axes `x` and `y`) into an output value computed by the reducer. The horizontal spatial dimensions are dropped.\n\nAn aggregation over certain spatial areas can be computed with the process ``aggregate_spatial()``.\n\nThis process passes a list of values to the reducer. 
The list of values has an undefined order, therefore processes such as ``last()`` and ``first()`` that depend on the order of the values will lead to unpredictable results.", "categories": [ "aggregate & resample", "cubes",
From dda26168b7517c416bb73771f2ce5c9560680b17 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 1 Jun 2021 16:27:35 +0200 Subject: [PATCH 091/109] Improvements from code review --- meta/implementation.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meta/implementation.md b/meta/implementation.md index faf13f4f..b610d007 100644 --- a/meta/implementation.md +++ b/meta/implementation.md @@ -10,7 +10,7 @@ Nevertheless, there are some cases where a special treatment can make a huge dif ### Branching behavior
 The `if` process (and any process that is working on some kind of condition) are usually
-special control structures and not normal function. Those conditionals usually decide between
+special control structures and not normal functions. Those conditionals usually decide between
 one outcome or the other. Evaluating them in a naive way would compute both outcomes and depending
@@ -28,7 +28,7 @@ Similarly, back-ends **should** ["short-circuit"](https://en.wikipedia.org/wiki/ the evaluation of conditions, which means that once a condition has reached an unambiguous result the evaluation should stop and provide the result directly.
-For example, the condition `A > 0 or A < 0` should only execute `A < 0` if `A > 0` is false as
+For example, the condition `A > 0 or B > 0` should only execute `B > 0` if `A > 0` is false as
 otherwise the result is already unambiguous and will be `true` regardless of the rest of the
 condition.
From 302cc56ae69f70381ebaa004b07b38f10ad99769 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 2 Jun 2021 11:59:00 +0200 Subject: [PATCH 092/109] Replace "unary" with easier to understand language --- apply.json | 4 ++-- array_apply.json | 6 +++--- proposals/reduce_spatial.json | 4 ++-- reduce_dimension.json | 2 +- tests/.words | 3 +-- 5 files changed, 9 insertions(+), 10 deletions(-) diff --git a/apply.json b/apply.json index 3a24080b..7090a009 100644 --- a/apply.json +++ b/apply.json @@ -1,7 +1,7 @@ { "id": "apply", "summary": "Apply a process to each pixel", - "description": "Applies a *unary* process to each pixel value in the data cube (i.e. a local operation). A unary process takes a single value and returns a single value, for example ``abs()`` or ``linear_scale_range()``. In contrast, the process ``apply_dimension()`` applies a process to all pixel values along a particular dimension.", + "description": "Applies a process to each pixel value in the data cube (i.e. a local operation). In contrast, the process ``apply_dimension()`` applies a process to all pixel values along a particular dimension.", "categories": [ "cubes" ], @@ -16,7 +16,7 @@ }, { "name": "process", - "description": "A unary process to be applied on each value, may consist of multiple sub-processes.", + "description": "A process to be applied on each individual value, may consist of multiple sub-processes.
The process must accept and return a single value, for example ``abs()`` or ``linear_scale_range()``.", "schema": { "type": "object", "subtype": "process-graph", diff --git a/array_apply.json b/array_apply.json index 61ec1d3e..885b3a64 100644 --- a/array_apply.json +++ b/array_apply.json @@ -1,7 +1,7 @@ { "id": "array_apply", - "summary": "Apply a unary process to each array element", - "description": "Applies a **unary** process which takes a single value such as `abs` or `sqrt` to each value in the array. This is basically what other languages call either a `for each` loop or a `map` function.", + "summary": "Apply a process to each array element", + "description": "Applies a process to each individual value in the array. This is basically what other languages call either a `for each` loop or a `map` function.", "categories": [ "arrays" ], @@ -18,7 +18,7 @@ }, { "name": "process", - "description": "A process to be applied on each value, may consist of multiple sub-processes. The specified process must be unary meaning that it must work on a single value.", + "description": "A process to be applied on each value, may consist of multiple sub-processes. The process must accept and return a single value, for example ``abs()`` or ``sqrt()``.", "schema": { "type": "object", "subtype": "process-graph", diff --git a/proposals/reduce_spatial.json b/proposals/reduce_spatial.json index 1564d036..d9a2fb56 100644 --- a/proposals/reduce_spatial.json +++ b/proposals/reduce_spatial.json @@ -1,7 +1,7 @@ { "id": "reduce_spatial", - "summary": "Reduce spatial dimensions 'x' and 'y", - "description": "Applies a unary reducer to a data cube by collapsing all the pixel values along the horizontal spatial dimensions (i.e. axes `x` and `y`) into an output value computed by the reducer. The horizontal spatial dimensions are dropped.\n\nAn aggregation over certain spatial areas can be computed with the process ``aggregate_spatial()``.\n\nThis process passes a list of values to the reducer. The list of values has an undefined order, therefore processes such as ``last()`` and ``first()`` that depend on the order of the values will lead to unpredictable results.", + "summary": "Reduce spatial dimensions 'x' and 'y'", + "description": "Applies a reducer to a data cube by collapsing all the pixel values along the horizontal spatial dimensions (i.e. axes `x` and `y`) into an output value computed by the reducer. The horizontal spatial dimensions are dropped.\n\nAn aggregation over certain spatial areas can be computed with the process ``aggregate_spatial()``.\n\nThis process passes a list of values to the reducer. The list of values has an undefined order, therefore processes such as ``last()`` and ``first()`` that depend on the order of the values will lead to unpredictable results.", "categories": [ "aggregate & resample", "cubes", diff --git a/reduce_dimension.json b/reduce_dimension.json index e4d360d3..27ed34de 100644 --- a/reduce_dimension.json +++ b/reduce_dimension.json @@ -1,7 +1,7 @@ { "id": "reduce_dimension", "summary": "Reduce dimensions", - "description": "Applies a unary reducer to a data cube dimension by collapsing all the pixel values along the specified dimension into an output value computed by the reducer.\n\nThe dimension is dropped. To avoid this, use ``apply_dimension()`` instead.", + "description": "Applies a reducer to a data cube dimension by collapsing all the pixel values along the specified dimension into an output value computed by the reducer.\n\nThe dimension is dropped. 
To avoid this, use ``apply_dimension()`` instead.", "categories": [ "cubes", "reducer" diff --git a/tests/.words b/tests/.words index 568fd814..5c8a96df 100644 --- a/tests/.words +++ b/tests/.words @@ -30,5 +30,4 @@ Sentinel-2B signum STAC summand -UDFs -unary \ No newline at end of file +UDFs \ No newline at end of file
From 8ecd1d072f99c870775e5d62dfc030f1477994ea Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 2 Jun 2021 12:07:30 +0200 Subject: [PATCH 093/109] Added references to processes --- meta/implementation.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/meta/implementation.md b/meta/implementation.md index b610d007..af8ac782 100644 --- a/meta/implementation.md +++ b/meta/implementation.md @@ -25,8 +25,10 @@ ### Short-circuit evaluation
 Similarly, back-ends **should** ["short-circuit"](https://en.wikipedia.org/wiki/Short-circuit_evaluation)
-the evaluation of conditions, which means that once a condition has reached an unambiguous result
+the evaluation of conditions that use processes such as `and`, `or` or `xor`,
+which means that once a condition has reached an unambiguous result
 the evaluation should stop and provide the result directly.
+This is basically the same behavior that is also described in the processes `all` and `any`.
 For example, the condition `A > 0 or B > 0` should only execute `B > 0` if `A > 0` is false as
From 5f76884edb7722928e353961fd66f0db913c2cc5 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 4 Jun 2021 14:02:46 +0200 Subject: [PATCH 094/109] Improved wording around process to be applied on pixels --- apply.json | 2 +- array_apply.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/apply.json b/apply.json index 7090a009..a39292e0 100644 --- a/apply.json +++ b/apply.json @@ -16,7 +16,7 @@ }, { "name": "process", - "description": "A process to be applied on each individual value, may consist of multiple sub-processes. The process must accept and return a single value, for example ``abs()`` or ``linear_scale_range()``.", + "description": "A process that accepts and returns a single value and is applied on each individual value in the data cube. The process may consist of multiple sub-processes and could, for example, consist of processes such as ``abs()`` or ``linear_scale_range()``.", "schema": { "type": "object", "subtype": "process-graph", diff --git a/array_apply.json b/array_apply.json index 885b3a64..a0e248fa 100644 --- a/array_apply.json +++ b/array_apply.json @@ -18,7 +18,7 @@ }, { "name": "process", - "description": "A process to be applied on each value, may consist of multiple sub-processes. The process must accept and return a single value, for example ``abs()`` or ``sqrt()``.", + "description": "A process that accepts and returns a single value and is applied on each individual value in the array. The process may consist of multiple sub-processes and could, for example, consist of processes such as ``abs()`` or ``linear_scale_range()``.", "schema": { "type": "object", "subtype": "process-graph",
From 94d5c3333bde7ba7739587ccbd4d733ef7661392 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Mon, 14 Jun 2021 11:57:12 +0200 Subject: [PATCH 095/109] `aggregate_temporal` and `aggregate_temporal_period`: Clarified that reducers are also executed for intervals/periods with no data.
#263 --- CHANGELOG.md | 1 + aggregate_temporal.json | 4 ++-- aggregate_temporal_period.json | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6cc72b20..0e8a62a3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,6 +57,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `aggregate_spatial`: Clarified that the values have no predefined order and reducers such as `first` and `last` return unpredictable results. [#260](https://github.com/Open-EO/openeo-processes/issues/260) - `load_collection`, parameter `spatial_extent`: Clarified that all pixels that are inside the bounding box of the given polygons but do not intersect with any polygon have to be set to no-data (`null`). [#256](https://github.com/Open-EO/openeo-processes/issues/256) - `load_collection`: Clarified that the parameters are recommended to be used in favor of `filter_*` processes. +- `aggregate_temporal` and `aggregate_temporal_period`: Clarified that reducers are also executed for intervals/periods with no data. [#263](https://github.com/Open-EO/openeo-processes/issues/263) ## 1.0.0 - 2020-07-31 diff --git a/aggregate_temporal.json b/aggregate_temporal.json index 7c146326..480ffd98 100644 --- a/aggregate_temporal.json +++ b/aggregate_temporal.json @@ -87,14 +87,14 @@ }, { "name": "reducer", - "description": "A reducer to be applied on all values along the specified dimension. A reducer is a single process such as ``mean()`` or a set of processes, which computes a single value for a list of values, see the category 'reducer' for such processes.", + "description": "A reducer to be applied for the values contained in each interval. A reducer is a single process such as ``mean()`` or a set of processes, which computes a single value for a list of values, see the category 'reducer' for such processes. Intervals may not contain any values, which for most reducers leads to no-data (`null`) values by default.", "schema": { "type": "object", "subtype": "process-graph", "parameters": [ { "name": "data", - "description": "A labeled array with elements of any type.", + "description": "A labeled array with elements of any type. If there's no data for the interval, the array is empty.", "schema": { "type": "array", "subtype": "labeled-array", diff --git a/aggregate_temporal_period.json b/aggregate_temporal_period.json index 936410c1..b0d4c110 100644 --- a/aggregate_temporal_period.json +++ b/aggregate_temporal_period.json @@ -37,14 +37,14 @@ }, { "name": "reducer", - "description": "A reducer to be applied on all values along the specified dimension. A reducer is a single process such as ``mean()`` or a set of processes, which computes a single value for a list of values, see the category 'reducer' for such processes.", + "description": "A reducer to be applied for the values contained in each period. A reducer is a single process such as ``mean()`` or a set of processes, which computes a single value for a list of values, see the category 'reducer' for such processes. Periods may not contain any values, which for most reducers leads to no-data (`null`) values by default.", "schema": { "type": "object", "subtype": "process-graph", "parameters": [ { "name": "data", - "description": "A labeled array with elements of any type.", + "description": "A labeled array with elements of any type. 
If there's no data for the period, the array is empty.", "schema": { "type": "array", "subtype": "labeled-array", From 1904b3fe7c7717ccb44390c7ce26139dfe208157 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 15 Jun 2021 16:55:07 +0200 Subject: [PATCH 096/109] `dimension_labels`: Clarified that the process fails with a `DimensionNotAvailable` exception, if a dimension with the specified name does not exist. --- CHANGELOG.md | 1 + dimension_labels.json | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6823de0c..03cff55f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `load_collection`, parameter `spatial_extent`: Clarified that all pixels that are inside the bounding box of the given polygons but do not intersect with any polygon have to be set to no-data (`null`). [#256](https://github.com/Open-EO/openeo-processes/issues/256) - `load_collection`: Clarified that the parameters are recommended to be used in favor of `filter_*` processes. - `aggregate_temporal` and `aggregate_temporal_period`: Clarified that reducers are also executed for intervals/periods with no data. [#263](https://github.com/Open-EO/openeo-processes/issues/263) +- `dimension_labels`: Clarified that the process fails with a `DimensionNotAvailable` exception, if a dimension with the specified name does not exist. ## 1.0.0 - 2020-07-31 diff --git a/dimension_labels.json b/dimension_labels.json index 7597d491..8f01c577 100644 --- a/dimension_labels.json +++ b/dimension_labels.json @@ -1,7 +1,7 @@ { "id": "dimension_labels", "summary": "Get the dimension labels", - "description": "Gives all labels for a dimension in the data cube. The labels have the same order as in the data cube.", + "description": "Gives all labels for a dimension in the data cube. The labels have the same order as in the data cube.\n\nIf a dimension with the specified name does not exist, the process fails with a `DimensionNotAvailable` exception.", "categories": [ "cubes" ], @@ -37,5 +37,10 @@ ] } } + }, + "exceptions": { + "DimensionNotAvailable": { + "message": "A dimension with the specified name does not exist." + } } } \ No newline at end of file From e697a1f9e0891416df4ca8e8e3105f99e50c4dfc Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 18 Jun 2021 11:31:03 +0200 Subject: [PATCH 097/109] `merge_cubes`: Clarified the fourth example. #266 --- CHANGELOG.md | 1 + merge_cubes.json | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 03cff55f..f7fd195c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `array_apply`, `array_element`, `array_filter`: Added the `minimum: 0` constraint to all schemas describing zero-based indices (parameter `index`). - `array_labels`: Clarified the accepted data type for array elements passed to the parameter `data`. - `merge_cubes`: Clarified the dimension label order after the merge. [#212](https://github.com/Open-EO/openeo-processes/issues/212) +- `merge_cubes`: Clarified the fourth example. [#266](https://github.com/Open-EO/openeo-processes/issues/266) - Fixed typos, grammar issues and other spelling-related issues in many of the processes. - Fixed the examples `array_contains_nodata` and `array_find_nodata`. - Fixed links to openEO glossary and added links to data cube introduction. 
[#216](https://github.com/Open-EO/openeo-processes/issues/216) diff --git a/merge_cubes.json b/merge_cubes.json index 625d64e9..85a431d2 100644 --- a/merge_cubes.json +++ b/merge_cubes.json @@ -1,7 +1,7 @@ { "id": "merge_cubes", "summary": "Merge two data cubes", - "description": "The data cubes have to be compatible. A merge operation without overlap should be reversible with (a set of) filter operations for each of the two cubes. The process performs the join on overlapping dimensions, with the same name and type.\n\nAn overlapping dimension has the same name, type, reference system and resolution in both dimensions, but can have different labels. One of the dimensions can have different labels, for all other dimensions the labels must be equal. If data overlaps, the parameter `overlap_resolver` must be specified to resolve the overlap.\n\n**Examples for merging two data cubes:**\n\n1. Data cubes with the dimensions `x`, `y`, `t` and `bands` have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first cube and `B3` and `B4`. An overlap resolver is *not needed*. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has four dimension labels: `B1`, `B2`, `B3`, `B4`.\n2. Data cubes with the dimensions `x`, `y`, `t` and `bands` have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first data cube and `B2` and `B3` for the second. An overlap resolver is *required* to resolve overlap in band `B2`. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has three dimension labels: `B1`, `B2`, `B3`.\n3. Data cubes with the dimensions `x`, `y` and `t` have the same dimension labels in `x`, `y` and `t`. There are two options:\n 1. Keep the overlapping values separately in the merged data cube: An overlap resolver is *not needed*, but for each data cube you need to add a new dimension using ``add_dimension()``. The new dimensions must be equal, except that the labels for the new dimensions must differ by name. The merged data cube has the same dimensions and labels as the original data cubes, plus the dimension added with ``add_dimension()``, which has the two dimension labels after the merge.\n 2. Combine the overlapping values into a single value: An overlap resolver is *required* to resolve the overlap for all pixels. The merged data cube has the same dimensions and labels as the original data cubes, but all pixel values have been processed by the overlap resolver.\n4. Merging a data cube with dimensions `x`, `y`, `t` with another cube with dimensions `x`, `y` will join on the `x`, `y` dimension, so the lower dimension cube is merged with each time step in the higher dimensional cube. This can for instance be used to apply a digital elevation model to a spatio-temporal data cube.\n\nAfter the merge, the dimensions with a natural/inherent label order (with a reference system this is each spatial and temporal dimensions) still have all dimension labels sorted. For other dimensions where there is no inherent order, including bands, the dimension labels keep the order in which they are present in the original data cubes and the dimension labels of `cube2` are appended to the dimension labels of `cube1`.", + "description": "The data cubes have to be compatible. A merge operation without overlap should be reversible with (a set of) filter operations for each of the two cubes. 
The process performs the join on overlapping dimensions, with the same name and type.\n\nAn overlapping dimension has the same name, type, reference system and resolution in both dimensions, but can have different labels. One of the dimensions can have different labels, for all other dimensions the labels must be equal. If data overlaps, the parameter `overlap_resolver` must be specified to resolve the overlap.\n\n**Examples for merging two data cubes:**\n\n1. Data cubes with the dimensions (`x`, `y`, `t`, `bands`) have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first cube and `B3` and `B4`. An overlap resolver is *not needed*. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has four dimension labels: `B1`, `B2`, `B3`, `B4`.\n2. Data cubes with the dimensions (`x`, `y`, `t`, `bands`) have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first data cube and `B2` and `B3` for the second. An overlap resolver is *required* to resolve overlap in band `B2`. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has three dimension labels: `B1`, `B2`, `B3`.\n3. Data cubes with the dimensions (`x`, `y`, `t`) have the same dimension labels in `x`, `y` and `t`. There are two options:\n 1. Keep the overlapping values separately in the merged data cube: An overlap resolver is *not needed*, but for each data cube you need to add a new dimension using ``add_dimension()``. The new dimensions must be equal, except that the labels for the new dimensions must differ by name. The merged data cube has the same dimensions and labels as the original data cubes, plus the dimension added with ``add_dimension()``, which has the two dimension labels after the merge.\n 2. Combine the overlapping values into a single value: An overlap resolver is *required* to resolve the overlap for all pixels. The merged data cube has the same dimensions and labels as the original data cubes, but all pixel values have been processed by the overlap resolver.\n4. Merging a data cube with dimensions (`x`, `y`, `t` / `bands`) or (`x`, `y`, `t`, `bands`) with another cube with dimensions (`x`, `y`) will join on the `x`, `y` dimension, so the lower dimension cube is merged with each time step and band available in the higher dimensional cube. This can for instance be used to apply a digital elevation model to a spatio-temporal data cube. An overlap resolver is *required* to resolve the overlap for all pixels.\n\nAfter the merge, the dimensions with a natural/inherent label order (with a reference system this is each spatial and temporal dimensions) still have all dimension labels sorted. For other dimensions where there is no inherent order, including bands, the dimension labels keep the order in which they are present in the original data cubes and the dimension labels of `cube2` are appended to the dimension labels of `cube1`.", "categories": [ "cubes" ], From b65bc5ca8f1f6d600a9116b9463755b1ac578a94 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 18 Jun 2021 11:52:53 +0200 Subject: [PATCH 098/109] `merge_cubes`: Clarified the fourth example even more. 
#266 --- merge_cubes.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/merge_cubes.json b/merge_cubes.json index 85a431d2..dea0b068 100644 --- a/merge_cubes.json +++ b/merge_cubes.json @@ -1,7 +1,7 @@ { "id": "merge_cubes", "summary": "Merge two data cubes", - "description": "The data cubes have to be compatible. A merge operation without overlap should be reversible with (a set of) filter operations for each of the two cubes. The process performs the join on overlapping dimensions, with the same name and type.\n\nAn overlapping dimension has the same name, type, reference system and resolution in both dimensions, but can have different labels. One of the dimensions can have different labels, for all other dimensions the labels must be equal. If data overlaps, the parameter `overlap_resolver` must be specified to resolve the overlap.\n\n**Examples for merging two data cubes:**\n\n1. Data cubes with the dimensions (`x`, `y`, `t`, `bands`) have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first cube and `B3` and `B4`. An overlap resolver is *not needed*. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has four dimension labels: `B1`, `B2`, `B3`, `B4`.\n2. Data cubes with the dimensions (`x`, `y`, `t`, `bands`) have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first data cube and `B2` and `B3` for the second. An overlap resolver is *required* to resolve overlap in band `B2`. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has three dimension labels: `B1`, `B2`, `B3`.\n3. Data cubes with the dimensions (`x`, `y`, `t`) have the same dimension labels in `x`, `y` and `t`. There are two options:\n 1. Keep the overlapping values separately in the merged data cube: An overlap resolver is *not needed*, but for each data cube you need to add a new dimension using ``add_dimension()``. The new dimensions must be equal, except that the labels for the new dimensions must differ by name. The merged data cube has the same dimensions and labels as the original data cubes, plus the dimension added with ``add_dimension()``, which has the two dimension labels after the merge.\n 2. Combine the overlapping values into a single value: An overlap resolver is *required* to resolve the overlap for all pixels. The merged data cube has the same dimensions and labels as the original data cubes, but all pixel values have been processed by the overlap resolver.\n4. Merging a data cube with dimensions (`x`, `y`, `t` / `bands`) or (`x`, `y`, `t`, `bands`) with another cube with dimensions (`x`, `y`) will join on the `x`, `y` dimension, so the lower dimension cube is merged with each time step and band available in the higher dimensional cube. This can for instance be used to apply a digital elevation model to a spatio-temporal data cube. An overlap resolver is *required* to resolve the overlap for all pixels.\n\nAfter the merge, the dimensions with a natural/inherent label order (with a reference system this is each spatial and temporal dimensions) still have all dimension labels sorted. 
For other dimensions where there is no inherent order, including bands, the dimension labels keep the order in which they are present in the original data cubes and the dimension labels of `cube2` are appended to the dimension labels of `cube1`.", + "description": "The data cubes have to be compatible. A merge operation without overlap should be reversible with (a set of) filter operations for each of the two cubes. The process performs the join on overlapping dimensions, with the same name and type.\n\nAn overlapping dimension has the same name, type, reference system and resolution in both dimensions, but can have different labels. One of the dimensions can have different labels, for all other dimensions the labels must be equal. If data overlaps, the parameter `overlap_resolver` must be specified to resolve the overlap.\n\n**Examples for merging two data cubes:**\n\n1. Data cubes with the dimensions (`x`, `y`, `t`, `bands`) have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first cube and `B3` and `B4`. An overlap resolver is *not needed*. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has four dimension labels: `B1`, `B2`, `B3`, `B4`.\n2. Data cubes with the dimensions (`x`, `y`, `t`, `bands`) have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first data cube and `B2` and `B3` for the second. An overlap resolver is *required* to resolve overlap in band `B2`. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has three dimension labels: `B1`, `B2`, `B3`.\n3. Data cubes with the dimensions (`x`, `y`, `t`) have the same dimension labels in `x`, `y` and `t`. There are two options:\n 1. Keep the overlapping values separately in the merged data cube: An overlap resolver is *not needed*, but for each data cube you need to add a new dimension using ``add_dimension()``. The new dimensions must be equal, except that the labels for the new dimensions must differ by name. The merged data cube has the same dimensions and labels as the original data cubes, plus the dimension added with ``add_dimension()``, which has the two dimension labels after the merge.\n 2. Combine the overlapping values into a single value: An overlap resolver is *required* to resolve the overlap for all pixels. The merged data cube has the same dimensions and labels as the original data cubes, but all pixel values have been processed by the overlap resolver.\n4. A data cube with dimensions (`x`, `y`, `t` / `bands`) or (`x`, `y`, `t`, `bands`) and another data cube with dimensions (`x`, `y`) have the same dimension labels in x and y. Merging them will join dimensions `x` and `y`, so the lower dimension cube is merged with each time step and band available in the higher dimensional cube. This can for instance be used to apply a digital elevation model to a spatio-temporal data cube. An overlap resolver is *required* to resolve the overlap for all pixels.\n\nAfter the merge, the dimensions with a natural/inherent label order (with a reference system this is each spatial and temporal dimensions) still have all dimension labels sorted. 
For other dimensions where there is no inherent order, including bands, the dimension labels keep the order in which they are present in the original data cubes and the dimension labels of `cube2` are appended to the dimension labels of `cube1`.", "categories": [ "cubes" ], From 90a24404b8ba2596f3e0141a0d10a994971336aa Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 18 Jun 2021 13:15:15 +0200 Subject: [PATCH 099/109] Fix schema for array_create --- proposals/array_create.json | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/proposals/array_create.json b/proposals/array_create.json index 2b05005f..71d7003d 100644 --- a/proposals/array_create.json +++ b/proposals/array_create.json @@ -13,7 +13,10 @@ "optional": true, "default": [], "schema": { - "description": "Any data type is allowed." + "type": "array", + "items": { + "description": "Any data type is allowed." + } } }, { From 7a506ef068863541d0ae10ee4c18a3bea680a564 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 18 Jun 2021 13:15:47 +0200 Subject: [PATCH 100/109] Replace oneOf with anyOf for consistency --- apply_neighborhood.json | 4 ++-- meta/subtype-schemas.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apply_neighborhood.json b/apply_neighborhood.json index d168c2c9..05fb117a 100644 --- a/apply_neighborhood.json +++ b/apply_neighborhood.json @@ -66,7 +66,7 @@ }, "value": { "default": null, - "oneOf": [ + "anyOf": [ { "type": "null", "title": "All values" @@ -115,7 +115,7 @@ }, "value": { "default": null, - "oneOf": [ + "anyOf": [ { "type": "null", "title": "No values" diff --git a/meta/subtype-schemas.json b/meta/subtype-schemas.json index 41deaaf2..a08c11a7 100644 --- a/meta/subtype-schemas.json +++ b/meta/subtype-schemas.json @@ -84,7 +84,7 @@ }, "value": { "default": null, - "oneOf": [ + "anyOf": [ { "type": "null" }, From a443ff31cebc0225e04f8a3c5fd48337cb97201b Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 18 Jun 2021 13:17:34 +0200 Subject: [PATCH 101/109] Make schema for (array/dimension) labels easier to digest --- aggregate_temporal.json | 10 +++------- array_labels.json | 10 +++------- dimension_labels.json | 10 +++------- rename_labels.json | 20 ++++++-------------- 4 files changed, 15 insertions(+), 35 deletions(-) diff --git a/aggregate_temporal.json b/aggregate_temporal.json index 480ffd98..b68b366c 100644 --- a/aggregate_temporal.json +++ b/aggregate_temporal.json @@ -127,13 +127,9 @@ "schema": { "type": "array", "items": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "string" - } + "type": [ + "number", + "string" ] } }, diff --git a/array_labels.json b/array_labels.json index 1201ca92..52dcad0e 100644 --- a/array_labels.json +++ b/array_labels.json @@ -23,13 +23,9 @@ "schema": { "type": "array", "items": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "string" - } + "type": [ + "number", + "string" ] } } diff --git a/dimension_labels.json b/dimension_labels.json index 8f01c577..37a5908d 100644 --- a/dimension_labels.json +++ b/dimension_labels.json @@ -27,13 +27,9 @@ "schema": { "type": "array", "items": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "string" - } + "type": [ + "number", + "string" ] } } diff --git a/rename_labels.json b/rename_labels.json index 91610104..b033ba4f 100644 --- a/rename_labels.json +++ b/rename_labels.json @@ -27,13 +27,9 @@ "schema": { "type": "array", "items": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "string" - } + "type": [ + "number", + "string" ] } } @@ -44,13
+40,9 @@ "schema": { "type": "array", "items": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "string" - } + "type": [ + "number", + "string" ] } }, From 935cb293d406d1ce5da220d2f38e4eb73003654b Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 18 Jun 2021 13:24:12 +0200 Subject: [PATCH 102/109] Add array_create_labels process --- CHANGELOG.md | 1 + proposals/array_create_labels.json | 46 ++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) create mode 100644 proposals/array_create_labels.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 03cff55f..0568eb14 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `array_append` - `array_concat` - `array_create` + - `array_create_labels` - `array_find_label` - `array_interpolate_linear` [#173](https://github.com/Open-EO/openeo-processes/issues/173) - `array_modify` diff --git a/proposals/array_create_labels.json b/proposals/array_create_labels.json new file mode 100644 index 00000000..719e85d9 --- /dev/null +++ b/proposals/array_create_labels.json @@ -0,0 +1,46 @@ +{ + "id": "array_create_labels", + "summary": "Create a labeled array", + "description": "Creates a new labeled array by using the values from the `labels` array as labels and the values from the `data` array as the corresponding values.\n\nThe exception `ArrayLengthMismatch` is thrown if the number of values in the given arrays doesn't match exactly.\n\nThe primary use case here is to be able to transmit labeled arrays from the client to the server, as JSON doesn't support this data type.", + "categories": [ + "arrays" + ], + "experimental": true, + "parameters": [ + { + "name": "data", + "description": "An array of values to be used.", + "schema": { + "description": "Any data type is allowed." + } + }, + { + "name": "labels", + "description": "An array of labels to be used.", + "schema": { + "type": "array", + "items": { + "type": [ + "number", + "string" + ] + } + } + } + ], + "returns": { + "description": "The newly created labeled array.", + "schema": { + "type": "array", + "subtype": "labeled-array", + "items": { + "description": "Any data type is allowed." + } + } + }, + "exceptions": { + "ArrayLengthMismatch": { + "message": "The number of values in the parameters `data` and `labels` doesn't match." + } + } +} \ No newline at end of file From cfd311c064a48d5c32414707b1b6bdf5a830afd6 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 22 Jun 2021 16:31:33 +0200 Subject: [PATCH 103/109] Fix nan --- proposals/nan.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/proposals/nan.json b/proposals/nan.json index 6a3cc01c..ae19dd6e 100644 --- a/proposals/nan.json +++ b/proposals/nan.json @@ -1,16 +1,16 @@ { "id": "nan", "summary": "Not a Number (NaN)", - "description": "NaN (not a number) is a symbolic floating-point representation which is neither a signed infinity nor a finite number.", + "description": "`NaN` (not a number) is a symbolic floating-point representation which is neither a signed infinity nor a finite number.", "categories": [ "math > constants" ], "experimental": true, "parameters": [], "returns": { - "description": "Returns NaN.", + "description": "Returns `NaN`.", "schema": { - "description": "Returns NaN.\n\n*SON Schema can't represent NaN, thus a schema can't be specified.*" + "description": "JSON Schema can't represent `NaN` and thus a schema can't be specified." } }, "links": [
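Before the rename that follows, it may help to see the new process in use. A hypothetical call — the values are invented for illustration — builds a labeled array from two plain arrays:

```json
{
  "create": {
    "process_id": "array_create_labels",
    "arguments": {
      "data": [0.25, 0.5, 0.75],
      "labels": ["B1", "B2", "B3"]
    },
    "result": true
  }
}
```

If `data` and `labels` had different lengths, the `ArrayLengthMismatch` exception defined above would be raised. The next patch renames the process to `array_create_labeled`.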
} }, "links": [ From 015cefd9242728e257dca21701e6d68cc683500a Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Wed, 23 Jun 2021 15:16:53 +0200 Subject: [PATCH 104/109] Rename array_create_labels to array_create_labeled --- CHANGELOG.md | 2 +- .../{array_create_labels.json => array_create_labeled.json} | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename proposals/{array_create_labels.json => array_create_labeled.json} (97%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0568eb14..fece48f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,7 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `array_append` - `array_concat` - `array_create` - - `array_create_labels` + - `array_create_labeled` - `array_find_label` - `array_interpolate_linear` [#173](https://github.com/Open-EO/openeo-processes/issues/173) - `array_modify` diff --git a/proposals/array_create_labels.json b/proposals/array_create_labeled.json similarity index 97% rename from proposals/array_create_labels.json rename to proposals/array_create_labeled.json index 719e85d9..8b5d2034 100644 --- a/proposals/array_create_labels.json +++ b/proposals/array_create_labeled.json @@ -1,5 +1,5 @@ { - "id": "array_create_labels", + "id": "array_create_labeled", "summary": "Create a labeled array", "description": "Creates a new labeled array by using the values from the `labels` array as labels and the values from the `data` array as the corresponding values.\n\nThe exception `ArrayLengthMismatch` is thrown, if the number of the values in the given arrays don't match exactly.\n\nThe primary use case here is to be able to transmit labeled arrays from the client to the server as JSON doesn't support this data type.", "categories": [ From 5e4b5edadd8329e6f2bd3df241c44c4b08fd5deb Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 25 Jun 2021 12:08:47 +0200 Subject: [PATCH 105/109] Change resample_cube_temporal, align with GDAL and improve resampling descriptions in general (#244) --- CHANGELOG.md | 2 + proposals/resample_cube_temporal.json | 63 +++++++-------------------- resample_cube_spatial.json | 14 +++--- resample_spatial.json | 19 +++++--- tests/.words | 5 +++ 5 files changed, 43 insertions(+), 60 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b9ca0bed..1e055ccf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Exception messages have been aligned always use ` instead of '. Tooling could render it with CommonMark. - `load_collection` and `mask_polygon`: Also support multi polygons instead of just polygons. [#237](https://github.com/Open-EO/openeo-processes/issues/237) - `run_udf` and `run_udf_externally`: Specify specific (extensible) protocols for UDF URIs. +- `resample_cube_spatial` and `resample_spatial`: Aligned with GDAL and added `rms` and `sum` options to methods. Also added better descriptions. +- `resample_cube_temporal`: Process has been simplified and only offers the nearest neighbor method now. The `process` parameter has been removed, the `dimension` parameter was made less restrictive, the parameter `valid_within` was added. [#194](https://github.com/Open-EO/openeo-processes/issues/194) ### Deprecated - `GeometryCollection`s are discouraged in all relevant processes. 
diff --git a/proposals/resample_cube_temporal.json b/proposals/resample_cube_temporal.json index ac8414a1..2bd38dde 100644 --- a/proposals/resample_cube_temporal.json +++ b/proposals/resample_cube_temporal.json @@ -1,7 +1,7 @@ { "id": "resample_cube_temporal", - "summary": "Resample a temporal dimension to match a target data cube", - "description": "Resamples the given temporal dimension from a source data cube to align with the corresponding dimension of the given target data cube. Returns a new data cube with the resampled dimension.\n\nIf the dimension is not set or is set to `null`, the data cube is expected to have one temporal dimension only.", + "summary": "Resample temporal dimensions to match a target data cube", + "description": "Resamples one or more given temporal dimensions from a source data cube to align with the corresponding dimensions of the given target data cube using the nearest neighbor method. Returns a new data cube with the resampled dimensions.\n\nBy default, this process simply takes the nearest neighbor independent of the value (including values such as no-data / `null`). Depending on the data cubes this may lead to values being assigned to two target timestamps. To only consider valid values in a specific range around the target timestamps, use the parameter `valid_within`.\n\nThe rare case of ties is resolved by choosing the earlier timestamps.", "categories": [ "cubes", "aggregate & resample" @@ -10,7 +10,7 @@ "parameters": [ { "name": "data", - "description": "A data cube.", + "description": "A data cube with one or more temporal dimensions.", "schema": { "type": "object", "subtype": "raster-cube" @@ -24,45 +24,9 @@ "subtype": "raster-cube" } }, - { - "name": "method", - "description": "A resampling method to be applied, could be a reducer for downsampling or other methods for upsampling. A reducer is a single process such as ``mean()`` or a set of processes, which computes a single value for a list of values, see the category 'reducer' for such processes.", - "schema": { - "type": "object", - "subtype": "process-graph", - "parameters": [ - { - "name": "data", - "description": "A labeled array with elements of any type.", - "schema": { - "type": "array", - "subtype": "labeled-array", - "items": { - "description": "Any data type." - } - } - }, - { - "name": "context", - "description": "Additional data passed by the user.", - "schema": { - "description": "Any data type." - }, - "optional": true, - "default": null - } - ], - "returns": { - "description": "The value to be set in the resampled data cube.", - "schema": { - "description": "Any data type." - } - } - } - }, { "name": "dimension", - "description": "The name of the temporal dimension to resample, which must exist with this name in both data cubes. If the dimension is not set or is set to `null`, the data cube is expected to only have one temporal dimension. Fails with a `TooManyDimensions` exception if it has more dimensions. Fails with a `DimensionNotAvailable` exception if the specified dimension does not exist.", + "description": "The name of the temporal dimension to resample, which must exist with this name in both data cubes. 
If the dimension is not set or is set to `null`, the process resamples all temporal dimensions that exist with the same names in both data cubes.\n\nThe following exceptions may occur:\n\n* A dimension is given, but it does not exist in any of the data cubes: `DimensionNotAvailable`\n* A dimension is given, but it is not temporal in one of the data cubes: `DimensionMismatch`\n* No specific dimension name is given and there are no temporal dimensions with the same name in the data: `DimensionMismatch`", "schema": { "type": [ "string", "null" ], @@ -73,25 +37,28 @@ "optional": true }, { - "name": "context", - "description": "Additional data to be passed to the process specified for the parameter `method`.", + "name": "valid_within", + "description": "Setting this parameter to a numerical value allows the process to search for valid values within the given period of days before and after the target timestamps. Valid values are determined based on the function ``is_valid()``. For example, the limit of `7` for the target timestamp `2020-01-15 12:00:00` looks for a nearest neighbor after `2020-01-08 12:00:00` and before `2020-01-22 12:00:00`. If no valid value is found within the given period, the value will be set to no-data (`null`).", "schema": { - "description": "Any data type." + "type": [ + "number", + "null" + ] }, - "optional": true, - "default": null + "default": null, + "optional": true } ], "returns": { - "description": "A raster data cube with the same dimensions and the same dimension properties (name, type, labels, reference system and resolution) for all non-temporal dimensions. For the temporal dimension, the name and type remain unchanged, but the reference system changes and the labels and resolution may change.", + "description": "A raster data cube with the same dimensions and the same dimension properties (name, type, labels, reference system and resolution) for all non-temporal dimensions. For the temporal dimension, the name and type remain unchanged, but the dimension labels, resolution and reference system may change.", "schema": { "type": "object", "subtype": "raster-cube" } }, "exceptions": { - "TooManyDimensions": { - "message": "The number of temporal dimensions must be reduced to one for `resample_cube_temporal`." + "DimensionMismatch": { + "message": "The temporal dimensions for resampling don't match." }, "DimensionNotAvailable": { "message": "A dimension with the specified name does not exist."
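The two spatial resampling processes in the following diffs share the reworked `method` list. A hedged sketch of a typical downsampling call — assuming the process's usual `data`/`target` parameters; the node references are hypothetical:

```json
{
  "resample": {
    "process_id": "resample_cube_spatial",
    "arguments": {
      "data": {"from_node": "load_fine"},
      "target": {"from_node": "load_coarse"},
      "method": "average"
    },
    "result": true
  }
}
```

Here `average` computes the weighted mean of all valid source pixels per target pixel, which is usually preferable to the default `near` when downsampling continuous data.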
diff --git a/resample_cube_spatial.json b/resample_cube_spatial.json index a038b82d..54a5f801 100644 --- a/resample_cube_spatial.json +++ b/resample_cube_spatial.json @@ -25,22 +25,24 @@ }, { "name": "method", - "description": "Resampling method. Methods are inspired by GDAL, see [`gdalwarp`](https://www.gdal.org/gdalwarp.html) for more information.", + "description": "Resampling method to use. The following options are available and are meant to align with [`gdalwarp`](https://gdal.org/programs/gdalwarp.html#cmdoption-gdalwarp-r):\n\n* `average`: average (mean) resampling, computes the weighted average of all valid pixels\n* `bilinear`: bilinear resampling\n* `cubic`: cubic resampling\n* `cubicspline`: cubic spline resampling\n* `lanczos`: Lanczos windowed sinc resampling\n* `max`: maximum resampling, selects the maximum value from all valid pixels\n* `med`: median resampling, selects the median value of all valid pixels\n* `min`: minimum resampling, selects the minimum value from all valid pixels\n* `mode`: mode resampling, selects the value which appears most often of all the sampled points\n* `near`: nearest neighbour resampling (default)\n* `q1`: first quartile resampling, selects the first quartile value of all valid pixels\n* `q3`: third quartile resampling, selects the third quartile value of all valid pixels\n* `rms`: root mean square (quadratic mean) of all valid pixels\n* `sum`: computes the weighted sum of all valid pixels\n\nValid pixels are determined based on the function ``is_valid()``.", "schema": { "type": "string", "enum": [ - "near", + "average", "bilinear", "cubic", "cubicspline", "lanczos", - "average", - "mode", "max", - "min", "med", + "min", + "mode", + "near", "q1", - "q3" + "q3", + "rms", + "sum" ] }, "default": "near", diff --git a/resample_spatial.json b/resample_spatial.json index 5f8a1f4d..91d6bc5f 100644 --- a/resample_spatial.json +++ b/resample_spatial.json @@ -72,22 +72,24 @@ }, { "name": "method", - "description": "Resampling method. Methods are inspired by GDAL, see [`gdalwarp`](https://www.gdal.org/gdalwarp.html) for more information.", + "description": "Resampling method to use. The following options are available and are meant to align with [`gdalwarp`](https://gdal.org/programs/gdalwarp.html#cmdoption-gdalwarp-r):\n\n* `average`: average (mean) resampling, computes the weighted average of all valid pixels\n* `bilinear`: bilinear resampling\n* `cubic`: cubic resampling\n* `cubicspline`: cubic spline resampling\n* `lanczos`: Lanczos windowed sinc resampling\n* `max`: maximum resampling, selects the maximum value from all valid pixels\n* `med`: median resampling, selects the median value of all valid pixels\n* `min`: minimum resampling, selects the minimum value from all valid pixels\n* `mode`: mode resampling, selects the value which appears most often of all the sampled points\n* `near`: nearest neighbour resampling (default)\n* `q1`: first quartile resampling, selects the first quartile value of all valid pixels\n* `q3`: third quartile resampling, selects the third quartile value of all valid pixels\n* `rms`: root mean square (quadratic mean) of all valid pixels\n* `sum`: computes the weighted sum of all valid pixels\n\nValid pixels are determined based on the function ``is_valid()``.", "schema": { "type": "string", "enum": [ - "near", + "average", "bilinear", "cubic", "cubicspline", "lanczos", - "average", - "mode", "max", - "min", "med", + "min", + "mode", + "near", "q1", - "q3" + "q3", + "rms", + "sum" ] }, "default": "near", @@ -136,6 +138,11 @@ "rel": "about", "href": "http://www.epsg.io", "title": "Unofficial EPSG code database" + }, + { + "href": "https://gdal.org/programs/gdalwarp.html#cmdoption-gdalwarp-r", + "rel": "about", + "title": "gdalwarp resampling methods" } ] } \ No newline at end of file diff --git a/tests/.words b/tests/.words index 2c955278..b9fe6130 100644 --- a/tests/.words +++ b/tests/.words @@ -13,6 +13,7 @@ GeoJSON
labeled MathWorld n-ary +neighbor neighborhood neighborhoods openEO @@ -22,6 +23,7 @@ radiometrically reflectances resample resampled +resamples Resamples resampling Sentinel-2 @@ -31,4 +33,7 @@ signum STAC summand UDFs +gdalwarp +Lanczos +sinc interpolants From 768282b211776e0e37cf8e5bb85da14bc79163f3 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 25 Jun 2021 12:16:28 +0200 Subject: [PATCH 106/109] Update Changelog --- CHANGELOG.md | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e055ccf..b935cab8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased / Draft +## [1.1.0] - 2021-07-02 + ### Added - New processes in proposal state - `array_append` @@ -65,7 +67,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `aggregate_temporal` and `aggregate_temporal_period`: Clarified that reducers are also executed for intervals/periods with no data. [#263](https://github.com/Open-EO/openeo-processes/issues/263) - `dimension_labels`: Clarified that the process fails with a `DimensionNotAvailable` exception, if a dimension with the specified name does not exist. -## 1.0.0 - 2020-07-31 +## [1.0.0] - 2020-07-31 ### Added - `subtype-schemas.json`: A list of predefined subtypes is available as JSON Schema; Moved over from openeo-api. @@ -105,7 +107,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - `first`, `last`, `max`, `mean`, `median`, `min`, `sd`, `variance`: Clarify behavior for arrays with `null`-values only. - Clarified (and fixed if necessary) for all processes in the "cubes" category the descriptions for the returned data cube. [#149](https://github.com/Open-EO/openeo-processes/issues/149) -## 1.0.0-rc.1 - 2020-01-31 +## [1.0.0-rc.1] - 2020-01-31 ### Added - Processes: @@ -217,3 +219,13 @@ First version which is separated from the openEO API. Complete rework of all pro ## Legacy versions Older versions of the processes were released as part of the openEO API, see the corresponding changelog for more information. + + +[Unreleased]: +[1.1.0]: +[1.0.0]: +[1.0.0-rc.1]: +[0.4.2]: +[0.4.1]: +[0.4.0]: + From fddbd53ffe35c55c6e3cb263e71dd96080c6aae0 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 25 Jun 2021 13:59:40 +0200 Subject: [PATCH 107/109] Update version numbers --- README.md | 5 +++-- array_apply.json | 4 ++-- array_contains.json | 2 +- array_find.json | 2 +- meta/subtype-schemas.json | 2 +- rename_labels.json | 2 +- tests/docs.html | 2 +- 7 files changed, 10 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 0b6f45cc..e819ce3c 100644 --- a/README.md +++ b/README.md @@ -8,12 +8,13 @@ openEO develops interoperable processes for big Earth observation cloud processi The [master branch](https://github.com/Open-EO/openeo-processes/tree/master) is the 'stable' version of the openEO processes specification. An exception is the [`proposals`](proposals/) folder, which provides experimental new processes currently under discussion. They may still change, but everyone is encouraged to implement them and give feedback. -The latest release is version **1.0.0**. The [draft branch](https://github.com/Open-EO/openeo-processes/tree/draft) is where active development takes place. PRs should be made against the draft branch. +The latest release is version **1.1.0**. 
The [draft branch](https://github.com/Open-EO/openeo-processes/tree/draft) is where active development takes place. PRs should be made against the draft branch. | Version / Branch | Status | openEO API versions | | ------------------------------------------------------------ | ------------------------- | ------------------- | | [unreleased / draft](https://processes.openeo.org/draft) | in development | 1.x.x | -| [**1.0.0** / master](https://processes.openeo.org/1.0.0/) | **latest stable version** | 1.x.x | +| [**1.1.0** / master](https://processes.openeo.org/1.1.0/) | **latest stable version** | 1.x.x | +| [1.0.0](https://processes.openeo.org/1.0.0/) | legacy version | 1.x.x | | [1.0.0 RC1](https://processes.openeo.org/1.0.0-rc.1/) | legacy version | 1.x.x | | [0.4.2](https://processes.openeo.org/0.4.2/) | legacy version | 0.4.x | | [0.4.1](https://processes.openeo.org/0.4.1/) | legacy version | 0.4.x | diff --git a/array_apply.json b/array_apply.json index 2819ff7d..bea8a744 100644 --- a/array_apply.json +++ b/array_apply.json @@ -96,13 +96,13 @@ { "rel": "example", "type": "application/json", - "href": "https://processes.openeo.org/1.0.0/examples/array_find_nodata.json", + "href": "https://processes.openeo.org/1.1.0/examples/array_find_nodata.json", "title": "Find no-data values in arrays" }, { "rel": "example", "type": "application/json", - "href": "https://processes.openeo.org/1.0.0/examples/array_contains_nodata.json", + "href": "https://processes.openeo.org/1.1.0/examples/array_contains_nodata.json", "title": "Check for no-data values in arrays" } ] diff --git a/array_contains.json b/array_contains.json index df5b425a..cabfcf23 100644 --- a/array_contains.json +++ b/array_contains.json @@ -133,7 +133,7 @@ { "rel": "example", "type": "application/json", - "href": "https://processes.openeo.org/1.0.0/examples/array_contains_nodata.json", + "href": "https://processes.openeo.org/1.1.0/examples/array_contains_nodata.json", "title": "Check for no-data values in arrays" } ], diff --git a/array_find.json b/array_find.json index 40c74563..d60a450d 100644 --- a/array_find.json +++ b/array_find.json @@ -139,7 +139,7 @@ { "rel": "example", "type": "application/json", - "href": "https://processes.openeo.org/1.0.0/examples/array_find_nodata.json", + "href": "https://processes.openeo.org/1.1.0/examples/array_find_nodata.json", "title": "Find no-data values in arrays" } ] diff --git a/meta/subtype-schemas.json b/meta/subtype-schemas.json index 4fb02e17..fbbdb867 100644 --- a/meta/subtype-schemas.json +++ b/meta/subtype-schemas.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "http://processes.openeo.org/1.0.0/meta/subtype-schemas.json", + "$id": "http://processes.openeo.org/1.1.0/meta/subtype-schemas.json", "title": "Subtype Schemas", "description": "This file defines the schemas for subtypes we define for openEO processes.", "definitions": { diff --git a/rename_labels.json b/rename_labels.json index e91c1afd..6ed32f6f 100644 --- a/rename_labels.json +++ b/rename_labels.json @@ -97,7 +97,7 @@ { "rel": "example", "type": "application/json", - "href": "https://processes.openeo.org/1.0.0/examples/rename-enumerated-labels.json", + "href": "https://processes.openeo.org/1.1.0/examples/rename-enumerated-labels.json", "title": "Rename enumerated labels" } ] diff --git a/tests/docs.html b/tests/docs.html index 162d28fb..797aec96 100644 --- a/tests/docs.html +++ b/tests/docs.html @@ -114,7 +114,7 @@ document: 'processes.json', categorize: true, apiVersion: '1.0.0', - 
title: 'openEO processes (draft)', + title: 'openEO processes (1.1.0)', notice: '**Note:** This is the list of all processes specified by the openEO project. Back-ends implement a varying set of processes. Thus, the processes you can use at a specific back-end may derive from the specification, may include non-standardized processes and may not implement all processes listed here. Please check each back-end individually for the processes they support. The client libraries usually have a function called `listProcesses` or `list_processes` for that.' } }) From 177a053ee17ea60ab84cfe8a2d62721dc26f7f6f Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Fri, 25 Jun 2021 15:44:25 +0200 Subject: [PATCH 108/109] Fix formatting issue in merge_cubes --- merge_cubes.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/merge_cubes.json b/merge_cubes.json index dea0b068..28b4803b 100644 --- a/merge_cubes.json +++ b/merge_cubes.json @@ -1,7 +1,7 @@ { "id": "merge_cubes", "summary": "Merge two data cubes", - "description": "The data cubes have to be compatible. A merge operation without overlap should be reversible with (a set of) filter operations for each of the two cubes. The process performs the join on overlapping dimensions, with the same name and type.\n\nAn overlapping dimension has the same name, type, reference system and resolution in both dimensions, but can have different labels. One of the dimensions can have different labels, for all other dimensions the labels must be equal. If data overlaps, the parameter `overlap_resolver` must be specified to resolve the overlap.\n\n**Examples for merging two data cubes:**\n\n1. Data cubes with the dimensions (`x`, `y`, `t`, `bands`) have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first cube and `B3` and `B4`. An overlap resolver is *not needed*. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has four dimension labels: `B1`, `B2`, `B3`, `B4`.\n2. Data cubes with the dimensions (`x`, `y`, `t`, `bands`) have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first data cube and `B2` and `B3` for the second. An overlap resolver is *required* to resolve overlap in band `B2`. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has three dimension labels: `B1`, `B2`, `B3`.\n3. Data cubes with the dimensions (`x`, `y`, `t`) have the same dimension labels in `x`, `y` and `t`. There are two options:\n 1. Keep the overlapping values separately in the merged data cube: An overlap resolver is *not needed*, but for each data cube you need to add a new dimension using ``add_dimension()``. The new dimensions must be equal, except that the labels for the new dimensions must differ by name. The merged data cube has the same dimensions and labels as the original data cubes, plus the dimension added with ``add_dimension()``, which has the two dimension labels after the merge.\n 2. Combine the overlapping values into a single value: An overlap resolver is *required* to resolve the overlap for all pixels. The merged data cube has the same dimensions and labels as the original data cubes, but all pixel values have been processed by the overlap resolver.\n4. A data cube with dimensions (`x`, `y`, `t` / `bands`) or (`x`, `y`, `t`, `bands`) and another data cube with dimensions (`x`, `y`) have the same dimension labels in x and y. 
Merging them will join dimensions `x` and `y`, so the lower dimension cube is merged with each time step and band available in the higher dimensional cube. This can for instance be used to apply a digital elevation model to a spatio-temporal data cube. An overlap resolver is *required* to resolve the overlap for all pixels.\n\nAfter the merge, the dimensions with a natural/inherent label order (with a reference system this is each spatial and temporal dimensions) still have all dimension labels sorted. For other dimensions where there is no inherent order, including bands, the dimension labels keep the order in which they are present in the original data cubes and the dimension labels of `cube2` are appended to the dimension labels of `cube1`.", + "description": "The data cubes have to be compatible. A merge operation without overlap should be reversible with (a set of) filter operations for each of the two cubes. The process performs the join on overlapping dimensions, with the same name and type.\n\nAn overlapping dimension has the same name, type, reference system and resolution in both dimensions, but can have different labels. One of the dimensions can have different labels, for all other dimensions the labels must be equal. If data overlaps, the parameter `overlap_resolver` must be specified to resolve the overlap.\n\n**Examples for merging two data cubes:**\n\n1. Data cubes with the dimensions (`x`, `y`, `t`, `bands`) have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first cube and `B3` and `B4`. An overlap resolver is *not needed*. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has four dimension labels: `B1`, `B2`, `B3`, `B4`.\n2. Data cubes with the dimensions (`x`, `y`, `t`, `bands`) have the same dimension labels in `x`, `y` and `t`, but the labels for the dimension `bands` are `B1` and `B2` for the first data cube and `B2` and `B3` for the second. An overlap resolver is *required* to resolve overlap in band `B2`. The merged data cube has the dimensions `x`, `y`, `t` and `bands` and the dimension `bands` has three dimension labels: `B1`, `B2`, `B3`.\n3. Data cubes with the dimensions (`x`, `y`, `t`) have the same dimension labels in `x`, `y` and `t`. There are two options:\n 1. Keep the overlapping values separately in the merged data cube: An overlap resolver is *not needed*, but for each data cube you need to add a new dimension using ``add_dimension()``. The new dimensions must be equal, except that the labels for the new dimensions must differ by name. The merged data cube has the same dimensions and labels as the original data cubes, plus the dimension added with ``add_dimension()``, which has the two dimension labels after the merge.\n 2. Combine the overlapping values into a single value: An overlap resolver is *required* to resolve the overlap for all pixels. The merged data cube has the same dimensions and labels as the original data cubes, but all pixel values have been processed by the overlap resolver.\n4. A data cube with dimensions (`x`, `y`, `t` / `bands`) or (`x`, `y`, `t`, `bands`) and another data cube with dimensions (`x`, `y`) have the same dimension labels in `x` and `y`. Merging them will join dimensions `x` and `y`, so the lower dimension cube is merged with each time step and band available in the higher dimensional cube. This can for instance be used to apply a digital elevation model to a spatio-temporal data cube. 
An overlap resolver is *required* to resolve the overlap for all pixels.\n\nAfter the merge, the dimensions with a natural/inherent label order (this is the case for all spatial and temporal dimensions, which have a reference system) still have all dimension labels sorted. For other dimensions where there is no inherent order, including bands, the dimension labels keep the order in which they are present in the original data cubes and the dimension labels of `cube2` are appended to the dimension labels of `cube1`.", "categories": [ "cubes" ], From f70a550e250100f2f79a990fee2bb429d1a11301 Mon Sep 17 00:00:00 2001 From: Matthias Mohr Date: Tue, 29 Jun 2021 13:10:38 +0200 Subject: [PATCH 109/109] Update changelog date --- CHANGELOG.md | 2 +- tests/docs.html | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b935cab8..fcb4bc3a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased / Draft -## [1.1.0] - 2021-07-02 +## [1.1.0] - 2021-06-29 ### Added - New processes in proposal state diff --git a/tests/docs.html b/tests/docs.html index 797aec96..d7ba767c 100644 --- a/tests/docs.html +++ b/tests/docs.html @@ -113,7 +113,7 @@ props: { document: 'processes.json', categorize: true, - apiVersion: '1.0.0', + apiVersion: '1.1.0', title: 'openEO processes (1.1.0)', notice: '**Note:** This is the list of all processes specified by the openEO project. Back-ends implement a varying set of processes. Thus, the processes you can use at a specific back-end may derive from the specification, may include non-standardized processes and may not implement all processes listed here. Please check each back-end individually for the processes they support. The client libraries usually have a function called `listProcesses` or `list_processes` for that.' }
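To round off the `merge_cubes` clarifications made throughout this series: when both cubes cover the same dimensions and labels (the third example, second option), an `overlap_resolver` child process graph combines each pair of overlapping values. A sketch, assuming the standard binary `add` process and that the resolver's two inputs are exposed as parameters `x` and `y`; a real workflow might average the values instead of summing them:

```json
{
  "merge": {
    "process_id": "merge_cubes",
    "arguments": {
      "cube1": {"from_node": "load1"},
      "cube2": {"from_node": "load2"},
      "overlap_resolver": {
        "process_graph": {
          "resolve": {
            "process_id": "add",
            "arguments": {
              "x": {"from_parameter": "x"},
              "y": {"from_parameter": "y"}
            },
            "result": true
          }
        }
      }
    },
    "result": true
  }
}
```

The resolver returns the single value to keep for each overlapping pair, so every pixel value of the merged cube has been processed by it, as the description above requires.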