diff --git a/.github/workflows/checks.yaml b/.github/workflows/checks.yaml index 4365257..7f5a46c 100644 --- a/.github/workflows/checks.yaml +++ b/.github/workflows/checks.yaml @@ -17,6 +17,8 @@ jobs: uses: actions/setup-python@v4 with: python-version: "3.10" + - name: Pin pip version + run: python -m pip install --upgrade "pip<24.1" - name: Install requirements run: | pip install . @@ -40,12 +42,14 @@ jobs: uses: actions/setup-python@v4 with: python-version: "3.10" + - name: Pin pip version + run: python -m pip install --upgrade "pip<24.1" - name: Install requirements run: | pip install . pip install -r requirements_dev.txt - name: Deploy app - run: docker-compose up -d --build + run: docker compose up -d --build - name: Wait for app startup run: sleep 20 - name: Run integration tests diff --git a/README.md b/README.md index 82b2ad3..0c9bfea 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ environments composed of GA4GH TES nodes. proTES gateway may serve as a crucial component in federated compute networks based on the GA4GH Cloud ecosystem. Its primary purpose is to provide -centralized features to a federated network of independently operated GA4GH TES +centralized features to a federated network of independently operated GA4GH TES instances. As such, it can serve, for example, as a compatibility layer, a load balancer workload distribution layer, a public entry point to an enclave of independent compute nodes, or a means of collecting telemetry. @@ -38,9 +38,9 @@ proof-of-concept examples for different task distribution scenarios: evenly (actually: randomly!) 
distributes workloads across a network of TES endpoints * **Bringing compute to the data**: The - `pro_tes.middleware.task_distribution.distance` plugin selects TES endpoints + `pro_tes.middleware.task_distribution.distance` plugin selects TES endpoints to relay incoming requests to in such a way that the distance the (input) data - of a task has to travel across the network of TES endpoints is minimized. + of a task has to travel across the network of TES endpoints is minimized. ### Implementation notes @@ -76,7 +76,7 @@ Ensure you have the following software installed: ### Prerequisites -Create data directory and required subdiretories +Create data directory and required subdirectories ```bash export PROTES_DATA_DIR=/path/to/data/directory @@ -135,7 +135,7 @@ interactions with the community. ## Versioning -The project adopts the [semantic versioning][semver] scheme for versioning. +The project adopts the [semantic versioning][res-sem-ver] scheme for versioning. Currently the service is in beta stage, so the API may change and even break without further notice. However, once we deem the service stable and "feature complete", the major, minor and patch version will shadow the supported TES @@ -174,22 +174,19 @@ thread in our [Q&A forum][contact-qa], or send us an [email][contact-email]. 
[docs-contributing]: [docs-deploy]: deployment/README.md [docs-license]: LICENSE -[GA4GH TES OpenAPI specification]: [image-protes-overview]: [res-celery]: -[res-connexion]: [res-docker]: [res-docker-compose]: [res-elixir-cloud-aai]: [res-flask]: [res-foca]: [res-ga4gh]: -[res-ga4gh-cloud]: [res-ga4gh-tes]: [res-git]: [res-helm]: [res-kubernetes]: -[res-mondodb]: -[res-ouath2]: +[res-mongodb]: +[res-oauth2]: [res-rabbitmq]: [res-sem-ver]: diff --git a/pro_tes/ga4gh/tes/models.py b/pro_tes/ga4gh/tes/models.py index a9bdb2c..6aa1e92 100644 --- a/pro_tes/ga4gh/tes/models.py +++ b/pro_tes/ga4gh/tes/models.py @@ -38,7 +38,7 @@ class TesCreateTaskResponse(CustomBaseModel): class TesExecutor(CustomBaseModel): image: str = Field( - default=[""], + default="", description=( "Name of the container image. The string will be passed as " " the image\nargument to the containerization run command. " @@ -626,7 +626,7 @@ class TesTask(CustomBaseModel): ) resources: Optional[TesResources] = None executors: list[TesExecutor] = Field( - default=[TesExecutor], + default=[TesExecutor()], description=( "An array of executors to be run. Each of the executors " " will run one\nat a time sequentially. 
Each executor is a" diff --git a/pro_tes/gunicorn.py b/pro_tes/gunicorn.py index 2e8b33b..5656dc3 100644 --- a/pro_tes/gunicorn.py +++ b/pro_tes/gunicorn.py @@ -17,7 +17,7 @@ forwarded_allow_ips = "*" # pylint: disable=invalid-name # Set Gunicorn bind address -bind = f"{app_config.server.host}:{app_config.server.port}" +bind = f"{app_config.server.host}:{app_config.server.port}"  # pylint: disable=invalid-name # Source environment variables for Gunicorn workers raw_env = [ diff --git a/pro_tes/tasks/track_task_progress.py b/pro_tes/tasks/track_task_progress.py index 09b6fa0..3b37852 100644 --- a/pro_tes/tasks/track_task_progress.py +++ b/pro_tes/tasks/track_task_progress.py @@ -20,14 +20,17 @@ # pylint: disable-msg=too-many-locals # pylint: disable=unsubscriptable-object +# pylint: disable=too-many-arguments +# pylint: disable=too-many-positional-arguments +# pylint: disable=unused-argument @celery.task( name="tasks.track_run_progress", bind=True, ignore_result=True, track_started=True, ) -def task__track_task_progress( # pylint: disable=too-many-arguments - self, # pylint: disable=unused-argument +def task__track_task_progress( + self, worker_id: str, remote_host: str, remote_base_path: str, diff --git a/requirements.txt b/requirements.txt index 983ac80..5a79f95 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ celery-types>=0.20.0 connexion>=2.11.2,<3 -foca>=0.12.1 +foca==0.12.1 geopy>=2.2.0 gunicorn>=20.1.0,<21 ip2geotools>=0.1.6