diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..5dfaa00
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,18 @@
+[run]
+branch = True
+
+[report]
+fail_under = 100
+show_missing = True
+omit =
+ google/area120/tables/__init__.py
+exclude_lines =
+ # Re-enable the standard pragma
+ pragma: NO COVER
+ # Ignore debug-only repr
+ def __repr__
+ # Ignore pkg_resources exceptions.
+ # This is added at the module level as a safeguard for if someone
+ # generates the code and tries to run it without pip installing. This
+ # makes it virtually impossible to test properly.
+ except pkg_resources.DistributionNotFound
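For context, a minimal sketch (hypothetical module, not part of this diff) of the source patterns the `exclude_lines` entries above are written to match:

```python
# Sketch only; the distribution name below is assumed.
import pkg_resources

try:
    __version__ = pkg_resources.get_distribution("google-area120-tables").version
except pkg_resources.DistributionNotFound:
    # Matches the `except pkg_resources.DistributionNotFound` pattern:
    # the code was generated but never pip-installed.
    __version__ = "0.0.0"


class Row:
    def __repr__(self):
        # Matches the `def __repr__` pattern (debug-only repr).
        return "Row()"

    def debug_dump(self):  # pragma: NO COVER
        # Matches the re-enabled `pragma: NO COVER` pattern.
        print(repr(self))
```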
diff --git a/.github/header-checker-lint.yml b/.github/header-checker-lint.yml
new file mode 100644
index 0000000..fc281c0
--- /dev/null
+++ b/.github/header-checker-lint.yml
@@ -0,0 +1,15 @@
+{"allowedCopyrightHolders": ["Google LLC"],
+ "allowedLicenses": ["Apache-2.0", "MIT", "BSD-3"],
+ "ignoreFiles": ["**/requirements.txt", "**/requirements-test.txt"],
+ "sourceFileExtensions": [
+ "ts",
+ "js",
+ "java",
+ "sh",
+ "Dockerfile",
+ "yaml",
+ "py",
+ "html",
+ "txt"
+ ]
+}
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index b9daa52..b4243ce 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,8 +50,10 @@ docs.metadata
# Virtual environment
env/
+
+# Test logs
coverage.xml
-sponge_log.xml
+*sponge_log.xml
# System test environment variables.
system_tests/local_test_setup
diff --git a/.kokoro/build.sh b/.kokoro/build.sh
index 6f6d2fa..0c2a445 100755
--- a/.kokoro/build.sh
+++ b/.kokoro/build.sh
@@ -15,7 +15,11 @@
set -eo pipefail
-cd github/python-area120-tables
+if [[ -z "${PROJECT_ROOT:-}" ]]; then
+ PROJECT_ROOT="github/python-area120-tables"
+fi
+
+cd "${PROJECT_ROOT}"
# Disable buffering, so that the logs stream through.
export PYTHONUNBUFFERED=1
@@ -30,16 +34,26 @@ export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json
export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json")
# Remove old nox
-python3.6 -m pip uninstall --yes --quiet nox-automation
+python3 -m pip uninstall --yes --quiet nox-automation
# Install nox
-python3.6 -m pip install --upgrade --quiet nox
-python3.6 -m nox --version
+python3 -m pip install --upgrade --quiet nox
+python3 -m nox --version
+
+# If this is a continuous build, send the test log to the FlakyBot.
+# See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot.
+if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]]; then
+ cleanup() {
+    chmod +x "${KOKORO_GFILE_DIR}/linux_amd64/flakybot"
+    "${KOKORO_GFILE_DIR}/linux_amd64/flakybot"
+ }
+ trap cleanup EXIT HUP
+fi
# If NOX_SESSION is set, it only runs the specified session,
# otherwise run all the sessions.
if [[ -n "${NOX_SESSION:-}" ]]; then
- python3.6 -m nox -s "${NOX_SESSION:-}"
+ python3 -m nox -s "${NOX_SESSION:-}"
else
- python3.6 -m nox
+ python3 -m nox
fi
diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg
index 1118107..40a8e65 100644
--- a/.kokoro/docs/docs-presubmit.cfg
+++ b/.kokoro/docs/docs-presubmit.cfg
@@ -15,3 +15,14 @@ env_vars: {
key: "TRAMPOLINE_IMAGE_UPLOAD"
value: "false"
}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-area120-tables/.kokoro/build.sh"
+}
+
+# Only run this nox session.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "docs docfx"
+}
diff --git a/.kokoro/samples/python3.6/periodic-head.cfg b/.kokoro/samples/python3.6/periodic-head.cfg
new file mode 100644
index 0000000..f9cfcd3
--- /dev/null
+++ b/.kokoro/samples/python3.6/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-area120-tables/.kokoro/test-samples-against-head.sh"
+}
diff --git a/.kokoro/samples/python3.7/periodic-head.cfg b/.kokoro/samples/python3.7/periodic-head.cfg
new file mode 100644
index 0000000..f9cfcd3
--- /dev/null
+++ b/.kokoro/samples/python3.7/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-area120-tables/.kokoro/test-samples-against-head.sh"
+}
diff --git a/.kokoro/samples/python3.8/periodic-head.cfg b/.kokoro/samples/python3.8/periodic-head.cfg
new file mode 100644
index 0000000..f9cfcd3
--- /dev/null
+++ b/.kokoro/samples/python3.8/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-area120-tables/.kokoro/test-samples-against-head.sh"
+}
diff --git a/.kokoro/test-samples-against-head.sh b/.kokoro/test-samples-against-head.sh
new file mode 100755
index 0000000..48c3a10
--- /dev/null
+++ b/.kokoro/test-samples-against-head.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A customized test runner for samples.
+#
+# For periodic builds, you can specify this file for testing against head.
+
+# `-e` enables the script to automatically fail when a command fails
+# `-o pipefail` sets the exit code to that of the rightmost command to exit with a non-zero status
+set -eo pipefail
+# Enables `**` to include files nested inside sub-folders
+shopt -s globstar
+
+cd github/python-area120-tables
+
+exec .kokoro/test-samples-impl.sh
diff --git a/.kokoro/test-samples-impl.sh b/.kokoro/test-samples-impl.sh
new file mode 100755
index 0000000..cf5de74
--- /dev/null
+++ b/.kokoro/test-samples-impl.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# `-e` enables the script to automatically fail when a command fails
+# `-o pipefail` sets the exit code to that of the rightmost command to exit with a non-zero status
+set -eo pipefail
+# Enables `**` to include files nested inside sub-folders
+shopt -s globstar
+
+# Exit early if samples directory doesn't exist
+if [ ! -d "./samples" ]; then
+ echo "No tests run. './samples' not found"
+ exit 0
+fi
+
+# Disable buffering, so that the logs stream through.
+export PYTHONUNBUFFERED=1
+
+# Debug: show build environment
+env | grep KOKORO
+
+# Install nox
+python3.6 -m pip install --upgrade --quiet nox
+
+# Use the secrets accessor service account to get secrets
+if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then
+ gcloud auth activate-service-account \
+ --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \
+ --project="cloud-devrel-kokoro-resources"
+fi
+
+# This script will create 3 files:
+# - testing/test-env.sh
+# - testing/service-account.json
+# - testing/client-secrets.json
+./scripts/decrypt-secrets.sh
+
+source ./testing/test-env.sh
+export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json
+
+# For the cloud-run session, activate the service account for the gcloud SDK.
+gcloud auth activate-service-account \
+ --key-file "${GOOGLE_APPLICATION_CREDENTIALS}"
+
+export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json
+
+echo -e "\n******************** TESTING PROJECTS ********************"
+
+# Switch to 'fail at end' to allow all tests to complete before exiting.
+set +e
+# Use RTN to return a non-zero value if the test fails.
+RTN=0
+ROOT=$(pwd)
+# Find all requirements.txt in the samples directory (may break on whitespace).
+for file in samples/**/requirements.txt; do
+ cd "$ROOT"
+ # Navigate to the project folder.
+ file=$(dirname "$file")
+ cd "$file"
+
+ echo "------------------------------------------------------------"
+ echo "- testing $file"
+ echo "------------------------------------------------------------"
+
+ # Use nox to execute the tests for the project.
+ python3.6 -m nox -s "$RUN_TESTS_SESSION"
+ EXIT=$?
+
+ # If this is a periodic build, send the test log to the FlakyBot.
+ # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot.
+ if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
+    chmod +x "${KOKORO_GFILE_DIR}/linux_amd64/flakybot"
+    "${KOKORO_GFILE_DIR}/linux_amd64/flakybot"
+ fi
+
+ if [[ $EXIT -ne 0 ]]; then
+ RTN=1
+ echo -e "\n Testing failed: Nox returned a non-zero exit code. \n"
+ else
+ echo -e "\n Testing completed.\n"
+ fi
+
+done
+cd "$ROOT"
+
+# Workaround for Kokoro permissions issue: delete secrets
+rm testing/{test-env.sh,client-secrets.json,service-account.json}
+
+exit "$RTN"
diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh
index 8c03662..36d5885 100755
--- a/.kokoro/test-samples.sh
+++ b/.kokoro/test-samples.sh
@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# The default test runner for samples.
+#
+# For periodic builds, we rewind the repo to the latest release and
+# run test-samples-impl.sh.
# `-e` enables the script to automatically fail when a command fails
# `-o pipefail` sets the exit code to that of the rightmost command to exit with a non-zero status
@@ -24,87 +28,19 @@ cd github/python-area120-tables
# Run periodic samples tests at latest release
if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
+ # Preserve the test runner implementation before rewinding.
+ cp .kokoro/test-samples-impl.sh "${TMPDIR}/test-samples-impl.sh"
+ echo "--- IMPORTANT IMPORTANT IMPORTANT ---"
+ echo "Now we rewind the repo back to the latest release..."
LATEST_RELEASE=$(git describe --abbrev=0 --tags)
git checkout $LATEST_RELEASE
-fi
-
-# Exit early if samples directory doesn't exist
-if [ ! -d "./samples" ]; then
- echo "No tests run. `./samples` not found"
- exit 0
-fi
-
-# Disable buffering, so that the logs stream through.
-export PYTHONUNBUFFERED=1
-
-# Debug: show build environment
-env | grep KOKORO
-
-# Install nox
-python3.6 -m pip install --upgrade --quiet nox
-
-# Use secrets acessor service account to get secrets
-if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then
- gcloud auth activate-service-account \
- --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \
- --project="cloud-devrel-kokoro-resources"
-fi
-
-# This script will create 3 files:
-# - testing/test-env.sh
-# - testing/service-account.json
-# - testing/client-secrets.json
-./scripts/decrypt-secrets.sh
-
-source ./testing/test-env.sh
-export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json
-
-# For cloud-run session, we activate the service account for gcloud sdk.
-gcloud auth activate-service-account \
- --key-file "${GOOGLE_APPLICATION_CREDENTIALS}"
-
-export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json
-
-echo -e "\n******************** TESTING PROJECTS ********************"
-
-# Switch to 'fail at end' to allow all tests to complete before exiting.
-set +e
-# Use RTN to return a non-zero value if the test fails.
-RTN=0
-ROOT=$(pwd)
-# Find all requirements.txt in the samples directory (may break on whitespace).
-for file in samples/**/requirements.txt; do
- cd "$ROOT"
- # Navigate to the project folder.
- file=$(dirname "$file")
- cd "$file"
-
- echo "------------------------------------------------------------"
- echo "- testing $file"
- echo "------------------------------------------------------------"
-
- # Use nox to execute the tests for the project.
- python3.6 -m nox -s "$RUN_TESTS_SESSION"
- EXIT=$?
-
- # If this is a periodic build, send the test log to the FlakyBot.
- # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot.
- if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
- chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot
- $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ echo "The current head is: "
+ echo $(git rev-parse --verify HEAD)
+ echo "--- IMPORTANT IMPORTANT IMPORTANT ---"
+ # Restore the test runner implementation if the release does not include it.
+ if [ ! -f .kokoro/test-samples-impl.sh ]; then
+ cp "${TMPDIR}/test-samples-impl.sh" .kokoro/test-samples-impl.sh
fi
+fi
- if [[ $EXIT -ne 0 ]]; then
- RTN=1
- echo -e "\n Testing failed: Nox returned a non-zero exit code. \n"
- else
- echo -e "\n Testing completed.\n"
- fi
-
-done
-cd "$ROOT"
-
-# Workaround for Kokoro permissions issue: delete secrets
-rm testing/{test-env.sh,client-secrets.json,service-account.json}
-
-exit "$RTN"
+exec .kokoro/test-samples-impl.sh
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a9024b1..32302e4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -12,6 +12,6 @@ repos:
hooks:
- id: black
- repo: https://gitlab.com/pycqa/flake8
- rev: 3.8.4
+ rev: 3.9.0
hooks:
- id: flake8
diff --git a/.trampolinerc b/.trampolinerc
index 995ee29..383b6ec 100644
--- a/.trampolinerc
+++ b/.trampolinerc
@@ -24,6 +24,7 @@ required_envvars+=(
pass_down_envvars+=(
"STAGING_BUCKET"
"V2_STAGING_BUCKET"
+ "NOX_SESSION"
)
# Prevent unintentional override on the default image.
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index e35f259..d3e195a 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -70,9 +70,14 @@ We use `nox <https://nox.readthedocs.io/en/latest/>`__ to instrument our tests.
- To test your changes, run unit tests with ``nox``::
$ nox -s unit-2.7
- $ nox -s unit-3.7
+ $ nox -s unit-3.8
$ ...
+- Args to pytest can be passed through the nox command separated by a `--`. For
+ example, to run a single test::
+
+ $ nox -s unit-3.8 -- -k <name of test>
+
.. note::
The unit tests and system tests are described in the
@@ -93,8 +98,12 @@ On Debian/Ubuntu::
************
Coding Style
************
+- We use the automatic code formatter ``black``. You can run it using
+ the nox session ``blacken``. This will eliminate many lint errors. Run via::
+
+ $ nox -s blacken
-- PEP8 compliance, with exceptions defined in the linter configuration.
+- PEP8 compliance is required, with exceptions defined in the linter configuration.
If you have ``nox`` installed, you can test that you have not introduced
any non-compliant code via::
@@ -133,13 +142,18 @@ Running System Tests
- To run system tests, you can execute::
- $ nox -s system-3.7
+ # Run all system tests
+ $ nox -s system-3.8
$ nox -s system-2.7
+ # Run a single system test
+ $ nox -s system-3.8 -- -k <name of test>
+
+
.. note::
System tests are only configured to run under Python 2.7 and
- Python 3.7. For expediency, we do not run them in older versions
+ Python 3.8. For expediency, we do not run them in older versions
of Python 3.
This alone will not run the tests. You'll need to change some local
diff --git a/MANIFEST.in b/MANIFEST.in
index e9e29d1..e783f4c 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -16,10 +16,10 @@
# Generated by synthtool. DO NOT EDIT!
include README.rst LICENSE
-recursive-include google *.json *.proto
+recursive-include google *.json *.proto py.typed
recursive-include tests *
global-exclude *.py[co]
global-exclude __pycache__
# Exclude scripts for samples readmegen
-prune scripts/readme-gen
\ No newline at end of file
+prune scripts/readme-gen
diff --git a/docs/tables_v1alpha1/services.rst b/docs/tables_v1alpha1/services.rst
index 1a019f7..286fd46 100644
--- a/docs/tables_v1alpha1/services.rst
+++ b/docs/tables_v1alpha1/services.rst
@@ -1,6 +1,6 @@
Services for Google Area120 Tables v1alpha1 API
===============================================
+.. toctree::
+ :maxdepth: 2
-.. automodule:: google.area120.tables_v1alpha1.services.tables_service
- :members:
- :inherited-members:
+ tables_service
diff --git a/docs/tables_v1alpha1/tables_service.rst b/docs/tables_v1alpha1/tables_service.rst
new file mode 100644
index 0000000..bdb4d91
--- /dev/null
+++ b/docs/tables_v1alpha1/tables_service.rst
@@ -0,0 +1,11 @@
+TablesService
+-------------------------------
+
+.. automodule:: google.area120.tables_v1alpha1.services.tables_service
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.area120.tables_v1alpha1.services.tables_service.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/tables_v1alpha1/types.rst b/docs/tables_v1alpha1/types.rst
index 9c77b6c..440ad8b 100644
--- a/docs/tables_v1alpha1/types.rst
+++ b/docs/tables_v1alpha1/types.rst
@@ -3,4 +3,5 @@ Types for Google Area120 Tables v1alpha1 API
.. automodule:: google.area120.tables_v1alpha1.types
:members:
+ :undoc-members:
:show-inheritance:
diff --git a/google/area120/tables/__init__.py b/google/area120/tables/__init__.py
index cd0469c..071566b 100644
--- a/google/area120/tables/__init__.py
+++ b/google/area120/tables/__init__.py
@@ -23,6 +23,7 @@
)
from google.area120.tables_v1alpha1.types.tables import BatchCreateRowsRequest
from google.area120.tables_v1alpha1.types.tables import BatchCreateRowsResponse
+from google.area120.tables_v1alpha1.types.tables import BatchDeleteRowsRequest
from google.area120.tables_v1alpha1.types.tables import BatchUpdateRowsRequest
from google.area120.tables_v1alpha1.types.tables import BatchUpdateRowsResponse
from google.area120.tables_v1alpha1.types.tables import ColumnDescription
@@ -30,18 +31,26 @@
from google.area120.tables_v1alpha1.types.tables import DeleteRowRequest
from google.area120.tables_v1alpha1.types.tables import GetRowRequest
from google.area120.tables_v1alpha1.types.tables import GetTableRequest
+from google.area120.tables_v1alpha1.types.tables import GetWorkspaceRequest
+from google.area120.tables_v1alpha1.types.tables import LabeledItem
from google.area120.tables_v1alpha1.types.tables import ListRowsRequest
from google.area120.tables_v1alpha1.types.tables import ListRowsResponse
from google.area120.tables_v1alpha1.types.tables import ListTablesRequest
from google.area120.tables_v1alpha1.types.tables import ListTablesResponse
+from google.area120.tables_v1alpha1.types.tables import ListWorkspacesRequest
+from google.area120.tables_v1alpha1.types.tables import ListWorkspacesResponse
+from google.area120.tables_v1alpha1.types.tables import LookupDetails
+from google.area120.tables_v1alpha1.types.tables import RelationshipDetails
from google.area120.tables_v1alpha1.types.tables import Row
from google.area120.tables_v1alpha1.types.tables import Table
from google.area120.tables_v1alpha1.types.tables import UpdateRowRequest
from google.area120.tables_v1alpha1.types.tables import View
+from google.area120.tables_v1alpha1.types.tables import Workspace
__all__ = (
"BatchCreateRowsRequest",
"BatchCreateRowsResponse",
+ "BatchDeleteRowsRequest",
"BatchUpdateRowsRequest",
"BatchUpdateRowsResponse",
"ColumnDescription",
@@ -49,14 +58,21 @@
"DeleteRowRequest",
"GetRowRequest",
"GetTableRequest",
+ "GetWorkspaceRequest",
+ "LabeledItem",
"ListRowsRequest",
"ListRowsResponse",
"ListTablesRequest",
"ListTablesResponse",
+ "ListWorkspacesRequest",
+ "ListWorkspacesResponse",
+ "LookupDetails",
+ "RelationshipDetails",
"Row",
"Table",
"TablesServiceAsyncClient",
"TablesServiceClient",
"UpdateRowRequest",
"View",
+ "Workspace",
)
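The newly exported workspace and batch-delete names are importable from the top-level package; a brief usage sketch (not part of this diff; resource names are hypothetical, and `names` follows the standard batch-delete request shape):

```python
from google.area120.tables import (
    BatchDeleteRowsRequest,
    TablesServiceClient,
)

# Credentials are taken from the environment; names are hypothetical.
client = TablesServiceClient()

# `parent` is used for the routing header, as the client code below shows;
# `names` lists the rows to delete.
request = BatchDeleteRowsRequest(
    parent="tables/my-table",
    names=["tables/my-table/rows/row1", "tables/my-table/rows/row2"],
)
client.batch_delete_rows(request=request)
```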
diff --git a/google/area120/tables_v1alpha1/__init__.py b/google/area120/tables_v1alpha1/__init__.py
index 94bc397..8ad2200 100644
--- a/google/area120/tables_v1alpha1/__init__.py
+++ b/google/area120/tables_v1alpha1/__init__.py
@@ -18,6 +18,7 @@
from .services.tables_service import TablesServiceClient
from .types.tables import BatchCreateRowsRequest
from .types.tables import BatchCreateRowsResponse
+from .types.tables import BatchDeleteRowsRequest
from .types.tables import BatchUpdateRowsRequest
from .types.tables import BatchUpdateRowsResponse
from .types.tables import ColumnDescription
@@ -25,19 +26,27 @@
from .types.tables import DeleteRowRequest
from .types.tables import GetRowRequest
from .types.tables import GetTableRequest
+from .types.tables import GetWorkspaceRequest
+from .types.tables import LabeledItem
from .types.tables import ListRowsRequest
from .types.tables import ListRowsResponse
from .types.tables import ListTablesRequest
from .types.tables import ListTablesResponse
+from .types.tables import ListWorkspacesRequest
+from .types.tables import ListWorkspacesResponse
+from .types.tables import LookupDetails
+from .types.tables import RelationshipDetails
from .types.tables import Row
from .types.tables import Table
from .types.tables import UpdateRowRequest
from .types.tables import View
+from .types.tables import Workspace
__all__ = (
"BatchCreateRowsRequest",
"BatchCreateRowsResponse",
+ "BatchDeleteRowsRequest",
"BatchUpdateRowsRequest",
"BatchUpdateRowsResponse",
"ColumnDescription",
@@ -45,13 +54,20 @@
"DeleteRowRequest",
"GetRowRequest",
"GetTableRequest",
+ "GetWorkspaceRequest",
+ "LabeledItem",
"ListRowsRequest",
"ListRowsResponse",
"ListTablesRequest",
"ListTablesResponse",
+ "ListWorkspacesRequest",
+ "ListWorkspacesResponse",
+ "LookupDetails",
+ "RelationshipDetails",
"Row",
"Table",
"UpdateRowRequest",
"View",
+ "Workspace",
"TablesServiceClient",
)
diff --git a/google/area120/tables_v1alpha1/services/tables_service/async_client.py b/google/area120/tables_v1alpha1/services/tables_service/async_client.py
index 5b535d3..653893e 100644
--- a/google/area120/tables_v1alpha1/services/tables_service/async_client.py
+++ b/google/area120/tables_v1alpha1/services/tables_service/async_client.py
@@ -48,6 +48,10 @@ class TablesServiceAsyncClient:
- Each Table has a collection of
[Row][google.area120.tables.v1alpha1.Row] resources, named
``tables/*/rows/*``
+
+ - The API has a collection of
+ [Workspace][google.area120.tables.v1alpha1.Workspace] resources,
+ named ``workspaces/*``.
"""
_client: TablesServiceClient
@@ -59,6 +63,8 @@ class TablesServiceAsyncClient:
parse_row_path = staticmethod(TablesServiceClient.parse_row_path)
table_path = staticmethod(TablesServiceClient.table_path)
parse_table_path = staticmethod(TablesServiceClient.parse_table_path)
+ workspace_path = staticmethod(TablesServiceClient.workspace_path)
+ parse_workspace_path = staticmethod(TablesServiceClient.parse_workspace_path)
common_billing_account_path = staticmethod(
TablesServiceClient.common_billing_account_path
@@ -89,7 +95,36 @@ class TablesServiceAsyncClient:
TablesServiceClient.parse_common_location_path
)
- from_service_account_file = TablesServiceClient.from_service_account_file
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ TablesServiceAsyncClient: The constructed client.
+ """
+ return TablesServiceClient.from_service_account_info.__func__(TablesServiceAsyncClient, info, *args, **kwargs) # type: ignore
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ TablesServiceAsyncClient: The constructed client.
+ """
+ return TablesServiceClient.from_service_account_file.__func__(TablesServiceAsyncClient, filename, *args, **kwargs) # type: ignore
+
from_service_account_json = from_service_account_file
@property
@@ -165,12 +200,13 @@ async def get_table(
r"""Gets a table. Returns NOT_FOUND if the table does not exist.
Args:
- request (:class:`~.tables.GetTableRequest`):
+ request (:class:`google.area120.tables_v1alpha1.types.GetTableRequest`):
The request object. Request message for
TablesService.GetTable.
name (:class:`str`):
Required. The name of the table to
retrieve. Format: tables/{table}
+
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
@@ -182,7 +218,7 @@ async def get_table(
sent along with the request as metadata.
Returns:
- ~.tables.Table:
+ google.area120.tables_v1alpha1.types.Table:
A single table.
"""
# Create or coerce a protobuf request object.
@@ -234,7 +270,7 @@ async def list_tables(
r"""Lists tables for the user.
Args:
- request (:class:`~.tables.ListTablesRequest`):
+ request (:class:`google.area120.tables_v1alpha1.types.ListTablesRequest`):
The request object. Request message for
TablesService.ListTables.
@@ -245,7 +281,7 @@ async def list_tables(
sent along with the request as metadata.
Returns:
- ~.pagers.ListTablesAsyncPager:
+ google.area120.tables_v1alpha1.services.tables_service.pagers.ListTablesAsyncPager:
Response message for
TablesService.ListTables.
Iterating over this object will yield
@@ -277,6 +313,133 @@ async def list_tables(
# Done; return the response.
return response
+ async def get_workspace(
+ self,
+ request: tables.GetWorkspaceRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> tables.Workspace:
+ r"""Gets a workspace. Returns NOT_FOUND if the workspace does not
+ exist.
+
+ Args:
+ request (:class:`google.area120.tables_v1alpha1.types.GetWorkspaceRequest`):
+ The request object. Request message for
+ TablesService.GetWorkspace.
+ name (:class:`str`):
+ Required. The name of the workspace
+ to retrieve. Format:
+ workspaces/{workspace}
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.area120.tables_v1alpha1.types.Workspace:
+ A single workspace.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = tables.GetWorkspaceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_workspace,
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def list_workspaces(
+ self,
+ request: tables.ListWorkspacesRequest = None,
+ *,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListWorkspacesAsyncPager:
+ r"""Lists workspaces for the user.
+
+ Args:
+ request (:class:`google.area120.tables_v1alpha1.types.ListWorkspacesRequest`):
+ The request object. Request message for
+ TablesService.ListWorkspaces.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.area120.tables_v1alpha1.services.tables_service.pagers.ListWorkspacesAsyncPager:
+ Response message for
+ TablesService.ListWorkspaces.
+ Iterating over this object will yield
+ results and resolve additional pages
+ automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+
+ request = tables.ListWorkspacesRequest(request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_workspaces,
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListWorkspacesAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
async def get_row(
self,
request: tables.GetRowRequest = None,
@@ -290,13 +453,14 @@ async def get_row(
table.
Args:
- request (:class:`~.tables.GetRowRequest`):
+ request (:class:`google.area120.tables_v1alpha1.types.GetRowRequest`):
The request object. Request message for
TablesService.GetRow.
name (:class:`str`):
Required. The name of the row to
retrieve. Format:
tables/{table}/rows/{row}
+
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
@@ -308,7 +472,7 @@ async def get_row(
sent along with the request as metadata.
Returns:
- ~.tables.Row:
+ google.area120.tables_v1alpha1.types.Row:
A single row in a table.
"""
# Create or coerce a protobuf request object.
@@ -362,12 +526,13 @@ async def list_rows(
exist.
Args:
- request (:class:`~.tables.ListRowsRequest`):
+ request (:class:`google.area120.tables_v1alpha1.types.ListRowsRequest`):
The request object. Request message for
TablesService.ListRows.
parent (:class:`str`):
Required. The parent table.
Format: tables/{table}
+
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
@@ -379,7 +544,7 @@ async def list_rows(
sent along with the request as metadata.
Returns:
- ~.pagers.ListRowsAsyncPager:
+ google.area120.tables_v1alpha1.services.tables_service.pagers.ListRowsAsyncPager:
Response message for
TablesService.ListRows.
Iterating over this object will yield
@@ -444,17 +609,18 @@ async def create_row(
r"""Creates a row.
Args:
- request (:class:`~.tables.CreateRowRequest`):
+ request (:class:`google.area120.tables_v1alpha1.types.CreateRowRequest`):
The request object. Request message for
TablesService.CreateRow.
parent (:class:`str`):
Required. The parent table where this
row will be created. Format:
tables/{table}
+
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- row (:class:`~.tables.Row`):
+ row (:class:`google.area120.tables_v1alpha1.types.Row`):
Required. The row to create.
This corresponds to the ``row`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -467,7 +633,7 @@ async def create_row(
sent along with the request as metadata.
Returns:
- ~.tables.Row:
+ google.area120.tables_v1alpha1.types.Row:
A single row in a table.
"""
# Create or coerce a protobuf request object.
@@ -521,7 +687,7 @@ async def batch_create_rows(
r"""Creates multiple rows.
Args:
- request (:class:`~.tables.BatchCreateRowsRequest`):
+ request (:class:`google.area120.tables_v1alpha1.types.BatchCreateRowsRequest`):
The request object. Request message for
TablesService.BatchCreateRows.
@@ -532,7 +698,7 @@ async def batch_create_rows(
sent along with the request as metadata.
Returns:
- ~.tables.BatchCreateRowsResponse:
+ google.area120.tables_v1alpha1.types.BatchCreateRowsResponse:
Response message for
TablesService.BatchCreateRows.
@@ -574,15 +740,15 @@ async def update_row(
r"""Updates a row.
Args:
- request (:class:`~.tables.UpdateRowRequest`):
+ request (:class:`google.area120.tables_v1alpha1.types.UpdateRowRequest`):
The request object. Request message for
TablesService.UpdateRow.
- row (:class:`~.tables.Row`):
+ row (:class:`google.area120.tables_v1alpha1.types.Row`):
Required. The row to update.
This corresponds to the ``row`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- update_mask (:class:`~.field_mask.FieldMask`):
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
The list of fields to update.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -595,7 +761,7 @@ async def update_row(
sent along with the request as metadata.
Returns:
- ~.tables.Row:
+ google.area120.tables_v1alpha1.types.Row:
A single row in a table.
"""
# Create or coerce a protobuf request object.
@@ -649,7 +815,7 @@ async def batch_update_rows(
r"""Updates multiple rows.
Args:
- request (:class:`~.tables.BatchUpdateRowsRequest`):
+ request (:class:`google.area120.tables_v1alpha1.types.BatchUpdateRowsRequest`):
The request object. Request message for
TablesService.BatchUpdateRows.
@@ -660,7 +826,7 @@ async def batch_update_rows(
sent along with the request as metadata.
Returns:
- ~.tables.BatchUpdateRowsResponse:
+ google.area120.tables_v1alpha1.types.BatchUpdateRowsResponse:
Response message for
TablesService.BatchUpdateRows.
@@ -701,13 +867,14 @@ async def delete_row(
r"""Deletes a row.
Args:
- request (:class:`~.tables.DeleteRowRequest`):
+ request (:class:`google.area120.tables_v1alpha1.types.DeleteRowRequest`):
The request object. Request message for
TablesService.DeleteRow
name (:class:`str`):
Required. The name of the row to
delete. Format:
tables/{table}/rows/{row}
+
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
@@ -755,6 +922,50 @@ async def delete_row(
request, retry=retry, timeout=timeout, metadata=metadata,
)
+ async def batch_delete_rows(
+ self,
+ request: tables.BatchDeleteRowsRequest = None,
+ *,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Deletes multiple rows.
+
+ Args:
+ request (:class:`google.area120.tables_v1alpha1.types.BatchDeleteRowsRequest`):
+ The request object. Request message for
+ TablesService.BatchDeleteRows
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+
+ request = tables.BatchDeleteRowsRequest(request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.batch_delete_rows,
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ await rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
+
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
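A minimal usage sketch of the new async surface (not part of this diff; the workspace name is hypothetical and credentials come from the environment):

```python
import asyncio

from google.area120.tables import TablesServiceAsyncClient


async def main():
    client = TablesServiceAsyncClient()

    # The flattened `name` argument added above.
    workspace = await client.get_workspace(name="workspaces/my-workspace")
    print(workspace.name)

    # list_workspaces returns an async pager; iterate with `async for`.
    async for ws in await client.list_workspaces():
        print(ws.name)


asyncio.run(main())
```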
diff --git a/google/area120/tables_v1alpha1/services/tables_service/client.py b/google/area120/tables_v1alpha1/services/tables_service/client.py
index b802231..863aee5 100644
--- a/google/area120/tables_v1alpha1/services/tables_service/client.py
+++ b/google/area120/tables_v1alpha1/services/tables_service/client.py
@@ -83,6 +83,10 @@ class TablesServiceClient(metaclass=TablesServiceClientMeta):
- Each Table has a collection of
[Row][google.area120.tables.v1alpha1.Row] resources, named
``tables/*/rows/*``
+
+ - The API has a collection of
+ [Workspace][google.area120.tables.v1alpha1.Workspace] resources,
+ named ``workspaces/*``.
"""
@staticmethod
@@ -119,6 +123,22 @@ def _get_default_mtls_endpoint(api_endpoint):
DEFAULT_ENDPOINT
)
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ TablesServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
@@ -131,7 +151,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
kwargs: Additional arguments to pass to the constructor.
Returns:
- {@api.name}: The constructed client.
+ TablesServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
@@ -170,6 +190,17 @@ def parse_table_path(path: str) -> Dict[str, str]:
    m = re.match(r"^tables/(?P<table>.+?)$", path)
return m.groupdict() if m else {}
+ @staticmethod
+ def workspace_path(workspace: str,) -> str:
+ """Return a fully-qualified workspace string."""
+ return "workspaces/{workspace}".format(workspace=workspace,)
+
+ @staticmethod
+ def parse_workspace_path(path: str) -> Dict[str, str]:
+ """Parse a workspace path into its component segments."""
+    m = re.match(r"^workspaces/(?P<workspace>.+?)$", path)
+ return m.groupdict() if m else {}
+
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
@@ -245,10 +276,10 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- transport (Union[str, ~.TablesServiceTransport]): The
+ transport (Union[str, TablesServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
- client_options (client_options_lib.ClientOptions): Custom options for the
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
@@ -284,21 +315,17 @@ def __init__(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
- ssl_credentials = None
+ client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
- import grpc # type: ignore
-
- cert, key = client_options.client_cert_source()
- ssl_credentials = grpc.ssl_channel_credentials(
- certificate_chain=cert, private_key=key
- )
is_mtls = True
+ client_cert_source_func = client_options.client_cert_source
else:
- creds = SslCredentials()
- is_mtls = creds.is_mtls
- ssl_credentials = creds.ssl_credentials if is_mtls else None
+ is_mtls = mtls.has_default_client_cert_source()
+ client_cert_source_func = (
+ mtls.default_client_cert_source() if is_mtls else None
+ )
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
@@ -341,7 +368,7 @@ def __init__(
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
- ssl_channel_credentials=ssl_credentials,
+ client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
)
@@ -358,12 +385,13 @@ def get_table(
r"""Gets a table. Returns NOT_FOUND if the table does not exist.
Args:
- request (:class:`~.tables.GetTableRequest`):
+ request (google.area120.tables_v1alpha1.types.GetTableRequest):
The request object. Request message for
TablesService.GetTable.
- name (:class:`str`):
+ name (str):
Required. The name of the table to
retrieve. Format: tables/{table}
+
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
@@ -375,7 +403,7 @@ def get_table(
sent along with the request as metadata.
Returns:
- ~.tables.Table:
+ google.area120.tables_v1alpha1.types.Table:
A single table.
"""
# Create or coerce a protobuf request object.
@@ -428,7 +456,7 @@ def list_tables(
r"""Lists tables for the user.
Args:
- request (:class:`~.tables.ListTablesRequest`):
+ request (google.area120.tables_v1alpha1.types.ListTablesRequest):
The request object. Request message for
TablesService.ListTables.
@@ -439,7 +467,7 @@ def list_tables(
sent along with the request as metadata.
Returns:
- ~.pagers.ListTablesPager:
+ google.area120.tables_v1alpha1.services.tables_service.pagers.ListTablesPager:
Response message for
TablesService.ListTables.
Iterating over this object will yield
@@ -472,6 +500,135 @@ def list_tables(
# Done; return the response.
return response
+ def get_workspace(
+ self,
+ request: tables.GetWorkspaceRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> tables.Workspace:
+ r"""Gets a workspace. Returns NOT_FOUND if the workspace does not
+ exist.
+
+ Args:
+ request (google.area120.tables_v1alpha1.types.GetWorkspaceRequest):
+ The request object. Request message for
+ TablesService.GetWorkspace.
+ name (str):
+ Required. The name of the workspace
+ to retrieve. Format:
+ workspaces/{workspace}
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.area120.tables_v1alpha1.types.Workspace:
+ A single workspace.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a tables.GetWorkspaceRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, tables.GetWorkspaceRequest):
+ request = tables.GetWorkspaceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_workspace]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def list_workspaces(
+ self,
+ request: tables.ListWorkspacesRequest = None,
+ *,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListWorkspacesPager:
+ r"""Lists workspaces for the user.
+
+ Args:
+ request (google.area120.tables_v1alpha1.types.ListWorkspacesRequest):
+ The request object. Request message for
+ TablesService.ListWorkspaces.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.area120.tables_v1alpha1.services.tables_service.pagers.ListWorkspacesPager:
+ Response message for
+ TablesService.ListWorkspaces.
+ Iterating over this object will yield
+ results and resolve additional pages
+ automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a tables.ListWorkspacesRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, tables.ListWorkspacesRequest):
+ request = tables.ListWorkspacesRequest(request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_workspaces]
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListWorkspacesPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
def get_row(
self,
request: tables.GetRowRequest = None,
@@ -485,13 +642,14 @@ def get_row(
table.
Args:
- request (:class:`~.tables.GetRowRequest`):
+ request (google.area120.tables_v1alpha1.types.GetRowRequest):
The request object. Request message for
TablesService.GetRow.
- name (:class:`str`):
+ name (str):
Required. The name of the row to
retrieve. Format:
tables/{table}/rows/{row}
+
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
@@ -503,7 +661,7 @@ def get_row(
sent along with the request as metadata.
Returns:
- ~.tables.Row:
+ google.area120.tables_v1alpha1.types.Row:
A single row in a table.
"""
# Create or coerce a protobuf request object.
@@ -558,12 +716,13 @@ def list_rows(
exist.
Args:
- request (:class:`~.tables.ListRowsRequest`):
+ request (google.area120.tables_v1alpha1.types.ListRowsRequest):
The request object. Request message for
TablesService.ListRows.
- parent (:class:`str`):
+ parent (str):
Required. The parent table.
Format: tables/{table}
+
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
@@ -575,7 +734,7 @@ def list_rows(
sent along with the request as metadata.
Returns:
- ~.pagers.ListRowsPager:
+ google.area120.tables_v1alpha1.services.tables_service.pagers.ListRowsPager:
Response message for
TablesService.ListRows.
Iterating over this object will yield
@@ -641,17 +800,18 @@ def create_row(
r"""Creates a row.
Args:
- request (:class:`~.tables.CreateRowRequest`):
+ request (google.area120.tables_v1alpha1.types.CreateRowRequest):
The request object. Request message for
TablesService.CreateRow.
- parent (:class:`str`):
+ parent (str):
Required. The parent table where this
row will be created. Format:
tables/{table}
+
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- row (:class:`~.tables.Row`):
+ row (google.area120.tables_v1alpha1.types.Row):
Required. The row to create.
This corresponds to the ``row`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -664,7 +824,7 @@ def create_row(
sent along with the request as metadata.
Returns:
- ~.tables.Row:
+ google.area120.tables_v1alpha1.types.Row:
A single row in a table.
"""
# Create or coerce a protobuf request object.
@@ -719,7 +879,7 @@ def batch_create_rows(
r"""Creates multiple rows.
Args:
- request (:class:`~.tables.BatchCreateRowsRequest`):
+ request (google.area120.tables_v1alpha1.types.BatchCreateRowsRequest):
The request object. Request message for
TablesService.BatchCreateRows.
@@ -730,7 +890,7 @@ def batch_create_rows(
sent along with the request as metadata.
Returns:
- ~.tables.BatchCreateRowsResponse:
+ google.area120.tables_v1alpha1.types.BatchCreateRowsResponse:
Response message for
TablesService.BatchCreateRows.
@@ -773,15 +933,15 @@ def update_row(
r"""Updates a row.
Args:
- request (:class:`~.tables.UpdateRowRequest`):
+ request (google.area120.tables_v1alpha1.types.UpdateRowRequest):
The request object. Request message for
TablesService.UpdateRow.
- row (:class:`~.tables.Row`):
+ row (google.area120.tables_v1alpha1.types.Row):
Required. The row to update.
This corresponds to the ``row`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- update_mask (:class:`~.field_mask.FieldMask`):
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
The list of fields to update.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -794,7 +954,7 @@ def update_row(
sent along with the request as metadata.
Returns:
- ~.tables.Row:
+ google.area120.tables_v1alpha1.types.Row:
A single row in a table.
"""
# Create or coerce a protobuf request object.
@@ -849,7 +1009,7 @@ def batch_update_rows(
r"""Updates multiple rows.
Args:
- request (:class:`~.tables.BatchUpdateRowsRequest`):
+ request (google.area120.tables_v1alpha1.types.BatchUpdateRowsRequest):
The request object. Request message for
TablesService.BatchUpdateRows.
@@ -860,7 +1020,7 @@ def batch_update_rows(
sent along with the request as metadata.
Returns:
- ~.tables.BatchUpdateRowsResponse:
+ google.area120.tables_v1alpha1.types.BatchUpdateRowsResponse:
Response message for
TablesService.BatchUpdateRows.
@@ -902,13 +1062,14 @@ def delete_row(
r"""Deletes a row.
Args:
- request (:class:`~.tables.DeleteRowRequest`):
+ request (google.area120.tables_v1alpha1.types.DeleteRowRequest):
The request object. Request message for
TablesService.DeleteRow
- name (:class:`str`):
+ name (str):
Required. The name of the row to
delete. Format:
tables/{table}/rows/{row}
+
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
@@ -957,6 +1118,51 @@ def delete_row(
request, retry=retry, timeout=timeout, metadata=metadata,
)
+ def batch_delete_rows(
+ self,
+ request: tables.BatchDeleteRowsRequest = None,
+ *,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Deletes multiple rows.
+
+ Args:
+ request (google.area120.tables_v1alpha1.types.BatchDeleteRowsRequest):
+ The request object. Request message for
+ TablesService.BatchDeleteRows
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a tables.BatchDeleteRowsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, tables.BatchDeleteRowsRequest):
+ request = tables.BatchDeleteRowsRequest(request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.batch_delete_rows]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
+
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
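And the synchronous equivalent, as a sketch under the same assumptions (the key-file path and resource names are hypothetical):

```python
from google.area120.tables import TablesServiceClient

# from_service_account_file is one of the constructors documented above;
# the key path here is hypothetical.
client = TablesServiceClient.from_service_account_file("service-account.json")

# The sync pager resolves additional pages transparently during iteration.
for workspace in client.list_workspaces():
    print(workspace.name)
```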
diff --git a/google/area120/tables_v1alpha1/services/tables_service/pagers.py b/google/area120/tables_v1alpha1/services/tables_service/pagers.py
index b77da57..101b88e 100644
--- a/google/area120/tables_v1alpha1/services/tables_service/pagers.py
+++ b/google/area120/tables_v1alpha1/services/tables_service/pagers.py
@@ -15,7 +15,16 @@
# limitations under the License.
#
-from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple
+from typing import (
+ Any,
+ AsyncIterable,
+ Awaitable,
+ Callable,
+ Iterable,
+ Sequence,
+ Tuple,
+ Optional,
+)
from google.area120.tables_v1alpha1.types import tables
@@ -24,7 +33,7 @@ class ListTablesPager:
"""A pager for iterating through ``list_tables`` requests.
This class thinly wraps an initial
- :class:`~.tables.ListTablesResponse` object, and
+ :class:`google.area120.tables_v1alpha1.types.ListTablesResponse` object, and
provides an ``__iter__`` method to iterate through its
``tables`` field.
@@ -33,7 +42,7 @@ class ListTablesPager:
through the ``tables`` field on the
corresponding responses.
- All the usual :class:`~.tables.ListTablesResponse`
+ All the usual :class:`google.area120.tables_v1alpha1.types.ListTablesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
@@ -51,9 +60,9 @@ def __init__(
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
- request (:class:`~.tables.ListTablesRequest`):
+ request (google.area120.tables_v1alpha1.types.ListTablesRequest):
The initial request object.
- response (:class:`~.tables.ListTablesResponse`):
+ response (google.area120.tables_v1alpha1.types.ListTablesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
@@ -86,7 +95,7 @@ class ListTablesAsyncPager:
"""A pager for iterating through ``list_tables`` requests.
This class thinly wraps an initial
- :class:`~.tables.ListTablesResponse` object, and
+ :class:`google.area120.tables_v1alpha1.types.ListTablesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``tables`` field.
@@ -95,7 +104,7 @@ class ListTablesAsyncPager:
through the ``tables`` field on the
corresponding responses.
- All the usual :class:`~.tables.ListTablesResponse`
+ All the usual :class:`google.area120.tables_v1alpha1.types.ListTablesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
@@ -113,9 +122,9 @@ def __init__(
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
- request (:class:`~.tables.ListTablesRequest`):
+ request (google.area120.tables_v1alpha1.types.ListTablesRequest):
The initial request object.
- response (:class:`~.tables.ListTablesResponse`):
+ response (google.area120.tables_v1alpha1.types.ListTablesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
@@ -148,11 +157,139 @@ def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+class ListWorkspacesPager:
+ """A pager for iterating through ``list_workspaces`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.area120.tables_v1alpha1.types.ListWorkspacesResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``workspaces`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListWorkspaces`` requests and continue to iterate
+ through the ``workspaces`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.area120.tables_v1alpha1.types.ListWorkspacesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., tables.ListWorkspacesResponse],
+ request: tables.ListWorkspacesRequest,
+ response: tables.ListWorkspacesResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.area120.tables_v1alpha1.types.ListWorkspacesRequest):
+ The initial request object.
+ response (google.area120.tables_v1alpha1.types.ListWorkspacesResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = tables.ListWorkspacesRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterable[tables.ListWorkspacesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterable[tables.Workspace]:
+ for page in self.pages:
+ yield from page.workspaces
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
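Usage sketch (not part of the patch, and it assumes the generated top-level exports such as `TablesServiceClient`): iterating the pager walks every page transparently.

```python
# Hedged sketch: consuming ListWorkspacesPager. Client construction and
# credentials are assumed; only the pager protocol is illustrated.
from google.area120 import tables_v1alpha1

client = tables_v1alpha1.TablesServiceClient()
for workspace in client.list_workspaces(
    tables_v1alpha1.ListWorkspacesRequest(page_size=10)
):
    # __iter__ yields from page.workspaces, fetching follow-up pages as needed
    print(workspace.name, workspace.display_name)
```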
+class ListWorkspacesAsyncPager:
+ """A pager for iterating through ``list_workspaces`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.area120.tables_v1alpha1.types.ListWorkspacesResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``workspaces`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListWorkspaces`` requests and continue to iterate
+ through the ``workspaces`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.area120.tables_v1alpha1.types.ListWorkspacesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., Awaitable[tables.ListWorkspacesResponse]],
+ request: tables.ListWorkspacesRequest,
+ response: tables.ListWorkspacesResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.area120.tables_v1alpha1.types.ListWorkspacesRequest):
+ The initial request object.
+ response (google.area120.tables_v1alpha1.types.ListWorkspacesResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = tables.ListWorkspacesRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(self) -> AsyncIterable[tables.ListWorkspacesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterable[tables.Workspace]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.workspaces:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
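Async usage sketch (the async client surface is assumed symmetric to the sync one): await the call to get the pager, then iterate with ``async for``.

```python
# Hedged sketch: consuming ListWorkspacesAsyncPager.
import asyncio
from google.area120 import tables_v1alpha1

async def main():
    client = tables_v1alpha1.TablesServiceAsyncClient()
    pager = await client.list_workspaces()
    async for workspace in pager:  # __aiter__ flattens pages lazily
        print(workspace.name)

asyncio.run(main())
```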
class ListRowsPager:
"""A pager for iterating through ``list_rows`` requests.
This class thinly wraps an initial
- :class:`~.tables.ListRowsResponse` object, and
+ :class:`google.area120.tables_v1alpha1.types.ListRowsResponse` object, and
provides an ``__iter__`` method to iterate through its
``rows`` field.
@@ -161,7 +298,7 @@ class ListRowsPager:
through the ``rows`` field on the
corresponding responses.
- All the usual :class:`~.tables.ListRowsResponse`
+ All the usual :class:`google.area120.tables_v1alpha1.types.ListRowsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
@@ -179,9 +316,9 @@ def __init__(
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
- request (:class:`~.tables.ListRowsRequest`):
+ request (google.area120.tables_v1alpha1.types.ListRowsRequest):
The initial request object.
- response (:class:`~.tables.ListRowsResponse`):
+ response (google.area120.tables_v1alpha1.types.ListRowsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
@@ -214,7 +351,7 @@ class ListRowsAsyncPager:
"""A pager for iterating through ``list_rows`` requests.
This class thinly wraps an initial
- :class:`~.tables.ListRowsResponse` object, and
+ :class:`google.area120.tables_v1alpha1.types.ListRowsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``rows`` field.
@@ -223,7 +360,7 @@ class ListRowsAsyncPager:
through the ``rows`` field on the
corresponding responses.
- All the usual :class:`~.tables.ListRowsResponse`
+ All the usual :class:`google.area120.tables_v1alpha1.types.ListRowsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
@@ -241,9 +378,9 @@ def __init__(
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
- request (:class:`~.tables.ListRowsRequest`):
+ request (google.area120.tables_v1alpha1.types.ListRowsRequest):
The initial request object.
- response (:class:`~.tables.ListRowsResponse`):
+ response (google.area120.tables_v1alpha1.types.ListRowsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
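Both pager flavors also expose the raw responses via ``pages``, which is useful when page boundaries matter; a minimal sketch, assuming ``pager`` is a ``ListWorkspacesPager`` and ``save_token`` is a caller-supplied callback:

```python
# Sketch: walk raw pages instead of flattened items so the caller can
# checkpoint next_page_token (empty string on the final page).
def checkpointed_workspaces(pager, save_token):
    for page in pager.pages:
        yield from page.workspaces
        save_token(page.next_page_token)
```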
diff --git a/google/area120/tables_v1alpha1/services/tables_service/transports/base.py b/google/area120/tables_v1alpha1/services/tables_service/transports/base.py
index f756208..0f2f7ef 100644
--- a/google/area120/tables_v1alpha1/services/tables_service/transports/base.py
+++ b/google/area120/tables_v1alpha1/services/tables_service/transports/base.py
@@ -46,6 +46,7 @@ class TablesServiceTransport(abc.ABC):
"https://www.googleapis.com/auth/drive.readonly",
"https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/spreadsheets.readonly",
+ "https://www.googleapis.com/auth/tables",
)
def __init__(
@@ -74,10 +75,10 @@ def __init__(
scope (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
@@ -85,6 +86,9 @@ def __init__(
host += ":443"
self._host = host
+ # Save the scopes.
+ self._scopes = scopes or self.AUTH_SCOPES
+
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
@@ -94,20 +98,17 @@ def __init__(
if credentials_file is not None:
credentials, _ = auth.load_credentials_from_file(
- credentials_file, scopes=scopes, quota_project_id=quota_project_id
+ credentials_file, scopes=self._scopes, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = auth.default(
- scopes=scopes, quota_project_id=quota_project_id
+ scopes=self._scopes, quota_project_id=quota_project_id
)
# Save the credentials.
self._credentials = credentials
- # Lifted into its own function so it can be stubbed out during tests.
- self._prep_wrapped_messages(client_info)
-
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
@@ -117,6 +118,12 @@ def _prep_wrapped_messages(self, client_info):
self.list_tables: gapic_v1.method.wrap_method(
self.list_tables, default_timeout=60.0, client_info=client_info,
),
+ self.get_workspace: gapic_v1.method.wrap_method(
+ self.get_workspace, default_timeout=60.0, client_info=client_info,
+ ),
+ self.list_workspaces: gapic_v1.method.wrap_method(
+ self.list_workspaces, default_timeout=60.0, client_info=client_info,
+ ),
self.get_row: gapic_v1.method.wrap_method(
self.get_row, default_timeout=60.0, client_info=client_info,
),
@@ -138,6 +145,9 @@ def _prep_wrapped_messages(self, client_info):
self.delete_row: gapic_v1.method.wrap_method(
self.delete_row, default_timeout=60.0, client_info=client_info,
),
+ self.batch_delete_rows: gapic_v1.method.wrap_method(
+ self.batch_delete_rows, default_timeout=60.0, client_info=client_info,
+ ),
}
@property
@@ -160,6 +170,27 @@ def list_tables(
]:
raise NotImplementedError()
+ @property
+ def get_workspace(
+ self,
+ ) -> typing.Callable[
+ [tables.GetWorkspaceRequest],
+ typing.Union[tables.Workspace, typing.Awaitable[tables.Workspace]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_workspaces(
+ self,
+ ) -> typing.Callable[
+ [tables.ListWorkspacesRequest],
+ typing.Union[
+ tables.ListWorkspacesResponse,
+ typing.Awaitable[tables.ListWorkspacesResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
@property
def get_row(
self,
@@ -230,5 +261,14 @@ def delete_row(
]:
raise NotImplementedError()
+ @property
+ def batch_delete_rows(
+ self,
+ ) -> typing.Callable[
+ [tables.BatchDeleteRowsRequest],
+ typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
+ ]:
+ raise NotImplementedError()
+
__all__ = ("TablesServiceTransport",)
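A minimal sketch (simplified names, not the real class) of the scope handling introduced above: explicit scopes win, ``AUTH_SCOPES`` is the fallback, and the resolved ``self._scopes`` feeds both the credentials-file and ADC paths.

```python
# Illustrative sketch of the scope-resolution pattern.
class _ScopeSketch:
    AUTH_SCOPES = ("https://www.googleapis.com/auth/tables",)

    def __init__(self, scopes=None):
        # Resolve once, reuse everywhere credentials are loaded.
        self._scopes = scopes or self.AUTH_SCOPES

assert _ScopeSketch()._scopes == _ScopeSketch.AUTH_SCOPES
assert _ScopeSketch(scopes=["custom"])._scopes == ["custom"]
```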
diff --git a/google/area120/tables_v1alpha1/services/tables_service/transports/grpc.py b/google/area120/tables_v1alpha1/services/tables_service/transports/grpc.py
index 7f77524..4b031d4 100644
--- a/google/area120/tables_v1alpha1/services/tables_service/transports/grpc.py
+++ b/google/area120/tables_v1alpha1/services/tables_service/transports/grpc.py
@@ -46,6 +46,10 @@ class TablesServiceGrpcTransport(TablesServiceTransport):
[Row][google.area120.tables.v1alpha1.Row] resources, named
``tables/*/rows/*``
+ - The API has a collection of
+ [Workspace][google.area120.tables.v1alpha1.Workspace] resources,
+ named ``workspaces/*``.
+
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
@@ -67,6 +71,7 @@ def __init__(
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
@@ -97,6 +102,10 @@ def __init__(
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
@@ -111,72 +120,60 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
+ self._stubs: Dict[str, Callable] = {}
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
- # Sanity check: Ensure that channel and credentials are not both
- # provided.
+ # Ignore credentials if a channel was passed.
credentials = False
-
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
- elif api_mtls_endpoint:
- warnings.warn(
- "api_mtls_endpoint and client_cert_source are deprecated",
- DeprecationWarning,
- )
- host = (
- api_mtls_endpoint
- if ":" in api_mtls_endpoint
- else api_mtls_endpoint + ":443"
- )
+ else:
+ if api_mtls_endpoint:
+ host = api_mtls_endpoint
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
- if credentials is None:
- credentials, _ = auth.default(
- scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
- )
-
- # Create SSL credentials with client_cert_source or application
- # default SSL credentials.
- if client_cert_source:
- cert, key = client_cert_source()
- ssl_credentials = grpc.ssl_channel_credentials(
- certificate_chain=cert, private_key=key
- )
else:
- ssl_credentials = SslCredentials().ssl_credentials
-
- # create a new channel. The provided one is ignored.
- self._grpc_channel = type(self).create_channel(
- host,
- credentials=credentials,
- credentials_file=credentials_file,
- ssl_credentials=ssl_credentials,
- scopes=scopes or self.AUTH_SCOPES,
- quota_project_id=quota_project_id,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
- self._ssl_channel_credentials = ssl_credentials
- else:
- host = host if ":" in host else host + ":443"
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
- if credentials is None:
- credentials, _ = auth.default(
- scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
- )
+ # The base transport sets the host, credentials, and scopes.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
- # create a new channel. The provided one is ignored.
+ if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
- host,
- credentials=credentials,
+ self._host,
+ credentials=self._credentials,
credentials_file=credentials_file,
- ssl_credentials=ssl_channel_credentials,
- scopes=scopes or self.AUTH_SCOPES,
+ scopes=self._scopes,
+ ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
@@ -184,17 +181,8 @@ def __init__(
],
)
- self._stubs = {} # type: Dict[str, Callable]
-
- # Run the base constructor.
- super().__init__(
- host=host,
- credentials=credentials,
- credentials_file=credentials_file,
- scopes=scopes or self.AUTH_SCOPES,
- quota_project_id=quota_project_id,
- client_info=client_info,
- )
+ # Wrap messages. This must be done after self._grpc_channel exists.
+ self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
@@ -208,7 +196,7 @@ def create_channel(
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
- address (Optional[str]): The host for the channel to use.
+ host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
@@ -297,6 +285,57 @@ def list_tables(
)
return self._stubs["list_tables"]
+ @property
+ def get_workspace(self) -> Callable[[tables.GetWorkspaceRequest], tables.Workspace]:
+ r"""Return a callable for the get workspace method over gRPC.
+
+ Gets a workspace. Returns NOT_FOUND if the workspace does not
+ exist.
+
+ Returns:
+ Callable[[~.GetWorkspaceRequest],
+ ~.Workspace]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_workspace" not in self._stubs:
+ self._stubs["get_workspace"] = self.grpc_channel.unary_unary(
+ "/google.area120.tables.v1alpha1.TablesService/GetWorkspace",
+ request_serializer=tables.GetWorkspaceRequest.serialize,
+ response_deserializer=tables.Workspace.deserialize,
+ )
+ return self._stubs["get_workspace"]
+
+ @property
+ def list_workspaces(
+ self,
+ ) -> Callable[[tables.ListWorkspacesRequest], tables.ListWorkspacesResponse]:
+ r"""Return a callable for the list workspaces method over gRPC.
+
+ Lists workspaces for the user.
+
+ Returns:
+ Callable[[~.ListWorkspacesRequest],
+ ~.ListWorkspacesResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_workspaces" not in self._stubs:
+ self._stubs["list_workspaces"] = self.grpc_channel.unary_unary(
+ "/google.area120.tables.v1alpha1.TablesService/ListWorkspaces",
+ request_serializer=tables.ListWorkspacesRequest.serialize,
+ response_deserializer=tables.ListWorkspacesResponse.deserialize,
+ )
+ return self._stubs["list_workspaces"]
+
@property
def get_row(self) -> Callable[[tables.GetRowRequest], tables.Row]:
r"""Return a callable for the get row method over gRPC.
@@ -471,5 +510,31 @@ def delete_row(self) -> Callable[[tables.DeleteRowRequest], empty.Empty]:
)
return self._stubs["delete_row"]
+ @property
+ def batch_delete_rows(
+ self,
+ ) -> Callable[[tables.BatchDeleteRowsRequest], empty.Empty]:
+ r"""Return a callable for the batch delete rows method over gRPC.
+
+ Deletes multiple rows.
+
+ Returns:
+ Callable[[~.BatchDeleteRowsRequest],
+ ~.Empty]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "batch_delete_rows" not in self._stubs:
+ self._stubs["batch_delete_rows"] = self.grpc_channel.unary_unary(
+ "/google.area120.tables.v1alpha1.TablesService/BatchDeleteRows",
+ request_serializer=tables.BatchDeleteRowsRequest.serialize,
+ response_deserializer=empty.Empty.FromString,
+ )
+ return self._stubs["batch_delete_rows"]
+
__all__ = ("TablesServiceGrpcTransport",)
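For review purposes, the credential precedence the rewritten constructor implements can be summarized as a small sketch (hypothetical helper, not part of the patch): an explicit channel wins outright, then ``ssl_channel_credentials``, then ``client_cert_source_for_mtls``, else default channel security.

```python
import grpc

def resolve_ssl(channel=None, ssl_channel_credentials=None,
                client_cert_source_for_mtls=None):
    if channel is not None:
        return None  # a caller-supplied channel already carries its security
    if ssl_channel_credentials is not None:
        return ssl_channel_credentials
    if client_cert_source_for_mtls is not None:
        cert, key = client_cert_source_for_mtls()
        return grpc.ssl_channel_credentials(
            certificate_chain=cert, private_key=key
        )
    return None
```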
diff --git a/google/area120/tables_v1alpha1/services/tables_service/transports/grpc_asyncio.py b/google/area120/tables_v1alpha1/services/tables_service/transports/grpc_asyncio.py
index 84698f4..45a9fc9 100644
--- a/google/area120/tables_v1alpha1/services/tables_service/transports/grpc_asyncio.py
+++ b/google/area120/tables_v1alpha1/services/tables_service/transports/grpc_asyncio.py
@@ -48,6 +48,10 @@ class TablesServiceGrpcAsyncIOTransport(TablesServiceTransport):
[Row][google.area120.tables.v1alpha1.Row] resources, named
``tables/*/rows/*``
+ - The API has a collection of
+ [Workspace][google.area120.tables.v1alpha1.Workspace] resources,
+ named ``workspaces/*``.
+
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
@@ -71,7 +75,7 @@ def create_channel(
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
- address (Optional[str]): The host for the channel to use.
+ host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
@@ -111,6 +115,7 @@ def __init__(
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
@@ -142,12 +147,16 @@ def __init__(
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
Raises:
@@ -156,72 +165,60 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
+ self._stubs: Dict[str, Callable] = {}
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
- # Sanity check: Ensure that channel and credentials are not both
- # provided.
+ # Ignore credentials if a channel was passed.
credentials = False
-
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
- elif api_mtls_endpoint:
- warnings.warn(
- "api_mtls_endpoint and client_cert_source are deprecated",
- DeprecationWarning,
- )
- host = (
- api_mtls_endpoint
- if ":" in api_mtls_endpoint
- else api_mtls_endpoint + ":443"
- )
+ else:
+ if api_mtls_endpoint:
+ host = api_mtls_endpoint
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
- if credentials is None:
- credentials, _ = auth.default(
- scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
- )
-
- # Create SSL credentials with client_cert_source or application
- # default SSL credentials.
- if client_cert_source:
- cert, key = client_cert_source()
- ssl_credentials = grpc.ssl_channel_credentials(
- certificate_chain=cert, private_key=key
- )
else:
- ssl_credentials = SslCredentials().ssl_credentials
-
- # create a new channel. The provided one is ignored.
- self._grpc_channel = type(self).create_channel(
- host,
- credentials=credentials,
- credentials_file=credentials_file,
- ssl_credentials=ssl_credentials,
- scopes=scopes or self.AUTH_SCOPES,
- quota_project_id=quota_project_id,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
- self._ssl_channel_credentials = ssl_credentials
- else:
- host = host if ":" in host else host + ":443"
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
- if credentials is None:
- credentials, _ = auth.default(
- scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
- )
+ # The base transport sets the host, credentials, and scopes.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
- # create a new channel. The provided one is ignored.
+ if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
- host,
- credentials=credentials,
+ self._host,
+ credentials=self._credentials,
credentials_file=credentials_file,
- ssl_credentials=ssl_channel_credentials,
- scopes=scopes or self.AUTH_SCOPES,
+ scopes=self._scopes,
+ ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
@@ -229,17 +226,8 @@ def __init__(
],
)
- # Run the base constructor.
- super().__init__(
- host=host,
- credentials=credentials,
- credentials_file=credentials_file,
- scopes=scopes or self.AUTH_SCOPES,
- quota_project_id=quota_project_id,
- client_info=client_info,
- )
-
- self._stubs = {}
+ # Wrap messages. This must be done after self._grpc_channel exists.
+ self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
@@ -301,6 +289,61 @@ def list_tables(
)
return self._stubs["list_tables"]
+ @property
+ def get_workspace(
+ self,
+ ) -> Callable[[tables.GetWorkspaceRequest], Awaitable[tables.Workspace]]:
+ r"""Return a callable for the get workspace method over gRPC.
+
+ Gets a workspace. Returns NOT_FOUND if the workspace does not
+ exist.
+
+ Returns:
+ Callable[[~.GetWorkspaceRequest],
+ Awaitable[~.Workspace]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_workspace" not in self._stubs:
+ self._stubs["get_workspace"] = self.grpc_channel.unary_unary(
+ "/google.area120.tables.v1alpha1.TablesService/GetWorkspace",
+ request_serializer=tables.GetWorkspaceRequest.serialize,
+ response_deserializer=tables.Workspace.deserialize,
+ )
+ return self._stubs["get_workspace"]
+
+ @property
+ def list_workspaces(
+ self,
+ ) -> Callable[
+ [tables.ListWorkspacesRequest], Awaitable[tables.ListWorkspacesResponse]
+ ]:
+ r"""Return a callable for the list workspaces method over gRPC.
+
+ Lists workspaces for the user.
+
+ Returns:
+ Callable[[~.ListWorkspacesRequest],
+ Awaitable[~.ListWorkspacesResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_workspaces" not in self._stubs:
+ self._stubs["list_workspaces"] = self.grpc_channel.unary_unary(
+ "/google.area120.tables.v1alpha1.TablesService/ListWorkspaces",
+ request_serializer=tables.ListWorkspacesRequest.serialize,
+ response_deserializer=tables.ListWorkspacesResponse.deserialize,
+ )
+ return self._stubs["list_workspaces"]
+
@property
def get_row(self) -> Callable[[tables.GetRowRequest], Awaitable[tables.Row]]:
r"""Return a callable for the get row method over gRPC.
@@ -481,5 +524,31 @@ def delete_row(self) -> Callable[[tables.DeleteRowRequest], Awaitable[empty.Empt
)
return self._stubs["delete_row"]
+ @property
+ def batch_delete_rows(
+ self,
+ ) -> Callable[[tables.BatchDeleteRowsRequest], Awaitable[empty.Empty]]:
+ r"""Return a callable for the batch delete rows method over gRPC.
+
+ Deletes multiple rows.
+
+ Returns:
+ Callable[[~.BatchDeleteRowsRequest],
+ Awaitable[~.Empty]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "batch_delete_rows" not in self._stubs:
+ self._stubs["batch_delete_rows"] = self.grpc_channel.unary_unary(
+ "/google.area120.tables.v1alpha1.TablesService/BatchDeleteRows",
+ request_serializer=tables.BatchDeleteRowsRequest.serialize,
+ response_deserializer=empty.Empty.FromString,
+ )
+ return self._stubs["batch_delete_rows"]
+
__all__ = ("TablesServiceGrpcAsyncIOTransport",)
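The lazy stub-caching pattern both transports rely on, as an illustrative sketch with simplified names: each RPC stub is created on first property access and memoized, so later calls reuse one callable bound to the channel.

```python
class _StubCache:
    def __init__(self, channel):
        self._channel = channel
        self._stubs = {}

    def _get(self, name, make_stub):
        # Build the stub once; subsequent lookups hit the cache.
        if name not in self._stubs:
            self._stubs[name] = make_stub(self._channel)
        return self._stubs[name]
```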
diff --git a/google/area120/tables_v1alpha1/types/__init__.py b/google/area120/tables_v1alpha1/types/__init__.py
index 0c72ff3..c55da2e 100644
--- a/google/area120/tables_v1alpha1/types/__init__.py
+++ b/google/area120/tables_v1alpha1/types/__init__.py
@@ -16,41 +16,57 @@
#
from .tables import (
- GetTableRequest,
- ListTablesRequest,
- ListTablesResponse,
- GetRowRequest,
- ListRowsRequest,
- ListRowsResponse,
- CreateRowRequest,
BatchCreateRowsRequest,
BatchCreateRowsResponse,
- UpdateRowRequest,
+ BatchDeleteRowsRequest,
BatchUpdateRowsRequest,
BatchUpdateRowsResponse,
- DeleteRowRequest,
- Table,
ColumnDescription,
+ CreateRowRequest,
+ DeleteRowRequest,
+ GetRowRequest,
+ GetTableRequest,
+ GetWorkspaceRequest,
+ LabeledItem,
+ ListRowsRequest,
+ ListRowsResponse,
+ ListTablesRequest,
+ ListTablesResponse,
+ ListWorkspacesRequest,
+ ListWorkspacesResponse,
+ LookupDetails,
+ RelationshipDetails,
Row,
+ Table,
+ UpdateRowRequest,
+ Workspace,
View,
)
__all__ = (
- "GetTableRequest",
- "ListTablesRequest",
- "ListTablesResponse",
- "GetRowRequest",
- "ListRowsRequest",
- "ListRowsResponse",
- "CreateRowRequest",
"BatchCreateRowsRequest",
"BatchCreateRowsResponse",
- "UpdateRowRequest",
+ "BatchDeleteRowsRequest",
"BatchUpdateRowsRequest",
"BatchUpdateRowsResponse",
- "DeleteRowRequest",
- "Table",
"ColumnDescription",
+ "CreateRowRequest",
+ "DeleteRowRequest",
+ "GetRowRequest",
+ "GetTableRequest",
+ "GetWorkspaceRequest",
+ "LabeledItem",
+ "ListRowsRequest",
+ "ListRowsResponse",
+ "ListTablesRequest",
+ "ListTablesResponse",
+ "ListWorkspacesRequest",
+ "ListWorkspacesResponse",
+ "LookupDetails",
+ "RelationshipDetails",
"Row",
+ "Table",
+ "UpdateRowRequest",
+ "Workspace",
"View",
)
diff --git a/google/area120/tables_v1alpha1/types/tables.py b/google/area120/tables_v1alpha1/types/tables.py
index c3381d5..b37d92c 100644
--- a/google/area120/tables_v1alpha1/types/tables.py
+++ b/google/area120/tables_v1alpha1/types/tables.py
@@ -29,6 +29,9 @@
"GetTableRequest",
"ListTablesRequest",
"ListTablesResponse",
+ "GetWorkspaceRequest",
+ "ListWorkspacesRequest",
+ "ListWorkspacesResponse",
"GetRowRequest",
"ListRowsRequest",
"ListRowsResponse",
@@ -39,9 +42,14 @@
"BatchUpdateRowsRequest",
"BatchUpdateRowsResponse",
"DeleteRowRequest",
+ "BatchDeleteRowsRequest",
"Table",
"ColumnDescription",
+ "LabeledItem",
+ "RelationshipDetails",
+ "LookupDetails",
"Row",
+ "Workspace",
},
)
@@ -93,7 +101,7 @@ class ListTablesResponse(proto.Message):
r"""Response message for TablesService.ListTables.
Attributes:
- tables (Sequence[~.gat_tables.Table]):
+ tables (Sequence[google.area120.tables_v1alpha1.types.Table]):
The list of tables.
next_page_token (str):
A token, which can be sent as ``page_token`` to retrieve the
@@ -110,6 +118,63 @@ def raw_page(self):
next_page_token = proto.Field(proto.STRING, number=2)
+class GetWorkspaceRequest(proto.Message):
+ r"""Request message for TablesService.GetWorkspace.
+
+ Attributes:
+ name (str):
+ Required. The name of the workspace to
+ retrieve. Format: workspaces/{workspace}
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class ListWorkspacesRequest(proto.Message):
+ r"""Request message for TablesService.ListWorkspaces.
+
+ Attributes:
+ page_size (int):
+ The maximum number of workspaces to return.
+ The service may return fewer than this value.
+ If unspecified, at most 10 workspaces are
+ returned. The maximum value is 25; values above
+ 25 are coerced to 25.
+ page_token (str):
+ A page token, received from a previous ``ListWorkspaces``
+ call. Provide this to retrieve the subsequent page.
+
+ When paginating, all other parameters provided to
+ ``ListWorkspaces`` must match the call that provided the
+ page token.
+ """
+
+ page_size = proto.Field(proto.INT32, number=1)
+
+ page_token = proto.Field(proto.STRING, number=2)
+
+
+class ListWorkspacesResponse(proto.Message):
+ r"""Response message for TablesService.ListWorkspaces.
+
+ Attributes:
+ workspaces (Sequence[google.area120.tables_v1alpha1.types.Workspace]):
+ The list of workspaces.
+ next_page_token (str):
+ A token, which can be sent as ``page_token`` to retrieve the
+ next page. If this field is empty, there are no subsequent
+ pages.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ workspaces = proto.RepeatedField(proto.MESSAGE, number=1, message="Workspace",)
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
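A sketch of the token-following loop these messages enable; ``rpc`` is a hypothetical callable with the transport's ``list_workspaces`` signature, and the pagers earlier in this change automate exactly this:

```python
from google.area120.tables_v1alpha1.types import tables

def iter_workspaces(rpc):
    request = tables.ListWorkspacesRequest(page_size=25)  # 25 is the documented max
    while True:
        response = rpc(request)
        yield from response.workspaces
        if not response.next_page_token:
            return
        request.page_token = response.next_page_token
```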
class GetRowRequest(proto.Message):
r"""Request message for TablesService.GetRow.
@@ -117,7 +182,7 @@ class GetRowRequest(proto.Message):
name (str):
Required. The name of the row to retrieve.
Format: tables/{table}/rows/{row}
- view (~.gat_tables.View):
+ view (google.area120.tables_v1alpha1.types.View):
Optional. Column key to use for values in the
row. Defaults to user entered name.
"""
@@ -148,9 +213,14 @@ class ListRowsRequest(proto.Message):
When paginating, all other parameters provided to
``ListRows`` must match the call that provided the page
token.
- view (~.gat_tables.View):
+ view (google.area120.tables_v1alpha1.types.View):
Optional. Column key to use for values in the
row. Defaults to user entered name.
+ filter (str):
+ Optional. Raw text query to search for in
+ rows of the table. Special characters must be
+ escaped. Logical operators and field-specific
+ filtering are not supported.
"""
parent = proto.Field(proto.STRING, number=1)
@@ -161,12 +231,14 @@ class ListRowsRequest(proto.Message):
view = proto.Field(proto.ENUM, number=4, enum="View",)
+ filter = proto.Field(proto.STRING, number=5)
+
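Illustrative request construction for the new field (table name hypothetical):

```python
from google.area120.tables_v1alpha1.types import tables

# Raw text search only: no logical operators or field-specific syntax,
# and special characters must be escaped.
request = tables.ListRowsRequest(parent="tables/my-table", filter="overdue")
```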
class ListRowsResponse(proto.Message):
r"""Response message for TablesService.ListRows.
Attributes:
- rows (Sequence[~.gat_tables.Row]):
+ rows (Sequence[google.area120.tables_v1alpha1.types.Row]):
The rows from the specified table.
next_page_token (str):
A token, which can be sent as ``page_token`` to retrieve the
@@ -190,9 +262,9 @@ class CreateRowRequest(proto.Message):
parent (str):
Required. The parent table where this row
will be created. Format: tables/{table}
- row (~.gat_tables.Row):
+ row (google.area120.tables_v1alpha1.types.Row):
Required. The row to create.
- view (~.gat_tables.View):
+ view (google.area120.tables_v1alpha1.types.View):
Optional. Column key to use for values in the
row. Defaults to user entered name.
"""
@@ -211,7 +283,7 @@ class BatchCreateRowsRequest(proto.Message):
parent (str):
Required. The parent table where the rows
will be created. Format: tables/{table}
- requests (Sequence[~.gat_tables.CreateRowRequest]):
+ requests (Sequence[google.area120.tables_v1alpha1.types.CreateRowRequest]):
Required. The request message specifying the
rows to create.
A maximum of 500 rows can be created in a single
@@ -227,7 +299,7 @@ class BatchCreateRowsResponse(proto.Message):
r"""Response message for TablesService.BatchCreateRows.
Attributes:
- rows (Sequence[~.gat_tables.Row]):
+ rows (Sequence[google.area120.tables_v1alpha1.types.Row]):
The created rows.
"""
@@ -238,11 +310,11 @@ class UpdateRowRequest(proto.Message):
r"""Request message for TablesService.UpdateRow.
Attributes:
- row (~.gat_tables.Row):
+ row (google.area120.tables_v1alpha1.types.Row):
Required. The row to update.
- update_mask (~.field_mask.FieldMask):
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
The list of fields to update.
- view (~.gat_tables.View):
+ view (google.area120.tables_v1alpha1.types.View):
Optional. Column key to use for values in the
row. Defaults to user entered name.
"""
@@ -261,7 +333,7 @@ class BatchUpdateRowsRequest(proto.Message):
parent (str):
Required. The parent table shared by all rows
being updated. Format: tables/{table}
- requests (Sequence[~.gat_tables.UpdateRowRequest]):
+ requests (Sequence[google.area120.tables_v1alpha1.types.UpdateRowRequest]):
Required. The request messages specifying the
rows to update.
A maximum of 500 rows can be modified in a
@@ -277,7 +349,7 @@ class BatchUpdateRowsResponse(proto.Message):
r"""Response message for TablesService.BatchUpdateRows.
Attributes:
- rows (Sequence[~.gat_tables.Row]):
+ rows (Sequence[google.area120.tables_v1alpha1.types.Row]):
The updated rows.
"""
@@ -296,6 +368,26 @@ class DeleteRowRequest(proto.Message):
name = proto.Field(proto.STRING, number=1)
+class BatchDeleteRowsRequest(proto.Message):
+ r"""Request message for TablesService.BatchDeleteRows
+
+ Attributes:
+ parent (str):
+ Required. The parent table shared by all rows
+ being deleted. Format: tables/{table}
+ names (Sequence[str]):
+ Required. The names of the rows to delete.
+ All rows must belong to the parent table or else
+ the entire batch will fail. A maximum of 500
+ rows can be deleted in a batch.
+ Format: tables/{table}/rows/{row}
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ names = proto.RepeatedField(proto.STRING, number=2)
+
+
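Illustrative construction of the new message (table and row names hypothetical):

```python
from google.area120.tables_v1alpha1.types import tables

# Row names are full resource names under one parent table; at most 500
# rows may be deleted per batch.
request = tables.BatchDeleteRowsRequest(
    parent="tables/my-table",
    names=[f"tables/my-table/rows/row-{i}" for i in range(3)],
)
```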
class Table(proto.Message):
r"""A single table.
@@ -305,7 +397,7 @@ class Table(proto.Message):
``tables/{table}``.
display_name (str):
The human readable title of the table.
- columns (Sequence[~.gat_tables.ColumnDescription]):
+ columns (Sequence[google.area120.tables_v1alpha1.types.ColumnDescription]):
List of columns in this table.
Order of columns matches the display order.
"""
@@ -324,10 +416,31 @@ class ColumnDescription(proto.Message):
name (str):
column name
data_type (str):
- Data type of the column Supported types are number, text,
- boolean, number_list, text_list, boolean_list.
+ Data type of the column. Supported types are auto_id,
+ boolean, boolean_list, creator, create_timestamp, date,
+ dropdown, location, integer, integer_list, number,
+ number_list, person, person_list, tags, check_list, text,
+ text_list, update_timestamp, updater, relationship,
+ file_attachment_list. These types directly map to the column
+ types supported on the Tables website.
id (str):
Internal id for a column.
+ labels (Sequence[google.area120.tables_v1alpha1.types.LabeledItem]):
+ Optional. Range of labeled values for the
+ column. Some columns like tags and drop-downs
+ limit the values to a set of possible values. We
+ return the range of values in such cases to help
+ clients implement better user data validation.
+ relationship_details (google.area120.tables_v1alpha1.types.RelationshipDetails):
+ Optional. Additional details about a relationship column.
+ Specified when data_type is relationship.
+ lookup_details (google.area120.tables_v1alpha1.types.LookupDetails):
+ Optional. Indicates that this is a lookup
+ column whose value is derived from the
+ relationship column specified in the details.
+ Lookup columns cannot be updated directly. To
+ change the value you must update the associated
+ relationship column.
"""
name = proto.Field(proto.STRING, number=1)
@@ -336,6 +449,58 @@ class ColumnDescription(proto.Message):
id = proto.Field(proto.STRING, number=3)
+ labels = proto.RepeatedField(proto.MESSAGE, number=4, message="LabeledItem",)
+
+ relationship_details = proto.Field(
+ proto.MESSAGE, number=5, message="RelationshipDetails",
+ )
+
+ lookup_details = proto.Field(proto.MESSAGE, number=6, message="LookupDetails",)
+
+
+class LabeledItem(proto.Message):
+ r"""A single item in a labeled column.
+
+ Attributes:
+ name (str):
+ Display string as entered by user.
+ id (str):
+ Internal id associated with the item.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ id = proto.Field(proto.STRING, number=2)
+
+
+class RelationshipDetails(proto.Message):
+ r"""Details about a relationship column.
+
+ Attributes:
+ linked_table (str):
+ The name of the table this relationship is
+ linked to.
+ """
+
+ linked_table = proto.Field(proto.STRING, number=1)
+
+
+class LookupDetails(proto.Message):
+ r"""Details about a lookup column whose value comes from the
+ associated relationship.
+
+ Attributes:
+ relationship_column (str):
+ The name of the relationship column
+ associated with the lookup.
+ relationship_column_id (str):
+ The id of the relationship column.
+ """
+
+ relationship_column = proto.Field(proto.STRING, number=1)
+
+ relationship_column_id = proto.Field(proto.STRING, number=2)
+
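A sketch of consuming the richer column metadata; ``table`` is assumed to be a previously fetched ``Table``:

```python
for column in table.columns:
    if column.data_type == "relationship":
        print(column.name, "links to", column.relationship_details.linked_table)
    if column.lookup_details.relationship_column:
        # Lookup columns are read-only; update the relationship column instead.
        print(column.name, "derives from", column.lookup_details.relationship_column)
    for item in column.labels:
        print(column.name, "allows:", item.name)
```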
class Row(proto.Message):
r"""A single row in a table.
@@ -345,7 +510,7 @@ class Row(proto.Message):
The resource name of the row. Row names have the form
``tables/{table}/rows/{row}``. The name is ignored when
creating a row.
- values (Sequence[~.gat_tables.Row.ValuesEntry]):
+ values (Sequence[google.area120.tables_v1alpha1.types.Row.ValuesEntry]):
The values of the row. This is a map of
column key to value. Key is user entered
name(default) or the internal column id based on
@@ -359,4 +524,24 @@ class Row(proto.Message):
)
+class Workspace(proto.Message):
+ r"""A single workspace.
+
+ Attributes:
+ name (str):
+ The resource name of the workspace. Workspace names have the
+ form ``workspaces/{workspace}``.
+ display_name (str):
+ The human readable title of the workspace.
+ tables (Sequence[google.area120.tables_v1alpha1.types.Table]):
+ The list of tables in the workspace.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ tables = proto.RepeatedField(proto.MESSAGE, number=3, message="Table",)
+
+
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/noxfile.py b/noxfile.py
index a57e24b..d64128b 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -18,6 +18,7 @@
from __future__ import absolute_import
import os
+import pathlib
import shutil
import nox
@@ -30,6 +31,22 @@
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
+nox.options.sessions = [
+ "unit",
+ "system",
+ "cover",
+ "lint",
+ "lint_setup_py",
+ "blacken",
+ "docs",
+]
+
+# Error if a python version is missing
+nox.options.error_on_missing_interpreters = True
+
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
@@ -70,18 +87,22 @@ def lint_setup_py(session):
def default(session):
# Install all test dependencies, then install this package in-place.
- session.install("asyncmock", "pytest-asyncio")
- session.install(
- "mock", "pytest", "pytest-cov",
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
)
- session.install("-e", ".")
+ session.install("asyncmock", "pytest-asyncio", "-c", constraints_path)
+
+ session.install("mock", "pytest", "pytest-cov", "-c", constraints_path)
+
+ session.install("-e", ".", "-c", constraints_path)
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
- "--cov=google/cloud",
+ f"--junitxml=unit_{session.python}_sponge_log.xml",
+ "--cov=google/area120",
"--cov=tests/unit",
"--cov-append",
"--cov-config=.coveragerc",
@@ -101,6 +122,9 @@ def unit(session):
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
"""Run the system test suite."""
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
@@ -110,6 +134,9 @@ def system(session):
# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable")
+ # Install pyopenssl for mTLS testing.
+ if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
+ session.install("pyopenssl")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
@@ -122,16 +149,26 @@ def system(session):
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
- session.install(
- "mock", "pytest", "google-cloud-testutils",
- )
- session.install("-e", ".")
+ session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
+ session.install("-e", ".", "-c", constraints_path)
# Run py.test against the system tests.
if system_test_exists:
- session.run("py.test", "--quiet", system_test_path, *session.posargs)
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_path,
+ *session.posargs,
+ )
if system_test_folder_exists:
- session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_folder_path,
+ *session.posargs,
+ )
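The per-interpreter constraints convention the sessions now share, as a small sketch (helper name hypothetical):

```python
import pathlib

def constraints_for(python_version, root=pathlib.Path(__file__).parent):
    # Every install is pinned against testing/constraints-<py>.txt so unit
    # and system runs exercise the lowest supported dependency versions.
    return str(root / "testing" / f"constraints-{python_version}.txt")

# e.g. session.install("mock", "pytest", "-c", constraints_for(session.python))
```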
@nox.session(python=DEFAULT_PYTHON_VERSION)
@@ -142,7 +179,7 @@ def cover(session):
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
- session.run("coverage", "report", "--show-missing", "--fail-under=99")
+ session.run("coverage", "report", "--show-missing", "--fail-under=98")
session.run("coverage", "erase")
@@ -174,9 +211,7 @@ def docfx(session):
"""Build the docfx yaml files for this library."""
session.install("-e", ".")
- # sphinx-docfx-yaml supports up to sphinx version 1.5.5.
- # https://github.com/docascode/sphinx-docfx-yaml/issues/97
- session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml")
+ session.install("sphinx", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
diff --git a/renovate.json b/renovate.json
index 4fa9493..f08bc22 100644
--- a/renovate.json
+++ b/renovate.json
@@ -1,5 +1,6 @@
{
"extends": [
"config:base", ":preserveSemverRanges"
- ]
+ ],
+ "ignorePaths": [".pre-commit-config.yaml"]
}
diff --git a/setup.py b/setup.py
index 351c998..c19f5ff 100644
--- a/setup.py
+++ b/setup.py
@@ -41,8 +41,7 @@
platforms="Posix; MacOS X; Windows",
include_package_data=True,
install_requires=(
- "google-api-core[grpc] >= 1.22.0, < 2.0.0dev",
- "libcst >= 0.2.5",
+ "google-api-core[grpc] >= 1.22.2, < 2.0.0dev",
"proto-plus >= 1.4.0",
),
python_requires=">=3.6",
diff --git a/synth.metadata b/synth.metadata
index 79446d3..5ec4e92 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -3,30 +3,30 @@
{
"git": {
"name": ".",
- "remote": "https://github.com/googleapis/python-area120-tables.git",
- "sha": "0f2dc5f4501f446e666c83f9e90ae17798b3a0f3"
+ "remote": "git@github.com:googleapis/python-area120-tables",
+ "sha": "60737592b370e6cd7d111188552731913dab4d55"
}
},
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
- "sha": "dd372aa22ded7a8ba6f0e03a80e06358a3fa0907",
- "internalRef": "347055288"
+ "sha": "8ff7d794576311d3d68d4df2ac6da93bbfcd7476",
+ "internalRef": "366472163"
}
},
{
"git": {
"name": "synthtool",
"remote": "https://github.com/googleapis/synthtool.git",
- "sha": "41a4e56982620d3edcf110d76f4fcdfdec471ac8"
+ "sha": "ff39353f34a36e7643b86e97724e4027ab466dc6"
}
},
{
"git": {
"name": "synthtool",
"remote": "https://github.com/googleapis/synthtool.git",
- "sha": "41a4e56982620d3edcf110d76f4fcdfdec471ac8"
+ "sha": "ff39353f34a36e7643b86e97724e4027ab466dc6"
}
}
],
@@ -40,92 +40,5 @@
"generator": "bazel"
}
}
- ],
- "generatedFiles": [
- ".flake8",
- ".github/CONTRIBUTING.md",
- ".github/ISSUE_TEMPLATE/bug_report.md",
- ".github/ISSUE_TEMPLATE/feature_request.md",
- ".github/ISSUE_TEMPLATE/support_request.md",
- ".github/PULL_REQUEST_TEMPLATE.md",
- ".github/release-please.yml",
- ".github/snippet-bot.yml",
- ".gitignore",
- ".kokoro/build.sh",
- ".kokoro/continuous/common.cfg",
- ".kokoro/continuous/continuous.cfg",
- ".kokoro/docker/docs/Dockerfile",
- ".kokoro/docker/docs/fetch_gpg_keys.sh",
- ".kokoro/docs/common.cfg",
- ".kokoro/docs/docs-presubmit.cfg",
- ".kokoro/docs/docs.cfg",
- ".kokoro/populate-secrets.sh",
- ".kokoro/presubmit/common.cfg",
- ".kokoro/presubmit/presubmit.cfg",
- ".kokoro/publish-docs.sh",
- ".kokoro/release.sh",
- ".kokoro/release/common.cfg",
- ".kokoro/release/release.cfg",
- ".kokoro/samples/lint/common.cfg",
- ".kokoro/samples/lint/continuous.cfg",
- ".kokoro/samples/lint/periodic.cfg",
- ".kokoro/samples/lint/presubmit.cfg",
- ".kokoro/samples/python3.6/common.cfg",
- ".kokoro/samples/python3.6/continuous.cfg",
- ".kokoro/samples/python3.6/periodic.cfg",
- ".kokoro/samples/python3.6/presubmit.cfg",
- ".kokoro/samples/python3.7/common.cfg",
- ".kokoro/samples/python3.7/continuous.cfg",
- ".kokoro/samples/python3.7/periodic.cfg",
- ".kokoro/samples/python3.7/presubmit.cfg",
- ".kokoro/samples/python3.8/common.cfg",
- ".kokoro/samples/python3.8/continuous.cfg",
- ".kokoro/samples/python3.8/periodic.cfg",
- ".kokoro/samples/python3.8/presubmit.cfg",
- ".kokoro/test-samples.sh",
- ".kokoro/trampoline.sh",
- ".kokoro/trampoline_v2.sh",
- ".pre-commit-config.yaml",
- ".trampolinerc",
- "CODE_OF_CONDUCT.md",
- "CONTRIBUTING.rst",
- "LICENSE",
- "MANIFEST.in",
- "area120-tables-v1alpha1-py.tar.gz",
- "docs/_static/custom.css",
- "docs/_templates/layout.html",
- "docs/conf.py",
- "docs/multiprocessing.rst",
- "docs/tables_v1alpha1/services.rst",
- "docs/tables_v1alpha1/types.rst",
- "google/area120/tables/__init__.py",
- "google/area120/tables/py.typed",
- "google/area120/tables_v1alpha1/__init__.py",
- "google/area120/tables_v1alpha1/py.typed",
- "google/area120/tables_v1alpha1/services/__init__.py",
- "google/area120/tables_v1alpha1/services/tables_service/__init__.py",
- "google/area120/tables_v1alpha1/services/tables_service/async_client.py",
- "google/area120/tables_v1alpha1/services/tables_service/client.py",
- "google/area120/tables_v1alpha1/services/tables_service/pagers.py",
- "google/area120/tables_v1alpha1/services/tables_service/transports/__init__.py",
- "google/area120/tables_v1alpha1/services/tables_service/transports/base.py",
- "google/area120/tables_v1alpha1/services/tables_service/transports/grpc.py",
- "google/area120/tables_v1alpha1/services/tables_service/transports/grpc_asyncio.py",
- "google/area120/tables_v1alpha1/types/__init__.py",
- "google/area120/tables_v1alpha1/types/tables.py",
- "mypy.ini",
- "noxfile.py",
- "renovate.json",
- "scripts/decrypt-secrets.sh",
- "scripts/readme-gen/readme_gen.py",
- "scripts/readme-gen/templates/README.tmpl.rst",
- "scripts/readme-gen/templates/auth.tmpl.rst",
- "scripts/readme-gen/templates/auth_api_key.tmpl.rst",
- "scripts/readme-gen/templates/install_deps.tmpl.rst",
- "scripts/readme-gen/templates/install_portaudio.tmpl.rst",
- "setup.cfg",
- "testing/.gitignore",
- "tests/unit/gapic/tables_v1alpha1/__init__.py",
- "tests/unit/gapic/tables_v1alpha1/test_tables_service.py"
]
}
\ No newline at end of file
diff --git a/synth.py b/synth.py
index 32b68ac..c9ae019 100644
--- a/synth.py
+++ b/synth.py
@@ -44,7 +44,7 @@
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
-templated_files = common.py_library(cov_level=99, microgenerator=True)
+templated_files = common.py_library(cov_level=98, microgenerator=True)
s.move(
templated_files, excludes=[".coveragerc"]
) # the microgenerator has a good coveragerc file
@@ -52,10 +52,8 @@
# fix coverage target
s.replace(
"noxfile.py",
- """["']--cov=google.cloud.area120tables",
-(\s+)[""]--cov=google.cloud["'],""",
- """"--cov=google.area120.tables",
-\g<1>"--cov=google.area120",""",
+ """[""]--cov=google/cloud["'],""",
+ '''"--cov=google/area120",''',
)
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
diff --git a/testing/constraints-3.6.txt b/testing/constraints-3.6.txt
index e4e5e44..a37a34a 100644
--- a/testing/constraints-3.6.txt
+++ b/testing/constraints-3.6.txt
@@ -5,6 +5,5 @@
#
# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev",
# Then this file should have foo==1.14.0
-google-api-core==1.22.0
-libcst==0.2.5
-proto-plus==1.4.0
\ No newline at end of file
+google-api-core==1.22.2
+proto-plus==1.4.0
diff --git a/area120-tables-v1alpha1-py.tar.gz b/testing/constraints-3.9.txt
similarity index 100%
rename from area120-tables-v1alpha1-py.tar.gz
rename to testing/constraints-3.9.txt
diff --git a/tests/unit/gapic/tables_v1alpha1/__init__.py b/tests/unit/gapic/tables_v1alpha1/__init__.py
index 8b13789..42ffdf2 100644
--- a/tests/unit/gapic/tables_v1alpha1/__init__.py
+++ b/tests/unit/gapic/tables_v1alpha1/__init__.py
@@ -1 +1,16 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/tests/unit/gapic/tables_v1alpha1/test_tables_service.py b/tests/unit/gapic/tables_v1alpha1/test_tables_service.py
index 8c66ef5..68bca60 100644
--- a/tests/unit/gapic/tables_v1alpha1/test_tables_service.py
+++ b/tests/unit/gapic/tables_v1alpha1/test_tables_service.py
@@ -89,7 +89,24 @@ def test__get_default_mtls_endpoint():
@pytest.mark.parametrize(
- "client_class", [TablesServiceClient, TablesServiceAsyncClient]
+ "client_class", [TablesServiceClient, TablesServiceAsyncClient,]
+)
+def test_tables_service_client_from_service_account_info(client_class):
+ creds = credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_info"
+ ) as factory:
+ factory.return_value = creds
+ info = {"valid": True}
+ client = client_class.from_service_account_info(info)
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == "area120tables.googleapis.com:443"
+
+
+@pytest.mark.parametrize(
+ "client_class", [TablesServiceClient, TablesServiceAsyncClient,]
)
def test_tables_service_client_from_service_account_file(client_class):
creds = credentials.AnonymousCredentials()
@@ -99,16 +116,21 @@ def test_tables_service_client_from_service_account_file(client_class):
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
assert client.transport._host == "area120tables.googleapis.com:443"
def test_tables_service_client_get_transport_class():
transport = TablesServiceClient.get_transport_class()
- assert transport == transports.TablesServiceGrpcTransport
+ available_transports = [
+ transports.TablesServiceGrpcTransport,
+ ]
+ assert transport in available_transports
transport = TablesServiceClient.get_transport_class("grpc")
assert transport == transports.TablesServiceGrpcTransport
@@ -159,7 +181,7 @@ def test_tables_service_client_client_options(
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
- ssl_channel_credentials=None,
+ client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@@ -175,7 +197,7 @@ def test_tables_service_client_client_options(
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
- ssl_channel_credentials=None,
+ client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@@ -191,7 +213,7 @@ def test_tables_service_client_client_options(
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
- ssl_channel_credentials=None,
+ client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@@ -219,7 +241,7 @@ def test_tables_service_client_client_options(
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
- ssl_channel_credentials=None,
+ client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@@ -270,29 +292,25 @@ def test_tables_service_client_mtls_env_auto(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
- ssl_channel_creds = mock.Mock()
- with mock.patch(
- "grpc.ssl_channel_credentials", return_value=ssl_channel_creds
- ):
- patched.return_value = None
- client = client_class(client_options=options)
+ patched.return_value = None
+ client = client_class(client_options=options)
- if use_client_cert_env == "false":
- expected_ssl_channel_creds = None
- expected_host = client.DEFAULT_ENDPOINT
- else:
- expected_ssl_channel_creds = ssl_channel_creds
- expected_host = client.DEFAULT_MTLS_ENDPOINT
+ if use_client_cert_env == "false":
+ expected_client_cert_source = None
+ expected_host = client.DEFAULT_ENDPOINT
+ else:
+ expected_client_cert_source = client_cert_source_callback
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
- patched.assert_called_once_with(
- credentials=None,
- credentials_file=None,
- host=expected_host,
- scopes=None,
- ssl_channel_credentials=expected_ssl_channel_creds,
- quota_project_id=None,
- client_info=transports.base.DEFAULT_CLIENT_INFO,
- )
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
@@ -301,66 +319,53 @@ def test_tables_service_client_mtls_env_auto(
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
- "google.auth.transport.grpc.SslCredentials.__init__", return_value=None
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
):
with mock.patch(
- "google.auth.transport.grpc.SslCredentials.is_mtls",
- new_callable=mock.PropertyMock,
- ) as is_mtls_mock:
- with mock.patch(
- "google.auth.transport.grpc.SslCredentials.ssl_credentials",
- new_callable=mock.PropertyMock,
- ) as ssl_credentials_mock:
- if use_client_cert_env == "false":
- is_mtls_mock.return_value = False
- ssl_credentials_mock.return_value = None
- expected_host = client.DEFAULT_ENDPOINT
- expected_ssl_channel_creds = None
- else:
- is_mtls_mock.return_value = True
- ssl_credentials_mock.return_value = mock.Mock()
- expected_host = client.DEFAULT_MTLS_ENDPOINT
- expected_ssl_channel_creds = (
- ssl_credentials_mock.return_value
- )
-
- patched.return_value = None
- client = client_class()
- patched.assert_called_once_with(
- credentials=None,
- credentials_file=None,
- host=expected_host,
- scopes=None,
- ssl_channel_credentials=expected_ssl_channel_creds,
- quota_project_id=None,
- client_info=transports.base.DEFAULT_CLIENT_INFO,
- )
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=client_cert_source_callback,
+ ):
+ if use_client_cert_env == "false":
+ expected_host = client.DEFAULT_ENDPOINT
+ expected_client_cert_source = None
+ else:
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+ expected_client_cert_source = client_cert_source_callback
- # Check the case client_cert_source and ADC client cert are not provided.
- with mock.patch.dict(
- os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
- ):
- with mock.patch.object(transport_class, "__init__") as patched:
- with mock.patch(
- "google.auth.transport.grpc.SslCredentials.__init__", return_value=None
- ):
- with mock.patch(
- "google.auth.transport.grpc.SslCredentials.is_mtls",
- new_callable=mock.PropertyMock,
- ) as is_mtls_mock:
- is_mtls_mock.return_value = False
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
- host=client.DEFAULT_ENDPOINT,
+ host=expected_host,
scopes=None,
- ssl_channel_credentials=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
+ # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ patched.return_value = None
+ client = client_class()
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
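Taken together, the branches above pin down the client's mTLS decision tree. A
usage sketch under assumed names; the callback and its cert/key bytes are
fabricated placeholders:

    import os

    from google.api_core.client_options import ClientOptions

    def client_cert_source():  # hypothetical callback returning (cert, key) bytes
        return b"cert", b"key"

    os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "true"
    client = TablesServiceClient(
        client_options=ClientOptions(client_cert_source=client_cert_source)
    )
    # Per the assertions above: the transport receives
    # client_cert_source_for_mtls=client_cert_source and the host is the
    # DEFAULT_MTLS_ENDPOINT; with the env var set to "false" the cert source
    # is None and the host falls back to DEFAULT_ENDPOINT.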
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
@@ -386,7 +391,7 @@ def test_tables_service_client_client_options_scopes(
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
- ssl_channel_credentials=None,
+ client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@@ -416,7 +421,7 @@ def test_tables_service_client_client_options_credentials_file(
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
- ssl_channel_credentials=None,
+ client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@@ -435,7 +440,7 @@ def test_tables_service_client_client_options_from_dict():
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
- ssl_channel_credentials=None,
+ client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@@ -478,6 +483,22 @@ def test_get_table_from_dict():
test_get_table(request_type=dict)
+def test_get_table_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TablesServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_table), "__call__") as call:
+ client.get_table()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.GetTableRequest()
+
+
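The *_empty_call tests added for each method in this change all pin down one
contract: a call with no request object and no flattened fields still sends
that method's default request proto. Sketch, assuming a constructed client:

    client.get_table()
    # is equivalent to:
    client.get_table(request=tables.GetTableRequest())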
@pytest.mark.asyncio
async def test_get_table_async(
transport: str = "grpc_asyncio", request_type=tables.GetTableRequest
@@ -667,6 +688,22 @@ def test_list_tables_from_dict():
test_list_tables(request_type=dict)
+def test_list_tables_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TablesServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_tables), "__call__") as call:
+ client.list_tables()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.ListTablesRequest()
+
+
@pytest.mark.asyncio
async def test_list_tables_async(
transport: str = "grpc_asyncio", request_type=tables.ListTablesRequest
@@ -739,70 +776,505 @@ def test_list_tables_pages():
with mock.patch.object(type(client.transport.list_tables), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
- tables.ListTablesResponse(
- tables=[tables.Table(), tables.Table(), tables.Table(),],
+ tables.ListTablesResponse(
+ tables=[tables.Table(), tables.Table(), tables.Table(),],
+ next_page_token="abc",
+ ),
+ tables.ListTablesResponse(tables=[], next_page_token="def",),
+ tables.ListTablesResponse(tables=[tables.Table(),], next_page_token="ghi",),
+ tables.ListTablesResponse(tables=[tables.Table(), tables.Table(),],),
+ RuntimeError,
+ )
+ pages = list(client.list_tables(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_tables_async_pager():
+ client = TablesServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tables), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tables.ListTablesResponse(
+ tables=[tables.Table(), tables.Table(), tables.Table(),],
+ next_page_token="abc",
+ ),
+ tables.ListTablesResponse(tables=[], next_page_token="def",),
+ tables.ListTablesResponse(tables=[tables.Table(),], next_page_token="ghi",),
+ tables.ListTablesResponse(tables=[tables.Table(), tables.Table(),],),
+ RuntimeError,
+ )
+ async_pager = await client.list_tables(request={},)
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, tables.Table) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_tables_async_pages():
+ client = TablesServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tables), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tables.ListTablesResponse(
+ tables=[tables.Table(), tables.Table(), tables.Table(),],
+ next_page_token="abc",
+ ),
+ tables.ListTablesResponse(tables=[], next_page_token="def",),
+ tables.ListTablesResponse(tables=[tables.Table(),], next_page_token="ghi",),
+ tables.ListTablesResponse(tables=[tables.Table(), tables.Table(),],),
+ RuntimeError,
+ )
+ pages = []
+ async for page_ in (await client.list_tables(request={})).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
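For orientation, a consumer-side sketch of the pager surface these tests cover;
the new list_workspaces tests mirror the list_tables ones. The empty request
dicts are placeholders:

    for table in client.list_tables(request={}):
        ...  # items are yielded across page boundaries transparently

    for page in client.list_tables(request={}).pages:
        token = page.raw_page.next_page_token  # per-page access

    # Async flavor: awaiting the call returns an async pager.
    async_pager = await async_client.list_tables(request={})
    async for table in async_pager:
        ...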
+def test_get_workspace(
+ transport: str = "grpc", request_type=tables.GetWorkspaceRequest
+):
+ client = TablesServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_workspace), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tables.Workspace(
+ name="name_value", display_name="display_name_value",
+ )
+
+ response = client.get_workspace(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.GetWorkspaceRequest()
+
+ # Establish that the response is the type that we expect.
+
+ assert isinstance(response, tables.Workspace)
+
+ assert response.name == "name_value"
+
+ assert response.display_name == "display_name_value"
+
+
+def test_get_workspace_from_dict():
+ test_get_workspace(request_type=dict)
+
+
+def test_get_workspace_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TablesServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_workspace), "__call__") as call:
+ client.get_workspace()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.GetWorkspaceRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_workspace_async(
+ transport: str = "grpc_asyncio", request_type=tables.GetWorkspaceRequest
+):
+ client = TablesServiceAsyncClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_workspace), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ tables.Workspace(name="name_value", display_name="display_name_value",)
+ )
+
+ response = await client.get_workspace(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.GetWorkspaceRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, tables.Workspace)
+
+ assert response.name == "name_value"
+
+ assert response.display_name == "display_name_value"
+
+
+@pytest.mark.asyncio
+async def test_get_workspace_async_from_dict():
+ await test_get_workspace_async(request_type=dict)
+
+
+def test_get_workspace_field_headers():
+ client = TablesServiceClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tables.GetWorkspaceRequest()
+ request.name = "name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_workspace), "__call__") as call:
+ call.return_value = tables.Workspace()
+
+ client.get_workspace(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_workspace_field_headers_async():
+ client = TablesServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tables.GetWorkspaceRequest()
+ request.name = "name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_workspace), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tables.Workspace())
+
+ await client.get_workspace(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
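The field-header tests fix the request-routing contract: the resource name from
the request body is mirrored into gRPC metadata so the backend can route the
call. Shape of the entry the client attaches for the request above:

    routing_header = ("x-goog-request-params", "name=name/value")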
+def test_get_workspace_flattened():
+ client = TablesServiceClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_workspace), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tables.Workspace()
+
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_workspace(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].name == "name_value"
+
+
+def test_get_workspace_flattened_error():
+ client = TablesServiceClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_workspace(
+ tables.GetWorkspaceRequest(), name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_workspace_flattened_async():
+ client = TablesServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_workspace), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tables.Workspace())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_workspace(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_get_workspace_flattened_error_async():
+ client = TablesServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_workspace(
+ tables.GetWorkspaceRequest(), name="name_value",
+ )
+
+
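Sketch of the two calling conventions the flattened tests distinguish; the
workspace name is a placeholder. Supplying both a request object and flattened
fields raises:

    client.get_workspace(name="workspaces/123")  # flattened keyword form
    client.get_workspace(
        request=tables.GetWorkspaceRequest(name="workspaces/123")
    )

    # ValueError: the two forms are mutually exclusive
    client.get_workspace(tables.GetWorkspaceRequest(), name="workspaces/123")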
+def test_list_workspaces(
+ transport: str = "grpc", request_type=tables.ListWorkspacesRequest
+):
+ client = TablesServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_workspaces), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tables.ListWorkspacesResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ response = client.list_workspaces(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.ListWorkspacesRequest()
+
+ # Establish that the response is the type that we expect.
+
+ assert isinstance(response, pagers.ListWorkspacesPager)
+
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_workspaces_from_dict():
+ test_list_workspaces(request_type=dict)
+
+
+def test_list_workspaces_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TablesServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_workspaces), "__call__") as call:
+ client.list_workspaces()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.ListWorkspacesRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_workspaces_async(
+ transport: str = "grpc_asyncio", request_type=tables.ListWorkspacesRequest
+):
+ client = TablesServiceAsyncClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_workspaces), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ tables.ListWorkspacesResponse(next_page_token="next_page_token_value",)
+ )
+
+ response = await client.list_workspaces(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.ListWorkspacesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListWorkspacesAsyncPager)
+
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_workspaces_async_from_dict():
+ await test_list_workspaces_async(request_type=dict)
+
+
+def test_list_workspaces_pager():
+ client = TablesServiceClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_workspaces), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tables.ListWorkspacesResponse(
+ workspaces=[
+ tables.Workspace(),
+ tables.Workspace(),
+ tables.Workspace(),
+ ],
+ next_page_token="abc",
+ ),
+ tables.ListWorkspacesResponse(workspaces=[], next_page_token="def",),
+ tables.ListWorkspacesResponse(
+ workspaces=[tables.Workspace(),], next_page_token="ghi",
+ ),
+ tables.ListWorkspacesResponse(
+ workspaces=[tables.Workspace(), tables.Workspace(),],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ pager = client.list_workspaces(request={})
+
+ assert pager._metadata == metadata
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, tables.Workspace) for i in results)
+
+
+def test_list_workspaces_pages():
+ client = TablesServiceClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_workspaces), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tables.ListWorkspacesResponse(
+ workspaces=[
+ tables.Workspace(),
+ tables.Workspace(),
+ tables.Workspace(),
+ ],
next_page_token="abc",
),
- tables.ListTablesResponse(tables=[], next_page_token="def",),
- tables.ListTablesResponse(tables=[tables.Table(),], next_page_token="ghi",),
- tables.ListTablesResponse(tables=[tables.Table(), tables.Table(),],),
+ tables.ListWorkspacesResponse(workspaces=[], next_page_token="def",),
+ tables.ListWorkspacesResponse(
+ workspaces=[tables.Workspace(),], next_page_token="ghi",
+ ),
+ tables.ListWorkspacesResponse(
+ workspaces=[tables.Workspace(), tables.Workspace(),],
+ ),
RuntimeError,
)
- pages = list(client.list_tables(request={}).pages)
+ pages = list(client.list_workspaces(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
-async def test_list_tables_async_pager():
+async def test_list_workspaces_async_pager():
client = TablesServiceAsyncClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client.transport.list_tables), "__call__", new_callable=mock.AsyncMock
+ type(client.transport.list_workspaces), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
- tables.ListTablesResponse(
- tables=[tables.Table(), tables.Table(), tables.Table(),],
+ tables.ListWorkspacesResponse(
+ workspaces=[
+ tables.Workspace(),
+ tables.Workspace(),
+ tables.Workspace(),
+ ],
next_page_token="abc",
),
- tables.ListTablesResponse(tables=[], next_page_token="def",),
- tables.ListTablesResponse(tables=[tables.Table(),], next_page_token="ghi",),
- tables.ListTablesResponse(tables=[tables.Table(), tables.Table(),],),
+ tables.ListWorkspacesResponse(workspaces=[], next_page_token="def",),
+ tables.ListWorkspacesResponse(
+ workspaces=[tables.Workspace(),], next_page_token="ghi",
+ ),
+ tables.ListWorkspacesResponse(
+ workspaces=[tables.Workspace(), tables.Workspace(),],
+ ),
RuntimeError,
)
- async_pager = await client.list_tables(request={},)
+ async_pager = await client.list_workspaces(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
- assert all(isinstance(i, tables.Table) for i in responses)
+ assert all(isinstance(i, tables.Workspace) for i in responses)
@pytest.mark.asyncio
-async def test_list_tables_async_pages():
+async def test_list_workspaces_async_pages():
client = TablesServiceAsyncClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client.transport.list_tables), "__call__", new_callable=mock.AsyncMock
+ type(client.transport.list_workspaces), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
- tables.ListTablesResponse(
- tables=[tables.Table(), tables.Table(), tables.Table(),],
+ tables.ListWorkspacesResponse(
+ workspaces=[
+ tables.Workspace(),
+ tables.Workspace(),
+ tables.Workspace(),
+ ],
next_page_token="abc",
),
- tables.ListTablesResponse(tables=[], next_page_token="def",),
- tables.ListTablesResponse(tables=[tables.Table(),], next_page_token="ghi",),
- tables.ListTablesResponse(tables=[tables.Table(), tables.Table(),],),
+ tables.ListWorkspacesResponse(workspaces=[], next_page_token="def",),
+ tables.ListWorkspacesResponse(
+ workspaces=[tables.Workspace(),], next_page_token="ghi",
+ ),
+ tables.ListWorkspacesResponse(
+ workspaces=[tables.Workspace(), tables.Workspace(),],
+ ),
RuntimeError,
)
pages = []
- async for page_ in (await client.list_tables(request={})).pages:
+ async for page_ in (await client.list_workspaces(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@@ -841,6 +1313,22 @@ def test_get_row_from_dict():
test_get_row(request_type=dict)
+def test_get_row_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TablesServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_row), "__call__") as call:
+ client.get_row()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.GetRowRequest()
+
+
@pytest.mark.asyncio
async def test_get_row_async(
transport: str = "grpc_asyncio", request_type=tables.GetRowRequest
@@ -1028,6 +1516,22 @@ def test_list_rows_from_dict():
test_list_rows(request_type=dict)
+def test_list_rows_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TablesServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_rows), "__call__") as call:
+ client.list_rows()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.ListRowsRequest()
+
+
@pytest.mark.asyncio
async def test_list_rows_async(
transport: str = "grpc_asyncio", request_type=tables.ListRowsRequest
@@ -1319,6 +1823,22 @@ def test_create_row_from_dict():
test_create_row(request_type=dict)
+def test_create_row_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TablesServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_row), "__call__") as call:
+ client.create_row()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.CreateRowRequest()
+
+
@pytest.mark.asyncio
async def test_create_row_async(
transport: str = "grpc_asyncio", request_type=tables.CreateRowRequest
@@ -1518,6 +2038,24 @@ def test_batch_create_rows_from_dict():
test_batch_create_rows(request_type=dict)
+def test_batch_create_rows_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TablesServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_create_rows), "__call__"
+ ) as call:
+ client.batch_create_rows()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.BatchCreateRowsRequest()
+
+
@pytest.mark.asyncio
async def test_batch_create_rows_async(
transport: str = "grpc_asyncio", request_type=tables.BatchCreateRowsRequest
@@ -1644,6 +2182,22 @@ def test_update_row_from_dict():
test_update_row(request_type=dict)
+def test_update_row_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TablesServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.update_row), "__call__") as call:
+ client.update_row()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.UpdateRowRequest()
+
+
@pytest.mark.asyncio
async def test_update_row_async(
transport: str = "grpc_asyncio", request_type=tables.UpdateRowRequest
@@ -1845,6 +2399,24 @@ def test_batch_update_rows_from_dict():
test_batch_update_rows(request_type=dict)
+def test_batch_update_rows_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TablesServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_update_rows), "__call__"
+ ) as call:
+ client.batch_update_rows()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.BatchUpdateRowsRequest()
+
+
@pytest.mark.asyncio
async def test_batch_update_rows_async(
transport: str = "grpc_asyncio", request_type=tables.BatchUpdateRowsRequest
@@ -1968,6 +2540,22 @@ def test_delete_row_from_dict():
test_delete_row(request_type=dict)
+def test_delete_row_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TablesServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_row), "__call__") as call:
+ client.delete_row()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.DeleteRowRequest()
+
+
@pytest.mark.asyncio
async def test_delete_row_async(
transport: str = "grpc_asyncio", request_type=tables.DeleteRowRequest
@@ -2116,6 +2704,147 @@ async def test_delete_row_flattened_error_async():
)
+def test_batch_delete_rows(
+ transport: str = "grpc", request_type=tables.BatchDeleteRowsRequest
+):
+ client = TablesServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_rows), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ response = client.batch_delete_rows(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.BatchDeleteRowsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_batch_delete_rows_from_dict():
+ test_batch_delete_rows(request_type=dict)
+
+
+def test_batch_delete_rows_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TablesServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_rows), "__call__"
+ ) as call:
+ client.batch_delete_rows()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.BatchDeleteRowsRequest()
+
+
+@pytest.mark.asyncio
+async def test_batch_delete_rows_async(
+ transport: str = "grpc_asyncio", request_type=tables.BatchDeleteRowsRequest
+):
+ client = TablesServiceAsyncClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_rows), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+
+ response = await client.batch_delete_rows(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == tables.BatchDeleteRowsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_batch_delete_rows_async_from_dict():
+ await test_batch_delete_rows_async(request_type=dict)
+
+
+def test_batch_delete_rows_field_headers():
+ client = TablesServiceClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tables.BatchDeleteRowsRequest()
+ request.parent = "parent/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_rows), "__call__"
+ ) as call:
+ call.return_value = None
+
+ client.batch_delete_rows(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_batch_delete_rows_field_headers_async():
+ client = TablesServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tables.BatchDeleteRowsRequest()
+ request.parent = "parent/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_delete_rows), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+
+ await client.batch_delete_rows(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
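batch_delete_rows is new in this change and returns None on success. A minimal
caller-side sketch; the parent and row names are placeholders, and the names
field is assumed from the request proto rather than shown in this diff:

    request = tables.BatchDeleteRowsRequest(
        parent="tables/my-table",
        names=["tables/my-table/rows/row1", "tables/my-table/rows/row2"],
    )
    client.batch_delete_rows(request)  # no return value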
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.TablesServiceGrpcTransport(
@@ -2215,6 +2944,8 @@ def test_tables_service_base_transport():
methods = (
"get_table",
"list_tables",
+ "get_workspace",
+ "list_workspaces",
"get_row",
"list_rows",
"create_row",
@@ -2222,6 +2953,7 @@ def test_tables_service_base_transport():
"update_row",
"batch_update_rows",
"delete_row",
+ "batch_delete_rows",
)
for method in methods:
with pytest.raises(NotImplementedError):
@@ -2248,6 +2980,7 @@ def test_tables_service_base_transport_with_credentials_file():
"https://www.googleapis.com/auth/drive.readonly",
"https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/spreadsheets.readonly",
+ "https://www.googleapis.com/auth/tables",
),
quota_project_id="octopus",
)
@@ -2276,6 +3009,7 @@ def test_tables_service_auth_adc():
"https://www.googleapis.com/auth/drive.readonly",
"https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/spreadsheets.readonly",
+ "https://www.googleapis.com/auth/tables",
),
quota_project_id=None,
)
@@ -2296,11 +3030,64 @@ def test_tables_service_transport_auth_adc():
"https://www.googleapis.com/auth/drive.readonly",
"https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/spreadsheets.readonly",
+ "https://www.googleapis.com/auth/tables",
),
quota_project_id="octopus",
)
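For reference, the default credential scopes after this change; every scope
assertion in this file now expects the new Tables scope at the end:

    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/drive",
        "https://www.googleapis.com/auth/drive.file",
        "https://www.googleapis.com/auth/drive.readonly",
        "https://www.googleapis.com/auth/spreadsheets",
        "https://www.googleapis.com/auth/spreadsheets.readonly",
        "https://www.googleapis.com/auth/tables",
    )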
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.TablesServiceGrpcTransport,
+ transports.TablesServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_tables_service_grpc_transport_client_cert_source_for_mtls(transport_class):
+ cred = credentials.AnonymousCredentials()
+
+ # Check ssl_channel_credentials is used if provided.
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds,
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=(
+ "https://www.googleapis.com/auth/drive",
+ "https://www.googleapis.com/auth/drive.file",
+ "https://www.googleapis.com/auth/drive.readonly",
+ "https://www.googleapis.com/auth/spreadsheets",
+ "https://www.googleapis.com/auth/spreadsheets.readonly",
+ "https://www.googleapis.com/auth/tables",
+ ),
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check that when ssl_channel_credentials is not provided,
+ # client_cert_source_for_mtls is used to build the channel credentials.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback,
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert, private_key=expected_key
+ )
+
+
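The two branches above fix a precedence rule: an explicit
ssl_channel_credentials is used as-is, and only in its absence does the
transport invoke the mTLS callback to build channel credentials. Sketch with
placeholder names:

    transport = transports.TablesServiceGrpcTransport(
        credentials=creds,
        ssl_channel_credentials=my_ssl_creds,  # used as-is when provided
    )
    transport = transports.TablesServiceGrpcTransport(
        credentials=creds,
        # otherwise the callback's cert/key feed grpc.ssl_channel_credentials
        client_cert_source_for_mtls=my_cert_source,
    )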
def test_tables_service_host_no_port():
client = TablesServiceClient(
credentials=credentials.AnonymousCredentials(),
@@ -2322,7 +3109,7 @@ def test_tables_service_host_with_port():
def test_tables_service_grpc_transport_channel():
- channel = grpc.insecure_channel("http://localhost/")
+ channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TablesServiceGrpcTransport(
@@ -2334,7 +3121,7 @@ def test_tables_service_grpc_transport_channel():
def test_tables_service_grpc_asyncio_transport_channel():
- channel = aio.insecure_channel("http://localhost/")
+ channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TablesServiceGrpcAsyncIOTransport(
@@ -2345,6 +3132,8 @@ def test_tables_service_grpc_asyncio_transport_channel():
assert transport._ssl_channel_credentials == None
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
@@ -2357,7 +3146,7 @@ def test_tables_service_transport_channel_mtls_with_client_cert_source(transport
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
- transport_class, "create_channel", autospec=True
+ transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
@@ -2389,6 +3178,7 @@ def test_tables_service_transport_channel_mtls_with_client_cert_source(transport
"https://www.googleapis.com/auth/drive.readonly",
"https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/spreadsheets.readonly",
+ "https://www.googleapis.com/auth/tables",
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
@@ -2401,6 +3191,8 @@ def test_tables_service_transport_channel_mtls_with_client_cert_source(transport
assert transport._ssl_channel_credentials == mock_ssl_cred
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
@@ -2416,7 +3208,7 @@ def test_tables_service_transport_channel_mtls_with_adc(transport_class):
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
- transport_class, "create_channel", autospec=True
+ transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
@@ -2440,6 +3232,7 @@ def test_tables_service_transport_channel_mtls_with_adc(transport_class):
"https://www.googleapis.com/auth/drive.readonly",
"https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/spreadsheets.readonly",
+ "https://www.googleapis.com/auth/tables",
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
@@ -2491,8 +3284,27 @@ def test_parse_table_path():
assert expected == actual
+def test_workspace_path():
+ workspace = "cuttlefish"
+
+ expected = "workspaces/{workspace}".format(workspace=workspace,)
+ actual = TablesServiceClient.workspace_path(workspace)
+ assert expected == actual
+
+
+def test_parse_workspace_path():
+ expected = {
+ "workspace": "mussel",
+ }
+ path = TablesServiceClient.workspace_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = TablesServiceClient.parse_workspace_path(path)
+ assert expected == actual
+
+
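The new workspace path helpers round-trip, as the pair of tests asserts:

    path = TablesServiceClient.workspace_path("cuttlefish")
    assert path == "workspaces/cuttlefish"
    assert TablesServiceClient.parse_workspace_path(path) == {"workspace": "cuttlefish"}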
def test_common_billing_account_path():
- billing_account = "cuttlefish"
+ billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
@@ -2503,7 +3315,7 @@ def test_common_billing_account_path():
def test_parse_common_billing_account_path():
expected = {
- "billing_account": "mussel",
+ "billing_account": "nautilus",
}
path = TablesServiceClient.common_billing_account_path(**expected)
@@ -2513,7 +3325,7 @@ def test_parse_common_billing_account_path():
def test_common_folder_path():
- folder = "winkle"
+ folder = "scallop"
expected = "folders/{folder}".format(folder=folder,)
actual = TablesServiceClient.common_folder_path(folder)
@@ -2522,7 +3334,7 @@ def test_common_folder_path():
def test_parse_common_folder_path():
expected = {
- "folder": "nautilus",
+ "folder": "abalone",
}
path = TablesServiceClient.common_folder_path(**expected)
@@ -2532,7 +3344,7 @@ def test_parse_common_folder_path():
def test_common_organization_path():
- organization = "scallop"
+ organization = "squid"
expected = "organizations/{organization}".format(organization=organization,)
actual = TablesServiceClient.common_organization_path(organization)
@@ -2541,7 +3353,7 @@ def test_common_organization_path():
def test_parse_common_organization_path():
expected = {
- "organization": "abalone",
+ "organization": "clam",
}
path = TablesServiceClient.common_organization_path(**expected)
@@ -2551,7 +3363,7 @@ def test_parse_common_organization_path():
def test_common_project_path():
- project = "squid"
+ project = "whelk"
expected = "projects/{project}".format(project=project,)
actual = TablesServiceClient.common_project_path(project)
@@ -2560,7 +3372,7 @@ def test_common_project_path():
def test_parse_common_project_path():
expected = {
- "project": "clam",
+ "project": "octopus",
}
path = TablesServiceClient.common_project_path(**expected)
@@ -2570,8 +3382,8 @@ def test_parse_common_project_path():
def test_common_location_path():
- project = "whelk"
- location = "octopus"
+ project = "oyster"
+ location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
@@ -2582,8 +3394,8 @@ def test_common_location_path():
def test_parse_common_location_path():
expected = {
- "project": "oyster",
- "location": "nudibranch",
+ "project": "cuttlefish",
+ "location": "mussel",
}
path = TablesServiceClient.common_location_path(**expected)