diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e159e08..5cdf3f3 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -142,7 +142,7 @@ jobs:
       - name: "Setup environment"
         run: |
           pipx install poetry==1.8.5
-          poetry config virtualenvs.prefer-active-python true
+          poetry config virtualenvs.create true --local
           pip install invoke toml codecov
       - name: "Install Package"
         run: "poetry install --all-extras"
@@ -193,7 +193,7 @@ jobs:
       - name: "Setup environment"
         run: |
           pipx install poetry==1.8.5
-          poetry config virtualenvs.prefer-active-python true
+          poetry config virtualenvs.create true --local
           pip install invoke toml codecov
       - name: "Install Package"
         run: "poetry install --all-extras"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cad316d..dfe6e86 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,15 @@ This project uses [*towncrier*](https://towncrier.readthedocs.io/) and the chang
 
+## [1.5.0](https://github.com/opsmill/infrahub-sdk-python/tree/v1.5.0) - 2025-01-09
+
+### Added
+
+- Add an `infrahubctl info` command to display information about the connectivity status of the SDK. ([#109](https://github.com/opsmill/infrahub-sdk-python/issues/109))
+- Add a `count` method to both sync and async clients to retrieve the number of objects of a given kind. ([#158](https://github.com/opsmill/infrahub-sdk-python/issues/158))
+- Add the ability to batch API queries for the `all` and `filters` functions. ([#159](https://github.com/opsmill/infrahub-sdk-python/issues/159))
+- `client.all` and `client.filters` now support an `order` parameter that allows disabling ordering of the retrieved nodes to improve performance.
+
 ## [1.4.1](https://github.com/opsmill/infrahub-sdk-python/tree/v1.3.0) - 2025-01-05
 
 ### Fixed
diff --git a/infrahub_sdk/client.py b/infrahub_sdk/client.py
index 4d7cf4f..839f1ec 100644
--- a/infrahub_sdk/client.py
+++ b/infrahub_sdk/client.py
@@ -46,13 +46,13 @@
 )
 from .object_store import ObjectStore, ObjectStoreSync
 from .protocols_base import CoreNode, CoreNodeSync
-from .queries import get_commit_update_mutation
+from .queries import QUERY_USER, get_commit_update_mutation
 from .query_groups import InfrahubGroupContext, InfrahubGroupContextSync
 from .schema import InfrahubSchema, InfrahubSchemaSync, NodeSchemaAPI
 from .store import NodeStore, NodeStoreSync
 from .timestamp import Timestamp
-from .types import AsyncRequester, HTTPMethod, SyncRequester
-from .utils import decode_json, is_valid_uuid
+from .types import AsyncRequester, HTTPMethod, Order, SyncRequester
+from .utils import decode_json, get_user_permissions, is_valid_uuid
 
 if TYPE_CHECKING:
     from types import TracebackType
@@ -272,6 +272,22 @@ def _initialize(self) -> None:
         self._request_method: AsyncRequester = self.config.requester or self._default_request_method
         self.group_context = InfrahubGroupContext(self)
 
+    async def get_version(self) -> str:
+        """Return the Infrahub version."""
+        response = await self.execute_graphql(query="query { InfrahubInfo { version }}")
+        version = response.get("InfrahubInfo", {}).get("version", "")
+        return version
+
+    async def get_user(self) -> dict:
+        """Return user information."""
+        user_info = await self.execute_graphql(query=QUERY_USER)
+        return user_info
+
+    async def get_user_permissions(self) -> dict:
+        """Return user permissions."""
+        user_info = await self.get_user()
+        return get_user_permissions(user_info["AccountProfile"]["member_of_groups"]["edges"])
+
     @overload
     async def create(
         self,
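The three helpers added in the hunk above are thin wrappers around `execute_graphql`: `get_version` issues the `InfrahubInfo` query, `get_user` runs `QUERY_USER`, and `get_user_permissions` flattens the group/role/permission edges via the new `utils.get_user_permissions`. A minimal sketch of how they combine from calling code (the server address is a placeholder, not part of this diff):

```python
import asyncio

from infrahub_sdk import Config, InfrahubClient


async def main() -> None:
    client = InfrahubClient(config=Config(address="http://localhost:8000"))

    version = await client.get_version()  # e.g. "1.1.0"
    user = await client.get_user()  # raw QUERY_USER response
    permissions = await client.get_user_permissions()  # {group: [permission, ...]}

    print(f"Infrahub v{version}, user {user['AccountProfile']['display_label']}")
    for group, perms in permissions.items():
        print(f"  {group}: {', '.join(perms)}")


asyncio.run(main())
```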
@@ -525,6 +541,25 @@ async def _process_nodes_and_relationships(
 
         return ProcessRelationsNode(nodes=nodes, related_nodes=related_nodes)
 
+    async def count(
+        self,
+        kind: str | type[SchemaType],
+        at: Timestamp | None = None,
+        branch: str | None = None,
+        timeout: int | None = None,
+    ) -> int:
+        """Return the number of nodes of a given kind."""
+        schema = await self.schema.get(kind=kind, branch=branch)
+
+        branch = branch or self.default_branch
+        if at:
+            at = Timestamp(at)
+
+        response = await self.execute_graphql(
+            query=Query(query={schema.kind: {"count": None}}).render(), branch_name=branch, at=at, timeout=timeout
+        )
+        return int(response.get(schema.kind, {}).get("count", 0))
+
     @overload
     async def all(
         self,
@@ -540,6 +575,8 @@ async def all(
         fragment: bool = ...,
         prefetch_relationships: bool = ...,
         property: bool = ...,
+        parallel: bool = ...,
+        order: Order | None = ...,
     ) -> list[SchemaType]: ...
 
     @overload
@@ -557,6 +594,8 @@ async def all(
         fragment: bool = ...,
         prefetch_relationships: bool = ...,
         property: bool = ...,
+        parallel: bool = ...,
+        order: Order | None = ...,
     ) -> list[InfrahubNode]: ...
 
     async def all(
@@ -573,6 +612,8 @@ async def all(
         fragment: bool = False,
         prefetch_relationships: bool = False,
         property: bool = False,
+        parallel: bool = False,
+        order: Order | None = None,
     ) -> list[InfrahubNode] | list[SchemaType]:
         """Retrieve all nodes of a given kind
@@ -588,6 +629,8 @@ async def all(
             exclude (list[str], optional): List of attributes or relationships to exclude from the query.
             fragment (bool, optional): Flag to use GraphQL fragments for generic schemas.
             prefetch_relationships (bool, optional): Flag to indicate whether to prefetch related node data.
+            parallel (bool, optional): Whether to use parallel processing for the query.
+            order (Order, optional): Ordering-related options. Setting `disable=True` improves performance.
 
         Returns:
             list[InfrahubNode]: List of Nodes
@@ -605,6 +648,8 @@ async def all(
             fragment=fragment,
             prefetch_relationships=prefetch_relationships,
             property=property,
+            parallel=parallel,
+            order=order,
         )
 
     @overload
@@ -623,6 +668,8 @@ async def filters(
         prefetch_relationships: bool = ...,
         partial_match: bool = ...,
         property: bool = ...,
+        parallel: bool = ...,
+        order: Order | None = ...,
         **kwargs: Any,
     ) -> list[SchemaType]: ...
 
@@ -642,6 +689,8 @@ async def filters(
         prefetch_relationships: bool = ...,
         partial_match: bool = ...,
         property: bool = ...,
+        parallel: bool = ...,
+        order: Order | None = ...,
         **kwargs: Any,
     ) -> list[InfrahubNode]: ...
 
@@ -660,6 +709,8 @@ async def filters(
         prefetch_relationships: bool = False,
         partial_match: bool = False,
         property: bool = False,
+        parallel: bool = False,
+        order: Order | None = None,
         **kwargs: Any,
     ) -> list[InfrahubNode] | list[SchemaType]:
         """Retrieve nodes of a given kind based on provided filters.
@@ -677,32 +728,27 @@ async def filters(
             fragment (bool, optional): Flag to use GraphQL fragments for generic schemas.
             prefetch_relationships (bool, optional): Flag to indicate whether to prefetch related node data.
             partial_match (bool, optional): Allow partial match of filter criteria for the query.
+            parallel (bool, optional): Whether to use parallel processing for the query.
+            order (Order, optional): Ordering-related options. Setting `disable=True` improves performance.
             **kwargs (Any): Additional filter criteria for the query.
 
         Returns:
             list[InfrahubNodeSync]: List of Nodes that match the given filters.
""" - schema = await self.schema.get(kind=kind, branch=branch) - branch = branch or self.default_branch + schema = await self.schema.get(kind=kind, branch=branch) if at: at = Timestamp(at) node = InfrahubNode(client=self, schema=schema, branch=branch) filters = kwargs + pagination_size = self.pagination_size - nodes: list[InfrahubNode] = [] - related_nodes: list[InfrahubNode] = [] - - has_remaining_items = True - page_number = 1 - - while has_remaining_items: - page_offset = (page_number - 1) * self.pagination_size - + async def process_page(page_offset: int, page_number: int) -> tuple[dict, ProcessRelationsNode]: + """Process a single page of results.""" query_data = await InfrahubNode(client=self, schema=schema, branch=branch).generate_query_data( offset=offset or page_offset, - limit=limit or self.pagination_size, + limit=limit or pagination_size, filters=filters, include=include, exclude=exclude, @@ -710,6 +756,7 @@ async def filters( prefetch_relationships=prefetch_relationships, partial_match=partial_match, property=property, + order=order, ) query = Query(query=query_data) response = await self.execute_graphql( @@ -727,14 +774,48 @@ async def filters( prefetch_relationships=prefetch_relationships, timeout=timeout, ) - nodes.extend(process_result["nodes"]) - related_nodes.extend(process_result["related_nodes"]) + return response, process_result + + async def process_batch() -> tuple[list[InfrahubNode], list[InfrahubNode]]: + """Process queries in parallel mode.""" + nodes = [] + related_nodes = [] + batch_process = await self.create_batch() + count = await self.count(kind=schema.kind) + total_pages = (count + pagination_size - 1) // pagination_size + + for page_number in range(1, total_pages + 1): + page_offset = (page_number - 1) * pagination_size + batch_process.add(task=process_page, node=node, page_offset=page_offset, page_number=page_number) - remaining_items = response[schema.kind].get("count", 0) - (page_offset + self.pagination_size) - if remaining_items < 0 or offset is not None or limit is not None: - has_remaining_items = False + async for _, response in batch_process.execute(): + nodes.extend(response[1]["nodes"]) + related_nodes.extend(response[1]["related_nodes"]) - page_number += 1 + return nodes, related_nodes + + async def process_non_batch() -> tuple[list[InfrahubNode], list[InfrahubNode]]: + """Process queries without parallel mode.""" + nodes = [] + related_nodes = [] + has_remaining_items = True + page_number = 1 + + while has_remaining_items: + page_offset = (page_number - 1) * pagination_size + response, process_result = await process_page(page_offset, page_number) + + nodes.extend(process_result["nodes"]) + related_nodes.extend(process_result["related_nodes"]) + remaining_items = response[schema.kind].get("count", 0) - (page_offset + pagination_size) + if remaining_items < 0 or offset is not None or limit is not None: + has_remaining_items = False + page_number += 1 + + return nodes, related_nodes + + # Select parallel or non-parallel processing + nodes, related_nodes = await (process_batch() if parallel else process_non_batch()) if populate_store: for node in nodes: @@ -744,7 +825,6 @@ async def filters( for node in related_nodes: if node.id: self.store.set(key=node.id, node=node) - return nodes def clone(self) -> InfrahubClient: @@ -1425,6 +1505,22 @@ def _initialize(self) -> None: self._request_method: SyncRequester = self.config.sync_requester or self._default_request_method self.group_context = InfrahubGroupContextSync(self) + def get_version(self) -> 
@@ -1425,6 +1505,22 @@ def _initialize(self) -> None:
         self._request_method: SyncRequester = self.config.sync_requester or self._default_request_method
         self.group_context = InfrahubGroupContextSync(self)
 
+    def get_version(self) -> str:
+        """Return the Infrahub version."""
+        response = self.execute_graphql(query="query { InfrahubInfo { version }}")
+        version = response.get("InfrahubInfo", {}).get("version", "")
+        return version
+
+    def get_user(self) -> dict:
+        """Return user information."""
+        user_info = self.execute_graphql(query=QUERY_USER)
+        return user_info
+
+    def get_user_permissions(self) -> dict:
+        """Return user permissions."""
+        user_info = self.get_user()
+        return get_user_permissions(user_info["AccountProfile"]["member_of_groups"]["edges"])
+
     @overload
     def create(
         self,
@@ -1549,6 +1645,25 @@ def execute_graphql(
 
     # TODO add a special method to execute mutation that will check if the method returned OK
 
+    def count(
+        self,
+        kind: str | type[SchemaType],
+        at: Timestamp | None = None,
+        branch: str | None = None,
+        timeout: int | None = None,
+    ) -> int:
+        """Return the number of nodes of a given kind."""
+        schema = self.schema.get(kind=kind, branch=branch)
+
+        branch = branch or self.default_branch
+        if at:
+            at = Timestamp(at)
+
+        response = self.execute_graphql(
+            query=Query(query={schema.kind: {"count": None}}).render(), branch_name=branch, at=at, timeout=timeout
+        )
+        return int(response.get(schema.kind, {}).get("count", 0))
+
     @overload
     def all(
         self,
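The sync `count` above mirrors the async variant added earlier in this patch: both render a minimal `{ <kind> { count } }` document, so no edges are fetched or hydrated. A usage sketch (the address is a placeholder; `CoreRepository` matches the unit-test fixture below):

```python
from infrahub_sdk import Config, InfrahubClientSync

client = InfrahubClientSync(config=Config(address="http://localhost:8000"))

# Single lightweight query; no nodes are instantiated or stored.
total = client.count(kind="CoreRepository")

# Like the other read methods, it is branch and timestamp aware.
total_on_main = client.count(kind="CoreRepository", branch="main")
print(total, total_on_main)
```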
@@ -1564,6 +1679,8 @@ def all(
         fragment: bool = ...,
         prefetch_relationships: bool = ...,
         property: bool = ...,
+        parallel: bool = ...,
+        order: Order | None = ...,
     ) -> list[SchemaTypeSync]: ...
 
     @overload
@@ -1581,6 +1698,8 @@ def all(
         fragment: bool = ...,
         prefetch_relationships: bool = ...,
         property: bool = ...,
+        parallel: bool = ...,
+        order: Order | None = ...,
     ) -> list[InfrahubNodeSync]: ...
 
     def all(
@@ -1597,6 +1716,8 @@ def all(
         fragment: bool = False,
         prefetch_relationships: bool = False,
         property: bool = False,
+        parallel: bool = False,
+        order: Order | None = None,
     ) -> list[InfrahubNodeSync] | list[SchemaTypeSync]:
         """Retrieve all nodes of a given kind
@@ -1612,6 +1733,8 @@ def all(
             exclude (list[str], optional): List of attributes or relationships to exclude from the query.
             fragment (bool, optional): Flag to use GraphQL fragments for generic schemas.
             prefetch_relationships (bool, optional): Flag to indicate whether to prefetch related node data.
+            parallel (bool, optional): Whether to use parallel processing for the query.
+            order (Order, optional): Ordering-related options. Setting `disable=True` improves performance.
 
         Returns:
             list[InfrahubNodeSync]: List of Nodes
@@ -1629,6 +1752,8 @@ def all(
             fragment=fragment,
             prefetch_relationships=prefetch_relationships,
             property=property,
+            parallel=parallel,
+            order=order,
         )
 
     def _process_nodes_and_relationships(
@@ -1682,6 +1807,8 @@ def filters(
         prefetch_relationships: bool = ...,
         partial_match: bool = ...,
         property: bool = ...,
+        parallel: bool = ...,
+        order: Order | None = ...,
         **kwargs: Any,
     ) -> list[SchemaTypeSync]: ...
 
@@ -1701,6 +1828,8 @@ def filters(
         prefetch_relationships: bool = ...,
         partial_match: bool = ...,
         property: bool = ...,
+        parallel: bool = ...,
+        order: Order | None = ...,
         **kwargs: Any,
     ) -> list[InfrahubNodeSync]: ...
 
@@ -1719,6 +1848,8 @@ def filters(
         prefetch_relationships: bool = False,
         partial_match: bool = False,
         property: bool = False,
+        parallel: bool = False,
+        order: Order | None = None,
         **kwargs: Any,
     ) -> list[InfrahubNodeSync] | list[SchemaTypeSync]:
         """Retrieve nodes of a given kind based on provided filters.
@@ -1736,32 +1867,26 @@ def filters(
             fragment (bool, optional): Flag to use GraphQL fragments for generic schemas.
             prefetch_relationships (bool, optional): Flag to indicate whether to prefetch related node data.
             partial_match (bool, optional): Allow partial match of filter criteria for the query.
+            parallel (bool, optional): Whether to use parallel processing for the query.
+            order (Order, optional): Ordering-related options. Setting `disable=True` improves performance.
             **kwargs (Any): Additional filter criteria for the query.
 
         Returns:
             list[InfrahubNodeSync]: List of Nodes that match the given filters.
         """
-        schema = self.schema.get(kind=kind, branch=branch)
-
         branch = branch or self.default_branch
+        schema = self.schema.get(kind=kind, branch=branch)
+        node = InfrahubNodeSync(client=self, schema=schema, branch=branch)
 
         if at:
             at = Timestamp(at)
-
-        node = InfrahubNodeSync(client=self, schema=schema, branch=branch)
         filters = kwargs
+        pagination_size = self.pagination_size
 
-        nodes: list[InfrahubNodeSync] = []
-        related_nodes: list[InfrahubNodeSync] = []
-
-        has_remaining_items = True
-        page_number = 1
-
-        while has_remaining_items:
-            page_offset = (page_number - 1) * self.pagination_size
-
+        def process_page(page_offset: int, page_number: int) -> tuple[dict, ProcessRelationsNodeSync]:
+            """Process a single page of results."""
             query_data = InfrahubNodeSync(client=self, schema=schema, branch=branch).generate_query_data(
                 offset=offset or page_offset,
-                limit=limit or self.pagination_size,
+                limit=limit or pagination_size,
                 filters=filters,
                 include=include,
                 exclude=exclude,
@@ -1769,6 +1894,7 @@ def filters(
                 prefetch_relationships=prefetch_relationships,
                 partial_match=partial_match,
                 property=property,
+                order=order,
             )
             query = Query(query=query_data)
             response = self.execute_graphql(
@@ -1786,14 +1912,50 @@ def filters(
                 prefetch_relationships=prefetch_relationships,
                 timeout=timeout,
             )
-            nodes.extend(process_result["nodes"])
-            related_nodes.extend(process_result["related_nodes"])
+            return response, process_result
+
+        def process_batch() -> tuple[list[InfrahubNodeSync], list[InfrahubNodeSync]]:
+            """Process queries in parallel mode."""
+            nodes = []
+            related_nodes = []
+            batch_process = self.create_batch()
+
+            count = self.count(kind=schema.kind)
+            total_pages = (count + pagination_size - 1) // pagination_size
 
-            remaining_items = response[schema.kind].get("count", 0) - (page_offset + self.pagination_size)
-            if remaining_items < 0 or offset is not None or limit is not None:
-                has_remaining_items = False
+            for page_number in range(1, total_pages + 1):
+                page_offset = (page_number - 1) * pagination_size
+                batch_process.add(task=process_page, node=node, page_offset=page_offset, page_number=page_number)
 
-            page_number += 1
+            for _, response in batch_process.execute():
+                nodes.extend(response[1]["nodes"])
+                related_nodes.extend(response[1]["related_nodes"])
+
+            return nodes, related_nodes
+
+        def process_non_batch() -> tuple[list[InfrahubNodeSync], list[InfrahubNodeSync]]:
+            """Process queries without parallel mode."""
+            nodes = []
+            related_nodes = []
+            has_remaining_items = True
+            page_number = 1
+
+            while has_remaining_items:
+                page_offset = (page_number - 1) * pagination_size
+                response, process_result = process_page(page_offset, page_number)
+
+                nodes.extend(process_result["nodes"])
+                related_nodes.extend(process_result["related_nodes"])
+
+                remaining_items = response[schema.kind].get("count", 0) - (page_offset + pagination_size)
+                if remaining_items < 0 or offset is not None or limit is not None:
+                    has_remaining_items = False
+                page_number += 1
+
+            return nodes, related_nodes
+
+        # Select parallel or non-parallel processing
+        nodes, related_nodes = process_batch() if parallel else process_non_batch()
 
         if populate_store:
             for node in nodes:
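The `order` parameter travels through `generate_query_data` into the query's `@filters`, where the `Order` model is serialized by the new `BaseModel` branch of `convert_to_graphql_as_string` (see the `infrahub_sdk/graphql.py` hunk further down). A sketch with the sync client (address and kind are illustrative):

```python
from infrahub_sdk import Config, InfrahubClientSync
from infrahub_sdk.types import Order

client = InfrahubClientSync(config=Config(address="http://localhost:8000"))

# Skip the server-side sort derived from the schema's `order_by`,
# which can be measurably cheaper on large result sets.
locations = client.all(kind="BuiltinLocation", order=Order(disable=True))

# With ordering disabled the sequence is undefined, so compare as a set,
# as the (currently commented-out) integration tests do.
names = {location.name.value for location in locations}
print(names)
```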
@@ -1803,7 +1965,6 @@ def filters(
             for node in related_nodes:
                 if node.id:
                     self.store.set(key=node.id, node=node)
-
         return nodes
 
     @overload
diff --git a/infrahub_sdk/ctl/cli_commands.py b/infrahub_sdk/ctl/cli_commands.py
index 2471f72..09a73ec 100644
--- a/infrahub_sdk/ctl/cli_commands.py
+++ b/infrahub_sdk/ctl/cli_commands.py
@@ -4,6 +4,7 @@
 import functools
 import importlib
 import logging
+import platform
 import sys
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Callable
@@ -12,7 +13,11 @@
 import typer
 import ujson
 from rich.console import Console
+from rich.layout import Layout
 from rich.logging import RichHandler
+from rich.panel import Panel
+from rich.pretty import Pretty
+from rich.table import Table
 from rich.traceback import Traceback
 
 from .. import __version__ as sdk_version
@@ -392,11 +397,106 @@ def protocols(
 
 @app.command(name="version")
 @catch_exception(console=console)
-def version(_: str = CONFIG_PARAM) -> None:
-    """Display the version of Infrahub and the version of the Python SDK in use."""
+def version() -> None:
+    """Display the version of Python and the version of the Python SDK in use."""
 
-    client = initialize_client_sync()
-    response = client.execute_graphql(query="query { InfrahubInfo { version }}")
+    console.print(f"Python: {platform.python_version()}\nPython SDK: v{sdk_version}")
 
-    infrahub_version = response["InfrahubInfo"]["version"]
-    console.print(f"Infrahub: v{infrahub_version}\nPython SDK: v{sdk_version}")
+
+@app.command(name="info")
+@catch_exception(console=console)
+def info(detail: bool = typer.Option(False, help="Display detailed information."), _: str = CONFIG_PARAM) -> None:  # noqa: PLR0915
+    """Display the status of the Python SDK."""
+
+    info: dict[str, Any] = {
+        "error": None,
+        "status": ":x:",
+        "infrahub_version": "N/A",
+        "user_info": {},
+        "groups": {},
+    }
+    try:
+        client = initialize_client_sync()
+        info["infrahub_version"] = client.get_version()
+        info["user_info"] = client.get_user()
+        info["status"] = ":white_heavy_check_mark:"
+        info["groups"] = client.get_user_permissions()
+    except Exception as e:
+        info["error"] = f"{e!s} ({e.__class__.__name__})"
+
+    if detail:
+        # Layout structure
+        new_console = Console(height=45)
+        layout = Layout()
+        layout.split_column(
+            Layout(name="body", ratio=1),
+        )
+        layout["body"].split_row(
+            Layout(name="left"),
+            Layout(name="right"),
+        )
+
+        layout["left"].split_column(
+            Layout(name="connection_status", size=7),
+            Layout(name="client_info", ratio=1),
+        )
+
+        layout["right"].split_column(
+            Layout(name="version_info", size=7),
+            Layout(name="infrahub_info", ratio=1),
+        )
+
+        # Connection status panel
+        connection_status = Table(show_header=False, box=None)
+        connection_status.add_row("Server Address:", client.config.address)
+        connection_status.add_row("Status:", info["status"])
+        if info["error"]:
+            connection_status.add_row("Error Reason:", info["error"])
+        layout["connection_status"].update(Panel(connection_status, title="Connection Status"))
+
+        # Version information panel
+        version_info = Table(show_header=False, box=None)
+        version_info.add_row("Python Version:", platform.python_version())
+        version_info.add_row("Infrahub Version:", info["infrahub_version"])
+        version_info.add_row("Infrahub SDK:", sdk_version)
+        layout["version_info"].update(Panel(version_info, title="Version Information"))
+
+        # SDK client configuration panel
+        pretty_model = Pretty(client.config.model_dump(), expand_all=True)
+        layout["client_info"].update(Panel(pretty_model, title="Client Info"))
+
+        # Infrahub information panel
+        infrahub_info = Table(show_header=False, box=None)
+        if info["user_info"]:
+            infrahub_info.add_row("User:", info["user_info"]["AccountProfile"]["display_label"])
+            infrahub_info.add_row("Description:", info["user_info"]["AccountProfile"]["description"]["value"])
+            infrahub_info.add_row("Status:", info["user_info"]["AccountProfile"]["status"]["label"])
+            infrahub_info.add_row(
+                "Number of Groups:", str(info["user_info"]["AccountProfile"]["member_of_groups"]["count"])
+            )
+
+        if groups := info["groups"]:
+            infrahub_info.add_row("Groups:", "")
+            for group, roles in groups.items():
+                infrahub_info.add_row("", group, ", ".join(roles))
+
+        layout["infrahub_info"].update(Panel(infrahub_info, title="Infrahub Info"))
+
+        new_console.print(layout)
+    else:
+        # Simple output
+        table = Table(show_header=False, box=None)
+        table.add_row("Address:", client.config.address)
+        table.add_row("Connection Status:", info["status"])
+        if info["error"]:
+            table.add_row("Connection Error:", info["error"])
+
+        table.add_row("Python Version:", platform.python_version())
+        table.add_row("SDK Version:", sdk_version)
+        table.add_row("Infrahub Version:", info["infrahub_version"])
+        if account := info["user_info"].get("AccountProfile"):
+            table.add_row("User:", account["display_label"])
+
+        console.print(table)
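Both the reworked `version` command and the new `info` command are plain Typer commands, so they can be exercised without a shell in the same way the unit tests below do (a sketch, assuming the `app` object exported by `infrahub_sdk.ctl.cli_commands`):

```python
from typer.testing import CliRunner

from infrahub_sdk.ctl.cli_commands import app

runner = CliRunner()

# `version` no longer needs a server: it prints local Python and SDK versions.
result = runner.invoke(app, ["version"])
assert "Python SDK: v" in result.stdout

# `info` degrades gracefully: with no reachable server it still exits 0 and
# reports the failure reason in its output instead of raising.
result = runner.invoke(app, ["info", "--detail"])
assert result.exit_code == 0
```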
diff --git a/infrahub_sdk/graphql.py b/infrahub_sdk/graphql.py
index d706c64..9b7722d 100644
--- a/infrahub_sdk/graphql.py
+++ b/infrahub_sdk/graphql.py
@@ -2,6 +2,8 @@
 
 from typing import Any
 
+from pydantic import BaseModel
+
 VARIABLE_TYPE_MAPPING = ((str, "String!"), (int, "Int!"), (float, "Float!"), (bool, "Boolean!"))
 
@@ -15,6 +17,9 @@ def convert_to_graphql_as_string(value: str | bool | list) -> str:
     if isinstance(value, list):
         values_as_string = [convert_to_graphql_as_string(item) for item in value]
         return "[" + ", ".join(values_as_string) + "]"
+    if isinstance(value, BaseModel):
+        data = value.model_dump()
+        return "{ " + ", ".join(f"{key}: {convert_to_graphql_as_string(val)}" for key, val in data.items()) + " }"
     return str(value)
 
diff --git a/infrahub_sdk/node.py b/infrahub_sdk/node.py
index 6a81835..3d84fb9 100644
--- a/infrahub_sdk/node.py
+++ b/infrahub_sdk/node.py
@@ -23,6 +23,7 @@
     from .client import InfrahubClient, InfrahubClientSync
     from .schema import AttributeSchemaAPI, MainSchemaTypesAPI, RelationshipSchemaAPI
+    from .types import Order
 
 # pylint: disable=too-many-lines
 
@@ -977,6 +978,7 @@ def generate_query_data_init(
         include: list[str] | None = None,
         exclude: list[str] | None = None,
         partial_match: bool = False,
+        order: Order | None = None,
     ) -> dict[str, Any | dict]:
         data: dict[str, Any] = {
             "count": None,
@@ -985,6 +987,9 @@ def generate_query_data_init(
 
         data["@filters"] = filters or {}
 
+        if order:
+            data["@filters"]["order"] = order
+
         if offset:
             data["@filters"]["offset"] = offset
 
@@ -1176,9 +1181,16 @@ async def generate_query_data(
         prefetch_relationships: bool = False,
         partial_match: bool = False,
         property: bool = False,
+        order: Order | None = None,
     ) -> dict[str, Any | dict]:
         data = self.generate_query_data_init(
-            filters=filters, offset=offset, limit=limit, include=include, exclude=exclude, partial_match=partial_match
+            filters=filters,
+            offset=offset,
+            limit=limit,
+            include=include,
+            exclude=exclude,
+            partial_match=partial_match,
+            order=order,
         )
         data["edges"]["node"].update(
             await
self.generate_query_data_node( @@ -1682,9 +1694,16 @@ def generate_query_data( prefetch_relationships: bool = False, partial_match: bool = False, property: bool = False, + order: Order | None = None, ) -> dict[str, Any | dict]: data = self.generate_query_data_init( - filters=filters, offset=offset, limit=limit, include=include, exclude=exclude, partial_match=partial_match + filters=filters, + offset=offset, + limit=limit, + include=include, + exclude=exclude, + partial_match=partial_match, + order=order, ) data["edges"]["node"].update( self.generate_query_data_node( diff --git a/infrahub_sdk/queries.py b/infrahub_sdk/queries.py index 75bc593..9f24f73 100644 --- a/infrahub_sdk/queries.py +++ b/infrahub_sdk/queries.py @@ -42,3 +42,72 @@ def get_commit_update_mutation(is_read_only: bool = False) -> str: } } """ + +QUERY_USER = """ +query GET_PROFILE_DETAILS { + AccountProfile { + id + display_label + account_type { + value + __typename + updated_at + } + status { + label + value + updated_at + __typename + } + description { + value + updated_at + __typename + } + label { + value + updated_at + __typename + } + member_of_groups { + count + edges { + node { + display_label + group_type { + value + } + ... on CoreAccountGroup { + id + roles { + count + edges { + node { + permissions { + count + edges { + node { + display_label + identifier { + value + } + } + } + } + } + } + } + display_label + } + } + } + } + __typename + name { + value + updated_at + __typename + } + } +} +""" diff --git a/infrahub_sdk/schema/main.py b/infrahub_sdk/schema/main.py index 069f744..760d743 100644 --- a/infrahub_sdk/schema/main.py +++ b/infrahub_sdk/schema/main.py @@ -264,6 +264,7 @@ class BaseSchema(BaseModel): icon: str | None = None uniqueness_constraints: list[list[str]] | None = None documentation: str | None = None + order_by: list[str] | None = None @property def kind(self) -> str: diff --git a/infrahub_sdk/testing/schemas/animal.py b/infrahub_sdk/testing/schemas/animal.py index 636588d..1210a31 100644 --- a/infrahub_sdk/testing/schemas/animal.py +++ b/infrahub_sdk/testing/schemas/animal.py @@ -81,6 +81,7 @@ def schema_cat(self) -> NodeSchema: include_in_menu=True, inherit_from=[TESTING_ANIMAL], display_labels=["name__value", "breed__value", "color__value"], + order_by=["name__value"], attributes=[ Attr(name="breed", kind=AttributeKind.TEXT, optional=False), Attr(name="color", kind=AttributeKind.COLOR, default_value="#555555", optional=True), diff --git a/infrahub_sdk/types.py b/infrahub_sdk/types.py index e885d2d..f6a75fd 100644 --- a/infrahub_sdk/types.py +++ b/infrahub_sdk/types.py @@ -4,6 +4,8 @@ from logging import Logger from typing import TYPE_CHECKING, Any, Protocol, Union, runtime_checkable +from pydantic import BaseModel + if TYPE_CHECKING: import httpx @@ -64,3 +66,7 @@ def exception(self, event: str | None = None, *args: Any, **kw: Any) -> Any: InfrahubLoggers = Union[InfrahubLogger, Logger] + + +class Order(BaseModel): + disable: bool | None = None diff --git a/infrahub_sdk/utils.py b/infrahub_sdk/utils.py index 339c66e..1231dae 100644 --- a/infrahub_sdk/utils.py +++ b/infrahub_sdk/utils.py @@ -335,3 +335,20 @@ def write_to_file(path: Path, value: Any) -> bool: written = path.write_text(to_write) return written is not None + + +def get_user_permissions(data: list[dict]) -> dict: + groups = {} + for group in data: + group_name = group["node"]["display_label"] + permissions = [] + + roles = group["node"].get("roles", {}).get("edges", []) + for role in roles: + role_permissions = 
role["node"].get("permissions", {}).get("edges", []) + for permission in role_permissions: + permissions.append(permission["node"]["identifier"]["value"]) + + groups[group_name] = permissions + + return groups diff --git a/pyproject.toml b/pyproject.toml index f89684d..fe9c6a9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,11 +1,11 @@ [project] name = "infrahub-sdk" -version = "1.4.1" +version = "1.5.0" requires-python = ">=3.9" [tool.poetry] name = "infrahub-sdk" -version = "1.4.1" +version = "1.5.0" description = "Python Client to interact with Infrahub" authors = ["OpsMill "] readme = "README.md" @@ -164,33 +164,19 @@ disallow_untyped_defs = true [[tool.mypy.overrides]] module = "infrahub_sdk.ctl.check" -disable_error_code = [ - "call-overload" -] +disable_error_code = ["call-overload"] [[tool.mypy.overrides]] module = "infrahub_sdk.ctl.generator" -disable_error_code = [ - "attr-defined", -] +disable_error_code = ["attr-defined"] [[tool.mypy.overrides]] module = "infrahub_sdk.ctl.schema" -disable_error_code = [ - "arg-type", - "attr-defined", - "misc", - "union-attr", -] +disable_error_code = ["arg-type", "attr-defined", "misc", "union-attr"] [[tool.mypy.overrides]] module = "infrahub_sdk.utils" -disable_error_code = [ - "arg-type", - "attr-defined", - "return-value", - "union-attr", -] +disable_error_code = ["arg-type", "attr-defined", "return-value", "union-attr"] [tool.ruff] line-length = 120 @@ -215,31 +201,31 @@ task-tags = ["FIXME", "TODO", "XXX"] select = ["ALL"] ignore = [ - "D", # pydocstyle - "DOC", # pydoclint - "CPY", # flake8-copyright - "T201", # use of `print` - "ISC", # flake8-implicit-str-concat - "COM812", # missing-trailing-comma + "D", # pydocstyle + "DOC", # pydoclint + "CPY", # flake8-copyright + "T201", # use of `print` + "ISC", # flake8-implicit-str-concat + "COM812", # missing-trailing-comma ################################################################################################## # Rules below needs to be Investigated # ################################################################################################## - "PT", # flake8-pytest-style - "PGH", # pygrep-hooks - "ERA", # eradicate commented-out code - "SLF001", # flake8-self - "EM", # flake8-errmsg - "TRY", # tryceratops - "TD", # flake8-todos - "FIX", # flake8-fixme - "TID", # flake8-tidy-imports - "FBT", # flake8-boolean-trap - "G", # flake8-logging-format - "FLY", # flynt - "RSE", # flake8-raise - "BLE", # flake8-blind-except (BLE) - "A", # flake8-builtins + "PT", # flake8-pytest-style + "PGH", # pygrep-hooks + "ERA", # eradicate commented-out code + "SLF001", # flake8-self + "EM", # flake8-errmsg + "TRY", # tryceratops + "TD", # flake8-todos + "FIX", # flake8-fixme + "TID", # flake8-tidy-imports + "FBT", # flake8-boolean-trap + "G", # flake8-logging-format + "FLY", # flynt + "RSE", # flake8-raise + "BLE", # flake8-blind-except (BLE) + "A", # flake8-builtins ################################################################################################## # The ignored rules below should be removed once the code has been updated, they are included # @@ -313,21 +299,21 @@ max-complexity = 17 ################################################################################################## # Review and change the below later # ################################################################################################## - "ANN201", # ANN201 Missing return type annotation for public function - "ANN202", # Missing return type annotation for private function - "ANN204", # Missing 
return type annotation for special method - "ANN401", # Dynamically typed expressions (typing.Any) are disallowed + "ANN201", # ANN201 Missing return type annotation for public function + "ANN202", # Missing return type annotation for private function + "ANN204", # Missing return type annotation for special method + "ANN401", # Dynamically typed expressions (typing.Any) are disallowed ] "infrahub_sdk/client.py" = [ ################################################################################################## # Review and change the below later # ################################################################################################## - "PLR0904", # Too many public methods + "PLR0904", # Too many public methods ] "infrahub_sdk/pytest_plugin/models.py" = [ - "S105", # 'PASS' is not a password but a state + "S105", # 'PASS' is not a password but a state ] @@ -342,10 +328,10 @@ max-complexity = 17 ################################################################################################## # Review and change the below later # ################################################################################################## - "ANN001", # Missing type annotation for function argument - "ANN201", # ANN201 Missing return type annotation for public function - "ANN202", # Missing return type annotation for private function - "ANN204", # Missing return type annotation for special method + "ANN001", # Missing type annotation for function argument + "ANN201", # ANN201 Missing return type annotation for public function + "ANN202", # Missing return type annotation for private function + "ANN204", # Missing return type annotation for special method ] "tests/unit/sdk/test_client.py" = [ diff --git a/tests/fixtures/account_profile.json b/tests/fixtures/account_profile.json new file mode 100644 index 0000000..b3dfa7b --- /dev/null +++ b/tests/fixtures/account_profile.json @@ -0,0 +1,79 @@ +{ + "data": { + "AccountProfile": { + "id": "1816ebcd-cea7-3bf7-3fc9-c51282f03fe7", + "display_label": "Admin", + "account_type": { + "value": "User", + "__typename": "TextAttribute", + "updated_at": "2025-01-02T16:06:15.565985+00:00" + }, + "status": { + "label": "Active", + "value": "active", + "updated_at": "2025-01-02T16:06:15.565985+00:00", + "__typename": "Dropdown" + }, + "description": { + "value": null, + "updated_at": "2025-01-02T16:06:15.565985+00:00", + "__typename": "TextAttribute" + }, + "label": { + "value": "Admin", + "updated_at": "2025-01-02T16:06:15.565985+00:00", + "__typename": "TextAttribute" + }, + "member_of_groups": { + "count": 1, + "edges": [ + { + "node": { + "display_label": "Super Administrators", + "group_type": { + "value": "default" + }, + "id": "1816ebce-1cbe-2e96-3fc3-c5124c324bac", + "roles": { + "count": 1, + "edges": [ + { + "node": { + "permissions": { + "count": 2, + "edges": [ + { + "node": { + "display_label": "super_admin 6", + "identifier": { + "value": "global:super_admin:allow_all" + } + } + }, + { + "node": { + "display_label": "* * any 6", + "identifier": { + "value": "object:*:*:any:allow_all" + } + } + } + ] + } + } + } + ] + } + } + } + ] + }, + "__typename": "CoreAccount", + "name": { + "value": "admin", + "updated_at": "2025-01-02T16:06:15.565985+00:00", + "__typename": "TextAttribute" + } + } + } +} \ No newline at end of file diff --git a/tests/fixtures/batch/mock_query_location_page1.json b/tests/fixtures/batch/mock_query_location_page1.json new file mode 100644 index 0000000..3d20d1b --- /dev/null +++ 
b/tests/fixtures/batch/mock_query_location_page1.json @@ -0,0 +1,69 @@ +{ + "data": { + "BuiltinLocation": { + "count": 30, + "edges": [ + { + "node": { + "id": "18187157-032c-2b7b-35b6-c5132b0ec757", + "hfid": [ + "Location 4" + ], + "display_label": "Location 4", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 4" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-057b-d717-35b5-c51d0ee87714", + "hfid": [ + "Location 5" + ], + "display_label": "Location 5", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 5" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-078f-33e6-35bd-c51064d1da09", + "hfid": [ + "Location 6" + ], + "display_label": "Location 6", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 6" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/tests/fixtures/batch/mock_query_location_page10.json b/tests/fixtures/batch/mock_query_location_page10.json new file mode 100644 index 0000000..2b9cb5e --- /dev/null +++ b/tests/fixtures/batch/mock_query_location_page10.json @@ -0,0 +1,69 @@ +{ + "data": { + "BuiltinLocation": { + "count": 30, + "edges": [ + { + "node": { + "id": "18187157-4467-a731-35b0-c51343a6de25", + "hfid": [ + "Location 28" + ], + "display_label": "Location 28", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 28" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-460e-0a60-35b9-c51ae0bf3282", + "hfid": [ + "Location 29" + ], + "display_label": "Location 29", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 29" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-47e7-e6f8-35b4-c5126eb24b25", + "hfid": [ + "Location 30" + ], + "display_label": "Location 30", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 30" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/tests/fixtures/batch/mock_query_location_page2.json b/tests/fixtures/batch/mock_query_location_page2.json new file mode 100644 index 0000000..26afa74 --- /dev/null +++ b/tests/fixtures/batch/mock_query_location_page2.json @@ -0,0 +1,69 @@ +{ + "data": { + "BuiltinLocation": { + "count": 30, + "edges": [ + { + "node": { + "id": "18187156-c105-8bf8-35b4-c51939ad567e", + "hfid": [ + "Location 1" + ], + "display_label": "Location 1", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 1" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187156-ebed-396c-35b0-c51d157b594d", + "hfid": [ + "Location 2" + ], + "display_label": "Location 2", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 2" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-0159-c0cf-35b0-c5120224953b", + "hfid": [ + "Location 3" + ], + "display_label": "Location 3", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 3" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + } + ] 
+ } + } +} \ No newline at end of file diff --git a/tests/fixtures/batch/mock_query_location_page3.json b/tests/fixtures/batch/mock_query_location_page3.json new file mode 100644 index 0000000..f8db266 --- /dev/null +++ b/tests/fixtures/batch/mock_query_location_page3.json @@ -0,0 +1,69 @@ +{ + "data": { + "BuiltinLocation": { + "count": 30, + "edges": [ + { + "node": { + "id": "18187157-0e55-89f9-35bb-c51676dc02d7", + "hfid": [ + "Location 10" + ], + "display_label": "Location 10", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 10" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-1f00-6855-35be-c51faaac41a7", + "hfid": [ + "Location 11" + ], + "display_label": "Location 11", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 11" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-22ee-0b34-35b6-c5161a25d5c0", + "hfid": [ + "Location 12" + ], + "display_label": "Location 12", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 12" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/tests/fixtures/batch/mock_query_location_page4.json b/tests/fixtures/batch/mock_query_location_page4.json new file mode 100644 index 0000000..9579fdf --- /dev/null +++ b/tests/fixtures/batch/mock_query_location_page4.json @@ -0,0 +1,69 @@ +{ + "data": { + "BuiltinLocation": { + "count": 30, + "edges": [ + { + "node": { + "id": "18187157-094d-aaa9-35bd-c51e4fb264dd", + "hfid": [ + "Location 7" + ], + "display_label": "Location 7", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 7" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-0af4-719e-35b8-c5187f3c7e7d", + "hfid": [ + "Location 8" + ], + "display_label": "Location 8", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 8" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-0cb4-98fe-35b7-c5129da82bf7", + "hfid": [ + "Location 9" + ], + "display_label": "Location 9", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 9" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/tests/fixtures/batch/mock_query_location_page5.json b/tests/fixtures/batch/mock_query_location_page5.json new file mode 100644 index 0000000..6904fb8 --- /dev/null +++ b/tests/fixtures/batch/mock_query_location_page5.json @@ -0,0 +1,69 @@ +{ + "data": { + "BuiltinLocation": { + "count": 30, + "edges": [ + { + "node": { + "id": "18187157-3f82-8f33-35ba-c51ede0ca510", + "hfid": [ + "Location 25" + ], + "display_label": "Location 25", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 25" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-4121-02f8-35b2-c51ae4077d24", + "hfid": [ + "Location 26" + ], + "display_label": "Location 26", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 26" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-42a9-d237-35be-c5113cfc0566", + "hfid": [ + 
"Location 27" + ], + "display_label": "Location 27", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 27" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/tests/fixtures/batch/mock_query_location_page6.json b/tests/fixtures/batch/mock_query_location_page6.json new file mode 100644 index 0000000..dc067ff --- /dev/null +++ b/tests/fixtures/batch/mock_query_location_page6.json @@ -0,0 +1,69 @@ +{ + "data": { + "BuiltinLocation": { + "count": 30, + "edges": [ + { + "node": { + "id": "18187157-30fd-1b64-35be-c514408c7536", + "hfid": [ + "Location 19" + ], + "display_label": "Location 19", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 19" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-32ac-49b7-35b0-c518a6d4da07", + "hfid": [ + "Location 20" + ], + "display_label": "Location 20", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 20" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-38a6-6f2a-35b9-c51a8459cee4", + "hfid": [ + "Location 21" + ], + "display_label": "Location 21", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 21" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/tests/fixtures/batch/mock_query_location_page7.json b/tests/fixtures/batch/mock_query_location_page7.json new file mode 100644 index 0000000..e6d863d --- /dev/null +++ b/tests/fixtures/batch/mock_query_location_page7.json @@ -0,0 +1,69 @@ +{ + "data": { + "BuiltinLocation": { + "count": 30, + "edges": [ + { + "node": { + "id": "18187157-3a50-bba7-35bb-c5160d2b7e1e", + "hfid": [ + "Location 22" + ], + "display_label": "Location 22", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 22" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-3bf9-45c8-35b3-c51040fb8222", + "hfid": [ + "Location 23" + ], + "display_label": "Location 23", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 23" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-3dd8-7ab3-35b8-c51e08ca546d", + "hfid": [ + "Location 24" + ], + "display_label": "Location 24", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 24" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/tests/fixtures/batch/mock_query_location_page8.json b/tests/fixtures/batch/mock_query_location_page8.json new file mode 100644 index 0000000..7f21eda --- /dev/null +++ b/tests/fixtures/batch/mock_query_location_page8.json @@ -0,0 +1,69 @@ +{ + "data": { + "BuiltinLocation": { + "count": 30, + "edges": [ + { + "node": { + "id": "18187157-26fb-e960-35be-c51122cddc46", + "hfid": [ + "Location 13" + ], + "display_label": "Location 13", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 13" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-2891-c10c-35b8-c5147ff7bcfe", + "hfid": [ + "Location 14" + ], + "display_label": "Location 14", + 
"__typename": "BuiltinLocation", + "name": { + "value": "Location 14" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-2a78-7ed7-35b9-c51bf5d86229", + "hfid": [ + "Location 15" + ], + "display_label": "Location 15", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 15" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/tests/fixtures/batch/mock_query_location_page9.json b/tests/fixtures/batch/mock_query_location_page9.json new file mode 100644 index 0000000..8b6629e --- /dev/null +++ b/tests/fixtures/batch/mock_query_location_page9.json @@ -0,0 +1,69 @@ +{ + "data": { + "BuiltinLocation": { + "count": 30, + "edges": [ + { + "node": { + "id": "18187157-2c1d-b374-35b9-c517ba9eb45d", + "hfid": [ + "Location 16" + ], + "display_label": "Location 16", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 16" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-2db6-d251-35b5-c51e64fefb99", + "hfid": [ + "Location 17" + ], + "display_label": "Location 17", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 17" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + }, + { + "node": { + "id": "18187157-2f52-a6dd-35b4-c51747c96d6d", + "hfid": [ + "Location 18" + ], + "display_label": "Location 18", + "__typename": "BuiltinLocation", + "name": { + "value": "Location 18" + }, + "description": { + "value": null + }, + "tags": { + "count": 0, + "edges": [] + } + } + } + ] + } + } +} \ No newline at end of file diff --git a/tests/integration/test_infrahub_client.py b/tests/integration/test_infrahub_client.py index 4057d71..c18bc5f 100644 --- a/tests/integration/test_infrahub_client.py +++ b/tests/integration/test_infrahub_client.py @@ -59,7 +59,22 @@ async def test_get_all(self, client: InfrahubClient, base_dataset): nodes = await client.all(kind=TESTING_CAT) assert len(nodes) == 2 assert isinstance(nodes[0], InfrahubNode) - assert sorted([node.name.value for node in nodes]) == ["Bella", "Luna"] + assert [node.name.value for node in nodes] == ["Bella", "Luna"] + + # TODO enable these tests for infrahub version containing this commit + # https://github.com/opsmill/infrahub/commit/5a4d6860196b5bfb51fb8a124f33125f4a0b6753 + # when we support testing against multiple infrahub versions. 
+ # async def test_get_all_no_order(self, client: InfrahubClient, base_dataset): + # nodes = await client.all(kind=TESTING_CAT, order=Order(disable=True)) + # assert len(nodes) == 2 + # assert isinstance(nodes[0], InfrahubNode) + # assert {node.name.value for node in nodes} == {"Bella", "Luna"} + # + # async def test_get_filters_no_order(self, client: InfrahubClient, base_dataset): + # nodes = await client.filters(kind=TESTING_CAT, order=Order(disable=True)) + # assert len(nodes) == 2 + # assert isinstance(nodes[0], InfrahubNode) + # assert {node.name.value for node in nodes} == {"Bella", "Luna"} async def test_get_one(self, client: InfrahubClient, base_dataset, cat_luna, person_sophia): node1 = await client.get(kind=TESTING_CAT, id=cat_luna.id) diff --git a/tests/unit/ctl/conftest.py b/tests/unit/ctl/conftest.py index 17c0ecc..7e00b02 100644 --- a/tests/unit/ctl/conftest.py +++ b/tests/unit/ctl/conftest.py @@ -1,6 +1,8 @@ import pytest from pytest_httpx import HTTPXMock +from tests.unit.sdk.conftest import mock_query_infrahub_user, mock_query_infrahub_version # noqa: F401 + @pytest.fixture async def mock_branches_list_query(httpx_mock: HTTPXMock) -> HTTPXMock: diff --git a/tests/unit/ctl/test_cli.py b/tests/unit/ctl/test_cli.py index a6df7f9..d73a710 100644 --- a/tests/unit/ctl/test_cli.py +++ b/tests/unit/ctl/test_cli.py @@ -27,3 +27,46 @@ def test_validate_all_groups_have_names(): assert app.registered_groups for group in app.registered_groups: assert group.name + + +@requires_python_310 +def test_version_command(): + result = runner.invoke(app, ["version"]) + assert result.exit_code == 0 + assert "Python SDK: v" in result.stdout + + +@requires_python_310 +def test_info_command_success(mock_query_infrahub_version, mock_query_infrahub_user): + result = runner.invoke(app, ["info"]) + assert result.exit_code == 0 + for expected in ["Connection Status", "Python Version", "SDK Version", "Infrahub Version"]: + assert expected in result.stdout, f"'{expected}' not found in info command output" + + +@requires_python_310 +def test_info_command_failure(): + result = runner.invoke(app, ["info"]) + assert result.exit_code == 0 + assert "Connection Error" in result.stdout + + +@requires_python_310 +def test_info_detail_command_success(mock_query_infrahub_version, mock_query_infrahub_user): + result = runner.invoke(app, ["info", "--detail"]) + assert result.exit_code == 0 + for expected in [ + "Connection Status", + "Version Information", + "Client Info", + "Infrahub Info", + "Groups:", + ]: + assert expected in result.stdout, f"'{expected}' not found in detailed info command output" + + +@requires_python_310 +def test_info_detail_command_failure(): + result = runner.invoke(app, ["info", "--detail"]) + assert result.exit_code == 0 + assert "Error Reason" in result.stdout diff --git a/tests/unit/sdk/conftest.py b/tests/unit/sdk/conftest.py index e802faa..7f1d494 100644 --- a/tests/unit/sdk/conftest.py +++ b/tests/unit/sdk/conftest.py @@ -1680,6 +1680,12 @@ async def mock_query_corenode_page1_1(httpx_mock: HTTPXMock, client: InfrahubCli return httpx_mock +@pytest.fixture +async def mock_query_repository_count(httpx_mock: HTTPXMock, client: InfrahubClient, mock_schema_query_01) -> HTTPXMock: + httpx_mock.add_response(method="POST", json={"data": {"CoreRepository": {"count": 5}}}) + return httpx_mock + + @pytest.fixture async def mock_query_repository_page1_empty( httpx_mock: HTTPXMock, client: InfrahubClient, mock_schema_query_01 @@ -2135,6 +2141,19 @@ async def 
mock_query_mutation_location_create_failed(httpx_mock: HTTPXMock) -> H return httpx_mock +@pytest.fixture +async def mock_query_infrahub_version(httpx_mock: HTTPXMock) -> HTTPXMock: + httpx_mock.add_response(method="POST", json={"data": {"InfrahubInfo": {"version": "1.1.0"}}}) + return httpx_mock + + +@pytest.fixture +async def mock_query_infrahub_user(httpx_mock: HTTPXMock) -> HTTPXMock: + response_text = (get_fixtures_dir() / "account_profile.json").read_text(encoding="UTF-8") + httpx_mock.add_response(method="POST", json=ujson.loads(response_text)) + return httpx_mock + + @pytest.fixture def query_01() -> str: """Simple query with one document""" @@ -2468,3 +2487,25 @@ async def mock_schema_query_ipam(httpx_mock: HTTPXMock) -> HTTPXMock: httpx_mock.add_response(method="GET", url="http://mock/api/schema?branch=main", json=ujson.loads(response_text)) return httpx_mock + + +@pytest.fixture +async def mock_query_location_batch_count( + httpx_mock: HTTPXMock, client: InfrahubClient, mock_schema_query_01 +) -> HTTPXMock: + response = {"data": {"BuiltinLocation": {"count": 30}}} + httpx_mock.add_response(method="POST", url="http://mock/graphql/main", json=response) + return httpx_mock + + +@pytest.fixture +async def mock_query_location_batch(httpx_mock: HTTPXMock, client: InfrahubClient, mock_schema_query_01) -> HTTPXMock: + for i in range(1, 11): + filename = get_fixtures_dir() / "batch" / f"mock_query_location_page{i}.json" + response_text = filename.read_text(encoding="UTF-8") + httpx_mock.add_response( + method="POST", + json=ujson.loads(response_text), + match_headers={"X-Infrahub-Tracker": f"query-builtinlocation-page{i}"}, + ) + return httpx_mock diff --git a/tests/unit/sdk/test_client.py b/tests/unit/sdk/test_client.py index b34ec87..3f08780 100644 --- a/tests/unit/sdk/test_client.py +++ b/tests/unit/sdk/test_client.py @@ -10,6 +10,13 @@ async_client_methods = [method for method in dir(InfrahubClient) if not method.startswith("_")] sync_client_methods = [method for method in dir(InfrahubClientSync) if not method.startswith("_")] +batch_client_types = [ + ("standard", False), + ("standard", True), + ("sync", False), + ("sync", True), +] + client_types = ["standard", "sync"] @@ -66,6 +73,48 @@ async def test_get_repositories( } +@pytest.mark.parametrize("client_type", client_types) +async def test_method_count(clients, mock_query_repository_count, client_type): # pylint: disable=unused-argument + if client_type == "standard": + count = await clients.standard.count(kind="CoreRepository") + else: + count = clients.sync.count(kind="CoreRepository") + + assert count == 5 + + +@pytest.mark.parametrize("client_type", client_types) +async def test_method_get_version(clients, mock_query_infrahub_version, client_type): # pylint: disable=unused-argument + if client_type == "standard": + version = await clients.standard.get_version() + else: + version = clients.sync.get_version() + + assert version == "1.1.0" + + +@pytest.mark.parametrize("client_type", client_types) +async def test_method_get_user(clients, mock_query_infrahub_user, client_type): # pylint: disable=unused-argument + if client_type == "standard": + user = await clients.standard.get_user() + else: + user = clients.sync.get_user() + + assert isinstance(user, dict) + assert user["AccountProfile"]["display_label"] == "Admin" + + +@pytest.mark.parametrize("client_type", client_types) +async def test_method_get_user_permissions(clients, mock_query_infrahub_user, client_type): # pylint: disable=unused-argument + if client_type == 
"standard": + groups = await clients.standard.get_user_permissions() + else: + groups = clients.sync.get_user_permissions() + + assert isinstance(groups, dict) + assert groups["Super Administrators"] == ["global:super_admin:allow_all", "object:*:*:any:allow_all"] + + @pytest.mark.parametrize("client_type", client_types) async def test_method_all_with_limit(clients, mock_query_repository_page1_2, client_type): # pylint: disable=unused-argument if client_type == "standard": @@ -104,6 +153,26 @@ async def test_method_all_multiple_pages( assert len(repos) == 5 +@pytest.mark.parametrize("client_type, use_parallel", batch_client_types) +async def test_method_all_batching( + clients, mock_query_location_batch_count, mock_query_location_batch, client_type, use_parallel +): # pylint: disable=unused-argument + if client_type == "standard": + locations = await clients.standard.all(kind="BuiltinLocation", parallel=use_parallel) + assert not clients.standard.store._store["BuiltinLocation"] + + locations = await clients.standard.all(kind="BuiltinLocation", populate_store=True, parallel=use_parallel) + assert len(clients.standard.store._store["BuiltinLocation"]) == 30 + else: + locations = clients.sync.all(kind="BuiltinLocation", parallel=use_parallel) + assert not clients.sync.store._store["BuiltinLocation"] + + locations = clients.sync.all(kind="BuiltinLocation", populate_store=True, parallel=use_parallel) + assert len(clients.sync.store._store["BuiltinLocation"]) == 30 + + assert len(locations) == 30 + + @pytest.mark.parametrize("client_type", client_types) async def test_method_all_single_page(clients, mock_query_repository_page1_1, client_type): # pylint: disable=unused-argument if client_type == "standard":