Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Activate ruff rules on tests/ folder #3999

Merged
merged 3 commits
Feb 5, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 4 additions & 21 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -241,7 +241,6 @@ ignore = [
"B011", # 7 occurences [*] assert-false
"PT015", # 7 occurences [ ] pytest-assert-always-false
"N815", # 7 occurences [ ] mixed-case-variable-in-class-scope
"A001", # 6 occurences [ ] builtin-variable-shadowing
"PT006", # 6 occurences [*] pytest-parametrize-names-wrong-type
"RET504", # 6 occurences [*] unnecessary-assign
"N803", # 6 occurences [ ] invalid-argument-name
Expand All @@ -254,12 +253,7 @@ ignore = [
"B006", # 4 occurences [*] mutable-argument-default
"PIE794", # 4 occurences [*] duplicate-class-field-definition
"PTH103", # 4 occurences [ ] os-makedirs
"E711", # 4 occurences [*] none-comparison
"E721", # 4 occurences [ ] type-comparison
"PLW2901", # 4 occurences [ ] redefined-loop-name
"RUF003", # 4 occurences [ ] ambiguous-unicode-character-comment
"B904", # 3 occurences [ ] raise-without-from-inside-except
"A002", # 3 occurences [ ] builtin-argument-shadowing
"PTH112", # 3 occurences [ ] os-path-isdir
"W291", # 3 occurences [*] trailing-whitespace
"RUF017", # 3 occurences [*] quadratic-list-summation
Expand All @@ -268,34 +262,23 @@ ignore = [
"S605", # 2 occurences [ ] start-process-with-a-shell
"C408", # 2 occurences [*] unnecessary-collection-call
"C416", # 2 occurences [*] unnecessary-comprehension
"FIX003", # 2 occurences [ ] line-contains-xxx
"PIE810", # 2 occurences [*] multiple-starts-ends-with
"T203", # 2 occurences [*] p-print
"ARG005", # 2 occurences [ ] unused-lambda-argument
"PTH100", # 2 occurences [ ] os-path-abspath
"PTH109", # 2 occurences [ ] os-getcwd
"D210", # 2 occurences [ ] surrounding-whitespace
"FURB129", # 2 occurences [*] readlines-in-for
"RUF002", # 2 occurences [ ] ambiguous-unicode-character-docstring
"ASYNC251", # 1 occurences [ ] blocking-sleep-in-async-function
"S108", # 1 occurences [ ] hardcoded-temp-file
"S110", # 1 occurences [ ] try-except-pass
"C400", # 1 occurences [*] unnecessary-generator-list
"C401", # 1 occurences [*] unnecessary-generator-set
"SIM105", # 1 occurences [ ] suppressible-exception
"TD005", # 1 occurences [ ] missing-todo-description
"PTH113", # 1 occurences [ ] os-path-isfile
"F403", # 1 occurences [ ] undefined-local-with-import-star
"PLC0206", # 1 occurences [ ] dict-index-missing-items
"PLR0133", # 1 occurences [ ] comparison-of-constant
"RUF006", # 1 occurences [ ] asyncio-dangling-task

# keep those rules
# keep those exceptions
"FIX003", # line-contains-xxx: freedom of speech!
"FIX004", # line-contains-xxx: freedom of speech!
"PLR1730", # if-stmt-min-max: not clear that it makes the code easier to read
"RET506", # superfluous-else-raise: requires a slightly higher cognitive effort to understand the code
"RET507", # superfluous-else-continue : requires a slightly higher cognitive effort to understand the code
"RET508", # superfluous-else-break: requires a slightly higher cognitive effort to understand the code
"RET505", # superfluous-else-return: requires a slightly higher cognitive effort to understand the code
"S108", # hardcoded-temp-file: test code may contains weird things
]
"utils/build/*" = ["ALL"]
"lib-injection/*" = ["ALL"]
Expand Down
8 changes: 4 additions & 4 deletions tests/appsec/rasp/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,20 +84,20 @@ def find_series(is_metrics: bool, namespace, metric):
return series


def validate_metric(name, type, metric):
def validate_metric(name, metric_type, metric):
return (
metric.get("metric") == name
and metric.get("type") == "count"
and f"rule_type:{type}" in metric.get("tags", ())
and f"rule_type:{metric_type}" in metric.get("tags", ())
and any(s.startswith("waf_version:") for s in metric.get("tags", ()))
)


def validate_metric_variant(name, type, variant, metric):
def validate_metric_variant(name, metric_type, variant, metric):
return (
metric.get("metric") == name
and metric.get("type") == "count"
and f"rule_type:{type}" in metric.get("tags", ())
and f"rule_type:{metric_type}" in metric.get("tags", ())
and f"rule_variant:{variant}" in metric.get("tags", ())
and any(s.startswith("waf_version:") for s in metric.get("tags", ()))
)
Expand Down
4 changes: 2 additions & 2 deletions tests/appsec/waf/test_addresses.py
Original file line number Diff line number Diff line change
Expand Up @@ -338,8 +338,8 @@ def test_basic(self):
for r in self.requests:
try:
interfaces.library.assert_waf_attack(r, address="grpc.server.request.message")
except:
raise ValueError(f"Basic attack #{self.requests.index(r)} not detected")
except Exception as e:
raise ValueError(f"Basic attack #{self.requests.index(r)} not detected") from e


@rfc("https://datadoghq.atlassian.net/wiki/spaces/APS/pages/2278064284/gRPC+Protocol+Support")
Expand Down
4 changes: 2 additions & 2 deletions tests/appsec/waf/test_reports.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,10 +107,10 @@ def validate_rules_monitoring_span_tags(span):
raise Exception("if there are rule errors, there should be rule error details too")
try:
json.loads(meta[expected_rules_errors_meta_tag])
except ValueError:
except ValueError as e:
raise Exception(
f"rule error details should be valid JSON but was `{meta[expected_rules_errors_meta_tag]}`"
)
) from e

return True

Expand Down
2 changes: 1 addition & 1 deletion tests/appsec/waf/test_telemetry.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ def _setup(self):
r_blocked = weblog.get(
"/",
headers={"x-forwarded-for": "80.80.80.80", "user-agent": "dd-test-scanner-log-block"},
# XXX: hack to prevent rid inhibiting the dd-test-scanner-log-block rule
# Hack to prevent rid inhibiting the dd-test-scanner-log-block rule
rid_in_user_agent=False,
)
Test_TelemetryMetrics.__common_setup_done = True
Expand Down
4 changes: 2 additions & 2 deletions tests/auto_inject/test_auto_inject_install.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,8 +149,8 @@ def test_crash_no_zombie(self, virtual_machine):
@scenarios.installer_auto_injection
class TestInstallerAutoInjectManual(base.AutoInjectBaseTest):
# Note: uninstallation of a single installer package is not available today
#  on the installer. As we can't only uninstall the injector, we are skipping
#  the uninstall test today
# on the installer. As we can't only uninstall the injector, we are skipping
# the uninstall test today
@parametrize_virtual_machines(
bugs=[
{"vm_name": "AlmaLinux_8_arm64", "weblog_variant": "test-app-python-alpine", "reason": "APMON-1576"},
Expand Down
4 changes: 2 additions & 2 deletions tests/auto_inject/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,8 +147,8 @@ def _test_uninstall(self, virtual_machine):
start_weblog_command = virtual_machine._vm_provision.weblog_installation.remote_command
else: # Container
stop_weblog_command = "sudo -E docker-compose -f docker-compose.yml down"
#   On older Docker versions, the network recreation can hang. The solution is to restart Docker.
#   https://github.com/docker-archive/classicswarm/issues/1931
# On older Docker versions, the network recreation can hang. The solution is to restart Docker.
# https://github.com/docker-archive/classicswarm/issues/1931
start_weblog_command = "sudo systemctl restart docker && sudo -E docker-compose -f docker-compose.yml up --wait --wait-timeout 120"

install_command = "sudo datadog-installer apm instrument"
Expand Down
3 changes: 1 addition & 2 deletions tests/debugger/test_debugger_exception_replay.py
Original file line number Diff line number Diff line change
Expand Up @@ -406,8 +406,7 @@ def setup_exception_replay_rockpaperscissors(self):
shapes = {"rock": False, "paper": False, "scissors": False}

while not all(shapes.values()) and retries < _max_retries:
for shape in shapes:
shape_found = shapes[shape]
for shape, shape_found in shapes.items():
logger.debug(f"{shape} found: {shape_found}, retry #{retries}")

if shape_found:
Expand Down
2 changes: 1 addition & 1 deletion tests/docker_ssi/test_docker_ssi.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ def test_telemetry_abort(self):
inject_result = False
break

assert inject_result != None, "No telemetry data found for inject.success, inject.skip or inject.error"
assert inject_result is not None, "No telemetry data found for inject.success, inject.skip or inject.error"

# The injector detected by itself that the version is not supported
if inject_result == False:
Expand Down
11 changes: 5 additions & 6 deletions tests/fuzzer/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
from logging.handlers import RotatingFileHandler
import os
import signal
import time

import aiohttp
from yarl import URL
Expand Down Expand Up @@ -95,7 +94,7 @@ def __init__(

self.dump_on_status = dump_on_status
self.enable_response_dump = False
self.systematic_exporter = _RequestDumper() if systematic_export else lambda x: 0
self.systematic_exporter = _RequestDumper() if systematic_export else lambda _: 0

self.total_metric = AccumulatedMetric("#", format_string="#{value}", display_length=7, has_raw_value=False)
self.memory_metric = NumericalMetric("Mem")
Expand Down Expand Up @@ -154,7 +153,7 @@ async def wait_for_first_response(self):
self.logger.info(f"First response received after {i} attempts")
return

time.sleep(1)
await asyncio.sleep(1)

raise Exception("Server does not respond")
finally:
Expand All @@ -164,10 +163,10 @@ def run_forever(self):
self.logger.info("")
self.logger.info("=" * 80)

asyncio.ensure_future(self._run(), loop=self.loop)
task = asyncio.ensure_future(self._run(), loop=self.loop)
self.loop.add_signal_handler(signal.SIGINT, self.perform_armageddon)
self.logger.info("Starting event loop")
self.loop.run_forever()
self.loop.run_until_complete(task)

def perform_armageddon(self):
self.finished = True
Expand Down Expand Up @@ -258,7 +257,7 @@ async def _run(self):
task = self.loop.create_task(self._process(session, request))
tasks.add(task)
task.add_done_callback(tasks.remove)
task.add_done_callback(lambda t: self.sem.release())
task.add_done_callback(lambda _: self.sem.release())

request_id += 1

Expand Down
5 changes: 3 additions & 2 deletions tests/fuzzer/corpus.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import os
import sys
import json
from pathlib import Path
from tests.fuzzer.tools.random_strings import get_random_unicode as gru


Expand Down Expand Up @@ -165,9 +166,9 @@ def _load_dir(base_dirname):
if filename.endswith(".json") or filename.endswith(".dump"):
_load_file(os.path.join(base_dirname, filename))

if os.path.isfile(source):
if Path(source).is_file():
_load_file(source)
elif os.path.isdir(source):
elif Path(source).is_dir():
_load_dir(source)
else:
raise ValueError(f"{source} is not a file or a dir")
Expand Down
2 changes: 1 addition & 1 deletion tests/fuzzer/request_mutator.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ def _mutate_item(item):
item = random.choice((True, False))

else:
# TODO
# TODO: other use cases
pass

return item
Expand Down
2 changes: 1 addition & 1 deletion tests/integrations/test_db_integrations_sql.py
Original file line number Diff line number Diff line change
Expand Up @@ -273,7 +273,7 @@ class Test_MsSql(_BaseDatadogDbIntegrationTestClass):
@missing_feature(library="nodejs", reason="Not implemented yet")
def test_db_mssql_instance_name(self):
"""The Microsoft SQL Server instance name connecting to. This name is used to determine the port of a named instance.
This value should be set only if its specified on the mssql connection string.
This value should be set only if it's specified on the mssql connection string.
"""

for db_operation, span in self.get_spans():
Expand Down
2 changes: 1 addition & 1 deletion tests/integrations/test_dsm.py
Original file line number Diff line number Diff line change
Expand Up @@ -471,7 +471,7 @@ def test_dsmcontext_injection_base64(self):
@features.datastreams_monitoring_support_for_base64_encoding
@scenarios.integrations
class Test_DsmContext_Extraction_Base64:
"""Verify DSM context is extracted using "dd-pathway-ctx-base64" """
"""Verify DSM context is extracted using dd-pathway-ctx-base64"""

def setup_dsmcontext_extraction_base64(self):
topic = "dsm-injection-topic"
Expand Down
2 changes: 1 addition & 1 deletion tests/integrations/test_inferred_proxy.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
@features.aws_api_gateway_inferred_span_creation
@scenarios.integrations
class Test_AWS_API_Gateway_Inferred_Span_Creation:
"""Verify DSM context is extracted using "dd-pathway-ctx-base64" """
"""Verify DSM context is extracted using dd-pathway-ctx-base64"""

start_time = round(time.time() * 1e3)
start_time_ns = start_time * 1e6
Expand Down
2 changes: 1 addition & 1 deletion tests/integrations/test_open_telemetry.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ class Test_MsSql(_BaseOtelDbIntegrationTestClass):
)
def test_db_mssql_instance_name(self):
"""The Microsoft SQL Server instance name connecting to. This name is used to determine the port of a named instance.
This value should be set only if its specified on the mssql connection string.
This value should be set only if it's specified on the mssql connection string.
"""
for db_operation, request in self.get_requests():
span = self.get_span_from_agent(request)
Expand Down
29 changes: 14 additions & 15 deletions tests/parametric/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,9 +111,9 @@ def __init__(self, base_url: str, pytest_request: None):
def _url(self, path: str) -> str:
return urllib.parse.urljoin(self._base_url, path)

def _write_log(self, type, json_trace):
def _write_log(self, log_type, json_trace):
with open(self.log_path, "a") as log:
log.write(f"\n{type}>>>>\n")
log.write(f"\n{log_type}>>>>\n")
log.write(json.dumps(json_trace))

def traces(self, clear=False, **kwargs):
Expand All @@ -131,15 +131,14 @@ def set_remote_config(self, path, payload):
def get_remote_config(self):
resp = self._session.get(self._url("/v0.7/config"))
resp_json = resp.json()
list = []
result = []
if resp_json and resp_json["target_files"]:
target_files = resp_json["target_files"]
for target in target_files:
path = target["path"]
msg = json.loads(str(base64.b64decode(target["raw"]), encoding="utf-8"))
dict = {"path": path, "msg": msg}
list.append(dict)
return list
result.append({"path": path, "msg": msg})
return result

def add_remote_config(self, path, payload):
current_rc = self.get_remote_config()
Expand Down Expand Up @@ -181,18 +180,18 @@ def _build_config_path_response(config: list):
client_configs = []
target_files = []
targets_tmp = {}
for dict in config:
client_configs.append(dict["path"])
dict["msg_enc"] = bytes(json.dumps(dict["msg"]), encoding="utf-8")
for item in config:
client_configs.append(item["path"])
item["msg_enc"] = bytes(json.dumps(item["msg"]), encoding="utf-8")
tf = {
"path": dict["path"],
"raw": str(base64.b64encode(dict["msg_enc"]), encoding="utf-8"),
"path": item["path"],
"raw": str(base64.b64encode(item["msg_enc"]), encoding="utf-8"),
}
target_files.append(tf)
targets_tmp[dict["path"]] = {
targets_tmp[item["path"]] = {
"custom": {"c": [""], "v": 0},
"hashes": {"sha256": hashlib.sha256(dict["msg_enc"]).hexdigest()},
"length": len(dict["msg_enc"]),
"hashes": {"sha256": hashlib.sha256(item["msg_enc"]).hexdigest()},
"length": len(item["msg_enc"]),
}

data = {
Expand Down Expand Up @@ -520,7 +519,7 @@ def docker_network(test_id: str) -> Generator[str, None, None]:
# It's possible (why?) of having some container not stopped.
# If it happen, failing here makes stdout tough to understance.
# Let's ignore this, later calls will clean the mess
pass
logger.info("Failed to remove network, ignoring the error")


@pytest.fixture
Expand Down
2 changes: 1 addition & 1 deletion tests/parametric/test_headers_tracecontext.py
Original file line number Diff line number Diff line change
Expand Up @@ -931,7 +931,7 @@ def test_tracestate_w3c_context_leak(self, test_agent, test_library):
)

assert case1["meta"].get("_dd.p.tid") == "3333333333333333"
assert case2["meta"].get("_dd.p.tid") == None
assert case2["meta"].get("_dd.p.tid") is None

@temporary_enable_optin_tracecontext()
def test_tracestate_all_allowed_characters(self, test_agent, test_library):
Expand Down
Loading