update CONTRIBUTING.md to explain compiling and adding snapshot tests #1844

Merged · 3 commits · Apr 18, 2023
10 changes: 6 additions & 4 deletions CONTRIBUTING.md
@@ -79,10 +79,12 @@ How do I know what kind of test(s) to write?

 For each new detector, at least one regression test must be present.
 
-1. Create a test in `tests/e2e/detectors`
-2. Update `ALL_TEST` in `tests/e2e/detectors/test_detectors.py`
-3. Run `python tests/e2e/detectors/test_detectors.py --generate`. This will generate the json artifacts in `tests/expected_json`. Add the generated files to git. If updating an existing detector, identify the respective json artifacts and then delete them, or run `python ./tests/test_detectors.py --overwrite` instead.
-4. Run `pytest tests/e2e/detectors/test_detectors.py` and check that everything worked.
+1. Create a folder in `tests/e2e/detectors/test_data` with the detector's argument name.
+2. Create a test contract in `tests/e2e/detectors/test_data/<detector_name>/`.
+3. Update `ALL_TEST` in `tests/e2e/detectors/test_detectors.py`.
+4. Run `python tests/e2e/detectors/test_detectors.py --compile` to create a zip file of the compilation artifacts.
+5. Run `pytest tests/e2e/detectors/test_detectors.py --insta update-new`. This will generate a snapshot of the detector output in `tests/e2e/detectors/snapshots/`. If updating an existing detector, run `pytest tests/e2e/detectors/test_detectors.py --insta review` and accept or reject the updates.
+6. Run `pytest tests/e2e/detectors/test_detectors.py` to ensure everything worked. Then add and commit the files to git.
 
 > ##### Helpful commands for detector tests
 >
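For illustration, the `ALL_TEST` entry of step 3 is a `Test(...)` object like those in the `ALL_TEST_OBJECTS` list shown in the diff below. This sketch uses an invented detector class, contract name, and solc version; the argument order follows the `__init__` assignments visible in the diff:

```python
# Hypothetical entry appended to ALL_TEST_OBJECTS in
# tests/e2e/detectors/test_detectors.py; all names here are illustrative.
Test(
    MyNewDetector,        # detector class; its ARGUMENT names the test_data folder
    "MyNewDetector.sol",  # contract stored under test_data/<argument>/<solc_ver>/
    "0.8.0",              # solc version used to compile the contract
)
```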
67 changes: 10 additions & 57 deletions tests/e2e/detectors/test_detectors.py
@@ -1,4 +1,3 @@
-import json
 import os
 from pathlib import Path
 import sys
@@ -8,6 +7,7 @@
 from crytic_compile import CryticCompile, save_to_zip
 from crytic_compile.utils.zip import load_from_zip
 
+from solc_select import solc_select
 
 from slither import Slither
 from slither.detectors.abstract_detector import AbstractDetector
@@ -33,7 +33,6 @@ def __init__(
"""
self.detector = detector
self.test_file = test_file
self.expected_result = test_file + "." + solc_ver + "." + detector.__name__ + ".json"
self.solc_ver = solc_ver
if additional_files is None:
self.additional_files = []
@@ -44,6 +43,10 @@
 def set_solc(test_item: Test):  # pylint: disable=too-many-lines
     # hacky hack hack to pick the solc version we want
     env = dict(os.environ)
+
+    if not solc_select.artifact_path(test_item.solc_ver).exists():
+        print("Installing solc version", test_item.solc_ver)
+        solc_select.install_artifacts([test_item.solc_ver])
     env["SOLC_VERSION"] = test_item.solc_ver
     os.environ.clear()
     os.environ.update(env)
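The added block lazily installs any missing solc binary before pointing crytic-compile at it. A minimal standalone sketch of the same pattern, assuming only that solc-select is installed (the version string is arbitrary):

```python
# Ensure a solc binary exists via solc-select's Python API, then expose it
# to crytic-compile through the SOLC_VERSION environment variable.
import os

from solc_select import solc_select

version = "0.8.0"  # illustrative version
if not solc_select.artifact_path(version).exists():
    solc_select.install_artifacts([version])
os.environ["SOLC_VERSION"] = version
```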
@@ -1638,26 +1641,12 @@ def id_test(test_item: Test):
     ),
 ]
 
-
-def get_all_tests() -> List[Test]:
-    # installed_solcs = set(get_installed_solc_versions())
-    # required_solcs = {test.solc_ver for test in ALL_TEST_OBJECTS}
-    # missing_solcs = list(required_solcs - installed_solcs)
-    # if missing_solcs:
-    #     install_solc_versions(missing_solcs)
-
-    return ALL_TEST_OBJECTS
-
-
-ALL_TESTS = get_all_tests()
-
 GENERIC_PATH = "/GENERIC_PATH"
 
 TEST_DATA_DIR = Path(__file__).resolve().parent / "test_data"
 
-
 # pylint: disable=too-many-locals
-@pytest.mark.parametrize("test_item", ALL_TESTS, ids=id_test)
+@pytest.mark.parametrize("test_item", ALL_TEST_OBJECTS, ids=id_test)
 def test_detector(test_item: Test, snapshot):
     test_dir_path = Path(
         TEST_DATA_DIR,
@@ -1681,38 +1670,6 @@ def test_detector(test_item: Test, snapshot):
     assert snapshot() == actual_output
 
 
-def _generate_test(test_item: Test, skip_existing=False):
-    test_dir_path = Path(
-        TEST_DATA_DIR,
-        test_item.detector.ARGUMENT,
-        test_item.solc_ver,
-    ).as_posix()
-    test_file_path = Path(test_dir_path, test_item.test_file).as_posix()
-    expected_result_path = Path(test_dir_path, test_item.expected_result).absolute().as_posix()
-
-    if skip_existing:
-        if os.path.isfile(expected_result_path):
-            return
-
-    set_solc(test_item)
-    sl = Slither(test_file_path)
-    sl.register_detector(test_item.detector)
-    results = sl.run_detectors()
-
-    results_as_string = json.dumps(results)
-    test_file_path = test_file_path.replace("\\", "\\\\")
-    results_as_string = results_as_string.replace(test_file_path, GENERIC_PATH)
-
-    for additional_file in test_item.additional_files:
-        additional_path = Path(test_dir_path, additional_file).absolute().as_posix()
-        additional_path = additional_path.replace("\\", "\\\\")
-        results_as_string = results_as_string.replace(additional_path, GENERIC_PATH)
-
-    results = json.loads(results_as_string)
-    with open(expected_result_path, "w", encoding="utf8") as f:
-        f.write(json.dumps(results, indent=4))
-
-
 def _generate_compile(test_item: Test, skip_existing=False):
     test_dir_path = Path(
         TEST_DATA_DIR,
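The deleted `_generate_test` wrote expected json artifacts by hand; `test_detector` above replaces that with pytest-insta's `snapshot` fixture. A minimal sketch of the fixture pattern, independent of Slither (the test body is a placeholder):

```python
# Minimal pytest-insta usage: run `pytest --insta update-new` once to record
# the snapshot under snapshots/, then plain `pytest` compares against it.
def test_detector_output(snapshot):
    actual_output = "detector findings rendered as text"  # placeholder value
    assert snapshot() == actual_output
```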
@@ -1733,13 +1690,9 @@

 if __name__ == "__main__":
     if len(sys.argv) != 2:
-        print("To generate the json artifacts run\n\tpython tests/test_detectors.py --generate")
-    elif sys.argv[1] == "--generate":
-        for next_test in ALL_TESTS:
-            _generate_test(next_test, skip_existing=True)
-    elif sys.argv[1] == "--overwrite":
-        for next_test in ALL_TESTS:
-            _generate_test(next_test)
+        print(
+            "To generate the zip artifacts run\n\tpython tests/e2e/tests/test_detectors.py --compile"
+        )
     elif sys.argv[1] == "--compile":
-        for next_test in ALL_TESTS:
+        for next_test in ALL_TEST_OBJECTS:
             _generate_compile(next_test, skip_existing=True)
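
For reference, a hedged sketch of what `--compile` does per test: compile the contract once with crytic-compile, save the artifacts as a zip, and let later runs restore them without invoking solc. The paths and zip name here are illustrative, since `_generate_compile`'s body is truncated in the diff above:

```python
# Illustrative round-trip of crytic-compile zip artifacts.
import os

from crytic_compile import CryticCompile, save_to_zip
from crytic_compile.utils.zip import load_from_zip

os.environ["SOLC_VERSION"] = "0.8.0"  # set_solc handles this in the real tests
compilation = CryticCompile("MyNewDetector.sol")  # hypothetical contract path
save_to_zip([compilation], "MyNewDetector.sol.zip")

# A later run restores the compilation unit straight from the zip:
restored = load_from_zip("MyNewDetector.sol.zip")[0]
```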