diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml
index c4647b1cc..ace888390 100644
--- a/.github/workflows/run_tests.yml
+++ b/.github/workflows/run_tests.yml
@@ -47,10 +47,6 @@ env:
   HOMEBREW_NO_GITHUB_API: "ON"
   HOMEBREW_NO_INSTALL_CLEANUP: "ON"
   DEBIAN_FRONTEND: "noninteractive" # disable interactive apt installs
-  SSDB_SINGLE: "127.0.0.1:6380"
-  SSDB_CLUSTERED: "127.0.0.1:7000,127.0.0.1:7001,127.0.0.1:7002"
-  SSDB_UDS: "unix:///tmp/redis.sock"
-  SMARTREDIS_TEST_CLUSTER: False
   SR_LOG_FILE: "smartredis_cicd_tests_log.txt"
   SR_LOG_LEVEL: "INFO"

@@ -143,58 +139,19 @@ jobs:
           sudo docker cp $CONTAINER_NAME:/usr/lib/redis/modules/redisai.so /usr/lib/redis/modules &&
           sudo docker cp $CONTAINER_NAME:/usr/lib/redis/modules/backends/ /usr/lib/redis/modules/ &&
           echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/install/lib" >> $GITHUB_ENV &&
+          echo "REDISAI_CPU_INSTALL_PATH=/usr/lib/redis/modules/" >> $GITHUB_ENV &&
+          echo "SR_CICD_EXECUTION=True" >> $GITHUB_ENV &&
           echo "REDISAI_MODULES=/usr/lib/redis/modules/redisai.so \
           TF /usr/lib/redis/modules/backends/redisai_tensorflow/redisai_tensorflow.so \
           TORCH /usr/lib/redis/modules/backends/redisai_torch/redisai_torch.so" >> $GITHUB_ENV

-      # Begin running the tests using various DB deployments
-      - name: Single-shard DB tests
+      # Run the tests using various DB deployments
+      - name: Run tests
         run: |
-          SCRATCH_DIR="single_scratch" && PORT=6380 &&
-          rm -rf $SCRATCH_DIR; mkdir $SCRATCH_DIR; pushd $SCRATCH_DIR &&
-          redis-server --port $PORT --daemonize yes \
-            --logfile "single.log" \
-            --loadmodule $REDISAI_MODULES &&
-          popd &&
-          export SSDB=$SSDB_SINGLE SMARTREDIS_TEST_CLUSTER=False &&
-          utils/check_redis.sh $PORT &&
-          make test-verbose-with-coverage COV_FLAGS="--cov=./src/python/module/smartredis/ --cov-report=xml --cov-append" SR_FORTRAN=ON SR_PYTHON=ON
-
-      - name: Clustered DB tests
-        run: |
-          SCRATCH_DIR="cluster_scratch" &&
-          rm -rf $SCRATCH_DIR; mkdir $SCRATCH_DIR; pushd $SCRATCH_DIR &&
-          for port in {7000..7002}; do
-            redis-server --port $port --cluster-enabled yes --daemonize yes \
-              --cluster-config-file "$port.conf" --protected-mode no --save "" \
-              --logfile "$port.log" \
-              --loadmodule $REDISAI_MODULES
-          done &&
-          popd &&
-          for port in {7000..7002}; do
-            utils/check_redis.sh $port
-          done &&
-          redis-cli --cluster create $(echo $SSDB_CLUSTERED | tr "," " ") --cluster-yes &&
-          export SSDB=$SSDB_CLUSTERED SMARTREDIS_TEST_CLUSTER=True &&
-          python -m pytest --cov=./src/python/module/smartredis/ --cov-report=xml --cov-append \
-            --ignore ./tests/docker -vv -s ./tests --build Coverage
-
-      - name: UDS DB tests
-        run: |
-          SOCKET="/tmp/redis.sock" &&
-          mkdir -p /tmp && ## Create socketfile for UDS testing
-          touch $SOCKET &&
-          chmod 777 $SOCKET &&
-          SCRATCH_DIR="uds_scratch" &&
-          rm -rf $SCRATCH_DIR; mkdir $SCRATCH_DIR; pushd $SCRATCH_DIR &&
-          redis-server --unixsocket $SOCKET --unixsocketperm 777 --port 0 --bind 127.0.0.1 \
-            --daemonize yes --protected-mode no --logfile "uds.log" \
-            --loadmodule $REDISAI_MODULES &&
-          popd &&
-          utils/check_redis.sh $SOCKET
-          export SSDB=$SSDB_UDS SMARTREDIS_TEST_CLUSTER=False &&
-          python -m pytest --cov=./src/python/module/smartredis/ --cov-report=xml --cov-append \
-            --ignore ./tests/docker -vv -s ./tests --build Coverage
+          make test-verbose-with-coverage \
+            COV_FLAGS="--cov=./src/python/module/smartredis/ --cov-report=xml --cov-append" \
+            SR_FORTRAN=ON SR_PYTHON=ON SR_TEST_REDIS_MODE=All SR_TEST_PORT=7000 \
+            SR_TEST_REDISAI_VER=v${{ matrix.rai_v }}

       # Process and upload code coverage (Python was collected during pytest)
       - name: Collect coverage from C/C++/Fortran testers
diff --git a/Makefile b/Makefile
index 6f1d27c9f..b3b01dd02 100644
--- a/Makefile
+++ b/Makefile
@@ -28,18 +28,6 @@ MAKEFLAGS += --no-print-directory
 SHELL:=/bin/bash

-# Build variables
-NPROC := $(shell nproc 2>/dev/null || python -c "import multiprocessing as mp; print (mp.cpu_count())" 2>/dev/null || echo 4)
-SR_BUILD := Release
-SR_LINK := Shared
-SR_PEDANTIC := OFF
-SR_FORTRAN := OFF
-SR_PYTHON := OFF
-
-# Test variables
-COV_FLAGS :=
-SR_TEST_DEVICE := cpu
-
 # Params for third-party software
 HIREDIS_URL := https://github.com/redis/hiredis.git
 HIREDIS_VER := v1.1.0
@@ -50,12 +38,30 @@ PYBIND_VER := v2.10.3
 REDIS_URL := https://github.com/redis/redis.git
 REDIS_VER := 6.0.8
 REDISAI_URL := https://github.com/RedisAI/RedisAI.git
-REDISAI_VER := v1.2.3
+# REDISAI_VER is controlled instead by SR_TEST_REDISAI_VER below
 CATCH2_URL := https://github.com/catchorg/Catch2.git
 CATCH2_VER := v2.13.6
 LCOV_URL := https://github.com/linux-test-project/lcov.git
 LCOV_VER := v1.15

+# Build variables
+NPROC := $(shell nproc 2>/dev/null || python -c "import multiprocessing as mp; print (mp.cpu_count())" 2>/dev/null || echo 4)
+SR_BUILD := Release
+SR_LINK := Shared
+SR_PEDANTIC := OFF
+SR_FORTRAN := OFF
+SR_PYTHON := OFF
+
+# Test variables
+COV_FLAGS :=
+SR_TEST_REDIS_MODE := Clustered
+SR_TEST_UDS_FILE := /tmp/redis.sock
+SR_TEST_PORT := 6379
+SR_TEST_NODES := 3
+SR_TEST_REDISAI_VER := v1.2.3
+SR_TEST_DEVICE := cpu
+SR_TEST_PYTEST_FLAGS := -vv -s
+
 # Do not remove this block. It is used by the 'help' rule when
 # constructing the help output.
 # help:
@@ -81,6 +87,21 @@ help:
 # help: SR_PEDANTIC {OFF, ON} -- GNU only; enable pickiest compiler settings
 # help: SR_FORTRAN {OFF, ON} -- Enable/disable build of Fortran library
 # help: SR_PYTHON {OFF, ON} -- Enable/disable build of Python library
+# help:
+# help: Test variables
+# help: --------------
+# help:
+# help: These variables affect the way that the SmartRedis library is tested. Each
+# help: has several options; the first listed is the default. Use by appending
+# help: the variable name and setting after the make target, e.g.
+# help: make test SR_BUILD=Debug SR_LINK=Static SR_FORTRAN=ON
+# help:
+# help: SR_TEST_REDIS_MODE {Clustered, Standalone, UDS, All} -- type of Redis backend launched for tests
+# help: SR_TEST_PORT (Default: 6379) -- first port for Redis server(s)
+# help: SR_TEST_NODES (Default: 3) -- number of shards to instantiate for a clustered Redis database
+# help: SR_TEST_REDISAI_VER {v1.2.7, v1.2.5} -- version of RedisAI to use for tests
+# help: SR_TEST_DEVICE {cpu, gpu} -- device type to test on. Warning: this variable is CASE SENSITIVE!
+# help: SR_TEST_PYTEST_FLAGS (Default: "-vv -s") -- verbosity flags to use with pytest
 # help:
 # help: Build targets
@@ -278,90 +299,187 @@ endif
 ifeq ($(SR_FORTRAN),OFF)
 SKIP_FORTRAN = --ignore ./tests/fortran
 endif
+SKIP_DOCKER := --ignore ./tests/docker
+
+# Build SSDB string for clustered database
+SSDB_STRING := 127.0.0.1:$(SR_TEST_PORT)
+PORT_RANGE := $(shell seq `expr $(SR_TEST_PORT) + 1` 1 `expr $(SR_TEST_PORT) + $(SR_TEST_NODES) - 1`)
+SSDB_STRING += $(foreach P,$(PORT_RANGE),",127.0.0.1:$(P)")
+SSDB_STRING := $(shell echo $(SSDB_STRING) | tr -d " ")
+
+# Run test cases with a freshly instantiated standalone Redis server
+# Parameters:
+#   1: the test directory in which to run tests
+define run_smartredis_tests_with_standalone_server
	echo "Launching standalone Redis server" && \
	export SR_TEST_DEVICE=$(SR_TEST_DEVICE) SR_SERVER_MODE=Standalone && \
	export SMARTREDIS_TEST_CLUSTER=False SMARTREDIS_TEST_DEVICE=$(SR_TEST_DEVICE) && \
	export SSDB=127.0.0.1:$(SR_TEST_PORT) && \
	python utils/launch_redis.py --port $(SR_TEST_PORT) --nodes 1 \
		--rai $(SR_TEST_REDISAI_VER) --device $(SR_TEST_DEVICE) && \
	echo "Running standalone tests" && \
	PYTHONFAULTHANDLER=1 python -m pytest $(SR_TEST_PYTEST_FLAGS) $(COV_FLAGS) \
		$(SKIP_DOCKER) $(SKIP_PYTHON) $(SKIP_FORTRAN) \
		--build $(SR_BUILD) --sr_fortran $(SR_FORTRAN) $(1) && \
	echo "Shutting down standalone Redis server" && \
	python utils/launch_redis.py --port $(SR_TEST_PORT) --nodes 1 --stop && \
	echo "Standalone tests complete"
+endef
+
+# Run test cases with a freshly instantiated clustered Redis server
+# Parameters:
+#   1: the test directory in which to run tests
+define run_smartredis_tests_with_clustered_server
	echo "Launching clustered Redis server" && \
	export SR_TEST_DEVICE=$(SR_TEST_DEVICE) SR_SERVER_MODE=Clustered && \
	export SMARTREDIS_TEST_CLUSTER=True SMARTREDIS_TEST_DEVICE=$(SR_TEST_DEVICE) && \
	export SSDB=$(SSDB_STRING) && \
	python utils/launch_redis.py --port $(SR_TEST_PORT) --nodes $(SR_TEST_NODES) \
		--rai $(SR_TEST_REDISAI_VER) --device $(SR_TEST_DEVICE) && \
	echo "Running clustered tests" && \
	PYTHONFAULTHANDLER=1 python -m pytest $(SR_TEST_PYTEST_FLAGS) $(COV_FLAGS) \
		$(SKIP_DOCKER) $(SKIP_PYTHON) $(SKIP_FORTRAN) \
		--build $(SR_BUILD) --sr_fortran $(SR_FORTRAN) $(1) && \
	echo "Shutting down clustered Redis server" && \
	python utils/launch_redis.py --port $(SR_TEST_PORT) \
		--nodes $(SR_TEST_NODES) --stop && \
	echo "Clustered tests complete"
+endef
+
+# Run test cases with a freshly instantiated standalone Redis server
+# connected via a Unix Domain Socket
+# Parameters:
+#   1: the test directory in which to run tests
+define run_smartredis_tests_with_uds_server
	echo "Launching standalone Redis server with Unix Domain Socket support"
	export SR_TEST_DEVICE=$(SR_TEST_DEVICE) SR_SERVER_MODE=Standalone && \
	export SMARTREDIS_TEST_CLUSTER=False SMARTREDIS_TEST_DEVICE=$(SR_TEST_DEVICE) && \
	export SSDB=unix://$(SR_TEST_UDS_FILE) && \
	python utils/launch_redis.py --port $(SR_TEST_PORT) --nodes 1 \
		--rai $(SR_TEST_REDISAI_VER) --device $(SR_TEST_DEVICE) \
		--udsport $(SR_TEST_UDS_FILE) && \
	echo "Running standalone tests with Unix Domain Socket connection" && \
	PYTHONFAULTHANDLER=1 python -m pytest $(SR_TEST_PYTEST_FLAGS) $(COV_FLAGS) \
		$(SKIP_DOCKER) $(SKIP_PYTHON) $(SKIP_FORTRAN) \
		--build $(SR_BUILD) --sr_fortran $(SR_FORTRAN) $(1) && \
	echo "Shutting down standalone Redis server with Unix Domain Socket support"
	python utils/launch_redis.py --port $(SR_TEST_PORT) --nodes 1 \
		--udsport $(SR_TEST_UDS_FILE) --stop && \
	echo "UDS tests complete"
+endef
+
+# Run test cases with freshly instantiated Redis servers
+# Parameters:
+#   1: the test directory in which to run tests
+define run_smartredis_tests_with_server
	$(if $(or $(filter $(SR_TEST_REDIS_MODE),Standalone),
	          $(filter $(SR_TEST_REDIS_MODE),All)),
		$(call run_smartredis_tests_with_standalone_server,$(1))
	)
	$(if $(or $(filter $(SR_TEST_REDIS_MODE),Clustered),
	          $(filter $(SR_TEST_REDIS_MODE),All)),
		$(call run_smartredis_tests_with_clustered_server,$(1))
	)
	$(if $(or $(filter $(SR_TEST_REDIS_MODE),UDS),
	          $(filter $(SR_TEST_REDIS_MODE),All)),
		$(if $(filter-out $(shell uname -s),Darwin),
			$(call run_smartredis_tests_with_uds_server,$(1)),
			@echo "Skipping: Unix Domain Socket is not supported on MacOS"
		)
	)
+endef

 # help: test - Build and run all tests (C, C++, Fortran, Python)
 .PHONY: test
 test: test-deps
 test: build-tests
+test: SR_TEST_PYTEST_FLAGS := -vv
 test:
-	@PYTHONFAULTHANDLER=1 python -m pytest --ignore ./tests/docker \
-		$(SKIP_PYTHON) $(SKIP_FORTRAN) -vv ./tests --build $(SR_BUILD)
-
+	-@$(call run_smartredis_tests_with_server,./tests)

 # help: test-verbose - Build and run all tests [verbosely]
 .PHONY: test-verbose
 test-verbose: test-deps
 test-verbose: build-tests
+test-verbose: SR_TEST_PYTEST_FLAGS := -vv -s
 test-verbose:
-	@PYTHONFAULTHANDLER=1 python -m pytest $(COV_FLAGS) --ignore ./tests/docker \
-		$(SKIP_PYTHON) $(SKIP_FORTRAN) -vv -s ./tests --build $(SR_BUILD)
+	-@$(call run_smartredis_tests_with_server,./tests)

 # help: test-verbose-with-coverage - Build and run all tests [verbose-with-coverage]
 .PHONY: test-verbose-with-coverage
-test-verbose-with-coverage: SR_BUILD=Coverage
+test-verbose-with-coverage: SR_BUILD := Coverage
 test-verbose-with-coverage: test-deps
 test-verbose-with-coverage: build-tests
+test-verbose-with-coverage: SR_TEST_PYTEST_FLAGS := -vv -s
 test-verbose-with-coverage:
-	@PYTHONFAULTHANDLER=1 python -m pytest $(COV_FLAGS) --ignore ./tests/docker \
-		$(SKIP_PYTHON) $(SKIP_FORTRAN) -vv -s ./tests --build $(SR_BUILD)
+	-@$(call run_smartredis_tests_with_server,./tests)

 # help: test-c - Build and run all C tests
 .PHONY: test-c
 test-c: test-deps
 test-c: build-test-c
+test-c: SR_TEST_PYTEST_FLAGS := -vv -s
 test-c:
-	@python -m pytest -vv -s ./tests/c/ --build $(SR_BUILD)
+	-@$(call run_smartredis_tests_with_server,./tests/c)

 # help: test-cpp - Build and run all C++ tests
 .PHONY: test-cpp
 test-cpp: test-deps
 test-cpp: build-test-cpp
 test-cpp: build-unit-test-cpp
+test-cpp: SR_TEST_PYTEST_FLAGS := -vv -s
 test-cpp:
-	@python -m pytest -vv -s ./tests/cpp/ --build $(SR_BUILD)
+	-@$(call run_smartredis_tests_with_server,./tests/cpp)

 # help: unit-test-cpp - Build and run unit tests for C++
 .PHONY: unit-test-cpp
 unit-test-cpp: test-deps
 unit-test-cpp: build-unit-test-cpp
+unit-test-cpp: SR_TEST_PYTEST_FLAGS := -vv -s
 unit-test-cpp:
-	@python -m pytest -vv -s ./tests/cpp/unit-tests/ --build $(SR_BUILD)
+	-@$(call run_smartredis_tests_with_server,./tests/cpp/unit-tests)

 # help: test-py - run python tests
 .PHONY: test-py
 test-py: test-deps
-test-py: SR_PYTHON=ON
+test-py: SR_PYTHON := ON
 test-py: lib
+test-py: SR_TEST_PYTEST_FLAGS := -vv
 test-py:
-	@PYTHONFAULTHANDLER=1 python -m pytest -vv ./tests/python/ --build $(SR_BUILD)
+	-@$(call run_smartredis_tests_with_server,./tests/python)

 # help: test-fortran - run fortran tests
 .PHONY: test-fortran
+test-fortran: SR_FORTRAN := ON
 test-fortran: test-deps
 test-fortran: build-test-fortran
-	@python -m pytest -vv ./tests/fortran/ --build $(SR_BUILD)
+test-fortran: SR_TEST_PYTEST_FLAGS := -vv
+test-fortran:
+	-@$(call run_smartredis_tests_with_server,./tests/fortran)

 # help: testpy-cov - run python tests with coverage
 .PHONY: testpy-cov
 testpy-cov: test-deps
-testpy-cov: SR_PYTHON=ON
+testpy-cov: SR_PYTHON := ON
+testpy-cov: SR_TEST_PYTEST_FLAGS := -vv
+testpy-cov: COV_FLAGS := --cov=./src/python/module/smartredis/
 testpy-cov:
-	@PYTHONFAULTHANDLER=1 python -m pytest --cov=./src/python/module/smartredis/ \
-		-vv ./tests/python/ --build $(SR_BUILD)
+	-@$(call run_smartredis_tests_with_server,./tests/python)

 # help: test-examples - Build and run all examples
 .PHONY: test-examples
 test-examples: test-deps
 test-examples: build-examples
+test-examples: SR_TEST_PYTEST_FLAGS := -vv -s
 test-examples:
-	@python -m pytest -vv -s ./examples --build $(SR_BUILD) --sr_fortran $(SR_FORTRAN)
+	-@$(call run_smartredis_tests_with_server,./examples)

 ############################################################################
 # hidden build targets for third-party software

 # Hiredis (hidden build target)
-.phony: hiredis
+.PHONY: hiredis
 hiredis: install/lib/libhiredis.a
 install/lib/libhiredis.a:
 	@rm -rf third-party/hiredis
@@ -376,7 +494,7 @@ install/lib/libhiredis.a:
 	echo "Finished installing Hiredis"

 # Redis-plus-plus (hidden build target)
-.phony: redis-plus-plus
+.PHONY: redis-plus-plus
 redis-plus-plus: install/lib/libredis++.a
 install/lib/libredis++.a:
 	@rm -rf third-party/redis-plus-plus
@@ -395,7 +513,7 @@ install/lib/libredis++.a:
 	echo "Finished installing Redis-plus-plus"

 # Pybind11 (hidden build target)
-.phony: pybind
+.PHONY: pybind
 pybind: third-party/pybind/include/pybind11/pybind11.h
 third-party/pybind/include/pybind11/pybind11.h:
 	@mkdir -p third-party
@@ -405,7 +523,7 @@ third-party/pybind/include/pybind11/pybind11.h:
 	echo "Finished installing Pybind11"

 # Redis (hidden test target)
-.phony: redis
+.PHONY: redis
 redis: third-party/redis/src/redis-server
 third-party/redis/src/redis-server:
 	@mkdir -p third-party
@@ -417,7 +535,7 @@ third-party/redis/src/redis-server:

 # cudann-check (hidden test target)
 # checks cuda dependencies for GPU build
-.phony: cudann-check
+.PHONY: cudann-check
 cudann-check:
 ifeq ($(SR_TEST_DEVICE),gpu)
 ifndef CUDA_HOME
@@ -438,17 +556,18 @@ endif
 endif

 # RedisAI (hidden test target)
-.phony: redisAI
+.PHONY: redisAI
 redisAI: cudann-check
-redisAI: third-party/RedisAI/install-cpu/redisai.so
-third-party/RedisAI/install-cpu/redisai.so:
+redisAI: third-party/RedisAI/$(SR_TEST_REDISAI_VER)/install-$(SR_TEST_DEVICE)/redisai.so
+third-party/RedisAI/$(SR_TEST_REDISAI_VER)/install-$(SR_TEST_DEVICE)/redisai.so:
+	@echo in third-party/RedisAI/$(SR_TEST_REDISAI_VER)/install-$(SR_TEST_DEVICE)/redisai.so:
 	$(eval DEVICE_IS_GPU := $(shell test $(SR_TEST_DEVICE) == "cpu"; echo $$?))
 	@mkdir -p third-party
 	@cd third-party && \
-	rm -rf RedisAI && \
-	GIT_LFS_SKIP_SMUDGE=1 git clone --recursive $(REDISAI_URL) RedisAI \
-		--branch $(REDISAI_VER) --depth=1
-	-@cd third-party/RedisAI && \
+	rm -rf RedisAI/$(SR_TEST_REDISAI_VER) && \
+	GIT_LFS_SKIP_SMUDGE=1 git clone --recursive $(REDISAI_URL) RedisAI/$(SR_TEST_REDISAI_VER) \
+		--branch $(SR_TEST_REDISAI_VER) --depth=1
+	-@cd third-party/RedisAI/$(SR_TEST_REDISAI_VER) && \
 	CC=gcc CXX=g++ WITH_PT=1 WITH_TF=1 WITH_TFLITE=0 WITH_ORT=0 bash get_deps.sh \
 		$(SR_TEST_DEVICE) && \
 	CC=gcc CXX=g++ GPU=$(DEVICE_IS_GPU) WITH_PT=1 WITH_TF=1 WITH_TFLITE=0 WITH_ORT=0 \
@@ -456,7 +575,7 @@ third-party/RedisAI/install-cpu/redisai.so:
 	echo "Finished installing RedisAI"

 # Catch2 (hidden test target)
-.phony: catch2
+.PHONY: catch2
 catch2: third-party/catch/single_include/catch2/catch.hpp
 third-party/catch/single_include/catch2/catch.hpp:
 	@mkdir -p third-party
@@ -465,7 +584,7 @@ third-party/catch/single_include/catch2/catch.hpp:
 	@echo "Finished installing Catch2"

 # LCOV (hidden test target)
-.phony: lcov
+.PHONY: lcov
 lcov: third-party/lcov/install/usr/local/bin/lcov
 third-party/lcov/install/usr/local/bin/lcov:
 	@mkdir -p third-party
diff --git a/doc/changelog.rst b/doc/changelog.rst
index 0093d093a..1fe9e5565 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -12,6 +12,7 @@ Description
 - Updated setup.py to work with the new build system
 - Remove unneeded method from Python SRObject class
 - Fixed a memory leak in the C layer
+- Revamp SmartRedis test system
 - Remove debug output in pybind layer
 - Update Hiredis version to 1.1.0
 - Enable parallel build for the SmartRedis examples
@@ -31,6 +32,7 @@ Detailed Notes
 - Streamlined setup.py to simplify Python install (PR359)
 - Remove from_pybind() from Python SRObject class as it's not needed and didn't work properly anyway (PR358_)
 - Fixed memory leaked from the C layer when calling get_string_option() (PR357_)
+- Major revamp to simplify use of SmartRedis test system, automating most test processes (PR356_)
 - Remove debug output in pybind layer associated with put_dataset (PR352_)
 - Updated to the latest version of Hiredis (1.1.0) (PR351_)
 - Enable parallel build for the SmartRedis examples by moving utility Fortran code
@@ -56,6 +58,7 @@ users need not import `iso_c_binding` in their own applications (PR340_)
 .. _PR359: https://github.com/CrayLabs/SmartRedis/pull/359
 .. _PR358: https://github.com/CrayLabs/SmartRedis/pull/358
 .. _PR357: https://github.com/CrayLabs/SmartRedis/pull/357
+.. _PR356: https://github.com/CrayLabs/SmartRedis/pull/356
 .. _PR352: https://github.com/CrayLabs/SmartRedis/pull/352
 .. _PR351: https://github.com/CrayLabs/SmartRedis/pull/351
 ..
_PR349: https://github.com/CrayLabs/SmartRedis/pull/349 diff --git a/examples/serial/c/example_put_get_3D.c b/examples/serial/c/example_put_get_3D.c index e27d9d03f..02c497320 100644 --- a/examples/serial/c/example_put_get_3D.c +++ b/examples/serial/c/example_put_get_3D.c @@ -66,7 +66,7 @@ int main(int argc, char* argv[]) { for(size_t i=0; i tensor_2(n_values, 0); for(size_t i=0; i input_tensor(n_values, 0); for(size_t i=0; i tensor(tensor_size, 0); for (size_t i=0; i t(name, data, dims, type, mem_layout); @@ -62,7 +62,7 @@ SCENARIO("Testing Tensor", "[Tensor]") size_t tensor_size_2 = dims_2.at(0) * dims_2.at(1) * dims_2.at(2); std::vector tensor_2(tensor_size_2, 0); for (size_t i=0; i t_2(name_2, data_2, dims_2, type_2, mem_layout_2); diff --git a/tests/cpp/unit-tests/test_tensorbase.cpp b/tests/cpp/unit-tests/test_tensorbase.cpp index b01b79c86..54eef51d0 100644 --- a/tests/cpp/unit-tests/test_tensorbase.cpp +++ b/tests/cpp/unit-tests/test_tensorbase.cpp @@ -58,7 +58,7 @@ SCENARIO("Testing TensorBase through TensorPack", "[TensorBase]") size_t tensor_size = dims.at(0) * dims.at(1) * dims.at(2); std::vector tensor(tensor_size, 0); for (size_t i=0; i tensor(tensor_size, 0); for (size_t i=0; i tensor(tensor_size, 0); for (size_t i=0; i dims = {}; std::vector tensor(5, 0); for (size_t i=0; i<5; i++) - tensor[i] = 2.0*rand()/RAND_MAX - 1.0; + tensor[i] = 2.0*rand()/(double)RAND_MAX - 1.0; void* data = tensor.data(); THEN("A runtime error is thrown") @@ -223,7 +223,7 @@ SCENARIO("Testing TensorBase through TensorPack", "[TensorBase]") std::vector dims = {1, 0, 3}; std::vector tensor(5, 0); for (size_t i=0; i<5; i++) - tensor[i] = 2.0*rand()/RAND_MAX - 1.0; + tensor[i] = 2.0*rand()/(double)RAND_MAX - 1.0; void* data = tensor.data(); THEN("A runtime error is thrown") diff --git a/utils/launch_redis.py b/utils/launch_redis.py new file mode 100644 index 000000000..6d4c778f7 --- /dev/null +++ b/utils/launch_redis.py @@ -0,0 +1,256 @@ +# BSD 2-Clause License +# +# Copyright (c) 2021-2023, Hewlett Packard Enterprise +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+from subprocess import Popen, SubprocessError, run, DEVNULL
+from time import sleep
+import argparse
+import os
+import pathlib
+
+def check_availability(n_nodes, port, udsport):
+    """Repeat a command until it is successful
+    """
+    num_tries = 5
+    is_uds = udsport is not None
+    if is_uds:
+        n_nodes = 1
+    cicd = os.getenv('SR_CICD_EXECUTION')
+    is_cicd = False if cicd is None else cicd.lower() == "true"
+    if is_cicd:
+        rediscli = 'redis-cli'
+    else:
+        rediscli = (
+            pathlib.Path(__file__).parent.parent
+            / "third-party/redis/src/redis-cli"
+        ).resolve()
+    for i in range(n_nodes):
+        connection = f"-s {udsport}" if is_uds else f"-p {port + i}"
+        set_cmd = f"{rediscli} {connection} set __test__ __test__"
+        del_cmd = f"{rediscli} {connection} del __test__"
+        command_succeeded = False
+        for _ in range(num_tries):
+            try:
+                # check=True makes a failing command raise so that it is retried
+                run(set_cmd.split(), shell=False, check=True, stdout=DEVNULL, stderr=DEVNULL)
+                run(del_cmd.split(), shell=False, check=True, stdout=DEVNULL, stderr=DEVNULL)
+                command_succeeded = True
+                break
+            except Exception:
+                # That try failed, so just retry
+                sleep(5)
+        if not command_succeeded:
+            raise RuntimeError(f"Failed to validate availability for connection {connection}")
+
+def stop_db(n_nodes, port, udsport):
+    """Stop a redis cluster and clear the files
+    associated with it
+    """
+    is_uds = udsport is not None
+    if is_uds:
+        n_nodes = 1
+    cicd = os.getenv('SR_CICD_EXECUTION')
+    is_cicd = False if cicd is None else cicd.lower() == "true"
+
+    # It's clobberin' time!
+    if is_cicd:
+        rediscli = 'redis-cli'
+    else:
+        rediscli = (
+            pathlib.Path(__file__).parent.parent
+            / "third-party/redis/src/redis-cli"
+        ).resolve()
+
+    # Clobber the server(s)
+    procs = []
+    for i in range(n_nodes):
+        connection = f"-s {udsport}" if is_uds else f"-p {port + i}"
+        cmd = f"{rediscli} {connection} shutdown"
+        print(cmd)
+        proc = Popen(cmd.split(), shell=False)
+        procs.append(proc)
+
+    # Make sure that all servers are down
+    # Let exceptions propagate to the caller
+    for proc in procs:
+        _ = proc.communicate(timeout=15)
+        if proc.returncode != 0:
+            raise RuntimeError("Failed to kill Redis server!")
+
+    # clean up after ourselves
+    for i in range(n_nodes):
+        fname = f"{port+i}.log"
+        if os.path.exists(fname):
+            os.remove(fname)
+
+        fname = f"{port+i}.conf"
+        if os.path.exists(fname):
+            os.remove(fname)
+
+    other_files = [
+        'dump.rdb',
+        'single.log',
+        'UDS.log',
+    ]
+    for fname in other_files:
+        if os.path.exists(fname):
+            os.remove(fname)
+
+    # Pause to give Redis time to die
+    sleep(2)
+
+def prepare_uds_socket(udsport):
+    """Sets up the UDS socket"""
+    if udsport is None:
+        return  # Silently bail
+    uds_abs = pathlib.Path(udsport).resolve()
+    basedir = uds_abs.parent
+    basedir.mkdir(parents=True, exist_ok=True)
+    uds_abs.touch()
+    uds_abs.chmod(0o777)
+
+def create_db(n_nodes, port, device, rai_ver, udsport):
+    """Creates a redis database starting with port at 127.0.0.1
+
+    For a standalone server, the command issued should be equivalent to:
+        redis-server --port $PORT --daemonize yes \
+            --logfile "single.log" \
+            --loadmodule $REDISAI_MODULES
+
+    For a clustered server, the command issued should be equivalent to:
+        redis-server --port $port --cluster-enabled yes --daemonize yes \
+            --cluster-config-file "$port.conf" --protected-mode no --save "" \
+            --logfile "$port.log" \
+            --loadmodule $REDISAI_MODULES
+
+    For a UDS server, the command issued should be equivalent to:
+        redis-server --unixsocket $SOCKET --unixsocketperm 777 --port 0 --bind 127.0.0.1 \
+            --daemonize yes --protected-mode no --logfile "uds.log" \
+            --loadmodule $REDISAI_MODULES
+
+    where:
+        PORT ranges from port to port + n_nodes - 1
+        REDISAI_MODULES is read from the environment or calculated relative to this file
+    """
+
+    # Set up configuration
+    is_uds = udsport is not None
+    if is_uds:
+        n_nodes = 1
+    is_cluster = n_nodes > 1
+    cicd = os.getenv('SR_CICD_EXECUTION')
+    is_cicd = False if cicd is None else cicd.lower() == "true"
+
+    if is_cicd:
+        redisserver = "redis-server"
+    else:
+        redisserver = (
+            pathlib.Path(__file__).parent.parent
+            / "third-party/redis/src/redis-server"
+        ).resolve()
+    rediscli = "redis-cli" if is_cicd else os.path.dirname(redisserver) + "/redis-cli"
+    test_device = device if device is not None else os.environ.get(
+        "SMARTREDIS_TEST_DEVICE","cpu").lower()
+    if is_cicd:
+        redisai = os.getenv(f'REDISAI_{test_device.upper()}_INSTALL_PATH') + '/redisai.so'
+        redisai_modules = os.getenv("REDISAI_MODULES")
+        if redisai_modules is None:
+            raise RuntimeError("REDISAI_MODULES environment variable is not set!")
+        rai_clause = f"--loadmodule {redisai_modules}"
+    else:
+        if not rai_ver:
+            raise RuntimeError("RedisAI version not specified")
+        redisai_dir = (
+            pathlib.Path(__file__).parent.parent
+            / f"third-party/RedisAI/{rai_ver}/install-{test_device}"
+        ).resolve()
+        redisai = redisai_dir / "redisai.so"
+        tf_loc = redisai_dir / "backends/redisai_tensorflow/redisai_tensorflow.so"
+        torch_loc = redisai_dir / "backends/redisai_torch/redisai_torch.so"
+        rai_clause = f"--loadmodule {redisai} TF {tf_loc} TORCH {torch_loc}"
+    uds_clause = ""
+    if is_uds:
+        prepare_uds_socket(udsport)
+        uds_clause = f"--bind 127.0.0.1 --unixsocket {udsport} --unixsocketperm 777"
+    daemonize_clause = "--daemonize yes"
+    cluster_clause = "--cluster-enabled yes" if is_cluster else ""
+    prot_clause = "--protected-mode no" if is_cluster or is_uds else ""
+    save_clause = '--save ""' if is_cluster else ""
+
+    # Start servers
+    procs = []
+    for i in range(n_nodes):
+        l_port = port + i
+        port_clause = f"--port {l_port}" if not is_uds else "--port 0"
+        if is_cluster:
+            log_clause = f"--logfile {l_port}.log"
+            cluster_cfg_clause = f"--cluster-config-file {l_port}.conf"
+        else:
+            log_clause = "--logfile " + ("UDS.log" if is_uds else "single.log")
+            cluster_cfg_clause = ""
+        log_clause += " --loglevel notice"
+        cmd = f"{redisserver} {port_clause} {daemonize_clause} {cluster_clause} " + \
+              f"{cluster_cfg_clause} {log_clause} {uds_clause} {rai_clause} " + \
+              f"{prot_clause} {save_clause}"
+
+        print(cmd)
+        proc = Popen(cmd.split(), shell=False)
+        procs.append(proc)
+
+    # Make sure that all servers are up
+    # Let exceptions propagate to the caller
+    check_availability(n_nodes, port, udsport)
+    for proc in procs:
+        _ = proc.communicate(timeout=15)
+        if proc.returncode != 0:
+            raise RuntimeError("Failed to launch Redis server!")
+
+    # Create cluster for clustered Redis request
+    if n_nodes > 1:
+        cluster_str = " ".join(f"127.0.0.1:{port + i}" for i in range(n_nodes))
+        cmd = f"{rediscli} --cluster create {cluster_str} --cluster-replicas 0"
+        print(cmd)
+        proc = run(cmd.split(), input="yes", encoding="utf-8", shell=False)
+        if proc.returncode != 0:
+            raise SubprocessError("Cluster could not be created!")
+        sleep(2)
+        print("Cluster has been setup!")
+    else:
+        print("Server has been setup!")
+    check_availability(n_nodes, port, udsport)
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--port', type=int, default=6379)
+    parser.add_argument('--nodes', type=int, default=3)
+    parser.add_argument('--rai', type=str, default=None)
+    parser.add_argument('--device', type=str, default="cpu")
+    parser.add_argument('--udsport', type=str, default=None)
+    parser.add_argument('--stop', action='store_true')
+    args = parser.parse_args()
+
+    if args.stop:
+        stop_db(args.nodes, args.port, args.udsport)
+    else:
+        create_db(args.nodes, args.port, args.device, args.rai, args.udsport)
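
Usage sketch (not part of the patch): the Makefile targets above drive utils/launch_redis.py automatically, but the script can also be run by hand using only the flags defined in its argparse block; the port, node count, and RedisAI version below are illustrative.

    # Start a 3-node clustered Redis with RedisAI v1.2.3 on CPU, ports 7000-7002
    python utils/launch_redis.py --port 7000 --nodes 3 --rai v1.2.3 --device cpu
    # Point the SmartRedis tests at the cluster (mirrors run_smartredis_tests_with_clustered_server)
    export SSDB=127.0.0.1:7000,127.0.0.1:7001,127.0.0.1:7002 SMARTREDIS_TEST_CLUSTER=True
    # ... run tests ...
    # Shut the servers down and remove their .log/.conf files
    python utils/launch_redis.py --port 7000 --nodes 3 --stop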