From 8e0bd8cd5705c73f6e5184686fed598b6b9779af Mon Sep 17 00:00:00 2001 From: Marcus G K Williams Date: Wed, 8 Mar 2023 12:02:49 -0800 Subject: [PATCH 1/7] msg_passing test Signed-off-by: Marcus G K Williams --- poetry.lock | 41 +++++++++++++++++++++++++++++++++++++---- pyproject.toml | 2 +- 2 files changed, 38 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index 0c7c0e64..e57b4fab 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 1.2.1 and should not be changed by hand. [[package]] name = "argparse" @@ -464,6 +464,8 @@ files = [ {file = "cryptography-39.0.1-cp36-abi3-win32.whl", hash = "sha256:fe913f20024eb2cb2f323e42a64bdf2911bb9738a15dba7d3cce48151034e3a8"}, {file = "cryptography-39.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:ced4e447ae29ca194449a3f1ce132ded8fcab06971ef5f618605aacaa612beac"}, {file = "cryptography-39.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:807ce09d4434881ca3a7594733669bd834f5b2c6d5c7e36f8c00f691887042ad"}, + {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5caeb8188c24888c90b5108a441c106f7faa4c4c075a2bcae438c6e8ca73cef"}, + {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4789d1e3e257965e960232345002262ede4d094d1a19f4d3b52e48d4d8f3b885"}, {file = "cryptography-39.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:96f1157a7c08b5b189b16b47bc9db2332269d6680a196341bf30046330d15388"}, {file = "cryptography-39.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e422abdec8b5fa8462aa016786680720d78bdce7a30c652b7fadf83a4ba35336"}, {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:b0afd054cd42f3d213bf82c629efb1ee5f22eba35bf0eec88ea9ea7304f511a2"}, @@ -1260,13 +1262,14 @@ develop = true asteval = "^0.9.27" networkx = "<=2.8.7" numpy = "^1.22.2" +pybind11 = {version = "^2.10.1", extras = ["global"]} scipy = "^1.8.0" [package.source] type = "git" url = "https://github.com/lava-nc/lava.git" -reference = "main" -resolved_reference = "2c2e8aea23af95d27c2f1109505545a791dd3fa2" +reference = "messaging_refactor_release2nd" +resolved_reference = "cc63438c602b7769bf7cb112b0aabbffca148b3b" [[package]] name = "linecache2" @@ -1902,6 +1905,36 @@ files = [ {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, ] +[[package]] +name = "pybind11" +version = "2.10.3" +description = "Seamless operability between C++11 and Python" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pybind11-2.10.3-py3-none-any.whl", hash = "sha256:123e303f39ad5de97ddfa4f1f473cb85881a0a94ee5714eb3c37e2405371fc12"}, + {file = "pybind11-2.10.3.tar.gz", hash = "sha256:08cfe6d4f73746447cc85a400c8169a91608b8a00c5feecd8ff251a70565d12f"}, +] + +[package.dependencies] +pybind11-global = {version = "2.10.3", optional = true, markers = "extra == \"global\""} + +[package.extras] +global = ["pybind11-global (==2.10.3)"] + +[[package]] +name = "pybind11-global" +version = "2.10.3" +description = "Seamless operability between C++11 and Python" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pybind11_global-2.10.3-py3-none-any.whl", hash = "sha256:0185118804f11349007989e9fab9d346b2d9997166b8ff90915419c528ba8690"}, + {file = 
"pybind11_global-2.10.3.tar.gz", hash = "sha256:9982149e0859e7a8496397b7dcdf083a37b87cbc1cac6e5dbe453eb3f3f22db1"}, +] + [[package]] name = "pycodestyle" version = "2.8.0" @@ -2779,4 +2812,4 @@ testing = ["flake8 (<5)", "func-timeout", "jaraco.functools", "jaraco.itertools" [metadata] lock-version = "2.0" python-versions = ">=3.8, <3.11" -content-hash = "499d5e89025848339277f5d9b978e53fb3bd219d4a1ea8437f4e5ce752b115a8" +content-hash = "b462733c811176e7c2b25a8fb6ef96e0870d4633d6c76f90a262d012e500bc02" diff --git a/pyproject.toml b/pyproject.toml index 086ed332..a9646b94 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,7 +47,7 @@ classifiers = [ [tool.poetry.dependencies] python = ">=3.8, <3.11" -lava-nc = { git = "https://github.com/lava-nc/lava.git", branch = "main", develop = true } +lava-nc = { git = "https://github.com/lava-nc/lava.git", branch = "messaging_refactor_release2nd", develop = true } torchvision = "^0.14.0" h5py = "^3.7.0" From beef765863a32a91177e38dc662a9f53fed4b87b Mon Sep 17 00:00:00 2001 From: Marcus G K Williams <168222+mgkwill@users.noreply.github.com> Date: Wed, 8 Mar 2023 12:45:11 -0800 Subject: [PATCH 2/7] Update ci.yml --- .github/workflows/ci.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e8823f47..0da95b56 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -55,4 +55,21 @@ jobs: - name: Run unit tests run: poetry run pytest + + + unit-tests-pp: + name: Unit Test Code + Coverage + PP + runs-on: ${{ matrix.operating-system }} + strategy: + matrix: + operating-system: [ubuntu-latest, windows-latest, macos-latest] + + steps: + - name: Setup CI + uses: lava-nc/ci-setup-composite-action@v1.1 + with: + repository: 'Lava-DNF' + + - name: Run unit tests + run: LAVA_PURE_PYTHON=1 poetry run pytest From d3a0e16a34b781fdfed502836ee6329590b5853e Mon Sep 17 00:00:00 2001 From: Marcus G K Williams <168222+mgkwill@users.noreply.github.com> Date: Wed, 8 Mar 2023 12:49:36 -0800 Subject: [PATCH 3/7] Update ci.yml --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0da95b56..1d1497b0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -71,5 +71,5 @@ jobs: repository: 'Lava-DNF' - name: Run unit tests - run: LAVA_PURE_PYTHON=1 poetry run pytest + run: poetry run LAVA_PURE_PYTHON=1 pytest From 1a329496832ccd38f7be085c2e30adea18703836 Mon Sep 17 00:00:00 2001 From: Marcus G K Williams <168222+mgkwill@users.noreply.github.com> Date: Wed, 8 Mar 2023 12:54:53 -0800 Subject: [PATCH 4/7] Update ci.yml --- .github/workflows/ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1d1497b0..54364bab 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -71,5 +71,6 @@ jobs: repository: 'Lava-DNF' - name: Run unit tests - run: poetry run LAVA_PURE_PYTHON=1 pytest + run: | + LAVA_PURE_PYTHON=1 poetry run pytest From f0e1957637d5d3d715650512c7de7e94a693226b Mon Sep 17 00:00:00 2001 From: Marcus G K Williams <168222+mgkwill@users.noreply.github.com> Date: Wed, 8 Mar 2023 13:04:47 -0800 Subject: [PATCH 5/7] Update ci.yml --- .github/workflows/ci.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 54364bab..d51021b5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -59,6 +59,8 @@ 
jobs:
   unit-tests-pp:
     name: Unit Test Code + Coverage + PP
+    env:
+      LAVA_PURE_PYTHON: '1'
     runs-on: ${{ matrix.operating-system }}
     strategy:
       matrix:
@@ -72,5 +74,5 @@ jobs:
 
       - name: Run unit tests
         run: |
-          LAVA_PURE_PYTHON=1 poetry run pytest
+          poetry run pytest
 

From 6925b8dd8f5e72968f03f04d99ba61abbd3b1b4d Mon Sep 17 00:00:00 2001
From: Marcus G K Williams
Date: Fri, 31 Mar 2023 11:53:26 -0700
Subject: [PATCH 6/7] Add run.py for debugging/perf

Signed-off-by: Marcus G K Williams
---
 .../lava/lib/dl/netx/pilotnet_snn/run.py      | 268 ++++++++++++++++++
 1 file changed, 268 insertions(+)
 create mode 100644 tutorials/lava/lib/dl/netx/pilotnet_snn/run.py

diff --git a/tutorials/lava/lib/dl/netx/pilotnet_snn/run.py b/tutorials/lava/lib/dl/netx/pilotnet_snn/run.py
new file mode 100644
index 00000000..332cbb11
--- /dev/null
+++ b/tutorials/lava/lib/dl/netx/pilotnet_snn/run.py
@@ -0,0 +1,268 @@
+# %% [markdown]
# # PilotNet LIF Example
#
# This tutorial demonstrates how to use __lava__ to perform inference with a PilotNet LIF network on both CPU and the Loihi 2 neurocore.
#
# ![PilotNet Inference](images/pilotnet_lif.PNG)
#
# The network receives video input, recorded from a dashboard camera of a driving car (__Dataloader__). The data is encoded efficiently as the difference between individual frames (__Encoder__). The data passes through the PilotNet LIF network, which was trained with __lava-dl__ and is built using its __Network Exchange__ module (netx.hdf5.Network), which automatically generates a Lava process from the training artifact. The network estimates the angle of the steering wheel of the car; the estimate is read from the output layer neuron's voltage state, decoded using proper scaling (__Decoder__), and sent to a visualization (__Monitor__) and logging system (__Logger__).
#
# The PilotNet LIF network predicts the steering angle every 16th timestep. The input is sent from the dataloader at the same frequency, and the PilotNet LIF network resets its internal state every 16th timestep to process the new input frame.
#
# The core of the tutorial is lava-dl's Network Exchange module, which is available as `lava.lib.dl.netx.{hdf5, blocks, utils}`.
# * `hdf5` implements automatic network generation.
# * `blocks` implements individual layer blocks.
# * `utils` implements hdf5 reading utilities.
#
# In addition, it demonstrates how different Lava processes can be connected with each other for real-time interaction, even though the underlying processes can run on various backends, including Loihi 2.
#
# Switching between Loihi 2 hardware and CPU simulation is as simple as changing the run configuration settings.

# %%


import numpy as np
import matplotlib.pyplot as plt

from lava.magma.core.run_configs import Loihi2SimCfg, Loihi2HwCfg
from lava.magma.core.run_conditions import RunSteps
from lava.proc import io
from lava.magma.core.process.variable import Var
from lava.magma.core.process.ports.ports import RefPort

from lava.lib.dl import netx
from dataset import PilotNetDataset
from utils import (
    PilotNetEncoder, PilotNetDecoder, VoltageReader, PilotNetMonitor,
    loihi2hw_exception_map, loihi2sim_exception_map
)


# %% [markdown]
# # Import modules for Loihi2 execution
#
# Check if the Loihi2 compiler is available and import related modules.
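# %%
# Illustrative aside (an editor's sketch, not part of the original tutorial code):
# the __Encoder__ described above sparsifies the video stream by transmitting only
# frame-to-frame differences. A minimal NumPy version of that idea is shown below;
# the actual `PilotNetEncoder` in `utils.py` may differ in detail and, for Loihi
# execution, additionally compresses the data before sending it to the chip.
def delta_encode(frames):
    """Return the first frame followed by frame-to-frame differences."""
    deltas = np.diff(frames, axis=0)  # deltas[t] = frame[t + 1] - frame[t]
    return np.concatenate([frames[:1], deltas], axis=0)
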
+ +# %% + +def setup(): + from lava.utils.system import Loihi2 + Loihi2.preferred_partition = 'oheogulch' + loihi2_is_available = Loihi2.is_loihi2_available + + if loihi2_is_available: + print(f'Running on {Loihi2.partition}') + compression = io.encoder.Compression.DELTA_SPARSE_8 + from lava.proc import embedded_io as eio + from lava.proc.embedded_io.state import Read as VoltageReader + else: + print("Loihi2 compiler is not available in this system. " + "This tutorial will execute on CPU backend.") + from utils import VoltageReader + compression = io.encoder.Compression.DENSE + + + # %% [markdown] + # ## Create network block + # + # PilotNet LIF is described by the hdf5 file inference `network.net`. + # + # A network block can be created by simply instantiating `netx.hdf5.Network` with the path of the desired hdf5 network description file. + # * The input layer is accessible as `net.in_layer`. + # * The output layer is accessible as `net.out_layer`. + # * All the constituent layers are accessible as as a list: `net.layers`. + # + # The PilotNet LIF needs to be reset for every input sample. The reset needs to be orchestrated at different time steps for each layer of the network for fastest possible throughput. `netx.hdf5.Network` features pipelined orchestration of layer reset where each subsequent layer is reset a time step later than it's input. + # + # ![PilotNet Inference](images/pilotnet_lif_network.PNG) + + # %% + # The input spike loads to dataloader at t=0 + # Gets transmitted through the embedded processor at t=1 + # and appears at the input layer at t=2 + net = netx.hdf5.Network(net_config='/home/mwillia/src/lava-dl/tutorials/lava/lib/dl/netx/pilotnet_snn/network.net', + reset_interval=16, + reset_offset=3) + print(net) + + + # %% + print(f'There are {len(net)} layers in network:') + + for l in net.layers: + print(f'{l.__class__.__name__:5s} : {l.name:10s}, shape : {l.shape}') + + + # %% [markdown] + # ## Set execution parameters + # Configure number of samples, execution timesteps, and readout offset. + + # %% + num_samples = 201 + steps_per_sample = net.reset_interval + readout_offset = len(net) + 2 + num_steps = num_samples * steps_per_sample + 1 + + + # %% [markdown] + # ## Create Dataset instance + # Typically the user would write it or provide it. + + # %% + full_set = PilotNetDataset( + path='/home/mwillia/src/lava-dl/tutorials/lava/lib/dl/netx/data', + transform=net.in_layer.transform, # input transform + visualize=True, # visualize ensures the images are returned in sequence + sample_offset=10550, + ) + train_set = PilotNetDataset( + path='/home/mwillia/src/lava-dl/tutorials/lava/lib/dl/netx/data', + transform=net.in_layer.transform, # input transform + train=True, + ) + test_set = PilotNetDataset( + path='/home/mwillia/src/lava-dl/tutorials/lava/lib/dl/netx/data', + transform=net.in_layer.transform, # input transform + train=False, + ) + + + # %% [markdown] + # ## Create Dataloader + # The dataloader process reads data from the dataset objects and sends out the input frame and ground truth as spikes. The dataloader injects new input sample every `steps_per_sample`. + # + # ![PilotNet Inference](images/pilotnet_lif_dataloader.PNG) + + # %% + dataloader = io.dataloader.SpikeDataloader(dataset=full_set, + interval=steps_per_sample) + + + # %% [markdown] + # ## Configure the input layer for graded spike input + # + # The PilotNet LIF network's input layer does bias integration. Bias input is a slow process compared to graded spike input. 
Therefore, we tweak the input layer of PilotNet LIF to receive graded spike and integrate it on neuron's current (u) state to achieve effective bias input. + + # %% + net.in_layer.neuron.du.init = -1 # Make current state persistent + + + # %% [markdown] + # ## Create Input Encoder + # + # The input encoder process does frame difference of subsequent frames to sparsify the input to the network. + # + # For Loihi execution, it additionally compresses and sends the input data to the Loihi 2 chip. + # + # ![PilotNet Inference](images/pilotnet_lif_encoder.PNG) + + # %% + input_encoder = PilotNetEncoder(shape=net.in_layer.shape, + interval=steps_per_sample, + offset=1, + compression=compression) + + + # %% [markdown] + # ## Create Output Decoder + # + # The output of PilotNet LIF network is the output layer neuron's voltage. We use a `VoltageReader` to read the neuron voltage + # and scale the input appropriately using `AffineTransformer`. + # + # For Loihi execution, `VoltageReader` additionally communicates the read values from the Loihi 2 chip. + # + # ![PilotNet Inference](images/pilotnet_lif_decoder.PNG) + + # %% + output_adapter = VoltageReader(shape=net.out.shape, + interval=steps_per_sample, + offset=len(net) + 1) + output_decoder = PilotNetDecoder(shape=net.out.shape, + weight=1 / steps_per_sample / 32 / 64, + interval=steps_per_sample, + offset=len(net) + 2) + + + # %% [markdown] + # ## Create Monitor and Dataloggers + # + # Monitor is a lava process that visualizes the PilotNet network prediction in real-time. In addition, datalogger processes store the network predictions and ground truths. + # + # ![PilotNet Inference](images/pilotnet_lif_monitors.PNG) + + # %% + monitor = PilotNetMonitor(shape=net.inp.shape, + transform=net.in_layer.transform, + interval=steps_per_sample) + gt_logger = io.sink.RingBuffer(shape=(1,), buffer=num_steps) + output_logger = io.sink.RingBuffer(shape=net.out.shape, buffer=num_steps) + + + # %% [markdown] + # # Connect the processes + # + # ![PilotNet Inference](images/pilotnet_lif.PNG) + + # %% + dataloader.ground_truth.connect(gt_logger.a_in) + dataloader.s_out.connect(input_encoder.inp) + input_encoder.out.connect(net.in_layer.neuron.a_in) + + output_adapter.connect_var(net.out_layer.neuron.v) + output_adapter.out.connect(output_decoder.inp) + output_decoder.out.connect(output_logger.a_in) + + dataloader.s_out.connect(monitor.frame_in) + dataloader.ground_truth.connect(monitor.gt_in) + output_decoder.out.connect(monitor.output_in) + + return loihi2_is_available, net, num_steps, steps_per_sample, output_logger, gt_logger, readout_offset + + +# %% [markdown] +# ## Run the network +# +# Switching between Loihi 2 hardware and CPU simulation is as simple as changing the run configuration settings. 
+# +# ![PilotNet Inference](images/pilotnet_lif_backends.PNG) + +def run(output, gst, loihi2_is_available, net, num_steps, steps_per_sample, output_logger, gt_logger): +# %% + if loihi2_is_available: + run_config = Loihi2HwCfg(exception_proc_model_map=loihi2hw_exception_map) + else: + run_config = Loihi2SimCfg(select_tag='fixed_pt', + exception_proc_model_map=loihi2sim_exception_map) + net.run(condition=RunSteps(num_steps=num_steps), run_cfg=run_config) + net.stop() + # output = output_logger.data.get().flatten() + # gts = gt_logger.data.get().flatten()[::steps_per_sample] + # net.stop() + +if __name__ == '__main__': + import timeit + from __main__ import setup + global gts + gts = None + global output + output = None + loihi2_is_available, net, num_steps, steps_per_sample, output_logger, gt_logger, readout_offset = setup() + print(timeit.timeit("run(output, gts, loihi2_is_available, net, num_steps, steps_per_sample, output_logger, gt_logger)", setup="from __main__ import run, gts, output, loihi2_is_available, net, num_steps, steps_per_sample, output_logger, gt_logger", number=3)) + + # result = output[readout_offset::steps_per_sample] + + + # %% [markdown] + # ## Evaluate Results + # Plot and compare the results with the dataset ground truth. + + # %% + # plt.figure(figsize=(7, 5)) + # plt.plot(np.array(gts), label='Ground Truth') + # plt.plot(result[1:].flatten(), label='Lava output') + # plt.xlabel(f'Sample frames (+10550)') + # plt.ylabel('Steering angle (radians)') + # plt.legend() + + + From 7d597fffd708456030790611e9c7aa7730598041 Mon Sep 17 00:00:00 2001 From: Marcus G K Williams Date: Fri, 31 Mar 2023 13:17:57 -0700 Subject: [PATCH 7/7] Run run.py once per invoke Signed-off-by: Marcus G K Williams --- tutorials/lava/lib/dl/netx/pilotnet_snn/run.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tutorials/lava/lib/dl/netx/pilotnet_snn/run.py b/tutorials/lava/lib/dl/netx/pilotnet_snn/run.py index 332cbb11..8fbe0258 100644 --- a/tutorials/lava/lib/dl/netx/pilotnet_snn/run.py +++ b/tutorials/lava/lib/dl/netx/pilotnet_snn/run.py @@ -241,13 +241,21 @@ def run(output, gst, loihi2_is_available, net, num_steps, steps_per_sample, outp if __name__ == '__main__': import timeit + import time from __main__ import setup global gts gts = None global output output = None loihi2_is_available, net, num_steps, steps_per_sample, output_logger, gt_logger, readout_offset = setup() - print(timeit.timeit("run(output, gts, loihi2_is_available, net, num_steps, steps_per_sample, output_logger, gt_logger)", setup="from __main__ import run, gts, output, loihi2_is_available, net, num_steps, steps_per_sample, output_logger, gt_logger", number=3)) + start = time.time() + run(output, gts, loihi2_is_available, net, num_steps, steps_per_sample, output_logger, gt_logger) + end = time.time() + + #print(timeit.timeit("run(output, gts, loihi2_is_available, net, num_steps, steps_per_sample, output_logger, gt_logger)", setup="from __main__ import run, gts, output, loihi2_is_available, net, num_steps, steps_per_sample, output_logger, gt_logger", number=3)) + + delta = end - start + print("took %.2f seconds to process" % delta) # result = output[readout_offset::steps_per_sample]
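
For reference, the evaluation that remains commented out above could be restored along the lines of the following sketch. It is assembled from the commented-out lines themselves and follows the ordering they suggest (fetching the RingBuffer data before `net.stop()`); it assumes the values returned by `setup()` are in scope, and it is not part of the patch series.

# Sketch: fetch logged data before stopping the network, then align the
# predictions with the ground-truth samples for plotting.
net.run(condition=RunSteps(num_steps=num_steps), run_cfg=run_config)
output = output_logger.data.get().flatten()
gts = gt_logger.data.get().flatten()[::steps_per_sample]
net.stop()

result = output[readout_offset::steps_per_sample]
plt.figure(figsize=(7, 5))
plt.plot(np.array(gts), label='Ground Truth')
plt.plot(result[1:].flatten(), label='Lava output')
plt.xlabel('Sample frames (+10550)')
plt.ylabel('Steering angle (radians)')
plt.legend()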