Pull the Docker image
docker pull ghcr.io/kalanaratnayake/jetson-pytorch:r36.3.0
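As a quick sanity check (not part of the original steps), you can confirm the image is present locally after the pull:
docker images ghcr.io/kalanaratnayake/jetson-pytorch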
Build the Docker image
docker buildx build --load --platform linux/arm64 -f pytorch-images/r3630.Dockerfile -t jetson-pytorch:r36.3.0 .
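If you are cross-building the linux/arm64 image from an AMD64 host, buildx usually needs QEMU emulation registered and a builder instance created first. A minimal sketch, assuming the commonly used tonistiigi/binfmt image and an arbitrary builder name:
docker run --privileged --rm tonistiigi/binfmt --install arm64    # register QEMU handlers for arm64
docker buildx create --name jetson-builder --use                  # create and select a buildx builder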
Alternatively, build locally on an AMD64 computer with a registry build cache and push, which is useful when image compilation on GitHub Actions is slow or exceeds the runner's storage limits
docker buildx build --push \
--platform linux/arm64 \
--cache-from=type=registry,ref=ghcr.io/kalanaratnayake/jetson-pytorch:r3630-buildcache \
--cache-to=type=registry,ref=ghcr.io/kalanaratnayake/jetson-pytorch:r3630-buildcache,mode=max \
-f pytorch-images/r3630.Dockerfile \
-t ghcr.io/kalanaratnayake/jetson-pytorch:r36.3.0 .
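Pushing the image and its build cache to ghcr.io requires logging in to the registry first. A typical login sketch, where GHCR_PAT and USERNAME are placeholders for your own personal access token and GitHub username:
echo $GHCR_PAT | docker login ghcr.io -u USERNAME --password-stdin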
Alternatively, build locally on a Jetson device with a registry build cache and push, which is useful when image compilation on GitHub Actions is slow or exceeds the runner's storage limits
docker buildx build --push \
--cache-from=type=registry,ref=ghcr.io/kalanaratnayake/jetson-pytorch:r3630-buildcache \
--cache-to=type=registry,ref=ghcr.io/kalanaratnayake/jetson-pytorch:r3630-buildcache,mode=max \
-f pytorch-images/r3630.Dockerfile \
-t ghcr.io/kalanaratnayake/jetson-pytorch:r36.3.0 .
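Since the tag targets JetPack release r36.3.0, it can be worth confirming the L4T version on the Jetson device before building; one way, assuming a standard JetPack install, is:
cat /etc/nv_tegra_release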
Start the Docker container
docker run --rm -it --runtime nvidia --network host --gpus all -e DISPLAY ghcr.io/kalanaratnayake/jetson-pytorch:r36.3.0 bash
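If you need your own code or datasets available inside the container, you can bind-mount a host directory; a sketch, where /workspace is just an example mount point:
docker run --rm -it --runtime nvidia --network host --gpus all -e DISPLAY \
  -v $(pwd):/workspace -w /workspace \
  ghcr.io/kalanaratnayake/jetson-pytorch:r36.3.0 bash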
Run the following commands inside the Docker container to confirm that the container is working properly.
python3 -c "import torch; print(torch.__version__)"
python3 -c "import torch; print(torch.cuda.is_available())"
python3 -c "import torch; print(torch.version.cuda)"
python3 -c "import torchvision; print(torchvision.__version__)"
python3 -c "import torchaudio; print(torchaudio.__version__)"