From eeb24b1e0d068640303167fb0f7053819297e5d7 Mon Sep 17 00:00:00 2001 From: Will Killian <2007799+willkill07@users.noreply.github.com> Date: Wed, 29 Jan 2025 11:44:41 -0500 Subject: [PATCH] Bugfix: update old references of 25.02 to 25.06 (#2151) Closes #2150 ## By Submitting this PR I confirm: - I am familiar with the [Contributing Guidelines](https://github.com/nv-morpheus/Morpheus/blob/main/docs/source/developer_guide/contributing.md). - When the PR is ready for review, new or existing tests cover these changes. - When the PR is ready for review, the documentation is up to date with these changes. Authors: - Will Killian (https://github.com/willkill07) Approvers: - David Gardner (https://github.com/dagardner-nv) - https://github.com/hsin-c URL: https://github.com/nv-morpheus/Morpheus/pull/2151 --- .../all_cuda-125_arch-aarch64.yaml | 2 +- .../all_cuda-125_arch-x86_64.yaml | 2 +- .../dev_cuda-125_arch-aarch64.yaml | 2 +- .../dev_cuda-125_arch-x86_64.yaml | 2 +- .../examples_cuda-125_arch-aarch64.yaml | 2 +- .../examples_cuda-125_arch-x86_64.yaml | 2 +- .../runtime_cuda-125_arch-aarch64.yaml | 2 +- .../runtime_cuda-125_arch-x86_64.yaml | 2 +- docs/source/basics/building_a_pipeline.md | 2 +- docs/source/basics/overview.rst | 2 +- docs/source/cloud_deployment_guide.md | 6 ++-- .../guides/2_real_world_phishing.md | 2 +- .../guides/5_digital_fingerprinting.md | 4 +-- docs/source/examples.md | 2 +- docs/source/getting_started.md | 20 ++++++------ examples/abp_nvsmi_detection/README.md | 4 +-- examples/abp_pcap_detection/README.md | 4 +-- .../3_simple_cpp_stage/CMakeLists.txt | 2 +- .../4_rabbitmq_cpp_stage/CMakeLists.txt | 2 +- .../dfp_example_cuda-125_arch-aarch64.yaml | 2 +- .../dfp_example_cuda-125_arch-x86_64.yaml | 2 +- examples/doca/vdb_realtime/README.md | 2 +- examples/llm/vdb_upload/README.md | 6 ++-- examples/log_parsing/README.md | 4 +-- examples/nlp_si_detection/README.md | 2 +- examples/ransomware_detection/README.md | 4 +-- 
examples/root_cause_analysis/README.md | 2 +- examples/sid_visualization/docker-compose.yml | 2 +- models/model-cards/abp-model-card.md | 6 ++-- models/model-cards/dfp-model-card.md | 32 +++++++++---------- models/model-cards/gnn-fsi-model-card.md | 2 +- models/model-cards/phishing-model-card.md | 2 +- .../root-cause-analysis-model-card.md | 6 ++-- models/triton-model-repo/README.md | 2 +- scripts/validation/val-globals.sh | 2 +- tests/benchmarks/README.md | 4 +-- thirdparty/README.md | 2 +- 37 files changed, 75 insertions(+), 75 deletions(-) diff --git a/conda/environments/all_cuda-125_arch-aarch64.yaml b/conda/environments/all_cuda-125_arch-aarch64.yaml index 9dc47e40c..7db03d7a0 100644 --- a/conda/environments/all_cuda-125_arch-aarch64.yaml +++ b/conda/environments/all_cuda-125_arch-aarch64.yaml @@ -68,7 +68,7 @@ dependencies: - libwebp=1.3.2 - libzlib >=1.3.1,<2 - mlflow>=2.10.0,<2.18 -- mrc=25.02 +- mrc=25.06 - myst-parser=0.18.1 - nbsphinx - networkx=2.8.8 diff --git a/conda/environments/all_cuda-125_arch-x86_64.yaml b/conda/environments/all_cuda-125_arch-x86_64.yaml index 47110e7e1..2a0418f5d 100644 --- a/conda/environments/all_cuda-125_arch-x86_64.yaml +++ b/conda/environments/all_cuda-125_arch-x86_64.yaml @@ -69,7 +69,7 @@ dependencies: - libwebp=1.3.2 - libzlib >=1.3.1,<2 - mlflow>=2.10.0,<2.18 -- mrc=25.02 +- mrc=25.06 - myst-parser=0.18.1 - nbsphinx - networkx=2.8.8 diff --git a/conda/environments/dev_cuda-125_arch-aarch64.yaml b/conda/environments/dev_cuda-125_arch-aarch64.yaml index c3daa1719..3474549d3 100644 --- a/conda/environments/dev_cuda-125_arch-aarch64.yaml +++ b/conda/environments/dev_cuda-125_arch-aarch64.yaml @@ -57,7 +57,7 @@ dependencies: - libwebp=1.3.2 - libzlib >=1.3.1,<2 - mlflow>=2.10.0,<2.18 -- mrc=25.02 +- mrc=25.06 - myst-parser=0.18.1 - nbsphinx - networkx=2.8.8 diff --git a/conda/environments/dev_cuda-125_arch-x86_64.yaml b/conda/environments/dev_cuda-125_arch-x86_64.yaml index 135105efb..0e68fba90 100644 --- 
a/conda/environments/dev_cuda-125_arch-x86_64.yaml +++ b/conda/environments/dev_cuda-125_arch-x86_64.yaml @@ -58,7 +58,7 @@ dependencies: - libwebp=1.3.2 - libzlib >=1.3.1,<2 - mlflow>=2.10.0,<2.18 -- mrc=25.02 +- mrc=25.06 - myst-parser=0.18.1 - nbsphinx - networkx=2.8.8 diff --git a/conda/environments/examples_cuda-125_arch-aarch64.yaml b/conda/environments/examples_cuda-125_arch-aarch64.yaml index b3064fd11..7604dc842 100644 --- a/conda/environments/examples_cuda-125_arch-aarch64.yaml +++ b/conda/environments/examples_cuda-125_arch-aarch64.yaml @@ -31,7 +31,7 @@ dependencies: - kfp - libwebp=1.3.2 - mlflow>=2.10.0,<2.18 -- mrc=25.02 +- mrc=25.06 - networkx=2.8.8 - nodejs=18.* - numexpr diff --git a/conda/environments/examples_cuda-125_arch-x86_64.yaml b/conda/environments/examples_cuda-125_arch-x86_64.yaml index 8bed5bbd8..c7eb4c4a8 100644 --- a/conda/environments/examples_cuda-125_arch-x86_64.yaml +++ b/conda/environments/examples_cuda-125_arch-x86_64.yaml @@ -31,7 +31,7 @@ dependencies: - kfp - libwebp=1.3.2 - mlflow>=2.10.0,<2.18 -- mrc=25.02 +- mrc=25.06 - networkx=2.8.8 - newspaper3k==0.2.8 - nodejs=18.* diff --git a/conda/environments/runtime_cuda-125_arch-aarch64.yaml b/conda/environments/runtime_cuda-125_arch-aarch64.yaml index 851704426..a8426c43d 100644 --- a/conda/environments/runtime_cuda-125_arch-aarch64.yaml +++ b/conda/environments/runtime_cuda-125_arch-aarch64.yaml @@ -27,7 +27,7 @@ dependencies: - grpcio-status - libwebp=1.3.2 - mlflow>=2.10.0,<2.18 -- mrc=25.02 +- mrc=25.06 - networkx=2.8.8 - numpydoc=1.5 - pip diff --git a/conda/environments/runtime_cuda-125_arch-x86_64.yaml b/conda/environments/runtime_cuda-125_arch-x86_64.yaml index d1086eb84..527d564ce 100644 --- a/conda/environments/runtime_cuda-125_arch-x86_64.yaml +++ b/conda/environments/runtime_cuda-125_arch-x86_64.yaml @@ -27,7 +27,7 @@ dependencies: - grpcio-status - libwebp=1.3.2 - mlflow>=2.10.0,<2.18 -- mrc=25.02 +- mrc=25.06 - networkx=2.8.8 - numpydoc=1.5 - pip diff --git 
a/docs/source/basics/building_a_pipeline.md b/docs/source/basics/building_a_pipeline.md index b22831828..d3cf34d06 100644 --- a/docs/source/basics/building_a_pipeline.md +++ b/docs/source/basics/building_a_pipeline.md @@ -207,7 +207,7 @@ This example shows an NLP Pipeline which uses several stages available in Morphe #### Launching Triton Run the following to launch Triton and load the `sid-minibert` model: ```bash -docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model sid-minibert-onnx +docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model sid-minibert-onnx ``` #### Launching Kafka diff --git a/docs/source/basics/overview.rst b/docs/source/basics/overview.rst index 1a39b4404..ba0309e51 100644 --- a/docs/source/basics/overview.rst +++ b/docs/source/basics/overview.rst @@ -114,7 +114,7 @@ The ONNX to TensorRT (TRT) conversion utility requires additional packages, whic conda env update --solver=libmamba -n morpheus --file conda/environments/model-utils_cuda-125_arch-$(arch).yaml ``` -Example usage of the ONNX to TRT conversion utility can be found in `models/README.md <https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/README.md>`_. +Example usage of the ONNX to TRT conversion utility can be found in `models/README.md <https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/README.md>`_. 
AutoComplete ------------ diff --git a/docs/source/cloud_deployment_guide.md b/docs/source/cloud_deployment_guide.md index e9c981f84..15e3de13d 100644 --- a/docs/source/cloud_deployment_guide.md +++ b/docs/source/cloud_deployment_guide.md @@ -103,7 +103,7 @@ The Helm chart (`morpheus-ai-engine`) that offers the auxiliary components requi Follow the below steps to install Morpheus AI Engine: ```bash -helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-ai-engine-25.02.tgz --username='$oauthtoken' --password=$API_KEY --untar +helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-ai-engine-25.06.tgz --username='$oauthtoken' --password=$API_KEY --untar ``` ```bash helm install --set ngc.apiKey="$API_KEY" \ @@ -145,7 +145,7 @@ replicaset.apps/zookeeper-87f9f4dd 1 1 1 54s Run the following command to pull the Morpheus SDK Client (referred to as Helm chart `morpheus-sdk-client`) on to your instance: ```bash -helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-sdk-client-25.02.tgz --username='$oauthtoken' --password=$API_KEY --untar +helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-sdk-client-25.06.tgz --username='$oauthtoken' --password=$API_KEY --untar ``` #### Morpheus SDK Client in Sleep Mode @@ -183,7 +183,7 @@ kubectl -n $NAMESPACE exec sdk-cli-helper -- cp -RL /workspace/models /common The Morpheus MLflow Helm chart offers MLflow server with Triton plugin to deploy, update, and remove models from the Morpheus AI Engine. The MLflow server UI can be accessed using NodePort `30500`. 
Follow the below steps to install the Morpheus MLflow: ```bash -helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-mlflow-25.02.tgz --username='$oauthtoken' --password=$API_KEY --untar +helm fetch https://helm.ngc.nvidia.com/nvidia/morpheus/charts/morpheus-mlflow-25.06.tgz --username='$oauthtoken' --password=$API_KEY --untar ``` ```bash helm install --set ngc.apiKey="$API_KEY" \ diff --git a/docs/source/developer_guide/guides/2_real_world_phishing.md b/docs/source/developer_guide/guides/2_real_world_phishing.md index 90008cb8e..bbe7f0ae5 100644 --- a/docs/source/developer_guide/guides/2_real_world_phishing.md +++ b/docs/source/developer_guide/guides/2_real_world_phishing.md @@ -235,7 +235,7 @@ We will launch a Triton Docker container with: ```shell docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 \ - nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 \ + nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 \ tritonserver --model-repository=/models/triton-model-repo \ --exit-on-error=false \ --log-info=true \ diff --git a/docs/source/developer_guide/guides/5_digital_fingerprinting.md b/docs/source/developer_guide/guides/5_digital_fingerprinting.md index 17e25aceb..f8e6fa1e7 100644 --- a/docs/source/developer_guide/guides/5_digital_fingerprinting.md +++ b/docs/source/developer_guide/guides/5_digital_fingerprinting.md @@ -23,10 +23,10 @@ Every account, user, service, and machine has a digital fingerprint that represe To construct this digital fingerprint, we will be training unsupervised behavioral models at various granularities, including a generic model for all users in the organization along with fine-grained models for each user to monitor their behavior. These models are continuously updated and retrained over time​, and alerts are triggered when deviations from normality occur for any user​. 
## Running the DFP Example -Instructions for building and running the DFP example are available in the [`examples/digital_fingerprinting/production/README.md`](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/examples/digital_fingerprinting/production/README.md) guide in the Morpheus repository. +Instructions for building and running the DFP example are available in the [`examples/digital_fingerprinting/production/README.md`](https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/examples/digital_fingerprinting/production/README.md) guide in the Morpheus repository. ## Training Sources -The data we will want to use for the training and inference will be any sensitive system that the user interacts with, such as VPN, authentication and cloud services. The digital fingerprinting example ([`examples/digital_fingerprinting/production/README.md`](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/examples/digital_fingerprinting/production/README.md)) included in Morpheus ingests logs from [Azure Active Directory](https://docs.microsoft.com/en-us/azure/active-directory/reports-monitoring/concept-sign-ins), and [Duo Authentication](https://duo.com/docs/adminapi). +The data we will want to use for the training and inference will be any sensitive system that the user interacts with, such as VPN, authentication and cloud services. The digital fingerprinting example ([`examples/digital_fingerprinting/production/README.md`](https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/examples/digital_fingerprinting/production/README.md)) included in Morpheus ingests logs from [Azure Active Directory](https://docs.microsoft.com/en-us/azure/active-directory/reports-monitoring/concept-sign-ins), and [Duo Authentication](https://duo.com/docs/adminapi). The location of these logs could be either local to the machine running Morpheus, a shared file system like NFS, or on a remote store such as [Amazon S3](https://aws.amazon.com/s3/). 
diff --git a/docs/source/examples.md b/docs/source/examples.md index d2425fcd8..1a61a3223 100644 --- a/docs/source/examples.md +++ b/docs/source/examples.md @@ -40,7 +40,7 @@ Morpheus supports multiple environments, each environment is intended to support In addition to this many of the examples utilize the Morpheus Triton Models container which can be obtained by running the following command: ```bash -docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 +docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 ``` The following are the supported environments: diff --git a/docs/source/getting_started.md b/docs/source/getting_started.md index 03390d73c..169d70fe0 100644 --- a/docs/source/getting_started.md +++ b/docs/source/getting_started.md @@ -42,18 +42,18 @@ More advanced users, or those who are interested in using the latest pre-release ### Pull the Morpheus Image 1. Go to [https://catalog.ngc.nvidia.com/orgs/nvidia/teams/morpheus/containers/morpheus/tags](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/morpheus/containers/morpheus/tags) 1. Choose a version -1. Download the selected version, for example for `25.02`: +1. Download the selected version, for example for `25.06`: ```bash - docker pull nvcr.io/nvidia/morpheus/morpheus:25.02-runtime + docker pull nvcr.io/nvidia/morpheus/morpheus:25.06-runtime ``` 1. Optional: Many of the examples require NVIDIA Triton Inference Server to be running with the included models. To download the Morpheus Triton Server Models container, ensure that the version number matches that of the Morpheus container you downloaded in the previous step, then run: ```bash - docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 + docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 ``` > **Note about Morpheus versions:** > -> Morpheus uses Calendar Versioning ([CalVer](https://calver.org/)). 
For each Morpheus release there will be an image tagged in the form of `YY.MM-runtime` this tag will always refer to the latest point release for that version. In addition to this there will also be at least one point release version tagged in the form of `vYY.MM.00-runtime` this will be the initial point release for that version (ex. `v25.02.00-runtime`). In the event of a major bug, we may release additional point releases (ex. `v25.02.01-runtime`, `v25.02.02-runtime` etc...), and the `YY.MM-runtime` tag will be updated to reference that point release. +> Morpheus uses Calendar Versioning ([CalVer](https://calver.org/)). For each Morpheus release there will be an image tagged in the form of `YY.MM-runtime` this tag will always refer to the latest point release for that version. In addition to this there will also be at least one point release version tagged in the form of `vYY.MM.00-runtime` this will be the initial point release for that version (ex. `v25.06.00-runtime`). In the event of a major bug, we may release additional point releases (ex. `v25.06.01-runtime`, `v25.06.02-runtime` etc...), and the `YY.MM-runtime` tag will be updated to reference that point release. > > Users who want to ensure they are running with the latest bug fixes should use a release image tag (`YY.MM-runtime`). Users who need to deploy a specific version into production should use a point release image tag (`vYY.MM.00-runtime`). @@ -61,7 +61,7 @@ More advanced users, or those who are interested in using the latest pre-release 1. Ensure that [The NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#installation) is installed. 1. 
Start the container downloaded from the previous section: ```bash -docker run --rm -ti --runtime=nvidia --gpus=all --net=host -v /var/run/docker.sock:/var/run/docker.sock nvcr.io/nvidia/morpheus/morpheus:25.02-runtime bash +docker run --rm -ti --runtime=nvidia --gpus=all --net=host -v /var/run/docker.sock:/var/run/docker.sock nvcr.io/nvidia/morpheus/morpheus:25.06-runtime bash ``` Note about some of the flags above: @@ -147,17 +147,17 @@ To run the built "release" container, use the following: ./docker/run_container_release.sh ``` -The `./docker/run_container_release.sh` script accepts the same `DOCKER_IMAGE_NAME`, and `DOCKER_IMAGE_TAG` environment variables that the `./docker/build_container_release.sh` script does. For example, to run version `v25.02.00` use the following: +The `./docker/run_container_release.sh` script accepts the same `DOCKER_IMAGE_NAME`, and `DOCKER_IMAGE_TAG` environment variables that the `./docker/build_container_release.sh` script does. For example, to run version `v25.06.00` use the following: ```bash -DOCKER_IMAGE_TAG="v25.02.00-runtime" ./docker/run_container_release.sh +DOCKER_IMAGE_TAG="v25.06.00-runtime" ./docker/run_container_release.sh ``` ## Acquiring the Morpheus Models Container Many of the validation tests and example workflows require a Triton server to function. For simplicity, Morpheus provides a pre-built models container, which contains both the Triton and Morpheus models. Users implementing a release version of Morpheus can download the corresponding Triton models container from NGC with the following command: ```bash -docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 +docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 ``` Users working with an unreleased development version of Morpheus can build the Triton models container from the Morpheus repository. 
To build the Triton models container, run the following command from the root of the Morpheus repository: @@ -170,7 +170,7 @@ models/docker/build_container.sh In a new terminal, use the following command to launch a Docker container for Triton loading all of the included pre-trained models: ```bash docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 \ - nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 \ + nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 \ tritonserver --model-repository=/models/triton-model-repo \ --exit-on-error=false \ --log-info=true \ @@ -183,7 +183,7 @@ This will launch Triton using the default network ports (8000 for HTTP, 8001 for Note: The above command is useful for testing out Morpheus, however it does load several models into GPU memory, which at the time of this writing consumes roughly 2GB of GPU memory. Production users should consider only loading the specific models they plan on using with the `--model-control-mode=explicit` and `--load-model` flags. For example, to launch Triton only loading the `abp-nvsmi-xgb` model: ```bash docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 \ - nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 \ + nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 \ tritonserver --model-repository=/models/triton-model-repo \ --exit-on-error=false \ --log-info=true \ diff --git a/examples/abp_nvsmi_detection/README.md b/examples/abp_nvsmi_detection/README.md index 445b44809..65fd00c9d 100644 --- a/examples/abp_nvsmi_detection/README.md +++ b/examples/abp_nvsmi_detection/README.md @@ -89,12 +89,12 @@ This example utilizes the Triton Inference Server to perform inference. 
Pull the Docker image for Triton: ```bash -docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 +docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 ``` Run the following to launch Triton and load the `abp-nvsmi-xgb` XGBoost model: ```bash -docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model abp-nvsmi-xgb +docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model abp-nvsmi-xgb ``` This will launch Triton and only load the `abp-nvsmi-xgb` model. This model has been configured with a max batch size of 32768, and to use dynamic batching for increased performance. diff --git a/examples/abp_pcap_detection/README.md b/examples/abp_pcap_detection/README.md index 9d5c505b3..35506c77b 100644 --- a/examples/abp_pcap_detection/README.md +++ b/examples/abp_pcap_detection/README.md @@ -30,13 +30,13 @@ To run this example, an instance of Triton Inference Server and a sample dataset ### Triton Inference Server ```bash -docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 +docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 ``` ##### Deploy Triton Inference Server Run the following to launch Triton and load the `abp-pcap-xgb` model: ```bash -docker run --rm --gpus=all -p 8000:8000 -p 8001:8001 -p 8002:8002 --name tritonserver nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model abp-pcap-xgb +docker run --rm --gpus=all -p 8000:8000 -p 8001:8001 -p 8002:8002 --name tritonserver 
nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model abp-pcap-xgb ``` ##### Verify Model Deployment diff --git a/examples/developer_guide/3_simple_cpp_stage/CMakeLists.txt b/examples/developer_guide/3_simple_cpp_stage/CMakeLists.txt index d6aed8112..237f6f6be 100644 --- a/examples/developer_guide/3_simple_cpp_stage/CMakeLists.txt +++ b/examples/developer_guide/3_simple_cpp_stage/CMakeLists.txt @@ -25,7 +25,7 @@ mark_as_advanced(MORPHEUS_CACHE_DIR) list(PREPEND CMAKE_PREFIX_PATH "$ENV{CONDA_PREFIX}") project(3_simple_cpp_stage - VERSION 25.02.00 + VERSION 25.06.00 LANGUAGES C CXX ) diff --git a/examples/developer_guide/4_rabbitmq_cpp_stage/CMakeLists.txt b/examples/developer_guide/4_rabbitmq_cpp_stage/CMakeLists.txt index 952cb722b..8ada7c22f 100644 --- a/examples/developer_guide/4_rabbitmq_cpp_stage/CMakeLists.txt +++ b/examples/developer_guide/4_rabbitmq_cpp_stage/CMakeLists.txt @@ -26,7 +26,7 @@ list(PREPEND CMAKE_PREFIX_PATH "$ENV{CONDA_PREFIX}") list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") project(4_rabbitmq_cpp_stage - VERSION 25.02.00 + VERSION 25.06.00 LANGUAGES C CXX ) diff --git a/examples/digital_fingerprinting/production/conda/environments/dfp_example_cuda-125_arch-aarch64.yaml b/examples/digital_fingerprinting/production/conda/environments/dfp_example_cuda-125_arch-aarch64.yaml index 191bb2e5f..4c018d0da 100644 --- a/examples/digital_fingerprinting/production/conda/environments/dfp_example_cuda-125_arch-aarch64.yaml +++ b/examples/digital_fingerprinting/production/conda/environments/dfp_example_cuda-125_arch-aarch64.yaml @@ -11,7 +11,7 @@ channels: dependencies: - boto3=1.35 - kfp -- morpheus-dfp=25.02 +- morpheus-dfp=25.06 - nodejs=18.* - papermill=2.4.0 - pip diff --git a/examples/digital_fingerprinting/production/conda/environments/dfp_example_cuda-125_arch-x86_64.yaml 
b/examples/digital_fingerprinting/production/conda/environments/dfp_example_cuda-125_arch-x86_64.yaml index f693d5fac..944079566 100644 --- a/examples/digital_fingerprinting/production/conda/environments/dfp_example_cuda-125_arch-x86_64.yaml +++ b/examples/digital_fingerprinting/production/conda/environments/dfp_example_cuda-125_arch-x86_64.yaml @@ -11,7 +11,7 @@ channels: dependencies: - boto3=1.35 - kfp -- morpheus-dfp=25.02 +- morpheus-dfp=25.06 - nodejs=18.* - papermill=2.4.0 - pip diff --git a/examples/doca/vdb_realtime/README.md b/examples/doca/vdb_realtime/README.md index 19e698145..dc8b8b054 100644 --- a/examples/doca/vdb_realtime/README.md +++ b/examples/doca/vdb_realtime/README.md @@ -49,7 +49,7 @@ To serve the embedding model, we will use Triton: cd ${MORPHEUS_ROOT} # Launch Triton -docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model all-MiniLM-L6-v2 +docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model all-MiniLM-L6-v2 ``` ## Populate the Milvus database diff --git a/examples/llm/vdb_upload/README.md b/examples/llm/vdb_upload/README.md index de9916c0f..e92d84440 100644 --- a/examples/llm/vdb_upload/README.md +++ b/examples/llm/vdb_upload/README.md @@ -148,12 +148,12 @@ milvus-server --data .tmp/milvusdb - Pull the Docker image for Triton: ```bash - docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 + docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 ``` - Run the following to launch Triton and load the `all-MiniLM-L6-v2` model: ```bash - docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 
nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model all-MiniLM-L6-v2 + docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model all-MiniLM-L6-v2 ``` This will launch Triton and only load the `all-MiniLM-L6-v2` model. Once Triton has loaded the model, the following @@ -287,7 +287,7 @@ using `sentence-transformers/paraphrase-multilingual-mpnet-base-v2` as an exampl - Reload the docker container, specifying that we also need to load paraphrase-multilingual-mpnet-base-v2 ```bash docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 \ - nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver \ + nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 tritonserver \ --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model \ all-MiniLM-L6-v2 --load-model sentence-transformers/paraphrase-multilingual-mpnet-base-v2 ``` diff --git a/examples/log_parsing/README.md b/examples/log_parsing/README.md index dc8c77b5e..8e282812e 100644 --- a/examples/log_parsing/README.md +++ b/examples/log_parsing/README.md @@ -34,14 +34,14 @@ Pull the Morpheus Triton models Docker image from NGC. 
Example: ```bash -docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 +docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 ``` ##### Start Triton Inference Server Container From the Morpheus repo root directory, run the following to launch Triton and load the `log-parsing-onnx` model: ```bash -docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model log-parsing-onnx +docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model log-parsing-onnx ``` ##### Verify Model Deployment diff --git a/examples/nlp_si_detection/README.md b/examples/nlp_si_detection/README.md index 4fa945048..415ad3940 100644 --- a/examples/nlp_si_detection/README.md +++ b/examples/nlp_si_detection/README.md @@ -85,7 +85,7 @@ This example utilizes the Triton Inference Server to perform inference. The neur From the Morpheus repo root directory, run the following to launch Triton and load the `sid-minibert` model: ```bash -docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model sid-minibert-onnx +docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model sid-minibert-onnx ``` This will launch Triton and only load the `sid-minibert-onnx` model. 
This model has been configured with a max batch size of 32, and to use dynamic batching for increased performance. diff --git a/examples/ransomware_detection/README.md b/examples/ransomware_detection/README.md index d4f677cc7..3b3df1d16 100644 --- a/examples/ransomware_detection/README.md +++ b/examples/ransomware_detection/README.md @@ -35,7 +35,7 @@ Pull Docker image from NGC (https://ngc.nvidia.com/catalog/containers/nvidia:tri Example: ```bash -docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 +docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 ``` ##### Start Triton Inference Server Container @@ -43,7 +43,7 @@ From the Morpheus repo root directory, run the following to launch Triton and lo ```bash # Run Triton in explicit mode docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 \ - nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 \ + nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 \ tritonserver --model-repository=/models/triton-model-repo \ --exit-on-error=false \ --model-control-mode=explicit \ diff --git a/examples/root_cause_analysis/README.md b/examples/root_cause_analysis/README.md index a94da3eff..155a06134 100644 --- a/examples/root_cause_analysis/README.md +++ b/examples/root_cause_analysis/README.md @@ -54,7 +54,7 @@ This example utilizes the Triton Inference Server to perform inference. 
The bina From the Morpheus repo root directory, run the following to launch Triton and load the `root-cause-binary-onnx` model: ```bash -docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model root-cause-binary-onnx +docker run --rm -ti --gpus=all -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 tritonserver --model-repository=/models/triton-model-repo --exit-on-error=false --model-control-mode=explicit --load-model root-cause-binary-onnx ``` This will launch Triton and only load the model required by our example pipeline. The model has been configured with a max batch size of 32, and to use dynamic batching for increased performance. diff --git a/examples/sid_visualization/docker-compose.yml b/examples/sid_visualization/docker-compose.yml index 8362ccf7d..c6c735a47 100644 --- a/examples/sid_visualization/docker-compose.yml +++ b/examples/sid_visualization/docker-compose.yml @@ -24,7 +24,7 @@ x-with-gpus: &with_gpus services: triton: - image: nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 + image: nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 <<: *with_gpus command: "tritonserver --exit-on-error=false --model-control-mode=explicit --load-model sid-minibert-onnx --model-repository=/models/triton-model-repo" environment: diff --git a/models/model-cards/abp-model-card.md b/models/model-cards/abp-model-card.md index 6ff57f2a2..a39007765 100644 --- a/models/model-cards/abp-model-card.md +++ b/models/model-cards/abp-model-card.md @@ -76,7 +76,7 @@ limitations under the License. ## Training Dataset: **Link:** -* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/abp-sample-nvsmi-training-data.json
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/training-data/abp-sample-nvsmi-training-data.json
**Properties (Quantity, Dataset Descriptions, Sensors):** * Sample dataset consists of over 1000 `nvidia-smi` outputs
@@ -84,7 +84,7 @@ limitations under the License. ## Evaluation Dataset: **Link:** -* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/abp-validation-data.jsonlines
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/validation-data/abp-validation-data.jsonlines
**Properties (Quantity, Dataset Descriptions, Sensors):** * Sample dataset consists of over 1000 `nvidia-smi` outputs
@@ -146,7 +146,7 @@ NVIDIA believes Trustworthy AI is a shared responsibility and we have establishe ## Model Card ++ Safety & Security Subcard ### Link the location of the repository for the training dataset. -* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/abp-sample-nvsmi-training-data.json +* https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/training-data/abp-sample-nvsmi-training-data.json ### Describe the life critical impact (if present). * None diff --git a/models/model-cards/dfp-model-card.md b/models/model-cards/dfp-model-card.md index fc8fede3d..753fc279a 100644 --- a/models/model-cards/dfp-model-card.md +++ b/models/model-cards/dfp-model-card.md @@ -65,36 +65,36 @@ The model architecture consists of an Autoencoder, where the reconstruction loss * Linux
## Model Versions: -* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/dfp-models/hammah-role-g-20211017-dill.pkl -* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/dfp-models/hammah-user123-20211017-dill.pkl +* https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/dfp-models/hammah-role-g-20211017-dill.pkl +* https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/dfp-models/hammah-user123-20211017-dill.pkl # Training & Evaluation: ## Training Dataset: **Link:** -* https://github.com/nv-morpheus/Morpheus/tree/branch-25.02/models/datasets/training-data/cloudtrail +* https://github.com/nv-morpheus/Morpheus/tree/branch-25.06/models/datasets/training-data/cloudtrail **Properties (Quantity, Dataset Descriptions, Sensors):** The training dataset consists of AWS CloudTrail logs. It contains logs from two entities, providing information about their activities within the AWS environment. -* [hammah-role-g-training-part1.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/cloudtrail/hammah-role-g-training-part1.json): 700 records
-* [hammah-role-g-training-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/cloudtrail/hammah-role-g-training-part2.json): 1187 records
-* [hammah-user123-training-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/cloudtrail/hammah-user123-training-part2.json): 1000 records
-* [hammah-user123-training-part3.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/cloudtrail/hammah-user123-training-part3.json): 1000 records
-* [hammah-user123-training-part4.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/cloudtrail/hammah-user123-training-part4.json): 387 records
+* [hammah-role-g-training-part1.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/training-data/cloudtrail/hammah-role-g-training-part1.json): 700 records
+* [hammah-role-g-training-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/training-data/cloudtrail/hammah-role-g-training-part2.json): 1187 records
+* [hammah-user123-training-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/training-data/cloudtrail/hammah-user123-training-part2.json): 1000 records
+* [hammah-user123-training-part3.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/training-data/cloudtrail/hammah-user123-training-part3.json): 1000 records
+* [hammah-user123-training-part4.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/training-data/cloudtrail/hammah-user123-training-part4.json): 387 records
## Evaluation Dataset: **Link:** -* https://github.com/nv-morpheus/Morpheus/tree/branch-25.02/models/datasets/validation-data/cloudtrail
+* https://github.com/nv-morpheus/Morpheus/tree/branch-25.06/models/datasets/validation-data/cloudtrail
**Properties (Quantity, Dataset Descriptions, Sensors):** The evaluation dataset consists of AWS CloudTrail logs. It contains logs from two entities, providing information about their activities within the AWS environment. -* [hammah-role-g-validation.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/cloudtrail/hammah-role-g-validation.json): 314 records -* [hammah-user123-validation-part1.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part1.json): 300 records -* [hammah-user123-validation-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part2.json): 300 records -* [hammah-user123-validation-part3.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part3.json): 247 records +* [hammah-role-g-validation.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/validation-data/cloudtrail/hammah-role-g-validation.json): 314 records +* [hammah-user123-validation-part1.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part1.json): 300 records +* [hammah-user123-validation-part2.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part2.json): 300 records +* [hammah-user123-validation-part3.json](https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/validation-data/cloudtrail/hammah-user123-validation-part3.json): 247 records ## Inference: **Engine:** @@ -133,7 +133,7 @@ NVIDIA believes Trustworthy AI is a shared responsibility and we have establishe ### Describe how this model works. 
* The model works by training on baseline behaviors and subsequently detecting deviations from the established baseline, triggering alerts accordingly. -* [Training notebook](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/training-tuning-scripts/dfp-models/hammah-20211017.ipynb) +* [Training notebook](https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/training-tuning-scripts/dfp-models/hammah-20211017.ipynb) ### List the technical limitations of the model. * The model expects CloudTrail logs with specific features that match the training dataset. Data lacking the required features or requiring a different feature set may not be compatible with the model. @@ -153,7 +153,7 @@ NVIDIA believes Trustworthy AI is a shared responsibility and we have establishe ## Model Card ++ Safety & Security Subcard ### Link the location of the repository for the training dataset (if able to share). -* https://github.com/nv-morpheus/Morpheus/tree/branch-25.02/models/datasets/training-data/cloudtrail +* https://github.com/nv-morpheus/Morpheus/tree/branch-25.06/models/datasets/training-data/cloudtrail ### Describe the life critical impact (if present). * None @@ -194,7 +194,7 @@ NVIDIA believes Trustworthy AI is a shared responsibility and we have establishe * No ### Are we able to identify and trace source of dataset? -* Yes ([fully synthetic dataset](https://github.com/nv-morpheus/Morpheus/tree/branch-25.02/models/datasets/training-data/cloudtrail)) +* Yes ([fully synthetic dataset](https://github.com/nv-morpheus/Morpheus/tree/branch-25.06/models/datasets/training-data/cloudtrail)) ### Does data labeling (annotation, metadata) comply with privacy laws? 
* Not applicable (dataset is fully synthetic) diff --git a/models/model-cards/gnn-fsi-model-card.md b/models/model-cards/gnn-fsi-model-card.md index 5a61fd7f3..49dfad490 100644 --- a/models/model-cards/gnn-fsi-model-card.md +++ b/models/model-cards/gnn-fsi-model-card.md @@ -140,7 +140,7 @@ NVIDIA believes Trustworthy AI is a shared responsibility and we have establishe * None ### Link the relevant end user license agreement -* [Apache 2.0](https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/LICENSE) +* [Apache 2.0](https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/LICENSE) ## Model Card ++ Safety & Security Subcard diff --git a/models/model-cards/phishing-model-card.md b/models/model-cards/phishing-model-card.md index 49abc13fb..e503844f9 100644 --- a/models/model-cards/phishing-model-card.md +++ b/models/model-cards/phishing-model-card.md @@ -84,7 +84,7 @@ limitations under the License. ## Evaluation Dataset: **Link:** -* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/phishing-email-validation-data.jsonlines
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/validation-data/phishing-email-validation-data.jsonlines
**Properties (Quantity, Dataset Descriptions, Sensors):** * Dataset consists of SMS messages
diff --git a/models/model-cards/root-cause-analysis-model-card.md b/models/model-cards/root-cause-analysis-model-card.md index 4030465bb..f736e0702 100644 --- a/models/model-cards/root-cause-analysis-model-card.md +++ b/models/model-cards/root-cause-analysis-model-card.md @@ -74,7 +74,7 @@ limitations under the License. ## Training Dataset: **Link:** -* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/root-cause-training-data.csv
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/training-data/root-cause-training-data.csv
**Properties (Quantity, Dataset Descriptions, Sensors):** * kern.log files from DGX machines
@@ -82,7 +82,7 @@ limitations under the License. ## Evaluation Dataset: **Link:** -* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/validation-data/root-cause-validation-data-input.jsonlines
+* https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/validation-data/root-cause-validation-data-input.jsonlines
**Properties (Quantity, Dataset Descriptions, Sensors):** * kern.log files from DGX machines
@@ -141,7 +141,7 @@ limitations under the License. ## Model Card ++ Safety & Security Subcard ### Link the location of the repository for the training dataset. -* https://github.com/nv-morpheus/Morpheus/blob/branch-25.02/models/datasets/training-data/root-cause-training-data.csv +* https://github.com/nv-morpheus/Morpheus/blob/branch-25.06/models/datasets/training-data/root-cause-training-data.csv ### Describe the life critical impact (if present). * None diff --git a/models/triton-model-repo/README.md b/models/triton-model-repo/README.md index 3f22dc1ab..f181c93a1 100644 --- a/models/triton-model-repo/README.md +++ b/models/triton-model-repo/README.md @@ -40,7 +40,7 @@ The downside of using symlinks is that the entire Morpheus model repo must be vo ## Models Container The models in this directory are available in a pre-built container image containing Triton Inference Server, along with the models themselves. The container image is available on NGC and can be pulled using the following command: ```bash -docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 +docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 ``` Those users who are working on training their own models have two options available: diff --git a/scripts/validation/val-globals.sh b/scripts/validation/val-globals.sh index 6ce8ae5ee..ea50ccd66 100755 --- a/scripts/validation/val-globals.sh +++ b/scripts/validation/val-globals.sh @@ -26,7 +26,7 @@ export e="\033[0;90m" export y="\033[0;33m" export x="\033[0m" -export TRITON_IMAGE=${TRITON_IMAGE:-"nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02"} +export TRITON_IMAGE=${TRITON_IMAGE:-"nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06"} # TRITON_GRPC_PORT is only used when TRITON_URL is undefined export TRITON_GRPC_PORT=${TRITON_GRPC_PORT:-"8001"} diff --git a/tests/benchmarks/README.md b/tests/benchmarks/README.md index bf4c26350..1bf03acb8 100644 --- a/tests/benchmarks/README.md +++ 
b/tests/benchmarks/README.md @@ -24,12 +24,12 @@ Pull Morpheus Models Docker image from NGC. Example: ```bash -docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 +docker pull nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 ``` ##### Start Triton Inference Server container ```bash -docker run --gpus=all --rm -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.02 tritonserver --model-repository=/models/triton-model-repo --model-control-mode=explicit --load-model sid-minibert-onnx --load-model abp-nvsmi-xgb --load-model phishing-bert-onnx --load-model all-MiniLM-L6-v2 +docker run --gpus=all --rm -p8000:8000 -p8001:8001 -p8002:8002 nvcr.io/nvidia/morpheus/morpheus-tritonserver-models:25.06 tritonserver --model-repository=/models/triton-model-repo --model-control-mode=explicit --load-model sid-minibert-onnx --load-model abp-nvsmi-xgb --load-model phishing-bert-onnx --load-model all-MiniLM-L6-v2 ``` ##### Verify Model Deployments diff --git a/thirdparty/README.md b/thirdparty/README.md index 8d750d8d3..3ea1f9590 100644 --- a/thirdparty/README.md +++ b/thirdparty/README.md @@ -17,4 +17,4 @@ limitations under the License. # Source Code for OSS Packages in the NVIDIA Morpheus Docker container -The source code for OSS packages which are included in the NVIDIA Morpheus Docker image is available at [https://github.com/nv-morpheus/morpheus_third_party_oss/tree/branch-25.02](https://github.com/nv-morpheus/morpheus_third_party_oss/tree/branch-25.02) +The source code for OSS packages which are included in the NVIDIA Morpheus Docker image is available at [https://github.com/nv-morpheus/morpheus_third_party_oss/tree/branch-25.06](https://github.com/nv-morpheus/morpheus_third_party_oss/tree/branch-25.06)