From 223d44ba77994b8d469df8fe12be90987b9809f5 Mon Sep 17 00:00:00 2001
From: Grigori Fursin
Date: Wed, 15 Jan 2025 17:13:29 +0100
Subject: [PATCH 01/19] clean up

---
 HISTORY.CM.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/HISTORY.CM.md b/HISTORY.CM.md
index ce9a2e43e..7099169b4 100644
--- a/HISTORY.CM.md
+++ b/HISTORY.CM.md
@@ -1,4 +1,4 @@
-This document narrates the history of the creation and design of CM and CM4MLOps (also known as CK2)
+This document narrates the history of the creation and design of CM, CM4MLOps and MLPerf automations (also known as CK2)
 by [Grigori Fursin](https://cKnowledge.org/gfursin). It also highlights the donation of this open-source
 technology to MLCommons, aimed at benefiting the broader community and fostering its ongoing development
 as a collaborative, community-driven initiative:

From 9b35cb3485874e59e87b7fd918cc5bf6a05ed34a Mon Sep 17 00:00:00 2001
From: Grigori Fursin
Date: Tue, 21 Jan 2025 17:44:01 +0100
Subject: [PATCH 02/19] add CM/CMX -i help "print info about available memory and disk space"

---
 .../test-cmx-mlperf-inference-resnet50.yml | 66 +
 cm/cmind/__init__.py | 2 +-
 cm/cmind/core.py | 1 +
 cmx4mlops/cmx4mlops/repo/.gitignore | 20 +
 cmx4mlops/cmx4mlops/repo/CHANGES.md | 247 +
 cmx4mlops/cmx4mlops/repo/CONTRIBUTING.md | 1 +
 cmx4mlops/cmx4mlops/repo/COPYRIGHT.md | 5 +
 cmx4mlops/cmx4mlops/repo/HISTORY.md | 127 +
 cmx4mlops/cmx4mlops/repo/LICENSE.md | 177 +
 .../cmx4mlops/repo/LICENSE.third-party.md | 1 +
 cmx4mlops/cmx4mlops/repo/README.md | 67 +
 .../cmx4mlops/repo/automation/COPYRIGHT.md | 3 +
 .../repo/automation/cache/COPYRIGHT.md | 3 +
 .../repo/automation/cache/README-extra.md | 71 +
 .../cmx4mlops/repo/automation/cache/README.md | 87 +
 .../cmx4mlops/repo/automation/cache/_cm.json | 12 +
 .../cmx4mlops/repo/automation/cache/module.py | 249 +
 .../repo/automation/cache/module_misc.py | 122 +
 .../repo/automation/cfg/COPYRIGHT.md | 3 +
 .../repo/automation/cfg/README-extra.md | 8 +
 .../cmx4mlops/repo/automation/cfg/README.md | 27 +
 .../cmx4mlops/repo/automation/cfg/_cm.json | 12 +
 .../cmx4mlops/repo/automation/cfg/module.py | 270 +
 .../repo/automation/challenge/COPYRIGHT.md | 3 +
 .../repo/automation/challenge/README.md | 27 +
 .../repo/automation/challenge/_cm.json | 9 +
 .../repo/automation/challenge/module.py | 66 +
 .../repo/automation/contributor/COPYRIGHT.md | 3 +
 .../repo/automation/contributor/README.md | 47 +
 .../repo/automation/contributor/_cm.json | 9 +
 .../repo/automation/contributor/module.py | 174 +
 .../repo/automation/data/COPYRIGHT.md | 3 +
 .../cmx4mlops/repo/automation/data/_cm.json | 9 +
 .../cmx4mlops/repo/automation/data/module.py | 66 +
 .../repo/automation/docker/COPYRIGHT.md | 3 +
 .../repo/automation/docker/README.md | 27 +
 .../cmx4mlops/repo/automation/docker/_cm.json | 11 +
 .../repo/automation/docker/module.py | 65 +
 .../repo/automation/docs/COPYRIGHT.md | 3 +
 .../cmx4mlops/repo/automation/docs/_cm.json | 9 +
 .../cmx4mlops/repo/automation/docs/module.py | 66 +
 .../repo/automation/experiment/COPYRIGHT.md | 3 +
 .../automation/experiment/README-extra.md | 315 +
 .../repo/automation/experiment/README.md | 87 +
 .../repo/automation/experiment/_cm.json | 11 +
 .../repo/automation/experiment/module.py | 855 +++
 .../automation/experiment/tests/test2.bat | 1 +
 .../repo/automation/experiment/tests/test2.sh | 1 +
 .../automation/experiment/tests/test3.bat | 1 +
 .../repo/automation/experiment/tests/test3.sh | 1 +
 .../experiment/tests/test3_input.yaml | 4 +
 .../experiment/tests/test__json.bat | 1 +
 .../automation/experiment/tests/test__json.sh | 1 +
 .../experiment/tests/test__yaml.bat | 1 +
 .../automation/experiment/tests/test__yaml.sh | 1 +
 .../experiment/tests/test_input.json | 14 +
 .../experiment/tests/test_input.yaml | 4 +
 .../automation/experiment/tests/test_run.bat | 3 +
 .../automation/experiment/tests/test_run.sh | 1 +
 .../repo/automation/project/COPYRIGHT.md | 3 +
 .../repo/automation/project/README.md | 27 +
 .../repo/automation/project/_cm.json | 10 +
 .../repo/automation/project/module.py | 66 +
 .../repo/automation/report/COPYRIGHT.md | 3 +
 .../repo/automation/report/README.md | 27 +
 .../cmx4mlops/repo/automation/report/_cm.json | 9 +
 .../repo/automation/report/module.py | 66 +
 .../repo/automation/script/COPYRIGHT.md | 3 +
 .../repo/automation/script/README-extra.md | 1034 +++
 .../repo/automation/script/README-specs.md | 79 +
 .../repo/automation/script/README.md | 427 ++
 .../cmx4mlops/repo/automation/script/_cm.json | 18 +
 .../script/assets/scripts-workflow.png | Bin 0 -> 242876 bytes
 .../ubuntu-23.04.Dockerfile | 34 +
 .../ubuntu-23.04.Dockerfile.build.bat | 1 +
 .../ubuntu-23.04.Dockerfile.build.sh | 3 +
 .../ubuntu-23.04.Dockerfile.run.bat | 1 +
 .../ubuntu-23.04.Dockerfile.run.sh | 3 +
 .../repo/automation/script/module.py | 6587 +++++++++++++++++
 .../repo/automation/script/module_help.py | 119 +
 .../repo/automation/script/module_misc.py | 2518 +++++++
 .../script/template-ae-python/README-extra.md | 2 +
 .../script/template-ae-python/_cm.yaml | 38 +
 .../script/template-ae-python/analyze.bat | 12 +
 .../script/template-ae-python/analyze.sh | 12 +
 .../script/template-ae-python/customize.py | 24 +
 .../template-ae-python/install_deps.bat | 18 +
 .../script/template-ae-python/install_deps.sh | 17 +
 .../script/template-ae-python/main.py | 10 +
 .../script/template-ae-python/plot.bat | 12 +
 .../script/template-ae-python/plot.sh | 12 +
 .../script/template-ae-python/reproduce.bat | 12 +
 .../script/template-ae-python/reproduce.sh | 12 +
 .../script/template-ae-python/run.bat | 12 +
 .../script/template-ae-python/run.sh | 12 +
 .../script/template-ae-python/validate.bat | 12 +
 .../script/template-ae-python/validate.sh | 12 +
 .../script/template-python/README-extra.md | 1 +
 .../script/template-python/_cm.yaml | 23 +
 .../script/template-python/customize.py | 32 +
 .../automation/script/template-python/main.py | 10 +
 .../script/template-python/requirements.txt | 0
 .../automation/script/template-python/run.bat | 25 +
 .../automation/script/template-python/run.sh | 24 +
 .../script/template-pytorch/README-extra.md | 1 +
 .../script/template-pytorch/_cm.yaml | 42 +
 .../script/template-pytorch/customize.py | 32 +
 .../script/template-pytorch/main.py | 15 +
 .../script/template-pytorch/requirements.txt | 0
 .../script/template-pytorch/run.bat | 25 +
 .../automation/script/template-pytorch/run.sh | 24 +
 .../script/template/README-extra.md | 1 +
 .../automation/script/template/customize.py | 24 +
 .../repo/automation/script/template/run.bat | 1 +
 .../repo/automation/script/template/run.sh | 17 +
 .../script/template_list_of_scripts.md | 52 +
 .../repo/automation/utils/COPYRIGHT.md | 3 +
 .../cmx4mlops/repo/automation/utils/README.md | 387 +
 .../cmx4mlops/repo/automation/utils/_cm.json | 12 +
 .../cmx4mlops/repo/automation/utils/module.py | 1121 +++
 .../repo/automation/utils/module_cfg.py | 352 +
 .../cfg/benchmark-hardware-compute/_cm.json | 10 +
 .../amd-cpu-x64.json | 6 +
 .../benchmark-hardware-compute/amd-gpu.json | 6 +
 .../generic-cpu-arm64.json | 6 +
 .../google-tpu.json | 6 +
 .../habana-gaudi.json | 6 +
 .../intel-cpu-x64.json | 6 +
 .../nvidia-gpu-jetson-orin.yaml | 7 +
 .../nvidia-gpu.json | 6 +
 .../qualcomm-ai100.json | 6 +
 .../stm-32L4R5ZIT6U-NUCLEO-L4R5ZI.yaml | 5 +
 .../repo/cfg/benchmark-list/_cm.json | 10 +
 .../repo/cfg/benchmark-list/loadgen-cpp.yaml | 19 +
 .../cfg/benchmark-list/loadgen-python.yaml | 16 +
 .../repo/cfg/benchmark-list/mlperf-abtf.yaml | 18 +
 .../cfg/benchmark-list/mlperf-inference.yaml | 28 +
 .../cfg/benchmark-list/mlperf-mobile.yaml | 14 +
 .../repo/cfg/benchmark-list/mlperf-tiny.yaml | 16 +
 .../cfg/benchmark-list/mlperf-training.yaml | 18 +
 .../_cm.yaml | 45 +
 .../run-005147815bf840b8-input.json | 54 +
 .../run-005147815bf840b8-meta.json | 9 +
 .../run-005147815bf840b8-output.json | 11 +
 .../run-0eeb9799b12b488f-input.json | 55 +
 .../run-0eeb9799b12b488f-meta.json | 9 +
 .../run-0eeb9799b12b488f-output.json | 137 +
 .../run-52c1d43172664ed0-input.json | 55 +
 .../run-52c1d43172664ed0-meta.json | 9 +
 .../run-52c1d43172664ed0-output.json | 137 +
 .../run-66cce585ff0242bc-input.json | 56 +
 .../run-66cce585ff0242bc-meta.json | 9 +
 .../run-6a07cf881dee462a-input.json | 56 +
 .../run-6a07cf881dee462a-meta.json | 9 +
 .../run-7d80f464b2274742-input.json | 55 +
 .../run-7d80f464b2274742-meta.json | 10 +
 .../run-7d80f464b2274742-output.json | 137 +
 .../run-7f094c244ebb4985-input.json | 56 +
 .../run-7f094c244ebb4985-meta.json | 9 +
 .../run-7f094c244ebb4985-output.json | 146 +
 .../run-7f094c244ebb4985.md | 1 +
 .../run-d5b6b5af6d794045-input.json | 53 +
 .../run-d5b6b5af6d794045-meta.json | 9 +
 .../run-d8c0f02f52bf49ae-input.json | 53 +
 .../run-d8c0f02f52bf49ae-meta.json | 10 +
 .../run-d8c0f02f52bf49ae-output.json | 137 +
 .../run-df843c22cbf54aaf-input.json | 56 +
 .../run-df843c22cbf54aaf-meta.json | 9 +
 .../run-df843c22cbf54aaf-output.json | 146 +
 .../run-df843c22cbf54aaf.md | 1 +
 .../run-f05147815bf840b8-input.json | 55 +
 .../run-f05147815bf840b8-meta.json | 9 +
 .../run-f05147815bf840b8-output.json | 137 +
 .../_cm.yaml | 38 +
 .../_cm.yaml | 39 +
 .../cfg/docker-basic-configurations/_cm.yaml | 10 +
 .../basic-archlinux.yaml | 9 +
 .../basic-rhel-9.yaml | 9 +
 .../basic-ubuntu-20.04.yaml | 9 +
 .../basic-ubuntu-22.04.yaml | 9 +
 .../basic-ubuntu-23.04.yaml | 9 +
 .../basic-ubuntu-24.04.yaml | 9 +
 ...-cuda-11.8-cudnn-8.6.0-pytorch-1.13.0.yaml | 11 +
 ...4-cuda-12.1-cudnn-8.9.1-pytorch-2.0.0.yaml | 11 +
 ...4-cuda-12.4-cudnn-9.0.0-pytorch-2.3.0.yaml | 11 +
 ...4-cuda-12.5-cudnn-9.1.0-pytorch-2.4.0.yaml | 11 +
 ...4-cuda-12.6-cudnn-9.3.0-pytorch-2.5.0.yaml | 11 +
 .../README.md | 32 +
 .../_cm.json | 22 +
 .../README.md | 4 +
 .../_cm.yaml | 21 +
 .../README.md | 10 +
 .../_cm.yaml | 25 +
 .../README.md | 30 +
 .../_cm.json | 22 +
 .../connect-mlperf-with-medperf/README.md | 23 +
 .../connect-mlperf-with-medperf/_cm.json | 26 +
 .../README.md | 16 +
 .../_cm.json | 20 +
 .../README.md | 7 +
 .../_cm.json | 19 +
 .../README.md | 18 +
 .../_cm.json | 27 +
 .../README.md | 74 +
 .../_cm.json | 27 +
 ...wd-benchmark-mlperf-bert-inference-cuda.md | 281 +
 .../docs/generate-3d-unet-submission.md | 59 +
 .../docs/generate-bert-submission.md | 80 +
 .../docs/generate-resnet50-submission.md | 82 +
 .../docs/generate-retinanet-submission.md | 67 +
 .../docs/generate-rnnt-submission.md | 53 +
 .../docs/setup-aws-instance.md | 48 +
 .../docs/setup-gcp-instance.md | 35 +
 .../docs/setup-nvidia-jetson-orin.md | 53 +
 .../README.md | 83 +
 .../_cm.json | 26 +
 .../docs/generate-3d-unet-submission.md | 67 +
 .../docs/generate-bert-submission.md | 113 +
 .../docs/generate-resnet50-submission.md | 90 +
 .../docs/generate-retinanet-submission.md | 75 +
 .../docs/generate-rnnt-submission.md | 61 +
 .../docs/setup-aws-instance.md | 50 +
 .../docs/setup-gcp-instance.md | 37 +
 .../docs/setup-nvidia-jetson-orin.md | 54 +
 .../README.md | 31 +
 .../_cm.json | 27 +
 .../README.md | 20 +
 .../_cm.json | 26 +
 .../README.md | 31 +
 .../_cm.json | 28 +
 .../README.md | 32 +
 .../_cm.json | 27 +
 .../README.md | 52 +
 .../_cm.json | 27 +
 .../README.md | 31 +
 .../_cm.json | 26 +
 .../README.md | 34 +
 .../_cm.json | 28 +
 .../README.md | 33 +
 .../_cm.json | 26 +
 .../README.md | 41 +
 .../_cm.json | 28 +
 .../README.md | 31 +
 .../_cm.json | 27 +
 .../README.md | 36 +
 .../_cm.json | 28 +
 .../repro-mlperf-inf-v3.0-orin/README.md | 16 +
 .../repro-mlperf-inf-v3.0-orin/_cm.json | 23 +
 .../README.md | 39 +
 .../_cm.json | 20 +
 .../README.md | 3 +
 .../repro-mlperf-inference-v4.0-2024/_cm.yaml | 25 +
 .../README.md | 4 +
 .../repro-mlperf-inference-v4.1-2024/_cm.yaml | 22 +
 .../README.md | 36 +
 .../_cm.json | 23 +
 .../README.md | 17 +
 .../_cm.json | 23 +
 .../run-mlperf@home-v3.1-cpu/README.md | 67 +
 .../run-mlperf@home-v3.1-cpu/_cm.json | 21 +
 .../run-cpu-bert-99-deepsparse.md | 100 +
 ...cpu-dse-mobilenets-efficientnets-tflite.md | 77 +
 .../run-mlperf@home-v3.1-gpu/README.md | 65 +
 .../run-mlperf@home-v3.1-gpu/_cm.json | 20 +
 ...idia-gpu-bert-99-nvidia-docker-tensorrt.md | 193 +
 .../run-nvidia-gpu-gpt-j-6b-ref-pytorch.md | 314 +
 .../train-llm-for-cm-mlperf-2023/README.md | 20 +
 .../train-llm-for-cm-mlperf-2023/_cm.json | 21 +
 .../cmx4mlops/repo/docs/cm-yaml-guide.md | 46 +
 .../cmx4mlops/repo/docs/getting-started.md | 135 +
 cmx4mlops/cmx4mlops/repo/docs/img/logo_v2.svg | 6 +
 .../cmx4mlops/repo/docs/img/pages (80).png | Bin 0 -> 242952 bytes
 cmx4mlops/cmx4mlops/repo/docs/index.md | 79 +
 .../cmx4mlops/repo/docs/requirements.txt | 6 +
 .../AI-ML-datasets/get-croissant/index.md | 86 +
 .../get-dataset-cifar10/index.md | 119 +
 .../AI-ML-datasets/get-dataset-cnndm/index.md | 128 +
 .../AI-ML-datasets/get-dataset-coco/index.md | 140 +
 .../get-dataset-coco2014/index.md | 152 +
 .../get-dataset-criteo/index.md | 124 +
 .../get-dataset-imagenet-aux/index.md | 119 +
 .../get-dataset-imagenet-calibration/index.md | 104 +
 .../get-dataset-imagenet-helper/index.md | 80 +
 .../get-dataset-imagenet-train/index.md | 96 +
 .../get-dataset-imagenet-val/index.md | 149 +
 .../get-dataset-kits19/index.md | 138 +
 .../get-dataset-librispeech/index.md | 97 +
 .../index.md | 105 +
 .../index.md | 131 +
 .../get-dataset-openimages/index.md | 164 +
 .../get-dataset-openorca/index.md | 132 +
 .../get-dataset-squad-vocab/index.md | 105 +
 .../AI-ML-datasets/get-dataset-squad/index.md | 92 +
 .../get-preprocessed-dataset-criteo/index.md | 137 +
 .../get-preprocessed-dataset-generic/index.md | 80 +
 .../index.md | 301 +
 .../get-preprocessed-dataset-kits19/index.md | 175 +
 .../index.md | 164 +
 .../index.md | 287 +
 .../index.md | 129 +
 .../get-preprocessed-dataset-squad/index.md | 165 +
 .../repo/docs/scripts/AI-ML-datasets/index.md | 27 +
 .../get-google-saxml/index.md | 89 +
 .../get-onnxruntime-prebuilt/index.md | 111 +
 .../get-qaic-apps-sdk/index.md | 80 +
 .../get-qaic-platform-sdk/index.md | 80 +
 .../get-qaic-software-kit/index.md | 119 +
 .../AI-ML-frameworks/get-rocm/index.md | 86 +
 .../scripts/AI-ML-frameworks/get-tvm/index.md | 141 +
 .../docs/scripts/AI-ML-frameworks/index.md | 12 +
 .../index.md | 136 +
 .../AI-ML-frameworks/install-rocm/index.md | 91 +
 .../install-tensorflow-for-c/index.md | 89 +
 .../install-tensorflow-from-src/index.md | 135 +
 .../install-tflite-from-src/index.md | 100 +
 .../index.md | 101 +
 .../get-bert-squad-vocab/index.md | 80 +
 .../scripts/AI-ML-models/get-dlrm/index.md | 118 +
 .../get-ml-model-3d-unet-kits19/index.md | 130 +
 .../get-ml-model-bert-base-squad/index.md | 119 +
 .../get-ml-model-bert-large-squad/index.md | 188 +
 .../get-ml-model-dlrm-terabyte/index.md | 165 +
 .../get-ml-model-efficientnet-lite/index.md | 191 +
 .../AI-ML-models/get-ml-model-gptj/index.md | 194 +
 .../get-ml-model-huggingface-zoo/index.md | 136 +
 .../AI-ML-models/get-ml-model-llama2/index.md | 161 +
 .../get-ml-model-mobilenet/index.md | 288 +
 .../get-ml-model-neuralmagic-zoo/index.md | 271 +
 .../get-ml-model-resnet50/index.md | 228 +
 .../get-ml-model-retinanet-nvidia/index.md | 111 +
 .../get-ml-model-retinanet/index.md | 140 +
 .../AI-ML-models/get-ml-model-rnnt/index.md | 133 +
 .../get-ml-model-stable-diffusion/index.md | 177 +
 .../get-ml-model-tiny-resnet/index.md | 153 +
 .../index.md | 95 +
 .../AI-ML-models/get-tvm-model/index.md | 188 +
 .../repo/docs/scripts/AI-ML-models/index.md | 21 +
 .../calibrate-model-for.qaic/index.md | 186 +
 .../compile-model-for.qaic/index.md | 216 +
 .../docs/scripts/AI-ML-optimization/index.md | 3 +
 .../prune-bert-models/index.md | 132 +
 .../CM-Interface/get-cache-dir/index.md | 95 +
 .../repo/docs/scripts/CM-Interface/index.md | 1 +
 .../create-custom-cache-entry/index.md | 92 +
 .../repo/docs/scripts/CM-automation/index.md | 1 +
 .../scripts/CM-interface-prototyping/index.md | 2 +
 .../test-debug/index.md | 87 +
 .../test-mlperf-inference-retinanet/index.md | 86 +
 .../CUDA-automation/get-cuda-devices/index.md | 86 +
 .../scripts/CUDA-automation/get-cuda/index.md | 158 +
 .../CUDA-automation/get-cudnn/index.md | 115 +
 .../CUDA-automation/get-tensorrt/index.md | 119 +
 .../docs/scripts/CUDA-automation/index.md | 6 +
 .../install-cuda-package-manager/index.md | 87 +
 .../install-cuda-prebuilt/index.md | 138 +
 .../destroy-terraform/index.md | 87 +
 .../Cloud-automation/get-aws-cli/index.md | 87 +
 .../Cloud-automation/get-terraform/index.md | 87 +
 .../docs/scripts/Cloud-automation/index.md | 6 +
 .../Cloud-automation/install-aws-cli/index.md | 86 +
 .../install-terraform-from-src/index.md | 90 +
 .../Cloud-automation/run-terraform/index.md | 388 +
 .../scripts/Collective-benchmarking/index.md | 1 +
 .../launch-benchmark/index.md | 81 +
 .../Compiler-automation/get-aocl/index.md | 92 +
 .../Compiler-automation/get-cl/index.md | 87 +
 .../get-compiler-flags/index.md | 80 +
 .../get-compiler-rust/index.md | 86 +
 .../Compiler-automation/get-gcc/index.md | 87 +
 .../Compiler-automation/get-go/index.md | 87 +
 .../Compiler-automation/get-llvm/index.md | 101 +
 .../docs/scripts/Compiler-automation/index.md | 18 +
 .../install-gcc-src/index.md | 90 +
 .../install-ipex-from-src/index.md | 128 +
 .../install-llvm-prebuilt/index.md | 90 +
 .../install-llvm-src/index.md | 160 +
 .../install-onednn-from-src/index.md | 129 +
 .../install-onnxruntime-from-src/index.md | 125 +
 .../install-pytorch-from-src/index.md | 143 +
 .../install-pytorch-kineto-from-src/index.md | 135 +
 .../install-torchvision-from-src/index.md | 137 +
 .../install-tpp-pytorch-extension/index.md | 128 +
 .../install-transformers-from-src/index.md | 128 +
 .../scripts/Dashboard-automation/index.md | 1 +
 .../publish-results-to-dashboard/index.md | 86 +
 .../get-android-sdk/index.md | 109 +
 .../get-aria2/index.md | 97 +
 .../get-bazel/index.md | 87 +
 .../get-blis/index.md | 110 +
 .../get-brew/index.md | 86 +
 .../get-cmake/index.md | 86 +
 .../get-cmsis_5/index.md | 123 +
 .../get-docker/index.md | 86 +
 .../get-generic-sys-util/index.md | 214 +
 .../get-google-test/index.md | 89 +
 .../get-java/index.md | 124 +
 .../get-javac/index.md | 124 +
 .../get-lib-armnn/index.md | 92 +
 .../get-lib-dnnl/index.md | 91 +
 .../get-lib-protobuf/index.md | 107 +
 .../get-lib-qaic-api/index.md | 90 +
 .../get-nvidia-docker/index.md | 86 +
 .../get-openssl/index.md | 87 +
 .../get-rclone/index.md | 107 +
 .../get-sys-utils-cm/index.md | 115 +
 .../get-sys-utils-min/index.md | 80 +
 .../get-xilinx-sdk/index.md | 99 +
 .../get-zendnn/index.md | 86 +
 .../index.md | 30 +
 .../install-bazel/index.md | 90 +
 .../install-cmake-prebuilt/index.md | 89 +
 .../install-gflags/index.md | 90 +
 .../install-github-cli/index.md | 88 +
 .../index.md | 126 +
 .../install-numactl-from-src/index.md | 125 +
 .../install-openssl/index.md | 90 +
 .../benchmark-program/index.md | 114 +
 .../compile-program/index.md | 97 +
 .../convert-csv-to-md/index.md | 96 +
 .../copy-to-clipboard/index.md | 98 +
 .../create-conda-env/index.md | 101 +
 .../DevOps-automation/create-patch/index.md | 92 +
 .../DevOps-automation/detect-sudo/index.md | 86 +
 .../download-and-extract/index.md | 145 +
 .../DevOps-automation/download-file/index.md | 156 +
 .../download-torrent/index.md | 120 +
 .../DevOps-automation/extract-file/index.md | 120 +
 .../scripts/DevOps-automation/fail/index.md | 96 +
 .../DevOps-automation/get-conda/index.md | 115 +
 .../DevOps-automation/get-git-repo/index.md | 187 +
 .../DevOps-automation/get-github-cli/index.md | 86 +
 .../docs/scripts/DevOps-automation/index.md | 22 +
 .../DevOps-automation/pull-git-repo/index.md | 95 +
 .../push-csv-to-spreadsheet/index.md | 107 +
 .../set-device-settings-qaic/index.md | 114 +
 .../set-echo-off-win/index.md | 80 +
 .../set-performance-mode/index.md | 139 +
 .../DevOps-automation/set-sqlite-dir/index.md | 95 +
 .../DevOps-automation/tar-my-folder/index.md | 92 +
 .../build-docker-image/index.md | 120 +
 .../build-dockerfile/index.md | 145 +
 .../docs/scripts/Docker-automation/index.md | 4 +
 .../Docker-automation/prune-docker/index.md | 86 +
 .../run-docker-container/index.md | 130 +
 .../repo/docs/scripts/GUI/gui/index.md | 174 +
 .../cmx4mlops/repo/docs/scripts/GUI/index.md | 1 +
 .../get-ck-repo-mlops/index.md | 86 +
 .../scripts/Legacy-CK-support/get-ck/index.md | 86 +
 .../docs/scripts/Legacy-CK-support/index.md | 2 +
 .../add-custom-nvidia-system/index.md | 109 +
 .../index.md | 192 +
 .../index.md | 164 +
 .../index.md | 122 +
 .../index.md | 122 +
 .../generate-mlperf-tiny-report/index.md | 107 +
 .../generate-mlperf-tiny-submission/index.md | 81 +
 .../generate-nvidia-engine/index.md | 165 +
 .../index.md | 117 +
 .../get-mlperf-inference-loadgen/index.md | 144 +
 .../index.md | 105 +
 .../index.md | 118 +
 .../get-mlperf-inference-results-dir/index.md | 111 +
 .../get-mlperf-inference-results/index.md | 132 +
 .../get-mlperf-inference-src/index.md | 192 +
 .../index.md | 111 +
 .../get-mlperf-inference-sut-configs/index.md | 103 +
 .../index.md | 100 +
 .../get-mlperf-logging/index.md | 81 +
 .../get-mlperf-power-dev/index.md | 134 +
 .../index.md | 99 +
 .../get-mlperf-tiny-src/index.md | 99 +
 .../get-mlperf-training-nvidia-code/index.md | 112 +
 .../get-mlperf-training-src/index.md | 181 +
 .../get-nvidia-mitten/index.md | 90 +
 .../get-spec-ptd/index.md | 121 +
 .../index.md | 107 +
 .../import-mlperf-tiny-to-experiment/index.md | 91 +
 .../index.md | 97 +
 .../scripts/MLPerf-benchmark-support/index.md | 41 +
 .../install-mlperf-logging-from-src/index.md | 89 +
 .../prepare-training-data-bert/index.md | 120 +
 .../prepare-training-data-resnet/index.md | 129 +
 .../index.md | 96 +
 .../process-mlperf-accuracy/index.md | 177 +
 .../index.md | 109 +
 .../index.md | 326 +
 .../index.md | 138 +
 .../run-mlperf-power-client/index.md | 119 +
 .../run-mlperf-power-server/index.md | 116 +
 .../index.md | 135 +
 .../index.md | 98 +
 .../app-image-classification-onnx-py/index.md | 138 +
 .../index.md | 98 +
 .../index.md | 113 +
 .../index.md | 114 +
 .../app-stable-diffusion-onnx-py/index.md | 128 +
 .../index.md | 5 +
 .../app-mlperf-inference-dummy/index.md | 235 +
 .../app-mlperf-inference-intel/index.md | 347 +
 .../app-mlperf-inference-qualcomm/index.md | 368 +
 .../Modular-MLPerf-benchmarks/index.md | 3 +
 .../app-loadgen-generic-python/index.md | 213 +
 .../index.md | 236 +
 .../index.md | 204 +
 .../index.md | 392 +
 .../app-mlperf-inference/index.md | 488 ++
 .../benchmark-program-mlperf/index.md | 100 +
 .../index.md | 7 +
 .../run-mlperf-inference-app/index.md | 326 +
 .../app-mlperf-training-nvidia/index.md | 165 +
 .../app-mlperf-training-reference/index.md | 166 +
 .../index.md | 2 +
 .../app-image-corner-detection/index.md | 87 +
 .../Modular-application-pipeline/index.md | 1 +
 .../Platform-information/detect-cpu/index.md | 87 +
 .../Platform-information/detect-os/index.md | 86 +
 .../scripts/Platform-information/index.md | 2 +
 .../activate-python-venv/index.md | 88 +
 .../get-generic-python-lib/index.md | 421 ++
 .../Python-automation/get-python3/index.md | 111 +
 .../docs/scripts/Python-automation/index.md | 6 +
 .../install-generic-conda-package/index.md | 113 +
 .../install-python-src/index.md | 144 +
 .../install-python-venv/index.md | 103 +
 .../docs/scripts/Remote-automation/index.md | 1 +
 .../remote-run-commands/index.md | 117 +
 .../app-mlperf-inference-nvidia/index.md | 652 ++
 .../Reproduce-MLPerf-benchmarks/index.md | 4 +
 .../index.md | 137 +
 .../reproduce-mlperf-training-nvidia/index.md | 115 +
 .../index.md | 101 +
 .../get-ipol-src/index.md | 98 +
 .../index.md | 4 +
 .../process-ae-users/index.md | 95 +
 .../reproduce-ipol-paper-2022-439/index.md | 97 +
 .../index.md | 123 +
 .../repo/docs/scripts/Tests/index.md | 15 +
 .../scripts/Tests/print-any-text/index.md | 129 +
 .../Tests/print-croissant-desc/index.md | 106 +
 .../Tests/print-hello-world-java/index.md | 86 +
 .../Tests/print-hello-world-javac/index.md | 86 +
 .../Tests/print-hello-world-py/index.md | 86 +
 .../scripts/Tests/print-hello-world/index.md | 123 +
 .../Tests/print-python-version/index.md | 86 +
 .../docs/scripts/Tests/run-python/index.md | 95 +
 .../docs/scripts/Tests/test-cm-core/index.md | 87 +
 .../Tests/test-cm-script-pipeline/index.md | 90 +
 .../Tests/test-deps-conditions/index.md | 93 +
 .../Tests/test-deps-conditions2/index.md | 91 +
 .../index.md | 87 +
 .../Tests/test-set-sys-user-cm/index.md | 96 +
 .../scripts/Tests/upgrade-python-pip/index.md | 86 +
 .../create-fpgaconvnet-app-tinyml/index.md | 114 +
 .../create-fpgaconvnet-config-tinyml/index.md | 114 +
 .../flash-tinyml-binary/index.md | 119 +
 .../TinyML-automation/get-microtvm/index.md | 119 +
 .../TinyML-automation/get-zephyr-sdk/index.md | 93 +
 .../TinyML-automation/get-zephyr/index.md | 92 +
 .../docs/scripts/TinyML-automation/index.md | 6 +
 .../cmx4mlops/repo/docs/scripts/index.md | 30 +
 cmx4mlops/cmx4mlops/repo/mkdocs.yml | 77 +
 .../README.md | 93 +
 .../_cm.json | 16 +
 .../_cm.json | 17 +
 .../_cm.json | 17 +
 .../_cm.json | 17 +
 cmx4mlops/cmx4mlops/repo/requirements.txt | 6 +
 cmx4mlops/cmx4mlops/repo/script/COPYRIGHT.md | 3 +
 cmx4mlops/cmx4mlops/repo/script/README.md | 40 +
 .../script/activate-python-venv/COPYRIGHT.md | 3 +
 .../activate-python-venv/README-extra.md | 7 +
 .../script/activate-python-venv/README.md | 1 +
 .../repo/script/activate-python-venv/_cm.yaml | 18 +
 .../script/activate-python-venv/customize.py | 42 +
 .../repo/script/activate-python-venv/run.bat | 7 +
 .../repo/script/activate-python-venv/run.sh | 9 +
 .../add-custom-nvidia-system/COPYRIGHT.md | 3 +
 .../add-custom-nvidia-system/README-extra.md | 2 +
 .../script/add-custom-nvidia-system/README.md | 1 +
 .../script/add-custom-nvidia-system/_cm.yaml | 126 +
 .../add-custom-nvidia-system/customize.py | 35 +
 .../script/add-custom-nvidia-system/run.sh | 5 +
 .../COPYRIGHT.md | 3 +
 .../README-extra.md | 17 +
 .../README.md | 1 +
 .../app-image-classification-onnx-py/_cm.yaml | 120 +
 .../customize.py | 77 +
 .../img/computer_mouse.jpg | Bin 0 -> 41154 bytes
 .../requirements.txt | 0
 .../app-image-classification-onnx-py/run.bat | 29 +
 .../app-image-classification-onnx-py/run.sh | 37 +
 .../src/onnx_classify.py | 204 +
 .../tests/README.md | 14 +
 .../COPYRIGHT.md | 3 +
 .../README-extra.md | 3 +
 .../README.md | 1 +
 .../_cm.yaml | 27 +
 .../include/benchmark.h | 511 ++
 .../run.sh | 6 +
 .../src/classification.cpp | 107 +
 .../COPYRIGHT.md | 3 +
 .../README-extra.md | 16 +
 .../README.md | 1 +
 .../_cm.yaml | 46 +
 .../img/computer_mouse.jpg | Bin 0 -> 41154 bytes
 .../requirements.txt | 4 +
 .../app-image-classification-torch-py/run.bat | 20 +
 .../app-image-classification-torch-py/run.sh | 20 +
 .../src/pytorch_classify_preprocessed.py | 222 +
 .../COPYRIGHT.md | 3 +
 .../README-extra.md | 16 +
 .../README.md | 1 +
 .../_cm.yaml | 48 +
 .../img/computer_mouse.jpg | Bin 0 -> 41154 bytes
 .../requirements.txt | 7 +
 .../run.sh | 26 +
 .../src/classify.py | 302 +
 .../app-image-corner-detection/COPYRIGHT.md | 3 +
 .../README-extra.md | 32 +
 .../app-image-corner-detection/README.md | 1 +
 .../app-image-corner-detection/_cm.yaml | 32 +
 .../app-image-corner-detection/customize.py | 54 +
 .../script/app-image-corner-detection/run.sh | 6 +
 .../script/app-image-corner-detection/susan.c | 2161 ++++++
 .../app-loadgen-generic-python/COPYRIGHT.md | 3 +
 .../README-extra.md | 289 +
 .../app-loadgen-generic-python/README.md | 1 +
 .../app-loadgen-generic-python/_cm.yaml | 326 +
 .../app-loadgen-generic-python/customize.py | 117 +
 .../script/app-loadgen-generic-python/run.bat | 4 +
 .../script/app-loadgen-generic-python/run.sh | 4 +
 .../src/backend_onnxruntime.py | 92 +
 .../src/backend_pytorch.py | 132 +
 .../src/loadgen/harness.py | 77 +
 .../src/loadgen/model.py | 24 +
 .../src/loadgen/runners.py | 188 +
 .../app-loadgen-generic-python/src/main.py | 274 +
 .../app-loadgen-generic-python/src/utils.py | 18 +
 .../tests/modular-cm-containers/_common.bat | 7 +
 .../tests/modular-cm-containers/_common.sh | 10 +
 .../tests/modular-cm-containers/build.bat | 16 +
 .../tests/modular-cm-containers/build.sh | 18 +
 ...dgen-generic-python--ubuntu-cpu.Dockerfile | 96 +
 .../loadgen-generic-python-auto.Dockerfile | 33 +
 .../tests/modular-cm-containers/run.bat | 3 +
 .../tests/modular-cm-containers/run.sh | 3 +
 .../app-mlperf-inference-amd/COPYRIGHT.md | 3 +
 .../script/app-mlperf-inference-amd/_cm.yaml | 341 +
 .../app-mlperf-inference-amd/customize.py | 59 +
 .../app-mlperf-inference-amd/run-llama2.sh | 76 +
 .../script/app-mlperf-inference-amd/run.sh | 8 +
 .../COPYRIGHT.md | 3 +
 .../README.md | 1 +
 .../_cm.yaml | 282 +
 .../armnn/classification.cpp | 399 +
 .../customize.py | 120 +
 .../inc/benchmark.h | 488 ++
 .../src/classification.cpp | 362 +
 .../app-mlperf-inference-dummy/COPYRIGHT.md | 3 +
 .../app-mlperf-inference-dummy/_cm.yaml | 291 +
 .../app-mlperf-inference-dummy/customize.py | 82 +
 .../script/app-mlperf-inference-dummy/run.sh | 8 +
 .../app-mlperf-inference-intel/COPYRIGHT.md | 3 +
 .../app-mlperf-inference-intel/README.md | 1 +
 .../app-mlperf-inference-intel/_cm.yaml | 1216 +++
 .../build_bert_harness.sh | 51 +
 .../build_gptj_harness.sh | 50 +
 .../build_resnet50_harness.sh | 58 +
 .../build_retinanet_harness.sh | 44 +
 .../build_sdxl_harness.sh | 27 +
 .../calibrate_dlrm_v2_model.sh | 19 +
 .../calibrate_gptj_int4_model.sh | 37 +
 .../compile_resnet50.sh | 9 +
 .../compile_retinanet.sh | 11 +
 .../app-mlperf-inference-intel/customize.py | 261 +
 .../prepare_3d-unet_data_model.sh | 19 +
 .../prepare_imagenet_calibration.sh | 7 +
 .../run_3d-unet_harness.sh | 49 +
 .../run_bert_harness.sh | 26 +
 .../run_dlrm_v2_harness.sh | 60 +
 .../run_gptj_harness_v3_1.sh | 51 +
 .../run_gptj_harness_v4_0.sh | 75 +
 .../run_resnet50_harness.sh | 55 +
 .../run_retinanet_harness.sh | 50 +
 .../run_sdxl_harness.sh | 49 +
 .../CONTRIBUTING.md | 29 +
 .../COPYRIGHT.md | 3 +
 .../README-extra.md | 83 +
 .../README.md | 1 +
 .../_cm.yaml | 260 +
 .../customize.py | 119 +
 .../inc/backend.h | 304 +
 .../inc/common.h | 5 +
 .../inc/device.h | 64 +
 .../inc/gpu_device.h | 53 +
 .../inc/model.h | 124 +
 .../inc/npy.h | 143 +
 .../inc/onnxruntime_backend.h | 132 +
 .../inc/sample_library.h | 181 +
 .../inc/system.h | 135 +
 .../inc/tflite_backend.h | 132 +
 .../src/main.cpp | 214 +
 .../tests/win.bat | 8 +
 .../COPYRIGHT.md | 3 +
 .../README-about.md | 7 +
 .../README-extra.md | 235 +
 .../README.md | 1 +
 .../_cm.yaml | 1387 ++++
 .../customize.py | 525 ++
 .../nvidia/retinanet.py | 620 ++
 .../app-mlperf-inference-nvidia/COPYRIGHT.md | 3 +
 .../README-about.md | 137 +
 .../app-mlperf-inference-nvidia/README.md | 1 +
 .../app-mlperf-inference-nvidia/_cm.yaml | 1853 +++++
 .../app-mlperf-inference-nvidia/customize.py | 722 ++
 .../script/app-mlperf-inference-nvidia/run.sh | 8 +
 .../COPYRIGHT.md | 3 +
 .../app-mlperf-inference-qualcomm/README.md | 1 +
 .../README_aws_dl2q.24xlarge.md | 97 +
 .../app-mlperf-inference-qualcomm/_cm.yaml | 775 ++
 .../customize.py | 247 +
 .../app-mlperf-inference-qualcomm/run.sh | 8 +
 .../app-mlperf-inference-redhat/COPYRIGHT.md | 3 +
 .../app-mlperf-inference-redhat/_cm.yaml | 308 +
 .../app-mlperf-inference-redhat/customize.py | 116 +
 .../script/app-mlperf-inference-redhat/run.sh | 8 +
 .../script/app-mlperf-inference/COPYRIGHT.md | 3 +
 .../app-mlperf-inference/README-about.md | 22 +
 .../app-mlperf-inference/README-extra.md | 131 +
 .../script/app-mlperf-inference/README.md | 1 +
 .../repo/script/app-mlperf-inference/_cm.yaml | 1787 +++++
 .../app-mlperf-inference/build_dockerfiles.py | 107 +
 .../script/app-mlperf-inference/customize.py | 665 ++
 .../repo/script/app-mlperf-inference/run.sh | 8 +
 .../app-mlperf-inference/run_config.yml | 11 +
 .../app-mlperf-inference/verify_accuracy.sh | 4 +
 .../app-mlperf-training-nvidia/COPYRIGHT.md | 3 +
 .../app-mlperf-training-nvidia/README.md | 1 +
 .../app-mlperf-training-nvidia/_cm.yaml | 156 +
 .../app-mlperf-training-nvidia/customize.py | 82 +
 .../run-bert-training.sh | 9 +
 .../script/app-mlperf-training-nvidia/run.sh | 10 +
 .../COPYRIGHT.md | 3 +
 .../app-mlperf-training-reference/README.md | 1 +
 .../app-mlperf-training-reference/_cm.yaml | 150 +
 .../customize.py | 69 +
 .../run-bert-training.sh | 27 +
 .../app-mlperf-training-reference/run.sh | 10 +
 .../app-stable-diffusion-onnx-py/COPYRIGHT.md | 3 +
 .../README-extra.md | 30 +
 .../app-stable-diffusion-onnx-py/README.md | 1 +
 .../app-stable-diffusion-onnx-py/_cm.yaml | 110 +
 .../app-stable-diffusion-onnx-py/process.py | 36 +
 .../app-stable-diffusion-onnx-py/run.bat | 2 +
 .../app-stable-diffusion-onnx-py/run.sh | 4 +
 .../authenticate-github-cli/COPYRIGHT.md | 3 +
 .../script/authenticate-github-cli/README.md | 1 +
 .../script/authenticate-github-cli/_cm.yaml | 16 +
 .../authenticate-github-cli/customize.py | 45 +
 .../script/authenticate-github-cli/run.bat | 19 +
 .../script/authenticate-github-cli/run.sh | 18 +
 .../COPYRIGHT.md | 3 +
 .../README.md | 1 +
 .../_cm.yaml | 318 +
 .../customize.py | 204 +
 .../run-template.sh | 90 +
 .../benchmark-program-mlperf/COPYRIGHT.md | 3 +
 .../script/benchmark-program-mlperf/README.md | 1 +
 .../script/benchmark-program-mlperf/_cm.yaml | 35 +
 .../benchmark-program-mlperf/customize.py | 75 +
 .../script/benchmark-program/COPYRIGHT.md | 3 +
 .../script/benchmark-program/README-extra.md | 3 +
 .../repo/script/benchmark-program/README.md | 1 +
 .../repo/script/benchmark-program/_cm.yaml | 38 +
 .../script/benchmark-program/customize.py | 135 +
 .../script/benchmark-program/run-ubuntu.sh | 10 +
 .../repo/script/benchmark-program/run.bat | 39 +
 .../repo/script/benchmark-program/run.sh | 102 +
 .../script/build-docker-image/COPYRIGHT.md | 3 +
 .../script/build-docker-image/README-extra.md | 16 +
 .../repo/script/build-docker-image/README.md | 1 +
 .../repo/script/build-docker-image/_cm.yaml | 45 +
 .../script/build-docker-image/customize.py | 147 +
 .../build-docker-image/examples/0-common.bat | 21 +
 .../examples/0-generate.bat | 9 +
 .../build-docker-image/examples/1-build.bat | 8 +
 .../examples/2-run-cm-command1.bat | 3 +
 .../examples/2-run-cm-command2.bat | 3 +
 .../examples/2-run-cm-command3.bat | 3 +
 .../examples/2-run-cm-command4.bat | 3 +
 .../examples/2-run-cm-command5.bat | 3 +
 .../examples/2-run-interactive1.bat | 3 +
 .../examples/2-run-interactive2.bat | 3 +
 .../examples/3-push-to-docker-hub.bat | 3 +
 .../Dockerfile.cm-base-ubuntu-22.04-20230804 | 38 +
 .../Dockerfile.cm-base-ubuntu-23.04-20230804 | 38 +
 .../Dockerfile.cm-base-ubuntu-23.04-latest | 38 +
 ...classification-onnx-py-ubuntu-23.04-latest | 45 +
 .../build-docker-image/examples/README.md | 1 +
 .../examples/computer_mouse.jpg | Bin 0 -> 41154 bytes
 ...cm-script-app-image-classification-onnx-py | 6 +
 ...e-classification-python-onnx-with-file.bat | 6 +
 ...ge-classification-python-onnx-with-file.sh | 5 +
 ...un-cm-image-classification-python-onnx.bat | 1 +
 ...run-cm-image-classification-python-onnx.sh | 3 +
 .../repo/script/build-docker-image/run.bat | 14 +
 .../repo/script/build-docker-image/run.sh | 15 +
 .../repo/script/build-dockerfile/COPYRIGHT.md | 3 +
 .../script/build-dockerfile/README-extra.md | 27 +
 .../repo/script/build-dockerfile/README.md | 1 +
 .../repo/script/build-dockerfile/_cm.yaml | 71 +
 .../repo/script/build-dockerfile/customize.py | 452 ++
 .../script/build-dockerfile/dockerinfo.json | 87 +
 .../COPYRIGHT.md | 3 +
 .../README-extra.md | 2 +
 .../README.md | 1 +
 .../_cm.yaml | 398 +
 .../customize.py | 59 +
 .../run.sh | 16 +
 .../calibrate-model-for.qaic/COPYRIGHT.md | 3 +
 .../script/calibrate-model-for.qaic/README.md | 1 +
 .../script/calibrate-model-for.qaic/_cm.yaml | 146 +
 .../calibrate-model-for.qaic/customize.py | 231 +
 .../script/calibrate-model-for.qaic/run.sh | 28 +
 .../COPYRIGHT.md | 3 +
 .../_cm.yaml | 45 +
 .../customize.py | 66 +
 .../run.bat | 1 +
 .../run.sh | 17 +
 .../compile-model-for.qaic/COPYRIGHT.md | 3 +
 .../script/compile-model-for.qaic/README.md | 1 +
 .../script/compile-model-for.qaic/_cm.yaml | 257 +
 .../compile-model-for.qaic/customize.py | 100 +
 .../repo/script/compile-model-for.qaic/run.sh | 28 +
 .../repo/script/compile-program/COPYRIGHT.md | 3 +
 .../script/compile-program/README-extra.md | 3 +
 .../repo/script/compile-program/README.md | 1 +
 .../repo/script/compile-program/_cm.yaml | 23 +
 .../repo/script/compile-program/customize.py | 77 +
 .../repo/script/compile-program/run.bat | 35 +
 .../repo/script/compile-program/run.sh | 72 +
 .../script/convert-csv-to-md/COPYRIGHT.md | 3 +
 .../repo/script/convert-csv-to-md/README.md | 1 +
 .../repo/script/convert-csv-to-md/_cm.yaml | 34 +
 .../script/convert-csv-to-md/customize.py | 42 +
 .../repo/script/convert-csv-to-md/process.py | 10 +
 .../repo/script/convert-csv-to-md/run.bat | 1 +
 .../repo/script/convert-csv-to-md/run.sh | 28 +
 .../COPYRIGHT.md | 3 +
 .../README.md | 1 +
 .../_cm.yaml | 29 +
 .../customize.py | 39 +
 .../run.sh | 2 +
 .../script/copy-to-clipboard/COPYRIGHT.md | 3 +
 .../repo/script/copy-to-clipboard/README.md | 1 +
 .../repo/script/copy-to-clipboard/_cm.yaml | 32 +
 .../repo/script/copy-to-clipboard/code.py | 13 +
 .../repo/script/copy-to-clipboard/run.bat | 4 +
 .../repo/script/copy-to-clipboard/run.sh | 4 +
 .../repo/script/create-conda-env/COPYRIGHT.md | 3 +
 .../repo/script/create-conda-env/README.md | 1 +
 .../repo/script/create-conda-env/_cm.yaml | 30 +
 .../repo/script/create-conda-env/customize.py | 44 +
 .../repo/script/create-conda-env/run.sh | 7 +
 .../create-custom-cache-entry/COPYRIGHT.md | 3 +
 .../create-custom-cache-entry/README.md | 1 +
 .../script/create-custom-cache-entry/_cm.yaml | 27 +
 .../create-custom-cache-entry/customize.py | 59 +
 .../COPYRIGHT.md | 3 +
 .../create-fpgaconvnet-app-tinyml/README.md | 1 +
 .../create-fpgaconvnet-app-tinyml/_cm.yaml | 44 +
 .../customize.py | 54 +
 .../create-fpgaconvnet-app-tinyml/run.sh | 26 +
 .../COPYRIGHT.md | 3 +
 .../README.md | 1 +
 .../create-fpgaconvnet-config-tinyml/_cm.yaml | 40 +
 .../customize.py | 75 +
 .../create-fpgaconvnet-config-tinyml/run.sh | 26 +
 .../repo/script/create-patch/COPYRIGHT.md | 3 +
 .../repo/script/create-patch/README-extra.md | 5 +
 .../repo/script/create-patch/README.md | 1 +
 .../repo/script/create-patch/_cm.yaml | 22 +
 .../repo/script/create-patch/customize.py | 68 +
 .../script/destroy-terraform/COPYRIGHT.md | 3 +
 .../script/destroy-terraform/README-extra.md | 1 +
 .../repo/script/destroy-terraform/README.md | 1 +
 .../repo/script/destroy-terraform/_cm.yaml | 13 +
 .../script/destroy-terraform/customize.py | 29 +
 .../repo/script/destroy-terraform/run.bat | 1 +
 .../repo/script/destroy-terraform/run.sh | 6 +
 .../repo/script/detect-cpu/COPYRIGHT.md | 3 +
 .../repo/script/detect-cpu/README-extra.md | 17 +
 .../repo/script/detect-cpu/README.md | 1 +
 .../cmx4mlops/repo/script/detect-cpu/_cm.yaml | 22 +
 .../repo/script/detect-cpu/customize.py | 203 +
 .../cmx4mlops/repo/script/detect-cpu/run.bat | 2 +
 .../cmx4mlops/repo/script/detect-cpu/run.sh | 48 +
 .../repo/script/detect-os/COPYRIGHT.md | 3 +
 .../cmx4mlops/repo/script/detect-os/README.md | 1 +
 .../cmx4mlops/repo/script/detect-os/_cm.yaml | 30 +
 .../repo/script/detect-os/customize.py | 120 +
 .../cmx4mlops/repo/script/detect-os/run.bat | 1 +
 .../cmx4mlops/repo/script/detect-os/run.sh | 21 +
 .../repo/script/detect-os/run_config.yml | 6 +
 .../repo/script/detect-sudo/COPYRIGHT.md | 3 +
 .../repo/script/detect-sudo/README.md | 1 +
 .../repo/script/detect-sudo/_cm.yaml | 17 +
 .../repo/script/detect-sudo/customize.py | 172 +
 .../cmx4mlops/repo/script/detect-sudo/run.sh | 27 +
 .../script/download-and-extract/COPYRIGHT.md | 3 +
 .../download-and-extract/README-extra.md | 109 +
 .../script/download-and-extract/README.md | 1 +
 .../repo/script/download-and-extract/_cm.yaml | 117 +
 .../script/download-and-extract/customize.py | 84 +
 .../tests/download-and-extract-file.bat | 1 +
 .../tests/download-and-extract-file2.bat | 1 +
 .../repo/script/download-file/COPYRIGHT.md | 3 +
 .../repo/script/download-file/README-extra.md | 98 +
 .../repo/script/download-file/README.md | 1 +
 .../repo/script/download-file/_cm.yaml | 79 +
 .../repo/script/download-file/customize.py | 346 +
 .../repo/script/download-file/run.bat | 56 +
 .../repo/script/download-file/run.sh | 58 +
 .../download-file/tests/download-file.bat | 2 +
 .../download-file/tests/download-file2.bat | 1 +
 .../repo/script/download-torrent/COPYRIGHT.md | 3 +
 .../repo/script/download-torrent/README.md | 1 +
 .../repo/script/download-torrent/_cm.yaml | 29 +
 .../repo/script/download-torrent/customize.py | 48 +
 .../repo/script/download-torrent/run.sh | 34 +
 .../draw-graph-from-json-data/COPYRIGHT.md | 3 +
 .../script/draw-graph-from-json-data/_cm.yaml | 21 +
 .../draw-graph-from-json-data/customize.py | 43 +
 .../process-cm-deps.py | 122 +
 .../script/draw-graph-from-json-data/run.bat | 1 +
 .../script/draw-graph-from-json-data/run.sh | 17 +
 .../repo/script/dump-pip-freeze/COPYRIGHT.md | 3 +
 .../repo/script/dump-pip-freeze/README.md | 1 +
 .../repo/script/dump-pip-freeze/_cm.yaml | 16 +
 .../repo/script/dump-pip-freeze/customize.py | 65 +
 .../repo/script/dump-pip-freeze/dump.py | 23 +
 .../repo/script/dump-pip-freeze/run.bat | 4 +
 .../repo/script/dump-pip-freeze/run.sh | 28 +
 .../repo/script/extract-file/COPYRIGHT.md | 3 +
 .../repo/script/extract-file/README-extra.md | 115 +
 .../repo/script/extract-file/README.md | 1 +
 .../repo/script/extract-file/_cm.yaml | 47 +
 .../repo/script/extract-file/customize.py | 228 +
 .../repo/script/extract-file/run.bat | 39 +
 .../cmx4mlops/repo/script/extract-file/run.sh | 21 +
 .../cmx4mlops/repo/script/fail/COPYRIGHT.md | 3 +
 .../repo/script/fail/README-extra.md | 1 +
 .../cmx4mlops/repo/script/fail/README.md | 1 +
 cmx4mlops/cmx4mlops/repo/script/fail/_cm.yaml | 18 +
 .../cmx4mlops/repo/script/fail/customize.py | 41 +
 .../script/flash-tinyml-binary/COPYRIGHT.md | 3 +
 .../flash-tinyml-binary/README-extra.md | 16 +
 .../repo/script/flash-tinyml-binary/README.md | 1 +
 .../repo/script/flash-tinyml-binary/_cm.yaml | 38 +
 .../script/flash-tinyml-binary/customize.py | 33 +
 .../repo/script/flash-tinyml-binary/run.sh | 11 +
 .../script/generate-docs-for-all-scripts.cmd | 1 +
 .../COPYRIGHT.md | 3 +
 .../README-extra.md | 12 +
 .../README.md | 1 +
 .../_cm.yaml | 119 +
 .../customize.py | 744 ++
 .../default_files/analyzer_table.md | 3 +
 .../default_files/power_settings.md | 1 +
 .../sample-cm-sut-info.json | 7 +
 .../COPYRIGHT.md | 3 +
 .../README.md | 1 +
 .../_cm.yaml | 96 +
 .../customize.py | 587 ++
 .../generate-mlperf-tiny-report/COPYRIGHT.md | 3 +
 .../README-extra.md | 55 +
 .../generate-mlperf-tiny-report/README.md | 1 +
 .../generate-mlperf-tiny-report/_cm.yaml | 42 +
 .../generate-mlperf-tiny-report/customize.py | 97 +
 .../run_submission_checker.bat | 10 +
 .../run_submission_checker.sh | 12 +
 .../COPYRIGHT.md | 3 +
 .../README-extra.md | 3 +
 .../generate-mlperf-tiny-submission/README.md | 1 +
 .../generate-mlperf-tiny-submission/_cm.yaml | 26 +
 .../customize.py | 214 +
 .../generate-nvidia-engine/COPYRIGHT.md | 3 +
 .../generate-nvidia-engine/README-about.md | 1 +
 .../script/generate-nvidia-engine/README.md | 1 +
 .../script/generate-nvidia-engine/_cm.yaml | 152 +
 .../generate-nvidia-engine/customize.py | 48 +
 .../repo/script/generate-nvidia-engine/run.sh | 7 +
 .../repo/script/get-android-sdk/COPYRIGHT.md | 3 +
 .../script/get-android-sdk/README-extra.md | 3 +
 .../repo/script/get-android-sdk/README.md | 1 +
 .../repo/script/get-android-sdk/_cm.yaml | 32 +
 .../repo/script/get-android-sdk/customize.py | 194 +
 .../get-android-sdk/prepare-sdk-manager.bat | 27 +
 .../get-android-sdk/prepare-sdk-manager.sh | 26 +
 .../repo/script/get-aocl/COPYRIGHT.md | 3 +
 .../repo/script/get-aocl/README-extra.md | 0
 .../cmx4mlops/repo/script/get-aocl/README.md | 1 +
 .../cmx4mlops/repo/script/get-aocl/_cm.yaml | 33 +
 .../repo/script/get-aocl/customize.py | 47 +
 .../cmx4mlops/repo/script/get-aocl/run.sh | 9 +
 .../repo/script/get-aria2/COPYRIGHT.md | 3 +
 .../repo/script/get-aria2/README-extra.md | 9 +
 .../cmx4mlops/repo/script/get-aria2/README.md | 1 +
 .../cmx4mlops/repo/script/get-aria2/_cm.yaml | 37 +
 .../repo/script/get-aria2/customize.py | 146 +
 .../repo/script/get-aria2/install.bat | 9 +
 .../repo/script/get-aria2/install.sh | 47 +
 .../cmx4mlops/repo/script/get-aria2/run.bat | 4 +
 .../cmx4mlops/repo/script/get-aria2/run.sh | 6 +
 .../repo/script/get-aws-cli/COPYRIGHT.md | 3 +
 .../repo/script/get-aws-cli/README-extra.md | 9 +
 .../repo/script/get-aws-cli/README.md | 1 +
 .../repo/script/get-aws-cli/_cm.yaml | 20 +
 .../repo/script/get-aws-cli/customize.py | 75 +
 .../cmx4mlops/repo/script/get-aws-cli/run.sh | 3 +
 .../repo/script/get-bazel/COPYRIGHT.md | 3 +
 .../repo/script/get-bazel/README-extra.md | 9 +
 .../cmx4mlops/repo/script/get-bazel/README.md | 1 +
 .../cmx4mlops/repo/script/get-bazel/_cm.yaml | 19 +
 .../repo/script/get-bazel/customize.py | 76 +
 .../cmx4mlops/repo/script/get-bazel/run.bat | 2 +
 .../cmx4mlops/repo/script/get-bazel/run.sh | 8 +
 .../repo/script/get-blis/COPYRIGHT.md | 3 +
 .../repo/script/get-blis/README-extra.md | 0
 .../cmx4mlops/repo/script/get-blis/README.md | 1 +
 .../cmx4mlops/repo/script/get-blis/_cm.yaml | 49 +
 .../repo/script/get-blis/customize.py | 46 +
 .../cmx4mlops/repo/script/get-blis/run.bat | 1 +
 .../cmx4mlops/repo/script/get-blis/run.sh | 12 +
 .../repo/script/get-brew/COPYRIGHT.md | 3 +
 .../cmx4mlops/repo/script/get-brew/README.md | 1 +
 .../cmx4mlops/repo/script/get-brew/_cm.yaml | 19 +
 .../cmx4mlops/repo/script/get-brew/run.sh | 3 +
 .../repo/script/get-cache-dir/COPYRIGHT.md | 3 +
 .../repo/script/get-cache-dir/README.md | 1 +
 .../repo/script/get-cache-dir/_cm.yaml | 27 +
 .../repo/script/get-cache-dir/customize.py | 42 +
 .../script/get-ck-repo-mlops/COPYRIGHT.md | 3 +
 .../repo/script/get-ck-repo-mlops/README.md | 1 +
 .../repo/script/get-ck-repo-mlops/_cm.yaml | 12 +
 .../repo/script/get-ck-repo-mlops/run.bat | 1 +
 .../repo/script/get-ck-repo-mlops/run.sh | 4 +
 .../cmx4mlops/repo/script/get-ck/COPYRIGHT.md | 3 +
 .../cmx4mlops/repo/script/get-ck/README.md | 1 +
 .../cmx4mlops/repo/script/get-ck/_cm.yaml | 10 +
 .../cmx4mlops/repo/script/get-ck/run.bat | 1 +
 cmx4mlops/cmx4mlops/repo/script/get-ck/run.sh | 3 +
 .../cmx4mlops/repo/script/get-cl/COPYRIGHT.md | 3 +
 .../repo/script/get-cl/README-extra.md | 7 +
 .../cmx4mlops/repo/script/get-cl/README.md | 1 +
 .../cmx4mlops/repo/script/get-cl/_cm.yaml | 25 +
 .../cmx4mlops/repo/script/get-cl/customize.py | 158 +
 .../cmx4mlops/repo/script/get-cl/run.bat | 3 +
 .../repo/script/get-cmake/COPYRIGHT.md | 3 +
 .../cmx4mlops/repo/script/get-cmake/README.md | 1 +
 .../cmx4mlops/repo/script/get-cmake/_cm.yaml | 26 +
 .../repo/script/get-cmake/customize.py | 76 +
 .../cmx4mlops/repo/script/get-cmake/run.bat | 2 +
 .../cmx4mlops/repo/script/get-cmake/run.sh | 5 +
 .../repo/script/get-cmsis_5/COPYRIGHT.md | 3 +
 .../repo/script/get-cmsis_5/README-extra.md | 5 +
 .../repo/script/get-cmsis_5/README.md | 1 +
 .../repo/script/get-cmsis_5/_cm.yaml | 38 +
 .../repo/script/get-cmsis_5/customize.py | 38 +
 .../cmx4mlops/repo/script/get-cmsis_5/run.sh | 21 +
 .../script/get-compiler-flags/COPYRIGHT.md | 3 +
 .../repo/script/get-compiler-flags/README.md | 1 +
 .../repo/script/get-compiler-flags/_cm.yaml | 22 +
 .../script/get-compiler-flags/customize.py | 76 +
 .../script/get-compiler-rust/COPYRIGHT.md | 3 +
 .../repo/script/get-compiler-rust/README.md | 1 +
 .../repo/script/get-compiler-rust/_cm.yaml | 19 +
 .../script/get-compiler-rust/customize.py | 38 +
 .../repo/script/get-compiler-rust/run.sh | 7 +
 .../repo/script/get-conda/COPYRIGHT.md | 3 +
 .../cmx4mlops/repo/script/get-conda/README.md | 1 +
 .../cmx4mlops/repo/script/get-conda/_cm.yaml | 37 +
 .../repo/script/get-conda/customize.py | 120 +
 .../repo/script/get-conda/install.bat | 9 +
 .../repo/script/get-conda/install.sh | 18 +
 .../cmx4mlops/repo/script/get-conda/run.bat | 1 +
 .../cmx4mlops/repo/script/get-conda/run.sh | 3 +
 .../repo/script/get-croissant/COPYRIGHT.md | 3 +
 .../repo/script/get-croissant/README.md | 1 +
 .../repo/script/get-croissant/_cm.yaml | 30 +
 .../repo/script/get-croissant/customize.py | 28 +
 .../repo/script/get-croissant/run.bat | 20 +
 .../repo/script/get-croissant/run.sh | 21 +
 .../repo/script/get-cuda-devices/COPYRIGHT.md | 3 +
 .../repo/script/get-cuda-devices/README.md | 1 +
 .../repo/script/get-cuda-devices/_cm.yaml | 64 +
 .../repo/script/get-cuda-devices/customize.py | 77 +
 .../repo/script/get-cuda-devices/detect.py | 47 +
 .../repo/script/get-cuda-devices/detect.sh | 4 +
 .../get-cuda-devices/print_cuda_devices.cu | 74 +
 .../repo/script/get-cuda-devices/run.bat | 33 +
 .../repo/script/get-cuda-devices/run.sh | 35 +
 .../repo/script/get-cuda/COPYRIGHT.md | 3 +
 .../repo/script/get-cuda/README-about.md | 6 +
 .../repo/script/get-cuda/README-extra.md | 44 +
 .../cmx4mlops/repo/script/get-cuda/README.md | 1 +
 .../cmx4mlops/repo/script/get-cuda/_cm.yaml | 110 +
 .../repo/script/get-cuda/customize.py | 242 +
 .../cmx4mlops/repo/script/get-cuda/run.bat | 3 +
 .../cmx4mlops/repo/script/get-cuda/run.sh | 14 +
 .../repo/script/get-cudnn/COPYRIGHT.md | 3 +
 .../repo/script/get-cudnn/README-extra.md | 3 +
 .../cmx4mlops/repo/script/get-cudnn/README.md | 1 +
 .../cmx4mlops/repo/script/get-cudnn/_cm.yaml | 55 +
 .../repo/script/get-cudnn/customize.py | 213 +
 .../cmx4mlops/repo/script/get-cudnn/run.sh | 12 +
 .../script/get-dataset-cifar10/COPYRIGHT.md | 3 +
 .../repo/script/get-dataset-cifar10/README.md | 1 +
 .../repo/script/get-dataset-cifar10/_cm.yaml | 36 +
 .../script/get-dataset-cifar10/customize.py | 29 +
 .../get-dataset-cifar10/requirements.txt | 47 +
 .../repo/script/get-dataset-cifar10/run.bat | 48 +
 .../repo/script/get-dataset-cifar10/run.sh | 50 +
 .../script/get-dataset-cnndm/COPYRIGHT.md | 3 +
 .../repo/script/get-dataset-cnndm/README.md | 1 +
 .../repo/script/get-dataset-cnndm/_cm.yaml | 55 +
 .../script/get-dataset-cnndm/customize.py | 47 +
 .../script/get-dataset-cnndm/run-intel.sh | 15 +
 .../repo/script/get-dataset-cnndm/run.sh | 21 +
 .../repo/script/get-dataset-coco/COPYRIGHT.md | 3 +
 .../script/get-dataset-coco/README-extra.md | 95 +
 .../repo/script/get-dataset-coco/README.md | 1 +
 .../repo/script/get-dataset-coco/_cm.yaml | 97 +
 .../repo/script/get-dataset-coco/customize.py | 224 +
 .../script/get-dataset-coco2014/COPYRIGHT.md | 3 +
 .../script/get-dataset-coco2014/README.md | 1 +
 .../repo/script/get-dataset-coco2014/_cm.yaml | 94 +
 .../script/get-dataset-coco2014/customize.py | 51 +
 .../repo/script/get-dataset-coco2014/run.bat | 21 +
 .../repo/script/get-dataset-coco2014/run.sh | 44 +
 .../script/get-dataset-criteo/COPYRIGHT.md | 3 +
 .../script/get-dataset-criteo/README-extra.md | 9 +
 .../repo/script/get-dataset-criteo/README.md | 1 +
 .../repo/script/get-dataset-criteo/_cm.yaml | 26 +
 .../repo/script/get-dataset-criteo/run.sh | 26 +
 .../get-dataset-imagenet-aux/COPYRIGHT.md | 3 +
 .../script/get-dataset-imagenet-aux/README.md | 1 +
 .../script/get-dataset-imagenet-aux/_cm.yaml | 52 +
 .../COPYRIGHT.md | 3 +
 .../README.md | 1 +
 .../get-dataset-imagenet-calibration/_cm.yaml | 46 +
 .../get-dataset-imagenet-helper/COPYRIGHT.md | 3 +
 .../get-dataset-imagenet-helper/README.md | 1 +
 .../get-dataset-imagenet-helper/_cm.yaml | 14 +
 .../get-dataset-imagenet-helper/customize.py | 24 +
 .../imagenet_helper/__init__.py | 165 +
 .../get-dataset-imagenet-train/COPYRIGHT.md | 3 +
 .../get-dataset-imagenet-train/README.md | 1 +
 .../get-dataset-imagenet-train/_cm.yaml | 48 +
 .../get-dataset-imagenet-train/customize.py | 80 +
 .../script/get-dataset-imagenet-train/run.sh | 3 +
 .../get-dataset-imagenet-val/COPYRIGHT.md | 3 +
 .../get-dataset-imagenet-val/README-extra.md | 28 +
 .../script/get-dataset-imagenet-val/README.md | 1 +
 .../script/get-dataset-imagenet-val/_cm.yaml | 101 +
 .../get-dataset-imagenet-val/customize.py | 103 +
 .../script/get-dataset-imagenet-val/run.bat | 17 +
 .../script/get-dataset-kits19/COPYRIGHT.md | 3 +
 .../repo/script/get-dataset-kits19/README.md | 1 +
 .../repo/script/get-dataset-kits19/_cm.yaml | 60 +
 .../script/get-dataset-kits19/customize.py | 52 +
 .../repo/script/get-dataset-kits19/run.sh | 43 +
 .../get-dataset-librispeech/COPYRIGHT.md | 3 +
 .../get-dataset-librispeech/README-extra.md | 26 +
 .../script/get-dataset-librispeech/README.md | 1 +
 .../script/get-dataset-librispeech/_cm.yaml | 62 +
 .../get-dataset-librispeech/customize.py | 33 +
 .../script/get-dataset-librispeech/run.sh | 8 +
 .../COPYRIGHT.md | 3 +
 .../_cm.yaml | 380 +
 .../customize.py | 69 +
 .../get-dataset-mlperf-inference-igbh/run.sh | 24 +
 .../COPYRIGHT.md | 3 +
 .../_cm.yaml | 38 +
 .../customize.py | 37 +
 .../generate-test-dataset.py | 58 +
 .../run.sh | 5 +
 .../COPYRIGHT.md | 3 +
 .../README.md | 1 +
 .../_cm.yaml | 35 +
 .../customize.py | 35 +
 .../COPYRIGHT.md | 3 +
 .../README.md | 1 +
 .../_cm.yaml | 62 +
 .../customize.py | 41 +
 .../filter.py | 25 +
 .../run-filter.sh | 6 +
 .../get-dataset-openimages/COPYRIGHT.md | 3 +
 .../get-dataset-openimages/README-extra.md | 2 +
 .../script/get-dataset-openimages/README.md | 1 +
 .../script/get-dataset-openimages/_cm.yaml | 126 +
 .../get-dataset-openimages/customize.py | 112 +
 .../script/get-dataset-openimages/run.bat | 24 +
 .../repo/script/get-dataset-openimages/run.sh | 40 +
 .../script/get-dataset-openorca/COPYRIGHT.md | 3 +
 .../script/get-dataset-openorca/README.md | 1 +
 .../repo/script/get-dataset-openorca/_cm.yaml | 55 +
 .../script/get-dataset-openorca/customize.py | 35 +
 .../get-dataset-squad-vocab/COPYRIGHT.md | 3 +
 .../script/get-dataset-squad-vocab/README.md | 1 +
 .../script/get-dataset-squad-vocab/_cm.yaml | 37 +
 .../get-dataset-squad-vocab/customize.py | 30 +
 .../script/get-dataset-squad/COPYRIGHT.md | 3 +
 .../script/get-dataset-squad/README-extra.md | 20 +
 .../repo/script/get-dataset-squad/README.md | 1 +
 .../repo/script/get-dataset-squad/_cm.yaml | 46 +
 .../script/get-dataset-squad/customize.py | 33 +
 .../COPYRIGHT.md | 3 +
 .../get-dlrm-data-mlperf-inference/README.md | 1 +
 .../get-dlrm-data-mlperf-inference/_cm.yaml | 41 +
 .../checksums.txt | 26 +
 .../customize.py | 166 +
 .../get-dlrm-data-mlperf-inference/run.sh | 27 +
 .../repo/script/get-dlrm/COPYRIGHT.md | 3 +
 .../repo/script/get-dlrm/README-extra.md | 15 +
 .../cmx4mlops/repo/script/get-dlrm/README.md | 1 +
 .../cmx4mlops/repo/script/get-dlrm/_cm.yaml | 27 +
 .../repo/script/get-dlrm/customize.py | 48 +
 .../cmx4mlops/repo/script/get-dlrm/run.sh | 12 +
 .../repo/script/get-docker/COPYRIGHT.md | 3 +
 .../repo/script/get-docker/README.md | 1 +
 .../cmx4mlops/repo/script/get-docker/_cm.yaml | 25 +
 .../repo/script/get-docker/customize.py | 84 +
 .../repo/script/get-docker/install-centos.sh | 13 +
 .../repo/script/get-docker/install-ubuntu.sh | 43 +
 .../repo/script/get-docker/install.bat | 2 +
 .../repo/script/get-docker/install.sh | 2 +
 .../cmx4mlops/repo/script/get-docker/run.bat | 3 +
 .../cmx4mlops/repo/script/get-docker/run.sh | 3 +
 .../repo/script/get-gcc/COPYRIGHT.md | 3 +
 .../repo/script/get-gcc/README-extra.md | 15 +
 .../cmx4mlops/repo/script/get-gcc/README.md | 1 +
 .../cmx4mlops/repo/script/get-gcc/_cm.yaml | 32 +
 .../repo/script/get-gcc/customize.py | 117 +
 .../cmx4mlops/repo/script/get-gcc/run.bat | 3 +
 .../cmx4mlops/repo/script/get-gcc/run.sh | 8 +
 .../get-generic-python-lib/COPYRIGHT.md | 3 +
 .../get-generic-python-lib/README-extra.md | 6 +
 .../script/get-generic-python-lib/README.md | 1 +
 .../script/get-generic-python-lib/_cm.yaml | 727 ++
 .../get-generic-python-lib/customize.py | 203 +
 .../get-generic-python-lib/detect-version.py | 36 +
 .../script/get-generic-python-lib/install.bat | 15 +
 .../script/get-generic-python-lib/install.sh | 60 +
 .../script/get-generic-python-lib/run.bat | 4 +
 .../repo/script/get-generic-python-lib/run.sh | 7 +
 .../tensorflow/run-aarch64.sh | 13 +
 .../tensorflow/run-macos.sh | 7 +
 .../get-generic-python-lib/uninstall_deps.sh | 8 +
 .../get-generic-python-lib/validate_cache.bat | 4 +
 .../get-generic-python-lib/validate_cache.sh | 7 +
 .../script/get-generic-sys-util/COPYRIGHT.md | 3 +
 .../get-generic-sys-util/README-extra.md | 425 ++
 .../repo/script/get-generic-sys-util/_cm.yaml | 717 ++
 .../script/get-generic-sys-util/customize.py | 197 +
 .../script/get-generic-sys-util/detect.sh | 21 +
 .../install-with-retry.sh | 48 +
 .../script/get-generic-sys-util/install.sh | 21 +
 .../script/get-gh-actions-runner/COPYRIGHT.md | 3 +
 .../script/get-gh-actions-runner/_cm.yaml | 51 +
 .../script/get-gh-actions-runner/customize.py | 56 +
 .../repo/script/get-gh-actions-runner/run.sh | 5 +
 .../repo/script/get-git-repo/COPYRIGHT.md | 3 +
 .../repo/script/get-git-repo/README-extra.md | 20 +
 .../repo/script/get-git-repo/README.md | 1 +
 .../repo/script/get-git-repo/_cm.yaml | 94 +
 .../repo/script/get-git-repo/customize.py | 118 +
 .../repo/script/get-git-repo/run.bat | 70 +
 .../cmx4mlops/repo/script/get-git-repo/run.sh | 97 +
 .../repo/script/get-github-cli/COPYRIGHT.md | 3 +
 .../repo/script/get-github-cli/README.md | 1 +
 .../repo/script/get-github-cli/_cm.yaml | 16 +
 .../repo/script/get-github-cli/customize.py | 70 +
 .../repo/script/get-github-cli/run.bat | 1 +
 .../repo/script/get-github-cli/run.sh | 2 +
 .../cmx4mlops/repo/script/get-go/COPYRIGHT.md | 3 +
 .../repo/script/get-go/README-extra.md | 10 +
 .../cmx4mlops/repo/script/get-go/README.md | 1 +
 .../cmx4mlops/repo/script/get-go/_cm.yaml | 23 +
 .../cmx4mlops/repo/script/get-go/customize.py | 75 +
 cmx4mlops/cmx4mlops/repo/script/get-go/run.sh | 3 +
 .../repo/script/get-google-saxml/COPYRIGHT.md | 3 +
 .../repo/script/get-google-saxml/README.md | 1 +
 .../repo/script/get-google-saxml/_cm.yaml | 45 +
 .../repo/script/get-google-saxml/customize.py | 33 +
 .../repo/script/get-google-saxml/run.bat | 3 +
 .../repo/script/get-google-saxml/run.sh | 3 +
 .../repo/script/get-google-test/COPYRIGHT.md | 3 +
 .../repo/script/get-google-test/README.md | 1 +
 .../repo/script/get-google-test/_cm.yaml | 37 +
 .../repo/script/get-google-test/customize.py | 46 +
 .../repo/script/get-google-test/run.sh | 23 +
 .../repo/script/get-ipol-src/COPYRIGHT.md | 3 +
 .../repo/script/get-ipol-src/README-extra.md | 1 +
 .../repo/script/get-ipol-src/README.md | 1 +
 .../repo/script/get-ipol-src/_cm.yaml | 29 +
 .../repo/script/get-ipol-src/customize.py | 77 +
 .../script/get-ipol-src/patch/20240127.patch | 10 +
 .../repo/script/get-java/COPYRIGHT.md | 3 +
 .../repo/script/get-java/README-extra.md | 6 +
 .../cmx4mlops/repo/script/get-java/README.md | 1 +
 .../cmx4mlops/repo/script/get-java/_cm.yaml | 26 +
 .../repo/script/get-java/customize.py | 165 +
 .../repo/script/get-java/install-prebuilt.bat | 9 +
 .../repo/script/get-java/install-prebuilt.sh | 15 +
 .../cmx4mlops/repo/script/get-java/run.bat | 3 +
 .../cmx4mlops/repo/script/get-java/run.sh | 3 +
 .../repo/script/get-javac/COPYRIGHT.md | 3 +
 .../repo/script/get-javac/README-extra.md | 6 +
 .../cmx4mlops/repo/script/get-javac/README.md | 1 +
 .../cmx4mlops/repo/script/get-javac/_cm.yaml | 27 +
 .../repo/script/get-javac/customize.py | 175 +
 .../script/get-javac/install-prebuilt.bat | 9 +
 .../repo/script/get-javac/install-prebuilt.sh | 15 +
 .../cmx4mlops/repo/script/get-javac/run.bat | 3 +
 .../cmx4mlops/repo/script/get-javac/run.sh | 3 +
 .../repo/script/get-lib-armnn/COPYRIGHT.md | 3 +
 .../repo/script/get-lib-armnn/README.md | 1 +
 .../repo/script/get-lib-armnn/_cm.yaml | 39 +
 .../repo/script/get-lib-armnn/customize.py | 64 +
 .../repo/script/get-lib-armnn/run.sh | 9 +
 .../repo/script/get-lib-dnnl/COPYRIGHT.md | 3 +
 .../repo/script/get-lib-dnnl/README.md | 1 +
 .../repo/script/get-lib-dnnl/_cm.yaml | 33 +
 .../repo/script/get-lib-dnnl/customize.py | 44 +
 .../cmx4mlops/repo/script/get-lib-dnnl/run.sh | 43 +
 .../repo/script/get-lib-protobuf/COPYRIGHT.md | 3 +
 .../repo/script/get-lib-protobuf/README.md | 1 +
 .../repo/script/get-lib-protobuf/_cm.yaml | 48 +
 .../repo/script/get-lib-protobuf/customize.py | 59 +
 .../repo/script/get-lib-protobuf/run.sh | 15 +
 .../repo/script/get-lib-qaic-api/COPYRIGHT.md | 3 +
 .../repo/script/get-lib-qaic-api/README.md | 1 +
 .../repo/script/get-lib-qaic-api/_cm.yaml | 27 +
 .../repo/script/get-lib-qaic-api/customize.py | 54 +
 .../get-lib-qaic-api/master/QAicInfApi.cpp | 750 ++
 .../get-lib-qaic-api/master/QAicInfApi.h | 146 +
 .../repo/script/get-lib-qaic-api/run.sh | 4 +
 .../repo/script/get-llvm/COPYRIGHT.md | 3 +
 .../repo/script/get-llvm/README-extra.md | 96 +
 .../cmx4mlops/repo/script/get-llvm/README.md | 1 +
 .../cmx4mlops/repo/script/get-llvm/_cm.yaml | 48 +
 .../repo/script/get-llvm/customize.py | 108 +
 .../cmx4mlops/repo/script/get-llvm/run.bat | 3 +
 .../cmx4mlops/repo/script/get-llvm/run.sh | 4 +
 .../repo/script/get-microtvm/COPYRIGHT.md | 3 +
 .../repo/script/get-microtvm/README-extra.md | 5 +
 .../repo/script/get-microtvm/README.md | 1 +
 .../repo/script/get-microtvm/_cm.yaml | 38 +
 .../repo/script/get-microtvm/customize.py | 39 +
 .../cmx4mlops/repo/script/get-microtvm/run.sh | 12 +
 .../get-ml-model-3d-unet-kits19/COPYRIGHT.md | 3 +
 .../get-ml-model-3d-unet-kits19/README.md | 1 +
 .../get-ml-model-3d-unet-kits19/_cm.yaml | 100 +
.../get-ml-model-3d-unet-kits19/customize.py | 36 + .../get-ml-model-bert-base-squad/COPYRIGHT.md | 3 + .../get-ml-model-bert-base-squad/README.md | 1 + .../get-ml-model-bert-base-squad/_cm.yaml | 68 + .../COPYRIGHT.md | 3 + .../get-ml-model-bert-large-squad/README.md | 1 + .../get-ml-model-bert-large-squad/_cm.yaml | 265 + .../customize.py | 48 + .../run-packed.sh | 6 + .../get-ml-model-dlrm-terabyte/COPYRIGHT.md | 3 + .../get-ml-model-dlrm-terabyte/README.md | 1 + .../get-ml-model-dlrm-terabyte/_cm.yaml | 125 + .../script/get-ml-model-dlrm-terabyte/run.sh | 4 + .../COPYRIGHT.md | 3 + .../get-ml-model-efficientnet-lite/README.md | 1 + .../get-ml-model-efficientnet-lite/_cm.yaml | 142 + .../customize.py | 67 + .../script/get-ml-model-gptj/COPYRIGHT.md | 3 + .../repo/script/get-ml-model-gptj/README.md | 1 + .../repo/script/get-ml-model-gptj/_cm.yaml | 220 + .../get-ml-model-gptj/convert_gptj_ckpt.py | 184 + .../script/get-ml-model-gptj/customize.py | 110 + .../get-ml-model-gptj/run-int4-calibration.sh | 10 + .../script/get-ml-model-gptj/run-intel.sh | 18 + .../script/get-ml-model-gptj/run-nvidia.sh | 21 + .../get-ml-model-gptj/run-saxml-quantized.sh | 6 + .../script/get-ml-model-gptj/run-saxml.sh | 8 + .../get-ml-model-huggingface-zoo/COPYRIGHT.md | 3 + .../README-extra.md | 21 + .../get-ml-model-huggingface-zoo/README.md | 1 + .../get-ml-model-huggingface-zoo/_cm.yaml | 58 + .../get-ml-model-huggingface-zoo/customize.py | 64 + .../download_model.py | 105 + .../get-ml-model-huggingface-zoo/run.bat | 3 + .../get-ml-model-huggingface-zoo/run.sh | 4 + .../script/get-ml-model-llama2/COPYRIGHT.md | 3 + .../repo/script/get-ml-model-llama2/README.md | 1 + .../repo/script/get-ml-model-llama2/_cm.yaml | 176 + .../script/get-ml-model-llama2/customize.py | 57 + .../script/get-ml-model-llama2/run-amd.sh | 28 + .../script/get-ml-model-llama2/run-nvidia.sh | 29 + .../script/get-ml-model-mixtral/COPYRIGHT.md | 3 + .../repo/script/get-ml-model-mixtral/_cm.yaml | 67 + .../script/get-ml-model-mixtral/customize.py | 39 + .../get-ml-model-mobilenet/COPYRIGHT.md | 3 + .../get-ml-model-mobilenet/README-extra.md | 15 + .../script/get-ml-model-mobilenet/README.md | 1 + .../script/get-ml-model-mobilenet/_cm.yaml | 280 + .../get-ml-model-mobilenet/customize.py | 67 + .../get-ml-model-neuralmagic-zoo/COPYRIGHT.md | 3 + .../get-ml-model-neuralmagic-zoo/README.md | 1 + .../get-ml-model-neuralmagic-zoo/_cm.yaml | 223 + .../get-ml-model-neuralmagic-zoo/customize.py | 57 + .../download_sparse.py | 10 + .../get-ml-model-neuralmagic-zoo/run.bat | 2 + .../get-ml-model-neuralmagic-zoo/run.sh | 2 + .../script/get-ml-model-resnet50/COPYRIGHT.md | 3 + .../get-ml-model-resnet50/README-extra.md | 15 + .../script/get-ml-model-resnet50/README.md | 1 + .../script/get-ml-model-resnet50/_cm.yaml | 239 + .../script/get-ml-model-resnet50/customize.py | 44 + .../get-ml-model-resnet50/run-fix-input.sh | 10 + .../get-ml-model-resnet50/run_config.yml | 6 + .../COPYRIGHT.md | 3 + .../get-ml-model-retinanet-nvidia/README.md | 1 + .../get-ml-model-retinanet-nvidia/_cm.yaml | 45 + .../customize.py | 49 + .../nvidia_patch_retinanet_efficientnms.py | 123 + .../polygraphy_script.sh | 24 + .../get-ml-model-retinanet-nvidia/run.sh | 16 + .../get-ml-model-retinanet/COPYRIGHT.md | 3 + .../get-ml-model-retinanet/README-extra.md | 16 + .../script/get-ml-model-retinanet/README.md | 1 + .../script/get-ml-model-retinanet/_cm.yaml | 123 + .../get-ml-model-retinanet/customize.py | 45 + .../node-precision-info.py | 88 + .../get-ml-model-retinanet/run-no-nms.sh 
| 35 + .../script/get-ml-model-rgat/COPYRIGHT.md | 3 + .../repo/script/get-ml-model-rgat/_cm.yaml | 65 + .../script/get-ml-model-rgat/customize.py | 41 + .../script/get-ml-model-rnnt/COPYRIGHT.md | 3 + .../repo/script/get-ml-model-rnnt/README.md | 1 + .../repo/script/get-ml-model-rnnt/_cm.yaml | 52 + .../script/get-ml-model-rnnt/customize.py | 51 + .../COPYRIGHT.md | 3 + .../get-ml-model-stable-diffusion/README.md | 1 + .../get-ml-model-stable-diffusion/_cm.yaml | 129 + .../customize.py | 40 + .../get-ml-model-tiny-resnet/COPYRIGHT.md | 3 + .../script/get-ml-model-tiny-resnet/README.md | 1 + .../script/get-ml-model-tiny-resnet/_cm.yaml | 108 + .../get-ml-model-tiny-resnet/customize.py | 41 + .../script/get-ml-model-tiny-resnet/run.sh | 4 + .../get-ml-model-tiny-resnet/run_config.yml | 6 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../_cm.yaml | 37 + .../customize.py | 33 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../_cm.yaml | 37 + .../customize.py | 40 + .../run.bat | 1 + .../run.sh | 32 + .../get-mlperf-inference-loadgen/COPYRIGHT.md | 3 + .../README-extra.md | 26 + .../get-mlperf-inference-loadgen/README.md | 1 + .../get-mlperf-inference-loadgen/_cm.yaml | 193 + .../get-mlperf-inference-loadgen/customize.py | 68 + .../get-mlperf-inference-loadgen/run.bat | 39 + .../get-mlperf-inference-loadgen/run.sh | 53 + .../tests/download-and-install.bat | 2 + .../COPYRIGHT.md | 3 + .../README-extra.md | 9 + .../README.md | 1 + .../_cm.yaml | 55 + .../customize.py | 32 + .../COPYRIGHT.md | 3 + .../README-extra.md | 1 + .../README.md | 1 + .../_cm.yaml | 46 + .../customize.py | 44 + .../run.bat | 1 + .../run.sh | 32 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../get-mlperf-inference-results-dir/_cm.yaml | 44 + .../customize.py | 40 + .../get-mlperf-inference-results/COPYRIGHT.md | 3 + .../README-extra.md | 18 + .../get-mlperf-inference-results/README.md | 1 + .../get-mlperf-inference-results/_cm.yaml | 83 + .../get-mlperf-inference-results/customize.py | 59 + .../get-mlperf-inference-src/COPYRIGHT.md | 3 + .../get-mlperf-inference-src/README-extra.md | 29 + .../script/get-mlperf-inference-src/README.md | 1 + .../script/get-mlperf-inference-src/_cm.yaml | 173 + .../get-mlperf-inference-src/customize.py | 183 + .../get-mlperf-inference-src/patch/coco.patch | 24 + .../get-mlperf-inference-src/patch/git.patch | 1925 +++++ .../patch/openimages-pycocotools.patch | 24 + .../patch/windows-openimages.patch | 64 + .../patch/windows-openimages2.patch | 11 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../_cm.yaml | 38 + .../customize.py | 43 + .../COPYRIGHT.md | 3 + .../README-extra.md | 6 + .../README.md | 1 + .../get-mlperf-inference-sut-configs/_cm.yaml | 32 + .../pytorch-framework/default-config.yaml | 38 + .../default-config.yaml | 38 + .../tensorrt-framework/default-config.yaml | 30 + .../default-config.yaml | 36 + .../tensorrt-framework/default-config.yaml | 38 + .../default-config.yaml | 42 + .../configs/default/config.yaml | 73 + .../default/default/default-config.yaml | 55 + .../customize.py | 155 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../_cm.yaml | 55 + .../customize.py | 198 + .../detect_memory.sh | 7 + .../get_memory_info.py | 61 + .../hardware/default.json | 26 + .../get-mlperf-inference-utils/COPYRIGHT.md | 3 + .../get-mlperf-inference-utils/README.md | 1 + .../get-mlperf-inference-utils/_cm.yaml | 18 + .../get-mlperf-inference-utils/customize.py | 46 + .../mlperf_utils.py | 353 + .../script/get-mlperf-logging/COPYRIGHT.md | 3 + .../script/get-mlperf-logging/README-extra.md | 16 + 
.../repo/script/get-mlperf-logging/README.md | 1 + .../repo/script/get-mlperf-logging/_cm.yaml | 24 + .../script/get-mlperf-logging/customize.py | 34 + .../script/get-mlperf-power-dev/COPYRIGHT.md | 3 + .../script/get-mlperf-power-dev/README.md | 1 + .../repo/script/get-mlperf-power-dev/_cm.yaml | 65 + .../script/get-mlperf-power-dev/customize.py | 33 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../_cm.yaml | 23 + .../customize.py | 77 + .../run.bat | 72 + .../run.sh | 39 + .../script/get-mlperf-tiny-src/COPYRIGHT.md | 3 + .../repo/script/get-mlperf-tiny-src/README.md | 1 + .../repo/script/get-mlperf-tiny-src/_cm.yaml | 31 + .../script/get-mlperf-tiny-src/customize.py | 71 + .../repo/script/get-mlperf-tiny-src/run.bat | 72 + .../repo/script/get-mlperf-tiny-src/run.sh | 39 + .../COPYRIGHT.md | 3 + .../get-mlperf-training-nvidia-code/README.md | 1 + .../get-mlperf-training-nvidia-code/_cm.yaml | 53 + .../customize.py | 36 + .../get-mlperf-training-src/COPYRIGHT.md | 3 + .../get-mlperf-training-src/README-extra.md | 27 + .../script/get-mlperf-training-src/README.md | 1 + .../script/get-mlperf-training-src/_cm.yaml | 97 + .../get-mlperf-training-src/customize.py | 40 + .../patch/cpu_load.patch | 16 + .../patch/nvidia-retinanet.patch | 170 + .../script/get-nvidia-docker/COPYRIGHT.md | 3 + .../repo/script/get-nvidia-docker/README.md | 1 + .../repo/script/get-nvidia-docker/_cm.yaml | 26 + .../script/get-nvidia-docker/run-ubuntu.sh | 36 + .../script/get-nvidia-mitten/COPYRIGHT.md | 3 + .../script/get-nvidia-mitten/README-extra.md | 1 + .../repo/script/get-nvidia-mitten/README.md | 1 + .../repo/script/get-nvidia-mitten/_cm.yaml | 33 + .../script/get-nvidia-mitten/customize.py | 33 + .../repo/script/get-nvidia-mitten/run.bat | 3 + .../repo/script/get-nvidia-mitten/run.sh | 4 + .../get-onnxruntime-prebuilt/COPYRIGHT.md | 3 + .../script/get-onnxruntime-prebuilt/README.md | 1 + .../script/get-onnxruntime-prebuilt/_cm.yaml | 36 + .../get-onnxruntime-prebuilt/customize.py | 95 + .../script/get-onnxruntime-prebuilt/run.bat | 10 + .../script/get-onnxruntime-prebuilt/run.sh | 14 + .../repo/script/get-openssl/COPYRIGHT.md | 3 + .../repo/script/get-openssl/README-extra.md | 8 + .../repo/script/get-openssl/README.md | 1 + .../repo/script/get-openssl/_cm.yaml | 23 + .../repo/script/get-openssl/customize.py | 73 + .../cmx4mlops/repo/script/get-openssl/run.sh | 4 + .../script/get-platform-details/COPYRIGHT.md | 3 + .../get-platform-details/README-EXTRA.md | 10 + .../repo/script/get-platform-details/_cm.yaml | 62 + .../script/get-platform-details/customize.py | 58 + .../script/get-platform-details/run-macos.sh | 1 + .../repo/script/get-platform-details/run.bat | 1 + .../repo/script/get-platform-details/run.sh | 138 + .../COPYRIGHT.md | 3 + .../README-extra.md | 16 + .../get-preprocessed-dataset-criteo/README.md | 1 + .../get-preprocessed-dataset-criteo/_cm.yaml | 156 + .../checksums.txt | 3 + .../customize.py | 62 + .../preprocess.py | 35 + .../preprocess_multihot.sh | 9 + .../run-multihot.sh | 6 + .../get-preprocessed-dataset-criteo/run.sh | 8 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../get-preprocessed-dataset-generic/_cm.yaml | 14 + .../customize.py | 22 + .../src/generic_preprocess.py | 221 + .../preprocess_object_detection_dataset.py | 193 + .../COPYRIGHT.md | 3 + .../README-extra.md | 26 + .../README.md | 1 + .../_cm.yaml | 272 + .../customize.py | 84 + .../preprocess.py | 41 + .../get-preprocessed-dataset-imagenet/run.bat | 4 + .../get-preprocessed-dataset-imagenet/run.sh | 6 + .../COPYRIGHT.md | 3 + 
.../get-preprocessed-dataset-kits19/README.md | 1 + .../get-preprocessed-dataset-kits19/_cm.yaml | 97 + .../customize.py | 41 + .../get-preprocessed-dataset-kits19/run.sh | 6 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../_cm.yaml | 93 + .../customize.py | 43 + .../run.sh | 6 + .../COPYRIGHT.md | 3 + .../README-extra.md | 28 + .../README.md | 1 + .../_cm.yaml | 232 + .../customize.py | 68 + .../nvidia_preprocess.py | 167 + .../preprocess.py | 50 + .../run.bat | 1 + .../run.sh | 3 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../_cm.yaml | 110 + .../customize.py | 66 + .../get-preprocessed-dataset-openorca/run.sh | 5 + .../COPYRIGHT.md | 3 + .../get-preprocessed-dataset-squad/README.md | 1 + .../get-preprocessed-dataset-squad/_cm.yaml | 93 + .../customize.py | 108 + .../run-packed.sh | 32 + .../get-preprocessed-dataset-squad/run.sh | 41 + .../repo/script/get-python3/COPYRIGHT.md | 3 + .../repo/script/get-python3/README-extra.md | 70 + .../repo/script/get-python3/README.md | 1 + .../repo/script/get-python3/_cm.yaml | 54 + .../repo/script/get-python3/customize.py | 154 + .../cmx4mlops/repo/script/get-python3/run.bat | 2 + .../cmx4mlops/repo/script/get-python3/run.sh | 13 + .../script/get-qaic-apps-sdk/COPYRIGHT.md | 3 + .../repo/script/get-qaic-apps-sdk/README.md | 1 + .../repo/script/get-qaic-apps-sdk/_cm.yaml | 30 + .../script/get-qaic-apps-sdk/customize.py | 128 + .../script/get-qaic-platform-sdk/COPYRIGHT.md | 3 + .../script/get-qaic-platform-sdk/README.md | 1 + .../script/get-qaic-platform-sdk/_cm.yaml | 32 + .../script/get-qaic-platform-sdk/customize.py | 129 + .../script/get-qaic-software-kit/COPYRIGHT.md | 3 + .../script/get-qaic-software-kit/README.md | 1 + .../script/get-qaic-software-kit/_cm.yaml | 56 + .../script/get-qaic-software-kit/customize.py | 77 + .../repo/script/get-qaic-software-kit/run.sh | 17 + .../script/get-rclone-config/COPYRIGHT.md | 3 + .../repo/script/get-rclone-config/_cm.yaml | 13 + .../script/get-rclone-config/customize.py | 38 + .../repo/script/get-rclone-config/run.bat | 1 + .../repo/script/get-rclone-config/run.sh | 17 + .../repo/script/get-rclone/COPYRIGHT.md | 3 + .../repo/script/get-rclone/README.md | 1 + .../cmx4mlops/repo/script/get-rclone/_cm.yaml | 30 + .../script/get-rclone/configs/rclone.conf | 8 + .../repo/script/get-rclone/customize.py | 158 + .../script/get-rclone/install-system-macos.sh | 3 + .../repo/script/get-rclone/install-system.sh | 3 + .../repo/script/get-rclone/install.bat | 12 + .../repo/script/get-rclone/install.sh | 13 + .../cmx4mlops/repo/script/get-rclone/run.bat | 5 + .../cmx4mlops/repo/script/get-rclone/run.sh | 11 + .../repo/script/get-rocm-devices/COPYRIGHT.md | 3 + .../repo/script/get-rocm-devices/README.md | 4 + .../repo/script/get-rocm-devices/_cm.yaml | 29 + .../repo/script/get-rocm-devices/customize.py | 77 + .../repo/script/get-rocm-devices/detect.py | 55 + .../repo/script/get-rocm-devices/detect.sh | 4 + .../repo/script/get-rocm-devices/run.sh | 31 + .../repo/script/get-rocm/COPYRIGHT.md | 3 + .../cmx4mlops/repo/script/get-rocm/README.md | 1 + .../cmx4mlops/repo/script/get-rocm/_cm.yaml | 20 + .../repo/script/get-rocm/customize.py | 77 + .../cmx4mlops/repo/script/get-rocm/run.sh | 5 + .../repo/script/get-spec-ptd/COPYRIGHT.md | 3 + .../repo/script/get-spec-ptd/README-extra.md | 16 + .../repo/script/get-spec-ptd/README.md | 1 + .../repo/script/get-spec-ptd/_cm.yaml | 49 + .../repo/script/get-spec-ptd/customize.py | 48 + .../cmx4mlops/repo/script/get-spec-ptd/run.sh | 11 + .../repo/script/get-sys-utils-cm/COPYRIGHT.md | 3 
+ .../repo/script/get-sys-utils-cm/README.md | 1 + .../repo/script/get-sys-utils-cm/_cm.yaml | 36 + .../repo/script/get-sys-utils-cm/customize.py | 100 + .../get-sys-utils-cm/do_pip_installs.sh | 6 + .../get-sys-utils-cm/do_pip_installs.sh.old | 6 + .../script/get-sys-utils-cm/requirements.txt | 5 + .../repo/script/get-sys-utils-cm/run-arch.sh | 39 + .../script/get-sys-utils-cm/run-debian.sh | 60 + .../repo/script/get-sys-utils-cm/run-macos.sh | 43 + .../repo/script/get-sys-utils-cm/run-rhel.sh | 46 + .../repo/script/get-sys-utils-cm/run-sles.sh | 42 + .../script/get-sys-utils-cm/run-ubuntu.sh | 64 + .../script/get-sys-utils-min/COPYRIGHT.md | 3 + .../repo/script/get-sys-utils-min/README.md | 1 + .../repo/script/get-sys-utils-min/_cm.yaml | 33 + .../script/get-sys-utils-min/customize.py | 78 + .../repo/script/get-tensorrt/COPYRIGHT.md | 3 + .../repo/script/get-tensorrt/README-extra.md | 11 + .../repo/script/get-tensorrt/README.md | 1 + .../repo/script/get-tensorrt/_cm.yaml | 38 + .../repo/script/get-tensorrt/customize.py | 166 + .../cmx4mlops/repo/script/get-tensorrt/run.sh | 41 + .../repo/script/get-terraform/COPYRIGHT.md | 3 + .../repo/script/get-terraform/README-extra.md | 9 + .../repo/script/get-terraform/README.md | 1 + .../repo/script/get-terraform/_cm.yaml | 20 + .../repo/script/get-terraform/customize.py | 75 + .../repo/script/get-terraform/run.sh | 3 + .../repo/script/get-tvm-model/COPYRIGHT.md | 3 + .../repo/script/get-tvm-model/README-extra.md | 21 + .../repo/script/get-tvm-model/README.md | 1 + .../repo/script/get-tvm-model/_cm.yaml | 120 + .../repo/script/get-tvm-model/customize.py | 65 + .../repo/script/get-tvm-model/process.py | 273 + .../repo/script/get-tvm-model/run.sh | 7 + .../repo/script/get-tvm/COPYRIGHT.md | 3 + .../repo/script/get-tvm/README-extra.md | 5 + .../cmx4mlops/repo/script/get-tvm/README.md | 1 + .../cmx4mlops/repo/script/get-tvm/_cm.yaml | 73 + .../repo/script/get-tvm/customize.py | 61 + .../cmx4mlops/repo/script/get-tvm/run.sh | 80 + .../repo/script/get-xilinx-sdk/COPYRIGHT.md | 3 + .../repo/script/get-xilinx-sdk/README.md | 1 + .../repo/script/get-xilinx-sdk/_cm.yaml | 27 + .../repo/script/get-xilinx-sdk/customize.py | 45 + .../repo/script/get-xilinx-sdk/run.sh | 27 + .../repo/script/get-zendnn/COPYRIGHT.md | 3 + .../repo/script/get-zendnn/README.md | 1 + .../cmx4mlops/repo/script/get-zendnn/_cm.yaml | 27 + .../repo/script/get-zendnn/customize.py | 40 + .../cmx4mlops/repo/script/get-zendnn/run.bat | 1 + .../cmx4mlops/repo/script/get-zendnn/run.sh | 9 + .../repo/script/get-zephyr-sdk/COPYRIGHT.md | 3 + .../script/get-zephyr-sdk/README-extra.md | 19 + .../repo/script/get-zephyr-sdk/README.md | 1 + .../repo/script/get-zephyr-sdk/_cm.yaml | 25 + .../repo/script/get-zephyr-sdk/customize.py | 28 + .../repo/script/get-zephyr-sdk/run.sh | 21 + .../repo/script/get-zephyr/COPYRIGHT.md | 3 + .../repo/script/get-zephyr/README-extra.md | 8 + .../repo/script/get-zephyr/README.md | 1 + .../cmx4mlops/repo/script/get-zephyr/_cm.yaml | 26 + .../repo/script/get-zephyr/customize.py | 29 + .../repo/script/get-zephyr/run-ubuntu.sh | 4 + .../cmx4mlops/repo/script/get-zephyr/run.sh | 22 + .../cmx4mlops/repo/script/gui/COPYRIGHT.md | 3 + .../cmx4mlops/repo/script/gui/README-about.md | 15 + cmx4mlops/cmx4mlops/repo/script/gui/README.md | 1 + cmx4mlops/cmx4mlops/repo/script/gui/_cm.yaml | 106 + cmx4mlops/cmx4mlops/repo/script/gui/app.py | 73 + .../cmx4mlops/repo/script/gui/customize.py | 77 + cmx4mlops/cmx4mlops/repo/script/gui/graph.py | 827 +++ 
.../repo/script/gui/install/linux.md | 10 + .../repo/script/gui/install/macos.md | 24 + .../repo/script/gui/install/redhat.md | 7 + .../repo/script/gui/install/windows.md | 15 + cmx4mlops/cmx4mlops/repo/script/gui/misc.py | 231 + .../cmx4mlops/repo/script/gui/playground.py | 208 + .../repo/script/gui/playground_apps.py | 38 + .../repo/script/gui/playground_beta.py | 37 + .../repo/script/gui/playground_beta_README.md | 10 + .../repo/script/gui/playground_challenges.py | 496 ++ .../gui/playground_challenges_with_prizes.py | 449 ++ .../script/gui/playground_contributors.py | 368 + .../repo/script/gui/playground_howtorun.py | 329 + .../repo/script/gui/playground_install.py | 147 + .../repo/script/gui/playground_reports.py | 144 + .../repo/script/gui/playground_reproduce.py | 460 ++ .../repo/script/gui/playground_scripts.py | 344 + cmx4mlops/cmx4mlops/repo/script/gui/run.bat | 2 + cmx4mlops/cmx4mlops/repo/script/gui/run.sh | 4 + cmx4mlops/cmx4mlops/repo/script/gui/script.py | 493 ++ .../cmx4mlops/repo/script/gui/tests/README.md | 3 + .../script/gui/tests/generate_password.py | 13 + .../cmx4mlops/repo/script/gui/tests/test.cmd | 1 + .../cmx4mlops/repo/script/gui/tests/test2.cmd | 1 + .../cmx4mlops/repo/script/gui/tests/test3.cmd | 1 + .../cmx4mlops/repo/script/gui/tests/test4.cmd | 1 + .../repo/script/gui/tests/test4a.cmd | 2 + .../repo/script/gui/tests/test4b.cmd | 2 + .../cmx4mlops/repo/script/gui/tests/test5.cmd | 1 + .../import-experiment-to-sqlite/README.md | 155 + .../COPYRIGHT.md | 3 + .../README-extra.md | 82 + .../README.md | 1 + .../_cm.yaml | 38 + .../customize.py | 365 + .../COPYRIGHT.md | 3 + .../README-extra.md | 68 + .../README.md | 1 + .../import-mlperf-tiny-to-experiment/_cm.yaml | 33 + ...d-customization-of-tinymlperf-results2.png | Bin 0 -> 118877 bytes .../customize.py | 560 ++ .../COPYRIGHT.md | 3 + .../README-extra.md | 54 + .../README.md | 1 + .../_cm.yaml | 39 + .../customize.py | 364 + .../run_mlperf_logger.sh | 9 + .../script/install-apt-package/COPYRIGHT.md | 3 + .../repo/script/install-apt-package/_cm.yaml | 21 + .../script/install-apt-package/customize.py | 38 + .../repo/script/install-apt-package/run.sh | 6 + .../repo/script/install-aws-cli/COPYRIGHT.md | 3 + .../repo/script/install-aws-cli/README.md | 1 + .../repo/script/install-aws-cli/_cm.yaml | 21 + .../repo/script/install-aws-cli/customize.py | 29 + .../repo/script/install-aws-cli/run.sh | 14 + .../repo/script/install-bazel/COPYRIGHT.md | 3 + .../repo/script/install-bazel/README.md | 1 + .../repo/script/install-bazel/_cm.yaml | 22 + .../repo/script/install-bazel/customize.py | 73 + .../repo/script/install-bazel/run-aarch64.sh | 25 + .../repo/script/install-bazel/run.bat | 9 + .../repo/script/install-bazel/run.sh | 28 + .../install-cmake-prebuilt/COPYRIGHT.md | 3 + .../script/install-cmake-prebuilt/README.md | 1 + .../script/install-cmake-prebuilt/_cm.yaml | 26 + .../install-cmake-prebuilt/customize.py | 135 + .../repo/script/install-cmake-prebuilt/run.sh | 10 + .../install-cuda-package-manager/COPYRIGHT.md | 3 + .../install-cuda-package-manager/README.md | 1 + .../install-cuda-package-manager/_cm.yaml | 19 + .../install-cuda-package-manager/customize.py | 27 + .../run-ubuntu.sh | 1 + .../install-cuda-package-manager/run.sh | 5 + .../script/install-cuda-prebuilt/COPYRIGHT.md | 3 + .../install-cuda-prebuilt/README-extra.md | 4 + .../script/install-cuda-prebuilt/README.md | 1 + .../script/install-cuda-prebuilt/_cm.yaml | 91 + .../script/install-cuda-prebuilt/customize.py | 66 + 
.../repo/script/install-cuda-prebuilt/run.sh | 8 + .../install-diffusers-from-src/COPYRIGHT.md | 3 + .../install-diffusers-from-src/_cm.yaml | 79 + .../install-diffusers-from-src/customize.py | 33 + .../script/install-diffusers-from-src/run.sh | 18 + .../repo/script/install-gcc-src/COPYRIGHT.md | 3 + .../repo/script/install-gcc-src/README.md | 1 + .../repo/script/install-gcc-src/_cm.yaml | 25 + .../repo/script/install-gcc-src/customize.py | 41 + .../repo/script/install-gcc-src/run.sh | 41 + .../COPYRIGHT.md | 3 + .../install-generic-conda-package/README.md | 1 + .../install-generic-conda-package/_cm.yaml | 52 + .../customize.py | 57 + .../install-generic-conda-package/run.sh | 7 + .../install-gflags-from-src/COPYRIGHT.md | 3 + .../script/install-gflags-from-src/_cm.yaml | 61 + .../install-gflags-from-src/customize.py | 39 + .../script/install-gflags-from-src/run.sh | 15 + .../repo/script/install-gflags/COPYRIGHT.md | 3 + .../repo/script/install-gflags/README.md | 1 + .../repo/script/install-gflags/_cm.yaml | 23 + .../repo/script/install-gflags/customize.py | 42 + .../repo/script/install-gflags/run.sh | 18 + .../script/install-github-cli/COPYRIGHT.md | 3 + .../repo/script/install-github-cli/README.md | 1 + .../repo/script/install-github-cli/_cm.yaml | 15 + .../script/install-github-cli/customize.py | 25 + .../script/install-github-cli/run-macos.sh | 1 + .../script/install-github-cli/run-rhel.sh | 3 + .../repo/script/install-github-cli/run.bat | 1 + .../repo/script/install-github-cli/run.sh | 7 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../_cm.yaml | 76 + .../customize.py | 34 + .../run.sh | 14 + .../script/install-ipex-from-src/COPYRIGHT.md | 3 + .../script/install-ipex-from-src/README.md | 1 + .../script/install-ipex-from-src/_cm.yaml | 346 + .../apply_intel_3d-unet_patch.sh | 5 + .../apply_intel_resnet50_patch.sh | 5 + .../apply_intel_retinanet_patch.sh | 5 + .../script/install-ipex-from-src/customize.py | 51 + .../repo/script/install-ipex-from-src/run.sh | 58 + .../script/install-llvm-prebuilt/COPYRIGHT.md | 3 + .../install-llvm-prebuilt/README-extra.md | 99 + .../script/install-llvm-prebuilt/README.md | 1 + .../script/install-llvm-prebuilt/_cm.yaml | 28 + .../script/install-llvm-prebuilt/customize.py | 232 + .../repo/script/install-llvm-prebuilt/run.bat | 3 + .../repo/script/install-llvm-prebuilt/run.sh | 10 + .../repo/script/install-llvm-src/COPYRIGHT.md | 3 + .../repo/script/install-llvm-src/README.md | 1 + .../repo/script/install-llvm-src/_cm.yaml | 210 + .../repo/script/install-llvm-src/customize.py | 87 + .../install-llvm-16-intel-mlperf-inference.sh | 31 + .../repo/script/install-llvm-src/run.sh | 44 + .../COPYRIGHT.md | 3 + .../install-mlperf-logging-from-src/README.md | 1 + .../install-mlperf-logging-from-src/_cm.yaml | 36 + .../customize.py | 35 + .../install-mlperf-logging-from-src/run.sh | 29 + .../script/install-nccl-libs/COPYRIGHT.md | 3 + .../repo/script/install-nccl-libs/README.md | 1 + .../repo/script/install-nccl-libs/_cm.yaml | 13 + .../script/install-nccl-libs/customize.py | 35 + .../script/install-nccl-libs/run-ubuntu.sh | 2 + .../repo/script/install-nccl-libs/run.sh | 27 + .../install-numactl-from-src/COPYRIGHT.md | 3 + .../script/install-numactl-from-src/README.md | 1 + .../script/install-numactl-from-src/_cm.yaml | 61 + .../install-numactl-from-src/customize.py | 35 + .../script/install-numactl-from-src/run.sh | 19 + .../install-onednn-from-src/COPYRIGHT.md | 3 + .../script/install-onednn-from-src/README.md | 1 + .../script/install-onednn-from-src/_cm.yaml | 79 
+ .../install-onednn-from-src/customize.py | 44 + .../run-intel-mlperf-inference-bert.sh | 20 + .../run-intel-mlperf-inference.sh | 20 + .../script/install-onednn-from-src/run.sh | 15 + .../install-onnxruntime-from-src/COPYRIGHT.md | 3 + .../install-onnxruntime-from-src/README.md | 1 + .../install-onnxruntime-from-src/_cm.yaml | 76 + .../install-onnxruntime-from-src/customize.py | 34 + .../install-onnxruntime-from-src/run.sh | 15 + .../install-opencv-from-src/COPYRIGHT.md | 3 + .../script/install-opencv-from-src/_cm.yaml | 63 + .../install-opencv-from-src/customize.py | 39 + .../script/install-opencv-from-src/run.sh | 15 + .../repo/script/install-openssl/COPYRIGHT.md | 3 + .../repo/script/install-openssl/README.md | 1 + .../repo/script/install-openssl/_cm.yaml | 28 + .../repo/script/install-openssl/customize.py | 58 + .../repo/script/install-openssl/run.sh | 17 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../_cm.yaml | 17 + .../customize.py | 53 + .../script/install-python-src/COPYRIGHT.md | 3 + .../repo/script/install-python-src/README.md | 1 + .../repo/script/install-python-src/_cm.yaml | 83 + .../script/install-python-src/customize.py | 62 + .../repo/script/install-python-src/run.sh | 71 + .../script/install-python-venv/COPYRIGHT.md | 3 + .../repo/script/install-python-venv/README.md | 1 + .../repo/script/install-python-venv/_cm.yaml | 31 + .../script/install-python-venv/customize.py | 101 + .../repo/script/install-python-venv/run.bat | 5 + .../repo/script/install-python-venv/run.sh | 10 + .../install-pytorch-from-src/COPYRIGHT.md | 3 + .../script/install-pytorch-from-src/README.md | 1 + .../script/install-pytorch-from-src/_cm.yaml | 288 + .../install-pytorch-from-src/customize.py | 49 + .../run-intel-mlperf-inference-v3_1.sh | 40 + .../run-intel-mlperf-inference-vision.sh | 23 + .../script/install-pytorch-from-src/run.sh | 28 + .../COPYRIGHT.md | 3 + .../install-pytorch-kineto-from-src/README.md | 1 + .../install-pytorch-kineto-from-src/_cm.yaml | 85 + .../customize.py | 29 + .../install-pytorch-kineto-from-src/run.sh | 15 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../_cm.yaml | 80 + .../customize.py | 61 + .../install-qaic-compute-sdk-from-src/run.sh | 24 + .../install-rapidjson-from-src/COPYRIGHT.md | 3 + .../install-rapidjson-from-src/_cm.yaml | 61 + .../install-rapidjson-from-src/customize.py | 33 + .../script/install-rapidjson-from-src/run.sh | 15 + .../repo/script/install-rocm/COPYRIGHT.md | 3 + .../repo/script/install-rocm/README.md | 1 + .../repo/script/install-rocm/_cm.yaml | 19 + .../repo/script/install-rocm/customize.py | 32 + .../repo/script/install-rocm/run-rhel.sh | 27 + .../repo/script/install-rocm/run-ubuntu.sh | 32 + .../cmx4mlops/repo/script/install-rocm/run.sh | 2 + .../install-tensorflow-for-c/COPYRIGHT.md | 3 + .../script/install-tensorflow-for-c/README.md | 1 + .../script/install-tensorflow-for-c/_cm.yaml | 15 + .../install-tensorflow-for-c/customize.py | 43 + .../script/install-tensorflow-for-c/run.sh | 13 + .../install-tensorflow-from-src/COPYRIGHT.md | 3 + .../install-tensorflow-from-src/README.md | 1 + .../install-tensorflow-from-src/_cm.yaml | 346 + .../install-tensorflow-from-src/customize.py | 82 + .../script/install-tensorflow-from-src/run.sh | 44 + .../install-terraform-from-src/COPYRIGHT.md | 3 + .../install-terraform-from-src/README.md | 1 + .../install-terraform-from-src/_cm.yaml | 24 + .../install-terraform-from-src/customize.py | 33 + .../script/install-terraform-from-src/run.sh | 16 + .../install-tflite-from-src/COPYRIGHT.md | 3 + 
.../script/install-tflite-from-src/README.md | 1 + .../script/install-tflite-from-src/_cm.yaml | 40 + .../install-tflite-from-src/customize.py | 41 + .../script/install-tflite-from-src/run.sh | 27 + .../install-torchvision-from-src/COPYRIGHT.md | 3 + .../install-torchvision-from-src/README.md | 1 + .../install-torchvision-from-src/_cm.yaml | 108 + .../install-torchvision-from-src/customize.py | 33 + .../install-torchvision-from-src/run.sh | 14 + .../COPYRIGHT.md | 3 + .../install-tpp-pytorch-extension/README.md | 1 + .../install-tpp-pytorch-extension/_cm.yaml | 102 + .../customize.py | 36 + .../install-tpp-pytorch-extension/run.sh | 11 + .../COPYRIGHT.md | 3 + .../install-transformers-from-src/README.md | 1 + .../install-transformers-from-src/_cm.yaml | 100 + .../customize.py | 33 + .../install-transformers-from-src/run.sh | 24 + .../repo/script/launch-benchmark/COPYRIGHT.md | 3 + .../script/launch-benchmark/README-extra.md | 3 + .../repo/script/launch-benchmark/README.md | 1 + .../repo/script/launch-benchmark/_cm.yaml | 15 + .../repo/script/launch-benchmark/customize.py | 742 ++ .../script/launch-benchmark/tests/debug.py | 6 + .../plug-prebuilt-cudnn-to-cuda/COPYRIGHT.md | 3 + .../README-extra.md | 2 + .../plug-prebuilt-cudnn-to-cuda/_cm.yaml | 60 + .../plug-prebuilt-cudnn-to-cuda/customize.py | 63 + .../script/plug-prebuilt-cudnn-to-cuda/run.sh | 37 + .../COPYRIGHT.md | 3 + .../plug-prebuilt-cusparselt-to-cuda/_cm.yaml | 60 + .../customize.py | 63 + .../plug-prebuilt-cusparselt-to-cuda/run.sh | 37 + .../prepare-training-data-bert/COPYRIGHT.md | 3 + .../prepare-training-data-bert/README.md | 1 + .../prepare-training-data-bert/_cm.yaml | 107 + .../prepare-training-data-bert/customize.py | 77 + .../prepare-training-data-bert/run-nvidia.sh | 33 + .../run-reference.sh | 81 + .../script/prepare-training-data-bert/run.sh | 36 + .../prepare-training-data-bert/run_config.yml | 13 + .../prepare-training-data-resnet/COPYRIGHT.md | 3 + .../prepare-training-data-resnet/README.md | 1 + .../prepare-training-data-resnet/_cm.yaml | 78 + .../prepare-training-data-resnet/customize.py | 74 + .../run-nvidia.sh | 43 + .../run-reference.sh | 37 + .../run_config.yml | 13 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../_cm.yaml | 37 + .../customize.py | 63 + .../run.sh | 5 + .../repo/script/print-any-text/COPYRIGHT.md | 3 + .../repo/script/print-any-text/README.md | 1 + .../repo/script/print-any-text/_cm.yaml | 34 + .../repo/script/print-any-text/customize.py | 41 + .../repo/script/print-any-text/run.bat | 5 + .../repo/script/print-any-text/run.sh | 3 + .../script/print-croissant-desc/COPYRIGHT.md | 3 + .../print-croissant-desc/README-extra.md | 16 + .../script/print-croissant-desc/README.md | 1 + .../repo/script/print-croissant-desc/_cm.yaml | 29 + .../repo/script/print-croissant-desc/code.py | 29 + .../repo/script/print-croissant-desc/run.bat | 2 + .../repo/script/print-croissant-desc/run.sh | 4 + .../print-hello-world-java/COPYRIGHT.md | 3 + .../script/print-hello-world-java/README.md | 1 + .../script/print-hello-world-java/_cm.yaml | 17 + .../script/print-hello-world-java/code.java | 27 + .../script/print-hello-world-java/run.bat | 4 + .../repo/script/print-hello-world-java/run.sh | 6 + .../print-hello-world-javac/COPYRIGHT.md | 3 + .../script/print-hello-world-javac/README.md | 1 + .../script/print-hello-world-javac/_cm.yaml | 17 + .../script/print-hello-world-javac/code.java | 27 + .../script/print-hello-world-javac/run.bat | 8 + .../script/print-hello-world-javac/run.sh | 10 + 
.../script/print-hello-world-py/COPYRIGHT.md | 3 + .../script/print-hello-world-py/README.md | 1 + .../repo/script/print-hello-world-py/_cm.yaml | 24 + .../repo/script/print-hello-world-py/app.py | 20 + .../script/print-hello-world-py/customize.py | 31 + .../repo/script/print-hello-world-py/run.bat | 8 + .../repo/script/print-hello-world-py/run.sh | 11 + .../script/print-hello-world/COPYRIGHT.md | 3 + .../repo/script/print-hello-world/README.md | 1 + .../repo/script/print-hello-world/_cm.yaml | 48 + .../repo/script/print-hello-world/run.bat | 16 + .../repo/script/print-hello-world/run.sh | 18 + .../script/print-python-version/COPYRIGHT.md | 3 + .../script/print-python-version/README.md | 1 + .../repo/script/print-python-version/_cm.yaml | 15 + .../repo/script/print-python-version/run.bat | 8 + .../repo/script/print-python-version/run.sh | 11 + .../repo/script/process-ae-users/COPYRIGHT.md | 3 + .../repo/script/process-ae-users/README.md | 1 + .../repo/script/process-ae-users/_cm.yaml | 17 + .../repo/script/process-ae-users/code.py | 80 + .../repo/script/process-ae-users/customize.py | 22 + .../repo/script/process-ae-users/run.bat | 2 + .../repo/script/process-ae-users/run.sh | 4 + .../process-mlperf-accuracy/COPYRIGHT.md | 3 + .../script/process-mlperf-accuracy/README.md | 1 + .../script/process-mlperf-accuracy/_cm.yaml | 263 + .../process-mlperf-accuracy/customize.py | 245 + .../script/process-mlperf-accuracy/run.bat | 8 + .../script/process-mlperf-accuracy/run.sh | 9 + .../script/prune-bert-models/COPYRIGHT.md | 3 + .../script/prune-bert-models/README-extra.md | 1 + .../repo/script/prune-bert-models/README.md | 1 + .../repo/script/prune-bert-models/_cm.yaml | 48 + .../script/prune-bert-models/customize.py | 64 + .../repo/script/prune-bert-models/run.sh | 19 + .../repo/script/prune-docker/COPYRIGHT.md | 3 + .../repo/script/prune-docker/README.md | 1 + .../repo/script/prune-docker/_cm.yaml | 8 + .../repo/script/prune-docker/run.bat | 1 + .../cmx4mlops/repo/script/prune-docker/run.sh | 3 + .../publish-results-to-dashboard/COPYRIGHT.md | 3 + .../publish-results-to-dashboard/README.md | 1 + .../publish-results-to-dashboard/_cm.yaml | 14 + .../publish-results-to-dashboard/code.py | 105 + .../publish-results-to-dashboard/run.bat | 2 + .../publish-results-to-dashboard/run.sh | 7 + .../repo/script/pull-git-repo/COPYRIGHT.md | 3 + .../repo/script/pull-git-repo/README.md | 1 + .../repo/script/pull-git-repo/_cm.yaml | 17 + .../repo/script/pull-git-repo/customize.py | 40 + .../repo/script/pull-git-repo/run.sh | 17 + .../push-csv-to-spreadsheet/COPYRIGHT.md | 3 + .../script/push-csv-to-spreadsheet/README.md | 1 + .../script/push-csv-to-spreadsheet/_cm.yaml | 23 + .../push-csv-to-spreadsheet/customize.py | 28 + .../push-csv-to-spreadsheet/google_api.py | 60 + .../script/push-csv-to-spreadsheet/run.sh | 3 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../_cm.yaml | 37 + .../customize.py | 50 + .../run.bat | 31 + .../run.sh | 20 + .../script/remote-run-commands/COPYRIGHT.md | 3 + .../remote-run-commands/README-extra.md | 0 .../repo/script/remote-run-commands/README.md | 1 + .../repo/script/remote-run-commands/_cm.yaml | 28 + .../script/remote-run-commands/customize.py | 61 + .../repo/script/remote-run-commands/run.bat | 1 + .../repo/script/remote-run-commands/run.sh | 4 + .../COPYRIGHT.md | 3 + .../README.md | 42 + .../_cm.yaml | 45 + .../customize.py | 35 + .../install_deps.sh | 15 + .../install_deps_cuda.sh | 15 + .../install_deps_driver.sh | 15 + .../install_deps_pytorch.sh | 15 + 
.../install_deps_transformers.sh | 17 + .../run.sh | 21 + .../run_figure11.sh | 20 + .../run_figure12.sh | 18 + .../run_figure13.sh | 19 + .../COPYRIGHT.md | 3 + .../README.md | 61 + .../_cm.yaml | 40 + .../customize.py | 35 + .../install_deps.sh | 28 + .../install_spec_deps.sh | 37 + .../plot.sh | 21 + .../run.sh | 23 + .../run_spec.sh | 23 + .../COPYRIGHT.md | 3 + .../README.md | 74 + .../_cm.yaml | 48 + .../customize.py | 35 + .../install_deps.sh | 15 + .../install_deps_gem5.sh | 12 + .../install_deps_kernel.sh | 12 + .../plot.sh | 28 + .../run.sh | 16 + .../COPYRIGHT.md | 3 + .../README.md | 50 + .../_cm.yaml | 36 + .../install_deps.bat | 18 + .../install_deps.sh | 12 + .../plot.bat | 12 + .../plot.sh | 83 + .../run.bat | 12 + .../run.sh | 49 + .../COPYRIGHT.md | 3 + .../README.md | 30 + .../_cm.yaml | 20 + .../customize.py | 35 + .../install_deps.bat | 4 + .../install_deps.sh | 24 + .../main.py | 10 + .../run.bat | 4 + .../run.sh | 41 + .../.gitignore | 1 + .../COPYRIGHT.md | 3 + .../README.md | 74 + .../_cm.yaml | 40 + .../check.sh | 15 + .../customize.py | 35 + .../install_deps.bat | 18 + .../install_deps.sh | 30 + .../main.py | 10 + .../plot.bat | 12 + .../plot.sh | 15 + .../plot_pregenerated.sh | 15 + .../run.bat | 12 + .../run.sh | 14 + .../COPYRIGHT.md | 3 + .../Dockerfile | 28 + .../README.md | 40 + .../_cm.yaml | 30 + .../customize.py | 35 + .../install_deps.sh | 49 + .../plot.sh | 60 + .../run.sh | 54 + .../COPYRIGHT.md | 3 + .../Preliminary_build_onikiri.sh | 15 + .../Preliminary_create_binary.sh | 19 + .../Preliminary_experiment.sh | 30 + .../Preliminary_experiment_setup.sh | 13 + .../Preliminary_plot.sh | 15 + .../README.md | 49 + .../_cm.yaml | 55 + .../build_compiler.sh | 32 + .../build_onikiri.sh | 14 + .../create_binary.sh | 24 + .../experiment.sh | 14 + .../experiment_setup.sh | 16 + .../install_deps.sh | 4 + .../plot.sh | 34 + .../COPYRIGHT.md | 3 + .../README.md | 45 + .../_cm.yaml | 42 + .../customize.py | 35 + .../install_deps.sh | 24 + .../main.py | 10 + .../plot.sh | 14 + .../run.sh | 15 + .../COPYRIGHT.md | 3 + .../README-extra.md | 75 + .../reproduce-ipol-paper-2022-439/README.md | 1 + .../reproduce-ipol-paper-2022-439/_cm.yaml | 40 + .../customize.py | 51 + .../requirements.txt | 5 + .../reproduce-ipol-paper-2022-439/run.bat | 33 + .../reproduce-ipol-paper-2022-439/run.sh | 42 + .../README.md | 381 + .../COPYRIGHT.md | 3 + .../README-extra.md | 13 + .../README.md | 1 + .../_cm.yaml | 81 + .../customize.py | 36 + .../run.sh | 32 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../reproduce-mlperf-training-nvidia/_cm.yaml | 80 + .../customize.py | 46 + .../run-resnet.sh | 16 + .../reproduce-mlperf-training-nvidia/run.sh | 8 + .../script/run-all-mlperf-models/COPYRIGHT.md | 3 + .../script/run-all-mlperf-models/README.md | 237 + .../script/run-all-mlperf-models/_cm.yaml | 130 + .../script/run-all-mlperf-models/customize.py | 123 + .../run-all-mlperf-models/run-bert-macos.sh | 75 + .../script/run-all-mlperf-models/run-bert.sh | 79 + .../run-cpp-implementation.sh | 163 + .../run-mobilenet-models.sh | 67 + .../run-all-mlperf-models/run-nvidia-4090.sh | 61 + .../run-all-mlperf-models/run-nvidia-a100.sh | 59 + .../run-all-mlperf-models/run-nvidia-t4.sh | 59 + .../run-all-mlperf-models/run-pruned-bert.sh | 83 + .../run-reference-models.sh | 67 + .../run-resnet50-macos.sh | 70 + .../run-all-mlperf-models/run-resnet50.sh | 87 + .../run-all-mlperf-models/run-retinanet-sh | 86 + .../script/run-all-mlperf-models/template.sh | 66 + .../script/run-docker-container/COPYRIGHT.md | 
3 + .../run-docker-container/README-extra.md | 15 + .../script/run-docker-container/README.md | 1 + .../repo/script/run-docker-container/_cm.yaml | 70 + .../script/run-docker-container/customize.py | 390 + .../run-mlperf-inference-app/COPYRIGHT.md | 3 + .../run-mlperf-inference-app/README-extra.md | 21 + .../script/run-mlperf-inference-app/README.md | 1 + .../script/run-mlperf-inference-app/_cm.yaml | 617 ++ .../run-mlperf-inference-app/customize.py | 1030 +++ .../faq/ctuning-cpp-tflite.md | 1 + .../faq/deepsparse.md | 1 + .../run-mlperf-inference-app/faq/intel.md | 1 + .../faq/mlcommons-cpp.md | 1 + .../faq/mlcommons-python.md | 1 + .../run-mlperf-inference-app/faq/nvidia.md | 2 + .../run-mlperf-inference-app/faq/qualcomm.md | 1 + .../modular-cm-containers/README.md | 30 + .../modular-cm-containers/_common.bat | 7 + .../modular-cm-containers/_common.sh | 10 + .../modular-cm-containers/build.bat | 25 + .../modular-cm-containers/build.sh | 27 + .../mlperf-inference--ubuntu-cpu.Dockerfile | 118 + .../modular-cm-containers/run.bat | 3 + .../modular-cm-containers/run.sh | 3 + .../run-mlperf-inference-app/run_mobilenet.py | 103 + .../setup/b-deepsparse.md | 1 + .../run-mlperf-inference-app/setup/i-intel.md | 1 + .../setup/i-nvidia.md | 3 + .../setup/i-qualcomm.md | 6 + .../COPYRIGHT.md | 3 + .../README-about.md | 107 + .../README.md | 1 + .../_cm.yaml | 134 + .../customize.py | 224 + .../run.sh | 1 + .../COPYRIGHT.md | 3 + .../README-extra.md | 10 + .../README.md | 1 + .../_cm.yaml | 105 + .../code.py | 29 + .../customize.py | 135 + .../run.bat | 6 + .../run.sh | 13 + .../run-mlperf-power-client/COPYRIGHT.md | 3 + .../run-mlperf-power-client/README-extra.md | 15 + .../script/run-mlperf-power-client/README.md | 1 + .../script/run-mlperf-power-client/_cm.yaml | 41 + .../run-mlperf-power-client/customize.py | 58 + .../script/run-mlperf-power-client/dummy.sh | 12 + .../script/run-mlperf-power-client/run.sh | 14 + .../run-mlperf-power-server/COPYRIGHT.md | 3 + .../run-mlperf-power-server/README-extra.md | 17 + .../script/run-mlperf-power-server/README.md | 1 + .../script/run-mlperf-power-server/_cm.yaml | 57 + .../run-mlperf-power-server/customize.py | 103 + .../script/run-mlperf-power-server/run.bat | 7 + .../script/run-mlperf-power-server/run.sh | 5 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../_cm.yaml | 71 + .../customize.py | 54 + .../run.sh | 10 + .../repo/script/run-python/COPYRIGHT.md | 3 + .../repo/script/run-python/README.md | 1 + .../cmx4mlops/repo/script/run-python/_cm.yaml | 16 + .../cmx4mlops/repo/script/run-python/run.bat | 2 + .../cmx4mlops/repo/script/run-python/run.sh | 4 + .../repo/script/run-terraform/COPYRIGHT.md | 3 + .../repo/script/run-terraform/README-about.md | 12 + .../repo/script/run-terraform/README-extra.md | 1 + .../repo/script/run-terraform/README.md | 1 + .../repo/script/run-terraform/_cm.yaml | 320 + .../run-terraform/aws/apply_credentials.sh | 3 + .../run-terraform/aws/credentials.example | 3 + .../repo/script/run-terraform/aws/main.tf | 67 + .../repo/script/run-terraform/customize.py | 103 + .../run-terraform/gcp/apply_credentials.sh | 0 .../repo/script/run-terraform/gcp/main.tf | 80 + .../repo/script/run-terraform/run.sh | 14 + .../repo/script/run-vllm-server/COPYRIGHT.md | 3 + .../repo/script/run-vllm-server/_cm.yaml | 143 + .../repo/script/run-vllm-server/customize.py | 453 ++ .../repo/script/run-vllm-server/run.sh | 6 + .../script/runtime-system-infos/COPYRIGHT.md | 3 + .../repo/script/runtime-system-infos/_cm.yaml | 51 + 
.../script/runtime-system-infos/customize.py | 106 + .../COPYRIGHT.md | 3 + .../README.md | 1 + .../_cm.yaml | 13 + .../customize.py | 79 + .../set-device-settings-qaic/COPYRIGHT.md | 3 + .../script/set-device-settings-qaic/README.md | 1 + .../script/set-device-settings-qaic/_cm.yaml | 39 + .../set-device-settings-qaic/customize.py | 55 + .../script/set-device-settings-qaic/run.sh | 44 + .../repo/script/set-echo-off-win/COPYRIGHT.md | 3 + .../repo/script/set-echo-off-win/README.md | 1 + .../repo/script/set-echo-off-win/_cm.yaml | 14 + .../repo/script/set-echo-off-win/customize.py | 37 + .../script/set-performance-mode/COPYRIGHT.md | 3 + .../script/set-performance-mode/README.md | 1 + .../repo/script/set-performance-mode/_cm.yaml | 42 + .../script/set-performance-mode/customize.py | 36 + .../script/set-performance-mode/run-ubuntu.sh | 36 + .../repo/script/set-performance-mode/run.bat | 1 + .../repo/script/set-performance-mode/run.sh | 27 + .../repo/script/set-sqlite-dir/COPYRIGHT.md | 3 + .../repo/script/set-sqlite-dir/README.md | 1 + .../repo/script/set-sqlite-dir/_cm.yaml | 22 + .../repo/script/set-sqlite-dir/code.py | 1 + .../repo/script/set-sqlite-dir/customize.py | 21 + .../repo/script/set-sqlite-dir/run.bat | 2 + .../repo/script/set-sqlite-dir/run.sh | 4 + .../repo/script/set-user-limits/COPYRIGHT.md | 3 + .../repo/script/set-user-limits/_cm.yaml | 14 + .../repo/script/set-user-limits/customize.py | 42 + .../repo/script/set-user-limits/run.sh | 17 + .../repo/script/set-venv/COPYRIGHT.md | 3 + .../repo/script/set-venv/README-extra.md | 6 + .../cmx4mlops/repo/script/set-venv/README.md | 1 + .../cmx4mlops/repo/script/set-venv/_cm.yaml | 14 + .../repo/script/set-venv/customize.py | 112 + .../repo/script/tar-my-folder/COPYRIGHT.md | 3 + .../repo/script/tar-my-folder/README-extra.md | 12 + .../repo/script/tar-my-folder/README.md | 1 + .../repo/script/tar-my-folder/_cm.yaml | 15 + .../repo/script/tar-my-folder/customize.py | 43 + .../repo/script/test-cm-core/COPYRIGHT.md | 3 + .../repo/script/test-cm-core/README-extra.md | 1 + .../repo/script/test-cm-core/README.md | 1 + .../repo/script/test-cm-core/_cm.yaml | 14 + .../repo/script/test-cm-core/customize.py | 28 + .../repo/script/test-cm-core/run.bat | 3 + .../cmx4mlops/repo/script/test-cm-core/run.sh | 13 + .../script/test-cm-core/src/script/check.py | 16 + .../src/script/process_dockerfile.py | 33 + .../test-cm-core/src/script/process_readme.py | 27 + .../test-cm-core/src/script/test_deps.py | 25 + .../test-cm-core/src/script/test_docker.py | 39 + .../test-cm-core/src/script/test_features.py | 55 + .../test-cm-core/src/script/test_install.py | 15 + .../repo/script/test-cm-core/src/test_cm.py | 17 + .../test-cm-core/src/test_search_speed.py | 26 + .../src/tutorials/test_tutorial_retinanet.py | 37 + .../src/tutorials/test_tutorial_tvm.py | 28 + .../src/tutorials/test_tutorial_tvm_pip_ge.py | 26 + .../src/tutorials/test_tutorial_tvm_pip_vm.py | 27 + .../test-cm-script-pipeline/COPYRIGHT.md | 3 + .../test-cm-script-pipeline/README-extra.md | 8 + .../script/test-cm-script-pipeline/README.md | 1 + .../script/test-cm-script-pipeline/_cm.yaml | 37 + .../test-cm-script-pipeline/customize.py | 51 + .../script/test-cm-script-pipeline/run.bat | 5 + .../script/test-cm-script-pipeline/run.sh | 5 + .../script/test-cm-script-pipeline/run2.bat | 5 + .../script/test-cm-script-pipeline/run2.sh | 5 + .../repo/script/test-cm-scripts/COPYRIGHT.md | 3 + .../repo/script/test-cm-scripts/_cm.yaml | 31 + .../repo/script/test-cm-scripts/customize.py | 35 + 
.../repo/script/test-cm-scripts/run.bat | 1 + .../repo/script/test-cm-scripts/run.sh | 17 + .../script/test-debug/.vscode/launch.json | 22 + .../repo/script/test-debug/COPYRIGHT.md | 3 + .../repo/script/test-debug/README-extra.md | 20 + .../repo/script/test-debug/README.md | 1 + .../cmx4mlops/repo/script/test-debug/_cm.yaml | 29 + .../cmx4mlops/repo/script/test-debug/_demo.py | 9 + .../repo/script/test-debug/customize.py | 51 + .../test-debug/python/.vscode/launch.json | 23 + .../repo/script/test-debug/python/main.py | 26 + .../cmx4mlops/repo/script/test-debug/run.bat | 6 + .../cmx4mlops/repo/script/test-debug/run.sh | 8 + .../script/test-deps-conditions/COPYRIGHT.md | 3 + .../test-deps-conditions/README-extra.md | 6 + .../script/test-deps-conditions/README.md | 1 + .../repo/script/test-deps-conditions/_cm.yaml | 54 + .../script/test-deps-conditions2/COPYRIGHT.md | 3 + .../test-deps-conditions2/README-extra.md | 20 + .../script/test-deps-conditions2/README.md | 1 + .../script/test-deps-conditions2/_cm.yaml | 28 + .../COPYRIGHT.md | 3 + .../README-extra.md | 1 + .../README.md | 1 + .../_cm.yaml | 31 + .../customize.py | 35 + .../run.bat | 1 + .../run.sh | 27 + .../repo/script/test-dummy/COPYRIGHT.md | 3 + .../repo/script/test-dummy/README-extra.md | 1 + .../cmx4mlops/repo/script/test-dummy/_cm.yaml | 11 + .../repo/script/test-dummy/customize.py | 35 + .../cmx4mlops/repo/script/test-dummy/run.bat | 1 + .../cmx4mlops/repo/script/test-dummy/run.sh | 2 + .../COPYRIGHT.md | 3 + .../test-mlperf-inference-retinanet/README.md | 1 + .../test-mlperf-inference-retinanet/_cm.yaml | 28 + .../customize.py | 30 + .../test-mlperf-inference-retinanet/run.bat | 8 + .../test-mlperf-inference-retinanet/run.sh | 9 + .../script/test-set-sys-user-cm/COPYRIGHT.md | 3 + .../script/test-set-sys-user-cm/README.md | 1 + .../repo/script/test-set-sys-user-cm/_cm.yaml | 14 + .../repo/script/test-set-sys-user-cm/run.sh | 7 + .../COPYRIGHT.md | 3 + .../README-extra.md | 7 + .../README.md | 1 + .../_cm.yaml | 41 + .../customize.py | 37 + .../run.sh | 5 + .../script/upgrade-python-pip/COPYRIGHT.md | 3 + .../repo/script/upgrade-python-pip/README.md | 1 + .../repo/script/upgrade-python-pip/_cm.yaml | 16 + .../repo/script/upgrade-python-pip/run.bat | 2 + .../repo/script/upgrade-python-pip/run.sh | 4 + .../COPYRIGHT.md | 3 + .../README-extra.md | 17 + .../README.md | 1 + .../_cm.yaml | 28 + .../customize.py | 54 + .../run.sh | 0 .../cmx4mlops/repo/tests/script/check.py | 27 + .../repo/tests/script/process_dockerfile.py | 33 + .../repo/tests/script/process_readme.py | 27 + .../repo/tests/script/process_tests.py | 38 + .../cmx4mlops/repo/tests/script/test_deps.py | 25 + .../repo/tests/script/test_docker.py | 23 + .../repo/tests/script/test_features.py | 38 + .../repo/tests/script/test_install.py | 15 + cmx4mlops/cmx4mlops/repo/tests/test_cm.py | 17 + .../cmx4mlops/repo/tests/test_search_speed.py | 26 + .../tutorials/test_tutorial_retinanet.py | 37 + .../repo/tests/tutorials/test_tutorial_tvm.py | 28 + .../tutorials/test_tutorial_tvm_pip_ge.py | 26 + .../tutorials/test_tutorial_tvm_pip_vm.py | 27 + 2529 files changed, 148968 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/test-cmx-mlperf-inference-resnet50.yml create mode 100644 cmx4mlops/cmx4mlops/repo/.gitignore create mode 100644 cmx4mlops/cmx4mlops/repo/CHANGES.md create mode 100644 cmx4mlops/cmx4mlops/repo/CONTRIBUTING.md create mode 100644 cmx4mlops/cmx4mlops/repo/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/HISTORY.md create mode 100644 
cmx4mlops/cmx4mlops/repo/LICENSE.md create mode 100644 cmx4mlops/cmx4mlops/repo/LICENSE.third-party.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/cache/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/cache/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/cache/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/cache/_cm.json create mode 100644 cmx4mlops/cmx4mlops/repo/automation/cache/module.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/cache/module_misc.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/cfg/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/cfg/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/cfg/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/cfg/_cm.json create mode 100644 cmx4mlops/cmx4mlops/repo/automation/cfg/module.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/challenge/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/challenge/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/challenge/_cm.json create mode 100644 cmx4mlops/cmx4mlops/repo/automation/challenge/module.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/contributor/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/contributor/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/contributor/_cm.json create mode 100644 cmx4mlops/cmx4mlops/repo/automation/contributor/module.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/data/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/data/_cm.json create mode 100644 cmx4mlops/cmx4mlops/repo/automation/data/module.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/docker/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/docker/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/docker/_cm.json create mode 100644 cmx4mlops/cmx4mlops/repo/automation/docker/module.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/docs/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/docs/_cm.json create mode 100644 cmx4mlops/cmx4mlops/repo/automation/docs/module.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/_cm.json create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/module.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test2.bat create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test2.sh create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3.bat create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3.sh create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3_input.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__json.bat create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__json.sh create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__yaml.bat create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__yaml.sh create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_input.json create mode 100644 
cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_input.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/automation/project/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/project/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/project/_cm.json create mode 100644 cmx4mlops/cmx4mlops/repo/automation/project/module.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/report/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/report/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/report/_cm.json create mode 100644 cmx4mlops/cmx4mlops/repo/automation/report/module.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/README-specs.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/_cm.json create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/assets/scripts-workflow.png create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.build.bat create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.build.sh create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/module.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/module_help.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/module_misc.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/analyze.bat create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/analyze.sh create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/install_deps.bat create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/install_deps.sh create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/main.py create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/plot.bat create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/plot.sh create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/reproduce.bat create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/reproduce.sh create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/validate.bat create mode 100644 
cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/validate.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-python/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-python/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-python/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-python/main.py
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-python/requirements.txt
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-python/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-python/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/main.py
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/requirements.txt
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/script/template_list_of_scripts.md
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/utils/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/utils/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/utils/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/utils/module.py
create mode 100644 cmx4mlops/cmx4mlops/repo/automation/utils/module_cfg.py
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/amd-cpu-x64.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/amd-gpu.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/generic-cpu-arm64.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/google-tpu.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/habana-gaudi.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/intel-cpu-x64.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/nvidia-gpu-jetson-orin.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/nvidia-gpu.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/qualcomm-ai100.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/stm-32L4R5ZIT6U-NUCLEO-L4R5ZI.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/loadgen-cpp.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/loadgen-python.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-abtf.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-inference.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-mobile.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-tiny.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-training.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-input.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-meta.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-output.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-input.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-meta.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-output.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-input.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-meta.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-output.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-input.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-meta.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-input.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-meta.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-input.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-meta.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-output.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-input.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-meta.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-output.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985.md
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-input.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-meta.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-input.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-meta.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-output.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-input.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-meta.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-output.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf.md
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-input.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-meta.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-output.json
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v4.0/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v4.1/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-archlinux.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-rhel-9.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-20.04.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-22.04.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-23.04.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-24.04.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-20.04-cuda-11.8-cudnn-8.6.0-pytorch-1.13.0.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.1-cudnn-8.9.1-pytorch-2.0.0.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.4-cudnn-9.0.0-pytorch-2.3.0.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.5-cudnn-9.1.0-pytorch-2.4.0.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.6-cudnn-9.3.0-pytorch-2.5.0.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/add-derived-metrics-to-mlperf-inference/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/add-derived-metrics-to-mlperf-inference/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/automate-mlperf-inference-v3.1-and-v4.0-2024/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/automate-mlperf-inference-v3.1-and-v4.0-2024/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-with-medperf/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-with-medperf/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2023/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2023/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2024/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2024/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v2.1-2022/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v2.1-2022/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/crowd-benchmark-mlperf-bert-inference-cuda.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-3d-unet-submission.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-bert-submission.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-resnet50-submission.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-retinanet-submission.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-rnnt-submission.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-aws-instance.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-gcp-instance.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-nvidia-jetson-orin.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-3d-unet-submission.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-bert-submission.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-resnet50-submission.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-retinanet-submission.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-rnnt-submission.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-aws-instance.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-gcp-instance.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-nvidia-jetson-orin.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-deepsparse/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-deepsparse/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-hugging-face-models-2023/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-hugging-face-models-2023/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-intel-2023/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-intel-2023/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-tvm-2023/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-tvm-2023/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-windows-2023/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-windows-2023/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inf-v3.0-orin/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inf-v3.0-orin/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-retinanet-scc2022/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-retinanet-scc2022/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.0-2024/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.0-2024/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.1-2024/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.1-2024/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/reproduce-mlperf-training-v3.0-2023/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/reproduce-mlperf-training-v3.0-2023/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/run-cpu-bert-99-deepsparse.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/run-cpu-dse-mobilenets-efficientnets-tflite.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/run-nvidia-gpu-bert-99-nvidia-docker-tensorrt.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/run-nvidia-gpu-gpt-j-6b-ref-pytorch.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/train-llm-for-cm-mlperf-2023/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/challenge/train-llm-for-cm-mlperf-2023/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/cm-yaml-guide.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/getting-started.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/img/logo_v2.svg
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/img/pages (80).png
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/requirements.txt
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-croissant/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-cifar10/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-cnndm/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-coco/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-coco2014/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-criteo/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-aux/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-calibration/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-helper/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-train/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-val/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-kits19/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-librispeech/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages-annotations/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages-calibration/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openorca/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-squad-vocab/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-squad/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-criteo/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-generic/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-imagenet/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-librispeech/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openimages/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openorca/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-squad/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-google-saxml/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-onnxruntime-prebuilt/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-apps-sdk/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-platform-sdk/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-software-kit/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-rocm/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-tvm/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-qaic-compute-sdk-from-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-rocm/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tensorflow-for-c/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tensorflow-from-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tflite-from-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/convert-ml-model-huggingface-to-onnx/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-bert-squad-vocab/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-dlrm/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-3d-unet-kits19/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-bert-base-squad/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-bert-large-squad/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-dlrm-terabyte/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-efficientnet-lite/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-gptj/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-huggingface-zoo/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-llama2/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-mobilenet/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-neuralmagic-zoo/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-resnet50/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-retinanet-nvidia/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-retinanet/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-rnnt/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-stable-diffusion/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-tiny-resnet/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-using-imagenet-from-model-zoo/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-tvm-model/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/calibrate-model-for.qaic/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/compile-model-for.qaic/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/prune-bert-models/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/CM-Interface/get-cache-dir/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/CM-Interface/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/CM-automation/create-custom-cache-entry/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/CM-automation/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/test-debug/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/test-mlperf-inference-retinanet/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cuda-devices/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cuda/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cudnn/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-tensorrt/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/install-cuda-package-manager/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/install-cuda-prebuilt/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/destroy-terraform/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/get-aws-cli/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/get-terraform/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/install-aws-cli/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/install-terraform-from-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/run-terraform/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Collective-benchmarking/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Collective-benchmarking/launch-benchmark/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-aocl/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-cl/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-compiler-flags/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-compiler-rust/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-gcc/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-go/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-llvm/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-gcc-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-ipex-from-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-llvm-prebuilt/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-llvm-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-onednn-from-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-onnxruntime-from-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-pytorch-from-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-pytorch-kineto-from-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-torchvision-from-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-tpp-pytorch-extension/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-transformers-from-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Dashboard-automation/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Dashboard-automation/publish-results-to-dashboard/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-android-sdk/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-aria2/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-bazel/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-blis/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-brew/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmake/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmsis_5/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-docker/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-generic-sys-util/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-google-test/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-java/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-javac/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-armnn/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-dnnl/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-protobuf/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-qaic-api/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-nvidia-docker/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-openssl/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-rclone/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-cm/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-min/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-xilinx-sdk/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-zendnn/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-bazel/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-cmake-prebuilt/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-gflags/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-github-cli/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-intel-neural-speed-from-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-numactl-from-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-openssl/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/benchmark-program/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/compile-program/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/convert-csv-to-md/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/copy-to-clipboard/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/create-conda-env/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/create-patch/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/detect-sudo/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-and-extract/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-file/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-torrent/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/extract-file/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/fail/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-conda/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-git-repo/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-github-cli/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/pull-git-repo/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/push-csv-to-spreadsheet/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-device-settings-qaic/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-echo-off-win/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-performance-mode/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-sqlite-dir/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/tar-my-folder/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/build-docker-image/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/build-dockerfile/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/prune-docker/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/run-docker-container/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/GUI/gui/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/GUI/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/get-ck-repo-mlops/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/get-ck/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/add-custom-nvidia-system/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/benchmark-any-mlperf-inference-implementation/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/build-mlperf-inference-server-nvidia/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-submission/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-user-conf/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-report/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-submission/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-nvidia-engine/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-intel-scratch-space/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-loadgen/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-common-code/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-scratch-space/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results-dir/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-submission-dir/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-configs/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-description/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-logging/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-power-dev/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-eembc-energy-runner-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-nvidia-code/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-nvidia-mitten/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-spec-ptd/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-inference-to-experiment/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-tiny-to-experiment/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-training-to-experiment/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/install-mlperf-logging-from-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/prepare-training-data-bert/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/prepare-training-data-resnet/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/preprocess-mlperf-inference-submission/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/process-mlperf-accuracy/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/push-mlperf-inference-results-to-github/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-mobilenet-models/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-submission-checker/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-client/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-server/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-training-submission-checker/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/truncate-mlperf-inference-accuracy-log/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-onnx-py/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tf-onnx-cpp/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-torch-py/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tvm-onnx-py/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-stable-diffusion-onnx-py/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-dummy/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-intel/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-qualcomm/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-loadgen-generic-python/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-ctuning-cpp-tflite/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-cpp/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-python/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/benchmark-program-mlperf/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/run-mlperf-inference-app/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-nvidia/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-reference/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-application-pipeline/app-image-corner-detection/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-application-pipeline/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/detect-cpu/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/detect-os/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/activate-python-venv/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/get-generic-python-lib/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/get-python3/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-generic-conda-package/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-python-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-python-venv/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Remote-automation/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Remote-automation/remote-run-commands/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/app-mlperf-inference-nvidia/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-octoml-tinyml-results/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-training-nvidia/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/wrapper-reproduce-octoml-tinyml-submission/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/get-ipol-src/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/process-ae-users/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-ipol-paper-2022-439/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-micro-paper-2023-victima/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-any-text/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-croissant-desc/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-java/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-javac/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-py/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-python-version/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/run-python/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-cm-core/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-cm-script-pipeline/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-deps-conditions/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-deps-conditions2/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-download-and-extract-artifacts/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-set-sys-user-cm/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/upgrade-python-pip/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/create-fpgaconvnet-app-tinyml/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/create-fpgaconvnet-config-tinyml/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/flash-tinyml-binary/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-microtvm/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-zephyr-sdk/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-zephyr/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/docs/scripts/index.md
create mode 100644 cmx4mlops/cmx4mlops/repo/mkdocs.yml
create mode 100644 cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-analysis-ctuning/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-analysis-ctuning/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-press-release-ctuning/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-press-release-hpcwire/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v4.0-press-release-ctuning/_cm.json
create mode 100644 cmx4mlops/cmx4mlops/repo/requirements.txt
create mode 100644 cmx4mlops/cmx4mlops/repo/script/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/activate-python-venv/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/activate-python-venv/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/activate-python-venv/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/activate-python-venv/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/activate-python-venv/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/activate-python-venv/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/activate-python-venv/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/img/computer_mouse.jpg
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/requirements.txt
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/src/onnx_classify.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/tests/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/include/benchmark.h
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/src/classification.cpp
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/img/computer_mouse.jpg
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/requirements.txt
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/img/computer_mouse.jpg
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/requirements.txt
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/src/classify.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/susan.c
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/backend_onnxruntime.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/backend_pytorch.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/harness.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/model.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/runners.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/main.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/utils.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/run-llama2.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_bert_harness.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_gptj_harness.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_resnet50_harness.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_retinanet_harness.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_sdxl_harness.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/compile_resnet50.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/compile_retinanet.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/prepare_3d-unet_data_model.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/prepare_imagenet_calibration.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_3d-unet_harness.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_bert_harness.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_dlrm_v2_harness.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_gptj_harness_v3_1.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_gptj_harness_v4_0.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_resnet50_harness.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_retinanet_harness.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_sdxl_harness.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/CONTRIBUTING.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/backend.h
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/common.h
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/device.h
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/gpu_device.h
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/model.h
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/npy.h
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/onnxruntime_backend.h
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/sample_library.h
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/system.h
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/tflite_backend.h
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/src/main.cpp
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/tests/win.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/README-about.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/README-about.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/README-about.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/build_dockerfiles.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/run_config.yml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/verify_accuracy.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/run-bert-training.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/run-bert-training.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/process.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/run-template.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-program/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-program/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-program/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-program/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-program/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-program/run-ubuntu.sh
create mode 100644
cmx4mlops/cmx4mlops/repo/script/benchmark-program/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/benchmark-program/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/0-common.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/0-generate.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/1-build.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command1.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command2.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command3.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command4.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command5.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-interactive1.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-interactive2.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/3-push-to-docker-hub.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-22.04-20230804 create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-20230804 create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-latest create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-script-app-image-classification-onnx-py-ubuntu-23.04-latest create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/computer_mouse.jpg create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/extra-cmd.cm-script-app-image-classification-onnx-py create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-docker-image/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-dockerfile/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-dockerfile/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-dockerfile/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-dockerfile/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-dockerfile/customize.py create mode 100644 
cmx4mlops/cmx4mlops/repo/script/build-dockerfile/dockerinfo.json create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/compile-program/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/compile-program/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/compile-program/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/compile-program/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/compile-program/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/compile-program/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/compile-program/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/process.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/customize.py create mode 100644 
cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/code.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-conda-env/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-conda-env/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-conda-env/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-conda-env/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-conda-env/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-patch/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-patch/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-patch/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-patch/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/create-patch/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/destroy-terraform/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/destroy-terraform/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/destroy-terraform/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/destroy-terraform/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/destroy-terraform/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/destroy-terraform/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/destroy-terraform/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-cpu/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-cpu/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-cpu/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-cpu/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-cpu/customize.py create mode 100644 
cmx4mlops/cmx4mlops/repo/script/detect-cpu/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-cpu/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-os/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-os/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-os/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-os/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-os/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-os/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-os/run_config.yml create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-sudo/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-sudo/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-sudo/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-sudo/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/detect-sudo/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-and-extract/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-and-extract/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-and-extract/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-and-extract/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-and-extract/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-and-extract/tests/download-and-extract-file.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-and-extract/tests/download-and-extract-file2.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-file/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-file/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-file/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-file/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-file/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-file/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-file/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-file/tests/download-file.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-file/tests/download-file2.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-torrent/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-torrent/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-torrent/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-torrent/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/download-torrent/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/process-cm-deps.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/_cm.yaml create mode 100644 
cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/dump.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/extract-file/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/extract-file/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/extract-file/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/extract-file/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/extract-file/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/extract-file/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/extract-file/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/fail/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/fail/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/fail/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/fail/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/fail/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-docs-for-all-scripts.cmd create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/default_files/analyzer_table.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/default_files/power_settings.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/sample-cm-sut-info.json create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/run_submission_checker.bat create mode 
100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/run_submission_checker.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/README-about.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-android-sdk/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-android-sdk/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-android-sdk/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-android-sdk/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-android-sdk/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-android-sdk/prepare-sdk-manager.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-android-sdk/prepare-sdk-manager.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aocl/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aocl/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aocl/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aocl/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aocl/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aocl/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aria2/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aria2/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aria2/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aria2/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aria2/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aria2/install.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aria2/install.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aria2/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aria2/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aws-cli/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aws-cli/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aws-cli/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aws-cli/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aws-cli/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-aws-cli/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-bazel/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-bazel/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-bazel/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-bazel/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-bazel/customize.py create 
mode 100644 cmx4mlops/cmx4mlops/repo/script/get-bazel/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-bazel/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-blis/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-blis/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-blis/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-blis/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-blis/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-blis/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-blis/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-brew/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-brew/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-brew/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-brew/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cache-dir/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cache-dir/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cache-dir/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cache-dir/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ck/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ck/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ck/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ck/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ck/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cl/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cl/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cl/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cl/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cl/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cl/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cmake/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cmake/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cmake/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cmake/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cmake/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cmake/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/customize.py 
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-conda/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-conda/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-conda/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-conda/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-conda/install.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-conda/install.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-conda/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-conda/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-croissant/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-croissant/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-croissant/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-croissant/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-croissant/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-croissant/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/detect.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/detect.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/print_cuda_devices.cu create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda/README-about.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cuda/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cudnn/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cudnn/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cudnn/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cudnn/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cudnn/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-cudnn/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/requirements.txt 
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/run-intel.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-aux/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-aux/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-aux/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-calibration/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-calibration/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-calibration/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/COPYRIGHT.md create mode 100644 
cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/generate-test-dataset.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/filter.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/run-filter.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/_cm.yaml create 
mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/checksums.txt create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dlrm/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dlrm/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dlrm/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dlrm/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dlrm/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-dlrm/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-docker/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-docker/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-docker/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-docker/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-docker/install-centos.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-docker/install-ubuntu.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-docker/install.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-docker/install.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-docker/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-docker/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-gcc/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-gcc/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-gcc/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-gcc/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-gcc/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-gcc/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-gcc/run.sh create mode 
100644 cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/detect-version.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/install.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/install.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/tensorflow/run-aarch64.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/tensorflow/run-macos.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/uninstall_deps.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/validate_cache.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/validate_cache.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/detect.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/install-with-retry.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/install.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-git-repo/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-git-repo/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-git-repo/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-git-repo/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-git-repo/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-git-repo/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-git-repo/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-github-cli/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-github-cli/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-github-cli/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-github-cli/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-github-cli/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-github-cli/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-go/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-go/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-go/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-go/_cm.yaml create mode 100644 
cmx4mlops/cmx4mlops/repo/script/get-go/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-go/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-google-saxml/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-google-saxml/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-google-saxml/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-google-saxml/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-google-saxml/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-google-saxml/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-google-test/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-google-test/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-google-test/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-google-test/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-google-test/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ipol-src/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ipol-src/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ipol-src/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ipol-src/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ipol-src/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ipol-src/patch/20240127.patch create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-java/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-java/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-java/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-java/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-java/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-java/install-prebuilt.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-java/install-prebuilt.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-java/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-java/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-javac/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-javac/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-javac/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-javac/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-javac/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-javac/install-prebuilt.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-javac/install-prebuilt.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-javac/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-javac/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/run.sh create mode 100644 
cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/master/QAicInfApi.cpp
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/master/QAicInfApi.h
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-llvm/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-llvm/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-llvm/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-llvm/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-llvm/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-llvm/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-llvm/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-microtvm/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-microtvm/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-microtvm/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-microtvm/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-microtvm/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-microtvm/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-base-squad/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-base-squad/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-base-squad/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/run-packed.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/convert_gptj_ckpt.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-int4-calibration.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-intel.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-nvidia.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-saxml-quantized.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-saxml.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/download_model.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/run-amd.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/run-nvidia.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-mixtral/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-mixtral/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-mixtral/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/download_sparse.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/run-fix-input.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/run_config.yml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/polygraphy_script.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/node-precision-info.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/run-no-nms.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-rgat/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-rgat/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-rgat/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/run_config.yml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/tests/download-and-install.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/coco.patch
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/git.patch
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/openimages-pycocotools.patch
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/windows-openimages.patch
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/windows-openimages2.patch
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/IntelSPR.24c/intel-implementation/cpu-device/pytorch-framework/default-config.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/IntelSPR.24c/intel-implementation/cpu-device/pytorch-framework/framework-version-default/default-config.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x1/nvidia_original-implementation/gpu-device/tensorrt-framework/default-config.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x1/nvidia_original-implementation/gpu-device/tensorrt-framework/framework-version-default/default-config.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x2/nvidia_original-implementation/gpu-device/tensorrt-framework/default-config.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x2/nvidia_original-implementation/gpu-device/tensorrt-framework/framework-version-default/default-config.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/default/config.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/default/default/default-config.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/detect_memory.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/get_memory_info.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/hardware/default.json
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/mlperf_utils.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/patch/cpu_load.patch
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/patch/nvidia-retinanet.patch
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/run-ubuntu.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-openssl/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-openssl/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-openssl/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-openssl/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-openssl/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-openssl/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-platform-details/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-platform-details/README-EXTRA.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-platform-details/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-platform-details/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-platform-details/run-macos.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-platform-details/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-platform-details/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/checksums.txt
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/preprocess.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/preprocess_multihot.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/run-multihot.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/src/generic_preprocess.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/preprocess.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/nvidia_preprocess.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/preprocess.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/run-packed.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-python3/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-python3/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-python3/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-python3/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-python3/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-python3/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-python3/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone-config/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone-config/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone-config/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone-config/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone-config/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone/configs/rclone.conf
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone/install-system-macos.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone/install-system.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone/install.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone/install.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rclone/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/detect.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/detect.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rocm/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rocm/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rocm/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rocm/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-rocm/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/do_pip_installs.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/do_pip_installs.sh.old
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/requirements.txt
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-arch.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-debian.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-macos.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-rhel.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-sles.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-ubuntu.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tensorrt/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tensorrt/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tensorrt/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tensorrt/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tensorrt/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tensorrt/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-terraform/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-terraform/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-terraform/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-terraform/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-terraform/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-terraform/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tvm-model/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tvm-model/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tvm-model/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tvm-model/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tvm-model/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tvm-model/process.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tvm-model/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tvm/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tvm/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tvm/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tvm/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tvm/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-tvm/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zendnn/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zendnn/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zendnn/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zendnn/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zendnn/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zendnn/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zephyr/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zephyr/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zephyr/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zephyr/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zephyr/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zephyr/run-ubuntu.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/get-zephyr/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/README-about.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/app.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/graph.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/install/linux.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/install/macos.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/install/redhat.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/install/windows.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/misc.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/playground.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/playground_apps.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/playground_beta.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/playground_beta_README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/playground_challenges.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/playground_challenges_with_prizes.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/playground_contributors.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/playground_howtorun.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/playground_install.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/playground_reports.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/playground_reproduce.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/playground_scripts.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/script.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/tests/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/tests/generate_password.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/tests/test.cmd
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/tests/test2.cmd
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/tests/test3.cmd
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/tests/test4.cmd
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/tests/test4a.cmd
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/tests/test4b.cmd
create mode 100644 cmx4mlops/cmx4mlops/repo/script/gui/tests/test5.cmd
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-experiment-to-sqlite/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/assets/cm-visualization-and-customization-of-tinymlperf-results2.png
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/run_mlperf_logger.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-apt-package/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-apt-package/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-apt-package/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-apt-package/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-aws-cli/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-aws-cli/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-aws-cli/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-aws-cli/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-aws-cli/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-bazel/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-bazel/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-bazel/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-bazel/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-bazel/run-aarch64.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-bazel/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-bazel/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/run-ubuntu.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-gcc-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-gcc-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-gcc-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-gcc-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-gcc-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-gflags/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-gflags/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-gflags/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-gflags/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-gflags/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-github-cli/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-github-cli/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-github-cli/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-github-cli/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-github-cli/run-macos.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-github-cli/run-rhel.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-github-cli/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-github-cli/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/apply_intel_3d-unet_patch.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/apply_intel_resnet50_patch.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/apply_intel_retinanet_patch.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-llvm-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-llvm-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-llvm-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-llvm-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-llvm-src/install-llvm-16-intel-mlperf-inference.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-llvm-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/run-ubuntu.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/run-intel-mlperf-inference-bert.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/run-intel-mlperf-inference.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-openssl/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-openssl/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-openssl/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-openssl/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-openssl/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-python-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-python-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-python-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-python-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-python-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-python-venv/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-python-venv/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-python-venv/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-python-venv/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-python-venv/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-python-venv/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/run-intel-mlperf-inference-vision.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-rocm/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-rocm/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-rocm/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-rocm/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-rocm/run-rhel.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-rocm/run-ubuntu.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-rocm/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/launch-benchmark/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/launch-benchmark/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/launch-benchmark/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/launch-benchmark/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/launch-benchmark/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/launch-benchmark/tests/debug.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run-nvidia.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run-reference.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run_config.yml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/run-nvidia.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/run-reference.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/run_config.yml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-any-text/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-any-text/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-any-text/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-any-text/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-any-text/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-any-text/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/code.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/code.java
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/code.java
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/app.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-hello-world/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-python-version/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-python-version/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-python-version/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-python-version/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/print-python-version/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/process-ae-users/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/process-ae-users/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/process-ae-users/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/process-ae-users/code.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/process-ae-users/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/process-ae-users/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/process-ae-users/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prune-bert-models/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prune-bert-models/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prune-bert-models/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prune-bert-models/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prune-bert-models/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prune-bert-models/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prune-docker/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prune-docker/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prune-docker/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prune-docker/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/prune-docker/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/code.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/pull-git-repo/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/pull-git-repo/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/pull-git-repo/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/pull-git-repo/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/pull-git-repo/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/google_api.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/remote-run-commands/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/remote-run-commands/README-extra.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/remote-run-commands/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/remote-run-commands/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/remote-run-commands/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/remote-run-commands/run.bat
create mode 100644 cmx4mlops/cmx4mlops/repo/script/remote-run-commands/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_cuda.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_driver.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_pytorch.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_transformers.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run_figure11.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run_figure12.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run_figure13.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/README.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/_cm.yaml
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/customize.py
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/install_deps.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/install_spec_deps.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/plot.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/run.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/run_spec.sh
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/COPYRIGHT.md
create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/README.md
create mode
100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/install_deps.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/install_deps_gem5.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/install_deps_kernel.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/plot.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/install_deps.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/install_deps.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/plot.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/plot.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/install_deps.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/install_deps.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/main.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/.gitignore create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/check.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/install_deps.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/install_deps.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/main.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/plot.bat create mode 100644 
cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/plot.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/plot_pregenerated.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/Dockerfile create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/install_deps.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/plot.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_build_onikiri.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_create_binary.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_experiment.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_experiment_setup.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_plot.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/build_compiler.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/build_onikiri.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/create_binary.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/experiment.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/experiment_setup.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/install_deps.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/plot.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/install_deps.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/main.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/plot.sh create mode 100644 
cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/requirements.txt create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-inference-dummy/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/run-resnet.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-bert-macos.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-bert.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-cpp-implementation.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-mobilenet-models.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-nvidia-4090.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-nvidia-a100.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-nvidia-t4.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-pruned-bert.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-reference-models.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-resnet50-macos.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-resnet50.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-retinanet-sh create mode 100644 
cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/template.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-docker-container/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-docker-container/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-docker-container/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-docker-container/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-docker-container/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/ctuning-cpp-tflite.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/deepsparse.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/intel.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/mlcommons-cpp.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/mlcommons-python.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/nvidia.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/qualcomm.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/_common.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/_common.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/build.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/build.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/mlperf-inference--ubuntu-cpu.Dockerfile create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/run_mobilenet.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/b-deepsparse.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/i-intel.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/i-nvidia.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/i-qualcomm.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/README-about.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/customize.py create mode 100644 
cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/code.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/dummy.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-python/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-python/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-python/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-python/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-python/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-terraform/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-terraform/README-about.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-terraform/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-terraform/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-terraform/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-terraform/aws/apply_credentials.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-terraform/aws/credentials.example create mode 100644 
cmx4mlops/cmx4mlops/repo/script/run-terraform/aws/main.tf create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-terraform/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-terraform/gcp/apply_credentials.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-terraform/gcp/main.tf create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-terraform/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-vllm-server/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-vllm-server/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-vllm-server/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/run-vllm-server/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/runtime-system-infos/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/runtime-system-infos/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/runtime-system-infos/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-performance-mode/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-performance-mode/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-performance-mode/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-performance-mode/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-performance-mode/run-ubuntu.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-performance-mode/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-performance-mode/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/code.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-user-limits/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-user-limits/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-user-limits/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-user-limits/run.sh create mode 100644 
cmx4mlops/cmx4mlops/repo/script/set-venv/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-venv/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-venv/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-venv/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/set-venv/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/tar-my-folder/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/tar-my-folder/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/tar-my-folder/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/tar-my-folder/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/tar-my-folder/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/check.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/process_dockerfile.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/process_readme.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_deps.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_docker.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_features.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_install.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/test_cm.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/test_search_speed.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_retinanet.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_tvm.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_ge.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_vm.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run2.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run2.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/run.bat 
create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-debug/.vscode/launch.json create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-debug/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-debug/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-debug/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-debug/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-debug/_demo.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-debug/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-debug/python/.vscode/launch.json create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-debug/python/main.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-debug/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-debug/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-dummy/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-dummy/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-dummy/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-dummy/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-dummy/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-dummy/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/_cm.yaml create mode 100644 
cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/run.bat create mode 100644 cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/COPYRIGHT.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/README.md create mode 100644 cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/_cm.yaml create mode 100644 cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/customize.py create mode 100644 cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/run.sh create mode 100644 cmx4mlops/cmx4mlops/repo/tests/script/check.py create mode 100644 cmx4mlops/cmx4mlops/repo/tests/script/process_dockerfile.py create mode 100644 cmx4mlops/cmx4mlops/repo/tests/script/process_readme.py create mode 100644 cmx4mlops/cmx4mlops/repo/tests/script/process_tests.py create mode 100644 cmx4mlops/cmx4mlops/repo/tests/script/test_deps.py create mode 100644 cmx4mlops/cmx4mlops/repo/tests/script/test_docker.py create mode 100644 cmx4mlops/cmx4mlops/repo/tests/script/test_features.py create mode 100644 cmx4mlops/cmx4mlops/repo/tests/script/test_install.py create mode 100644 cmx4mlops/cmx4mlops/repo/tests/test_cm.py create mode 100644 cmx4mlops/cmx4mlops/repo/tests/test_search_speed.py create mode 100644 cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_retinanet.py create mode 100644 cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_tvm.py create mode 100644 cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_tvm_pip_ge.py create mode 100644 cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_tvm_pip_vm.py diff --git a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml new file mode 100644 index 000000000..3980fa945 --- /dev/null +++ b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml @@ -0,0 +1,66 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: MLPerf inference ResNet50 via CMX + +on: + pull_request_target: + branches: [ "main", "master", "dev", "mlperf-inference" ] + paths: + - '.github/workflows/test-cmx-mlperf-inference-resnet50.yml' + - '**' + - '!**.md' +jobs: + build: + runs-on: ${{ matrix.os }} + env: + CM_INDEX: "on" + 
strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: [ "3.12" ] + backend: [ "onnxruntime", "tf" ] + implementation: [ "python", "cpp" ] + exclude: + - backend: tf + implementation: cpp + - os: macos-latest + backend: tf + - os: windows-latest + implementation: cpp + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Configure git longpaths (Windows) + if: matrix.os == 'windows-latest' + run: | + git config --system core.longpaths true + - name: Install dependencies + run: | + pip install cmx4mlops + - name: Test MLPerf Inference ResNet50 (Windows) + if: matrix.os == 'windows-latest' + run: | + cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet + - name: Test MLPerf Inference ResNet50 (Linux/macOS) + if: matrix.os != 'windows-latest' + run: | + cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet + - name: Push Results + if: github.repository_owner == 'ctuning' + env: + USER: "GitHub Action" + EMAIL: "admin@cTuning.org" + GITHUB_TOKEN: ${{ secrets.TEST_RESULTS_GITHUB_TOKEN }} + run: | + git config --global user.name "${{ env.USER }}" + git config --global user.email "${{ env.EMAIL }}" + git config --global credential.https://github.com.helper "" + git config --global credential.https://github.com.helper "!gh auth git-credential" + git config --global credential.https://gist.github.com.helper "" + git config --global credential.https://gist.github.com.helper "!gh auth git-credential" + cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/ctuning/test_mlperf_inference_submissions --repo_branch=main --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet diff --git a/cm/cmind/__init__.py b/cm/cmind/__init__.py index a90f5ed8e..3c2df1265 100644 --- a/cm/cmind/__init__.py +++ b/cm/cmind/__init__.py @@ -9,7 +9,7 @@ # White paper: https://arxiv.org/abs/2406.16791 # Project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md -__version__ = "3.5.3" +__version__ = "3.5.3.1" from cmind.core import access from cmind.core import x diff --git a/cm/cmind/core.py b/cm/cmind/core.py index 3f5e9cbcf..9f56005f4 100644 --- a/cm/cmind/core.py +++ b/cm/cmind/core.py @@ -1125,6 +1125,7 @@ def _x(self, i, control): print (' -raise - raise Python error when automation action fails') print (' -time - print elapsed time for a given automation') print (' -profile - profile a given automation') + print (' -i - print info about available memory and disk space') print (' -repro - record various info to the cmx-repro directory to replay CMX command') print ('') print ('Check https://github.com/mlcommons/ck/tree/master/cm/docs/cmx for more details.') diff --git a/cmx4mlops/cmx4mlops/repo/.gitignore b/cmx4mlops/cmx4mlops/repo/.gitignore new file mode 100644 index 000000000..96523fae4 --- /dev/null +++
b/cmx4mlops/cmx4mlops/repo/.gitignore @@ -0,0 +1,20 @@ +build +build/* +MANIFEST +*.pyc +__pycache__ +develop-eggs/ +dist/ +eggs/ +.eggs/ +lib/ +lib64/ +sdist/ +wheels/ +.cache/ +.coverage +htmlcov +*tmp/ +*tmp-ck-*/ +local/cache/ + diff --git a/cmx4mlops/cmx4mlops/repo/CHANGES.md b/cmx4mlops/cmx4mlops/repo/CHANGES.md new file mode 100644 index 000000000..8d7b12267 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/CHANGES.md @@ -0,0 +1,247 @@ +### 20241123 + [November 2024 release of cm4mlops](https://github.com/mlcommons/cm4mlops/discussions/590) + +### 20240927 + * added "test dummy" script to test Docker containers + * added more standard Nvidia Docker configuration for PyTorch + * added better support to select Docker configurations via UID + +### 20240916 + * fixed "cm add script" + +### 20240429 + * Added `cm set cfg` automation. For example, we can set default CM script to silent + using `cm set cfg default --key.script.silent` + * Added key `script_entry_repo_to_report_errors` to CM script meta + to customize where to report errors instead of the default repo. + For example, issues with the CM script `run-mlperf-inference-app` + should be reported at https://github.com/mlcommons/inference . + * Added saving running script from different deps without CM to tmp-run-without-cm.bat. + Example: `cmr "app image corner-detection" --debug-script-tags=benchmark,program` + * Generate Docker container sample during --repro (prototype) + + +### 20240427 + * Added cm run script --print_versions to print resolved versions of dependencies at the end of the run + +### 20240426 + * Improved cm run script --repro function to dump dependencies, versions and generate README + +### 20240425 + * CM script automation: fixed dumping versions (variations didn't have _ prefix) + +--- + +Since March 2023, all updates to CM automations are submitted via PRs. +You can follow our PRs at +* https://github.com/ctuning/mlcommons-ck/commits/master +* https://github.com/mlcommons/ck/pulls?q=is%3Apr+is%3Aclosed . 
+ +--- + +### 20230214 + * experiment and graph gui are working now + +### 20230206 + * started prototyping cm run experiment + +### 20230123 + * added simple GUI to CM scripts + +### 20221206 + * added "script_name" to the CM "script" meta to specify any native script name + * added "--script_name" to "cm add script {alias} --script_name=my-native-script.sh" + * added CM_SCRIPT_EXTRA_CMD to force some flags to all scripts + +### 20221202 + * major updates for Windows (CL, CUDA, etc) + +### 20221111 + * various fixes for Student Cluster Competition at SuperComputing'22 + +### 20221110 + * added support to push MLPerf results to W&B dashboard + +### 20221103 + * added "cm json2yaml utils" and "cm yaml2json utils" + +### 20221024 + * added --verbose and --time to "cm run script" + +### 20221017 + * removed the need for echo-off script + +### 20221010 + * added cm run script --debug-script-tags to run cmd/bash before native script + * added cm run script --shell to set env and run shell after script execution + +### 20221007 + * added script template (used when adding new scripts) + * major clean up of all scripts + +### 20220916 + * treat alias as tags if spaces: + cm run script "get compiler" is converted to cm run script --tags=get,compiler + * improved gcc detection + * refactored "cm run script" to skip deps in cache if needed + +### 20220906 + * added --print_env flag to "cm run script" to print aggregated env + before running native scripts + * various fixes to support MLPerf automation + +### 20220823 + * various fixes for universal MLPerf inference submission automation + +### 20220803 + * various fixes for TVM and image classification + +### 20220802 + * added "run_script_after_post_deps" to script meta to run script after post deps + (useful to activate python virtual env) + * added "activate-python-venv" script to make it easier to debug Python deps installation + +### 20220722 + * added --accept-license and --skip-system-deps + (converted to env CM_ACCEPT_LICENSE ("True") and CM_SKIP_SYSTEM_DEPS ("True")) + +### 20220719 + * moved relatively stable MLOps automation scripts here + +### 20220718 + * fixed local_env_keys in get-python3 + * added new_env_only_keys to meta to specify which env to keep + * fixed problem with adding tags from the selected script during caching + * added --skip-compile and --skip-run to script (converted to env CM_SKIP_COMPILE and CM_SKIP_RUN) + * fixed local_env_keys in get-python3 + * added new_env_only_keys to get-python3 + +### 20220713 + * added local_env_keys to meta + * added "env" dict to os_info + +### 20220712 + * major script refactoring to support cache tags update from deps + * fixed version min/max propagations in deps + * improvements to support tags from deps + * added tags from deps (python, llvm) + +### 20220708 + * various fixes to handle versions (min/max/default) + * various fixes to avoid contamination of ENV from other scripts + +### 20220705 + * fixes for remembered selections + * added --skip-remembered-selections to "cm run script" + +### 20220704 + * fixed a bug with searching for scripts with variations + * added the possibility to update deps from pre/post processing + * added --extra-cache-tags and --name for "cm run script" + * added prototype of selection caching + * fixed get-python-venv + +### 20220701 + * added dummy "cm test script" + * added "--env" to "cm show cache" to show env and state + * added "cm show cache" + +### 20220629 + *
added "detect_version_using_script" in script used to detect python packages + * major fix to properly support multiple scripts with the same tags, caching, selection, etc + * fixed a bug in version comparison (converting string to int) + * added recording of "version" to cache meta + +### 20220628 + * fixed local_env with deps + +### 20220623 + * important update of versions logic + +### 20220621 + * added support for --quiet + * changed CM_NEED_VERSION to CM_VERSION + * added CM_VERSION_MIN, CM_VERSION_MAX + * added cm compare_versions utils --version1=... --version2=... + * added support to detect min/max/correct versions + +### 20220617 + * fixed logic to handle variations (-_): https://github.com/mlcommons/ck/issues/243 + +### 20220616 + * changed "cached" to "cache" automation + +### 20220615 + * major update of script (remove parallel env/new_env and state/new_state). + keep global env & state and detect changes automatically + * major simplification of "script" + * removed "installed" to be more understandable + * added "cached" to be more understandable + +### 20220609 + * added "versions" key to the CM script meta + it works similar to "variations" and is forced by --version + * changed "ic" to "script" in "experiment" automation + +### 20220608 + * updated "variations" logic in "script"! + meta['default_variation'] (str): only one of many + meta['default_variations'] (list): multiple choices + * deprecated "ic" automation. Use "script" instead! + +### 20220607 + * added strip_folders to utils/unzip_file + * fixed minor bugs in CM script + +### 20220606 + * added "name" key to deps (list of names and UIDs) + * added "add_deps_tags" in variations and in CMD ({"name":"tag(s)"}) + * added "deps" to variations to be merged with the list of current deps + * added --input and --output for cm run script converted to env CM_INPUT and CM_OUTPUT + useful to create interactive CM scripts to process files + * Added prototype-test-deps-variations-tags to play with deps, variations, tags + +### 20220605 + * clean tmp files in "script" automation by default and keep them using --dirty flag + +### 20220603 + * added "skip" and "deps" to postprocess to call other scripts. + For example call install LLVM if detect LLVM fails... 
+ * added "script" automation to replace the less intuitive "ic" + * Improved LLVM detection and installation + * Added example of image corner detection + * Added updated script entries + +### 20220601 + * added version, path, skip_install and post_deps to IC + * added --new to IC to detect new components + * Updated mechanisms to install and/or detect LLVM + * added support to install prebuilt LLVM for Linux, macOS, Windows + +### 20220530 + * updated ic automation to read tmp-run-state.json + and merge it with the "new_state" dict + +### 20220524 + * changed directory ck2-repo-mlops to cm-devops + +### 20220517 + * Changed CM_PATH_LIST to +PATH + * Added general support for +ENV that is expanded to ENV=val1;val2;...:${ENV} + +### 20220511 + * Improved handling of exceptions in utils.download_file + * Added support for variations in intelligent components (ic) + * Fixed bugs in IC + * Added "_" prefix in tags to specify a variation of IC + * Record env.sh in "installed" artifacts even if bat file is not executed + * Fixed experiment directory naming on Windows + * Added "cm version ic" (#233) + * Added prototype of ic::prototype-get-ml-model-resnet50-onnx with variations + * Added prototype of ic::prototype-get-imagenet-val with variations + * Added prototype of ic::prototype-get-imagenet-aux with variations + * Added prototype of ic::prototype-get-llvm + * Added prototype of ic::prototype-get-tvm diff --git a/cmx4mlops/cmx4mlops/repo/CONTRIBUTING.md b/cmx4mlops/cmx4mlops/repo/CONTRIBUTING.md new file mode 100644 index 000000000..79d407acd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/CONTRIBUTING.md @@ -0,0 +1 @@ +https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md diff --git a/cmx4mlops/cmx4mlops/repo/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/COPYRIGHT.md new file mode 100644 index 000000000..5aa453ab8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/COPYRIGHT.md @@ -0,0 +1,5 @@ +# Copyright Notice + +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort. diff --git a/cmx4mlops/cmx4mlops/repo/HISTORY.md b/cmx4mlops/cmx4mlops/repo/HISTORY.md new file mode 100644 index 000000000..4921bc0b9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/HISTORY.md @@ -0,0 +1,127 @@ +This document narrates the history of the creation and design of CM, CM4MLOps and MLPerf automations (also known as CK2) +by [Grigori Fursin](https://cKnowledge.org/gfursin). It also highlights the donation of this open-source technology to MLCommons, +aimed at benefiting the broader community and fostering its ongoing development as a collaborative, community-driven initiative: + +* Jan 28, 2021: After delivering an invited ACM TechTalk'21 about the Collective Knowledge framework (CK1) + and reproducibility initiatives for conferences, as well as CK-MLOps and MLPerf automations, + Grigori received useful feedback and suggestions for improvements to workflow automations: + https://learning.acm.org/techtalks/reproducibility. + + Following this, Grigori began prototyping CK2 (later CM) to streamline CK1, CK-MLOps and MLPerf benchmarking. + The goal was to dramatically simplify CK1 workflows by introducing just a few core and portable automations, + which eventually evolved into `CM script` and `CM cache`.
+ + At that time, the cTuning foundation hosted CK1 and all the prototypes for the CM framework at https://github.com/ctuning/ck: + [ref1](https://github.com/mlcommons/ck/commit/9e57934f4999db23052531e92160772ab831463a), + [ref2](https://github.com/mlcommons/ck/tree/9e57934f4999db23052531e92160772ab831463a), + [ref3](https://github.com/mlcommons/ck/tree/9e57934f4999db23052531e92160772ab831463a/incubator). + +* Sep 23, 2021: donated CK1, CK-MLOps, MLPerf automations and early prototypes of CM from the cTuning repository to MLCommons: + [ref1](https://web.archive.org/web/20240803140223/https://octo.ai/blog/octoml-joins-the-community-effort-to-democratize-mlperf-inference-benchmarking), + [ref2](https://github.com/mlcommons/ck/tree/228f80b0bf44610c8244ff0c3f6bec5bbd25aa6c/incubator), + [ref3](https://github.com/mlcommons/ck/tree/695c3843fd8121bbdde6c453cd6ec9503986b0c6?tab=readme-ov-file#author-and-coordinator), + [ref4](https://github.com/mlcommons/ck/tree/master/ck), + [ref5](https://github.com/mlcommons/ck-mlops). + + Prepared MLCommons proposal for the creation of the [MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md), + aimed at fostering community-driven support for CK and CM developments to benefit everyone. + +* Jan 2022: hired Arjun Suresh at OctoML to support and maintain the CK1 framework and help prepare OctoML's MLPerf submissions using CK1. + Meanwhile, transitioned to focusing on CM and CM-MLOps development, building upon the prototypes created in 2021. + +* Mar 1, 2022: started developing cm-mlops: [ref](https://github.com/octoml/cm-mlops/commit/0ae94736a420dfa84f7417fc62d323303b8760c6). + +* Mar 24, 2022: after successfully stabilizing the initial prototype of CM, donated it to MLCommons to benefit the entire community: + [ref1](https://github.com/mlcommons/ck/tree/c7918ad544f26b6c499c2fc9c07431a9640fca5a/ck2), + [ref2](https://github.com/mlcommons/ck/tree/c7918ad544f26b6c499c2fc9c07431a9640fca5a/ck2#coordinators), + [ref3](https://github.com/mlcommons/ck/commit/3c146cb3c75a015363f7a96758adf6dcc43032d6), + [ref4](https://github.com/mlcommons/ck/commit/3c146cb3c75a015363f7a96758adf6dcc43032d6#diff-d97f0f6f5a32f16d6ed18b9600ffc650f7b25512685f7a2373436c492c6b52b3R48). + +* Apr 6, 2022: started transitioning previous MLOps and MLPerf automations from the mlcommons/ck-mlops format + to the new CM format using the cm-mlops repository (later renamed to cm4mlops): + [ref1](https://github.com/octoml/cm-mlops/commit/d1efdc30fb535ce144020d4e88f3ed768c933176), + [ref2](https://github.com/octoml/cm-mlops/blob/d1efdc30fb535ce144020d4e88f3ed768c933176/CONTRIBUTIONS). + +* Apr 22, 2022: began architecting "Intelligent Components" in the CM-MLOps repository, + which would later be renamed to `CM Script`: + [ref1](https://github.com/octoml/cm-mlops/commit/b335c609c47d2c547afe174d9df232652d57f4f8), + [ref2](https://github.com/octoml/cm-mlops/tree/b335c609c47d2c547afe174d9df232652d57f4f8), + [ref3](https://github.com/octoml/cm-mlops/blob/b335c609c47d2c547afe174d9df232652d57f4f8/CONTRIBUTIONS). + + At the same time, prototyped other core CM automations, including IC, Docker, and Experiment: + [ref1](https://github.com/octoml/cm-mlops/tree/b335c609c47d2c547afe174d9df232652d57f4f8/automation), + [ref2](https://github.com/mlcommons/ck/commits/master/?before=7f66e2438bfe21b4ce2d08326a5168bb9e3132f6+7001).
+
+* Apr 28, 2022: donated CM-MLOps to MLCommons, which was later renamed to CM4MLOps:
+  [ref](https://github.com/mlcommons/ck/commit/456e4861056c0e39c4d689c03da91f90a44be058).
+
+* May 9, 2022: developed the initial set of core IC automations for MLOps (aka CM scripts):
+  [ref1](https://github.com/octoml/cm-mlops/commit/4a4a027f4088ce7e7abcec29c39d98981bf09d4c),
+  [ref2](https://github.com/octoml/cm-mlops/tree/4a4a027f4088ce7e7abcec29c39d98981bf09d4c),
+  [ref3](https://github.com/octoml/cm-mlops/blob/7692240becd6397a96c3975388913ea082002e7a/CONTRIBUTIONS).
+
+* May 11, 2022: after successfully prototyping CM and CM-MLOps, deprecated the CK1 framework in favor of CM.
+  Transferred Arjun Suresh to the CM project as a maintainer and tester for CM and CM-MLOps:
+  [ref](https://github.com/octoml/cm-mlops/blob/17405833665bc1e93820f9ff76deb28a0f543bdb/CONTRIBUTIONS).
+
+  Created a [file](https://github.com/mlcommons/ck/blob/master/cm-mlops/CHANGES.md)
+  to document and track our public developments at MLCommons.
+
+* Jun 8, 2022: renamed the 'IC' automation to the more intuitive 'CM script' automation:
+  [ref1](https://github.com/mlcommons/ck/tree/5ca4e2c33e58a660ac20a545d8aa5143ab6e8e81/cm-devops/automation/script),
+  [ref2](https://github.com/mlcommons/ck/tree/5ca4e2c33e58a660ac20a545d8aa5143ab6e8e81),
+  [ref3](https://github.com/octoml/cm-mlops/commit/7910fb7ffc62a617d987d2f887d6f9981ff80187).
+
+* Jun 16, 2022: prototyped the `CM cache` automation to facilitate caching and reuse of the outputs from CM scripts:
+  [ref1](https://github.com/mlcommons/ck/commit/1f81aae8cebd5567ec4ca55f693beaf32b49fb48),
+  [ref2](https://github.com/mlcommons/ck/tree/1f81aae8cebd5567ec4ca55f693beaf32b49fb48),
+  [ref3](https://github.com/mlcommons/ck/tree/1f81aae8cebd5567ec4ca55f693beaf32b49fb48?tab=readme-ov-file#contacts).
+
+* Sep 6, 2022: delivered a CM demo to run MLPerf while deprecating CK1 automations for MLPerf:
+  [ref1](https://github.com/mlcommons/ck/commit/2c5d5c5c944ae5f252113c62af457c7a4c5e877a#diff-faac2c4ecfd0bfb928dafc938d3dad5651762fbb504a2544752a337294ee2573R224),
+  [ref2](https://github.com/mlcommons/ck/blob/2c5d5c5c944ae5f252113c62af457c7a4c5e877a/CONTRIBUTING.md#author-and-coordinator).
+
+  Welcomed Arjun Suresh as a contributor to CM automations for MLPerf: [ref](https://github.com/mlcommons/ck/blob/2c5d5c5c944ae5f252113c62af457c7a4c5e877a/CONTRIBUTING.md#contributors-in-alphabetical-order).
+
+* From September 2022: coordinated community development of CM and CM4MLOps
+  to [modularize and automate MLPerf benchmarks](https://docs.mlcommons.org/inference)
+  and support [reproducibility initiatives at ML and Systems conferences](https://cTuning.org/ae)
+  through the [MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md).
+
+  * Directed and financed the creation of CM automations to streamline the MLPerf power measurement process.
+
+  * Proposed to use MLPerf benchmarks for the Student Cluster Competition, led the developments
+    and prepared a tutorial to run MLPerf inference at SCC'22 via CM: [ref](https://github.com/mlcommons/ck/blob/master/docs/tutorials/sc22-scc-mlperf.md).
+
+* April 2023: departed OctoML to focus on the development of the [CK playground](https://access.cKnowledge.org) and CM automations
+  to make MLPerf accessible to everyone. Hired Arjun Suresh to help with developments.
+
+  * Initiated and funded development of the [MLPerf explorer](https://github.com/ctuning/q2a-mlperf-visualizer)
+    to improve the visualization of results.
+
+* August 2023: organized the first mass-scale MLPerf community submission of 12217 inference benchmark v3.1 results
+  out of a total of 13351 results (including 90% of all power results) across diverse models, software and hardware
+  from different vendors via [open challenges](https://access.cknowledge.org/playground/?action=challenges) funded by cTuning.org:
+  [LinkedIn article](https://www.linkedin.com/pulse/new-milestone-make-mlperf-benchmarks-accessible-everyone-fursin/)
+  with results visualized by the [MLPerf explorer](https://github.com/ctuning/q2a-mlperf-visualizer),
+  [CM4MLOps challenges at GitHub](https://github.com/mlcommons/cm4mlops/tree/main/challenge).
+
+* February 2024: proposed to use CM to automate the [MLPerf automotive benchmark (ABTF)](https://mlcommons.org/working-groups/benchmarks/automotive/).
+
+  * moved my prototypes of the CM automation for ABTF to the cm4abtf repo: [ref](https://github.com/mlcommons/cm4abtf/commit/f92b9f464de89a38a4bde149290dede2d94c8631).
+  * led further CM4ABTF developments funded by cTuning.org.
+
+* Starting in April 2024, began the gradual transfer of ongoing maintenance and enhancement
+  responsibilities for CM and CM4MLOps, including MLPerf automations, to MLCommons.
+  Welcomed Anandhu Sooraj as a maintainer and contributor to CM4MLOps with MLPerf automations.
+
+* Took a break from all development activities.
+
+* July 2024: started prototyping the next generation of CM (CMX and CMX4MLOps) with simpler interfaces
+  based on user feedback while maintaining backward compatibility.
+
+* 2025: continuing to develop CMX and CMX4MLOps to make it easier to run and customize MLPerf inference, training
+  and other benchmarks across diverse models, datasets, software and hardware.
+
+For more details, please refer to the [white paper](https://arxiv.org/abs/2406.16791)
+and the [ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339).
diff --git a/cmx4mlops/cmx4mlops/repo/LICENSE.md b/cmx4mlops/cmx4mlops/repo/LICENSE.md
new file mode 100644
index 000000000..f433b1a53
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/LICENSE.md
@@ -0,0 +1,177 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/cmx4mlops/cmx4mlops/repo/LICENSE.third-party.md b/cmx4mlops/cmx4mlops/repo/LICENSE.third-party.md new file mode 100644 index 000000000..faa008458 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/LICENSE.third-party.md @@ -0,0 +1 @@ +This CM repository may contain CM scripts with third-party files licensed under Apache2, BSD or MIT license. diff --git a/cmx4mlops/cmx4mlops/repo/README.md b/cmx4mlops/cmx4mlops/repo/README.md index e69de29bb..49bd226a8 100644 --- a/cmx4mlops/cmx4mlops/repo/README.md +++ b/cmx4mlops/cmx4mlops/repo/README.md @@ -0,0 +1,67 @@ +## Unified and cross-platform CM interface for DevOps, MLOps and MLPerf + +[![License](https://img.shields.io/badge/License-Apache%202.0-green)](LICENSE.md) +[![Python Version](https://img.shields.io/badge/python-3+-blue.svg)](https://github.com/mlcommons/ck/tree/master/cm/cmind) +[![Powered by CM](https://img.shields.io/badge/Powered_by-MLCommons%20CM-blue)](https://pypi.org/project/cmind). 
+[![Downloads](https://static.pepy.tech/badge/cm4mlops)](https://pepy.tech/project/cm4mlops) + +[![CM script automation features test](https://github.com/mlcommons/cm4mlops/actions/workflows/test-cm-script-features.yml/badge.svg)](https://github.com/mlcommons/cm4mlops/actions/workflows/test-cm-script-features.yml) +[![MLPerf inference bert (deepsparse, tf, onnxruntime, pytorch)](https://github.com/mlcommons/cm4mlops/actions/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml/badge.svg)](https://github.com/mlcommons/cm4mlops/actions/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml) +[![MLPerf inference MLCommons C++ ResNet50](https://github.com/mlcommons/cm4mlops/actions/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml/badge.svg)](https://github.com/mlcommons/cm4mlops/actions/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml) +[![MLPerf inference ABTF POC Test](https://github.com/mlcommons/cm4mlops/actions/workflows/test-mlperf-inference-abtf-poc.yml/badge.svg)](https://github.com/mlcommons/cm4mlops/actions/workflows/test-mlperf-inference-abtf-poc.yml) +[![Test Compilation of QAIC Compute SDK (build LLVM from src)](https://github.com/mlcommons/cm4mlops/actions/workflows/test-qaic-compute-sdk-build.yml/badge.svg)](https://github.com/mlcommons/cm4mlops/actions/workflows/test-qaic-compute-sdk-build.yml) +[![Test QAIC Software kit Compilation](https://github.com/mlcommons/cm4mlops/actions/workflows/test-qaic-software-kit.yml/badge.svg)](https://github.com/mlcommons/cm4mlops/actions/workflows/test-qaic-software-kit.yml) + + +# CM4MLOps repository + +**CM4MLOps** repository is powered by the [Collective Mind automation framework](https://github.com/mlcommons/ck/tree/master/cm), +a [Python package](https://pypi.org/project/cmind/) with a CLI and API designed for creating and managing automations. + +Two key automations developed using CM are **Script** and **Cache**, which streamline machine learning (ML) workflows, +including managing Docker runs. Both Script and Cache automations are part of the **cm4mlops** repository. + +The CM scripts, also housed in this repository, consist of hundreds of modular Python-wrapped scripts accompanied +by `yaml` metadata, enabling the creation of robust and flexible ML workflows. + +- **CM Scripts Documentation**: [https://docs.mlcommons.org/cm4mlops/](https://docs.mlcommons.org/cm4mlops/) +- **CM CLI Documentation**: [https://docs.mlcommons.org/ck/specs/cm-cli/](https://docs.mlcommons.org/ck/specs/cm-cli/) + +The `mlperf-branch` of the **cm4mlops** repository is dedicated to developments specific to MLPerf Inference. +Please submit any pull requests (PRs) to this branch. For more information about using CM for MLPerf Inference, +refer to the [MLPerf Inference Documentation](https://docs.mlcommons.org/inference/). + +## License + +[Apache 2.0](LICENSE.md) + +## Copyright + +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and encourage collaborative development. + +## Maintainer(s) + +* MLCommons + +## CM author + +[Grigori Fursin](https://cKnowledge.org/gfursin) + +## CM concepts + +Check our [ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) and the [white paper](https://arxiv.org/abs/2406.16791). 
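+
+As a quick illustration of these concepts, here is a minimal sketch of calling a CM script
+from Python via the `cmind` package. It is a hedged example: the `detect,os` tags are an
+assumption and must resolve to a script available in your installed CM repositories.
+
+```python
+import cmind
+
+# Run a CM script by tags (roughly `cm run script "detect os"` on the CLI);
+# additional CLI flags map to keys of this input dictionary.
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'detect,os',
+                  'out': 'con'})
+
+# CM functions return a dict with a 'return' code instead of raising exceptions
+if r['return'] > 0:
+    print(r['error'])
+```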
+
+## CM script developers
+
+Arjun Suresh, Anandhu Sooraj, Grigori Fursin
+
+## Parent project
+
+Visit the [parent Collective Knowledge project](https://github.com/mlcommons/ck) for further details.
+
+## Citing this project
+
+If you found the CM automations helpful, kindly reference this article:
+[ [ArXiv](https://arxiv.org/abs/2406.16791) ]
diff --git a/cmx4mlops/cmx4mlops/repo/automation/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/COPYRIGHT.md
new file mode 100644
index 000000000..2a313520b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cache/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/cache/COPYRIGHT.md
new file mode 100644
index 000000000..2a313520b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cache/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cache/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/cache/README-extra.md
new file mode 100644
index 000000000..84d274179
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cache/README-extra.md
@@ -0,0 +1,71 @@
+[ [Back to index](../../../docs/README.md) ]
+
+# CM "cache" automation
+
+*We suggest you check the [CM introduction](https://github.com/mlcommons/ck/blob/master/docs/introduction-cm.md)
+ and [CM CLI/API](https://github.com/mlcommons/ck/blob/master/docs/interface.md) to understand CM motivation and concepts.*
+
+## CM cache CLI
+
+Whenever a [CM script](https://access.cknowledge.org/playground/?action=scripts)
+caches its output (such as a downloaded model, a pre-processed data set or built code),
+you can find it using the CM "cache" automation as follows:
+
+```bash
+cm show cache
+```
+
+You can prune cache entries by tags and variations:
+```bash
+cm show cache --tags=ml-model
+cm show cache --tags=python
+```
+
+You can find the path to a given cache artifact as follows:
+```bash
+cm find cache --tags=ml-model,bert
+```
+
+You can delete one or more cache artifacts as follows:
+```bash
+cm rm cache --tags=ml-model
+```
+
+You can skip the user prompt by adding the `-f` flag as follows:
+```bash
+cm rm cache --tags=ml-model -f
+```
+
+You can clean the whole cache as follows:
+```bash
+cm rm cache -f
+```
+
+## CM python API
+
+You can access the same functionality via the CM Python API as follows:
+
+```python
+
+import cmind
+
+output = cmind.access({'action':'show',
+                       'automation':'cache,541d6f712a6b464e'})
+
+if output['return']>0:
+    cmind.error(output)
+
+artifacts = output['list']
+
+for artifact in artifacts:
+    print ('')
+    print (artifact.path)
+    print (artifact.meta)
+
+```
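+
+The same tag-based pruning shown in the CLI section works through the Python API.
+A minimal sketch (assuming, as with the CLI above, that command-line flags map
+directly to keys of the input dictionary):
+
+```python
+import cmind
+
+# Find cached artifacts by tags (equivalent to `cm find cache --tags=ml-model,bert`)
+r = cmind.access({'action': 'find',
+                  'automation': 'cache,541d6f712a6b464e',
+                  'tags': 'ml-model,bert'})
+
+if r['return'] > 0:
+    cmind.error(r)
+
+for artifact in r['list']:
+    print(artifact.path)
+```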
+
+## Related
+
+* [CM "script" automation](../script/README-extra.md)
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cache/README.md b/cmx4mlops/cmx4mlops/repo/automation/cache/README.md
new file mode 100644
index 000000000..0a3114d3b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cache/README.md
@@ -0,0 +1,87 @@
+*This README is automatically generated - don't edit! See [extra README](README-extra.md) for extra notes!*
+
+### Automation actions
+
+#### test
+
+  * CM CLI: ```cm test cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L15))
+  * CM CLI with UID: ```cm test cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L15))
+  * CM Python API:
+    ```python
+    import cmind
+
+    r=cmind.access({
+                    'action':'test',
+                    'automation':'cache,541d6f712a6b464e',
+                    'out':'con',
+    ```
+    [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L15)
+    ```python
+                   })
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+#### show
+
+  * CM CLI: ```cm show cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L54))
+  * CM CLI with UID: ```cm show cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L54))
+  * CM Python API:
+    ```python
+    import cmind
+
+    r=cmind.access({
+                    'action':'show',
+                    'automation':'cache,541d6f712a6b464e',
+                    'out':'con',
+    ```
+    [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L54)
+    ```python
+                   })
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+#### search
+
+  * CM CLI: ```cm search cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L153))
+  * CM CLI with UID: ```cm search cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L153))
+  * CM Python API:
+    ```python
+    import cmind
+
+    r=cmind.access({
+                    'action':'search',
+                    'automation':'cache,541d6f712a6b464e',
+                    'out':'con',
+    ```
+    [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L153)
+    ```python
+                   })
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+#### copy_to_remote
+
+  * CM CLI: ```cm copy_to_remote cache``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L186))
+  * CM CLI with UID: ```cm copy_to_remote cache,541d6f712a6b464e``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L186))
+  * CM Python API:
+    ```python
+    import cmind
+
+    r=cmind.access({
+                    'action':'copy_to_remote',
+                    'automation':'cache,541d6f712a6b464e',
+                    'out':'con',
+    ```
+    [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cache/module.py#L186)
+    ```python
+                   })
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cache/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/cache/_cm.json
new file mode 100644
index 000000000..ac383f937
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cache/_cm.json
@@ -0,0 +1,12 @@
+{
+    "alias": "cache",
+    "automation_alias": "automation",
+    "automation_uid": "bbeb15d8f0a944a4",
+    "desc": "Caching cross-platform CM scripts",
+    "developers": "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)",
+    "sort": 900,
+    "tags": [
+        "automation"
+    ],
"uid": "541d6f712a6b464e" +} diff --git a/cmx4mlops/cmx4mlops/repo/automation/cache/module.py b/cmx4mlops/cmx4mlops/repo/automation/cache/module.py new file mode 100644 index 000000000..885778800 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/cache/module.py @@ -0,0 +1,249 @@ +# Author: Grigori Fursin +# Contributors: Arjun Suresh, Anandhu Sooraj +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +import os + +from cmind.automation import Automation +from cmind import utils + + +class CAutomation(Automation): + """ + Automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print(json.dumps(i, indent=2)) + + return {'return': 0} + + ############################################################ + def show(self, i): + """ + Show cache + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + (env) (bool): if True, show env from cm-cached-state.json + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + import json + + # Check parsed automation + if 'parsed_automation' not in i: + return {'return': 1, 'error': 'automation is not specified'} + + console = i.get('out') == 'con' + + show_env = i.get('env', False) + +# Moved to search function +# # Check simplified CMD: cm show cache "get python" +# # If artifact has spaces, treat them as tags! 
+#        artifact = i.get('artifact','')
+#        tags = i.get('tags','').strip()
+#        if ' ' in artifact or ',' in artifact:
+#            del(i['artifact'])
+#            if 'parsed_artifact' in i: del(i['parsed_artifact'])
+#
+#            new_tags = artifact.replace(' ',',')
+#            tags = new_tags if tags=='' else new_tags+','+tags
+#
+#            i['tags'] = tags
+
+        # Find CM artifact(s)
+        i['out'] = None
+        r = self.search(i)
+
+        if r['return'] > 0:
+            return r
+
+        lst = r['list']
+        for artifact in sorted(lst, key=lambda x: sorted(x.meta['tags'])):
+            # for artifact in lst:
+            path = artifact.path
+            meta = artifact.meta
+            dependent_cached_path = meta.get(
+                'dependent_cached_path', '')
+            if dependent_cached_path and not os.path.exists(
+                    dependent_cached_path):
+                continue
+
+            original_meta = artifact.original_meta
+
+            alias = meta.get('alias', '')
+            uid = meta.get('uid', '')
+
+            tags = meta.get('tags', [])
+            tags1 = sorted([x for x in tags if not x.startswith('_')])
+            tags2 = sorted([x for x in tags if x.startswith('_')])
+            tags = tags1 + tags2
+
+            version = meta.get('version', '')
+
+            if console:
+                print('')
+#                print ('* UID: {}'.format(uid))
+                print('* Tags: {}'.format(','.join(tags)))
+                print('  Path: {}'.format(path))
+                if version != '':
+                    print('  Version: {}'.format(version))
+
+            if show_env and console:
+                path_to_cached_state_file = os.path.join(
+                    path, 'cm-cached-state.json')
+
+                if os.path.isfile(path_to_cached_state_file):
+                    r = utils.load_json(file_name=path_to_cached_state_file)
+                    if r['return'] > 0:
+                        return r
+
+                    # Update env and state from cache!
+                    cached_state = r['meta']
+
+                    new_env = cached_state.get('new_env', {})
+                    if len(new_env) > 0:
+                        print('     New env:')
+                        print(
+                            json.dumps(
+                                new_env,
+                                indent=6,
+                                sort_keys=True).replace(
+                                '{',
+                                '').replace(
+                                '}',
+                                ''))
+
+                    new_state = cached_state.get('new_state', {})
+                    if len(new_state) > 0:
+                        print('     New state:')
+                        print(json.dumps(new_state, indent=6, sort_keys=True))
+
+        return {'return': 0, 'list': lst}
+
+    ############################################################
+    def search(self, i):
+        """
+        Overriding the automation search function to add support for a simplified CMD with tags with spaces
+
+        TBD: add input/output description
+        """
+        # Check simplified CMD: cm show cache "get python"
+        # If artifact has spaces, treat them as tags!
+        artifact = i.get('artifact', '')
+        tags = i.get('tags', '')
+
+        # Tags may be a list (if they come internally from CM scripts)
+        # or a string (if they come from the CMD)
+        if not isinstance(tags, list):
+            tags = tags.strip()
+
+            if ' ' in artifact:  # or ',' in artifact:
+                del (i['artifact'])
+                if 'parsed_artifact' in i:
+                    del (i['parsed_artifact'])
+
+                new_tags = artifact.replace(' ', ',')
+                tags = new_tags if tags == '' else new_tags + ',' + tags
+
+                i['tags'] = tags
+
+        # Force automation when rerunning access with processed input
+        i['automation'] = 'cache,541d6f712a6b464e'
+        i['action'] = 'search'
+        # Avoid recursion - call the common (internal) search function directly
+        i['common'] = True
+
+        # Find CM artifact(s)
+        return self.cmind.access(i)
+
+    ############################################################
+
+    def copy_to_remote(self, i):
+        """
+        Copy cached artifacts to a remote host.
+
+        Args:
+          (CM input dict):
+
+          (out) (str): if 'con', output to console
+
+          tags (str): tags to find the cache artifact(s) to copy
+
+          remote_host (str): remote hostname or IP (required)
+          (remote_port) (str): remote SSH port (22 by default)
+          (remote_user) (str): remote user name (current user by default)
+          (remote_cm_repos_location) (str): path to CM repositories on the remote host
+                                            (/home/<user>/CM/repos by default)
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+        """
+
+        return utils.call_internal_module(
+            self, __file__, 'module_misc', 'copy_to_remote', i)
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cache/module_misc.py b/cmx4mlops/cmx4mlops/repo/automation/cache/module_misc.py
new file mode 100644
index 000000000..d83d9f763
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cache/module_misc.py
@@ -0,0 +1,122 @@
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+from cmind import utils
+
+
+############################################################
+def copy_to_remote(i):
+    """
+    Copy cached artifacts to a remote host via rsync.
+
+    Args:
+      (CM input dict):
+
+      (out) (str): if 'con', output to console
+
+      tags (str): tags to find the cache artifact(s) to copy
+
+      remote_host (str): remote hostname or IP (required)
+      (remote_port) (str): remote SSH port (22 by default)
+      (remote_user) (str): remote user name (current user by default)
+      (remote_cm_repos_location) (str): path to CM repositories on the remote host
+                                        (/home/<user>/CM/repos by default)
+
+    Returns:
+      (CM return dict):
+
+      * return (int): return code == 0 if no error and >0 if error
+      * (error) (str): error string if return>0
+
+    """
+
+    self_module = i['self_module']
+
+    remote_host = i.get('remote_host')
+    if not remote_host:
+        return {'return': 1,
+                'error': 'Please input remote host_name/IP via --remote_host'}
+    remote_cm_repos_location = i.get(
+        'remote_cm_repos_location', os.path.join(
+            "/home", os.getlogin(), "CM", "repos"))
+    remote_cm_cache_location = os.path.join(
+        remote_cm_repos_location, "local", "cache")
+
+    remote_port = i.get('remote_port', '22')
+    remote_user = i.get('remote_user', os.getlogin())
+
+    tag_string = i['tags']
+    tag_string += ",-tmp"
+
+    cm_input = {'action': 'show',
+                'automation': 'cache',
+                'tags': f'{tag_string}',
+                'quiet': True
+                }
+    r = self_module.cmind.access(cm_input)
+    if r['return'] > 0:
+        return r
+
+    if len(r['list']) == 0:
+        pass  # fixme
+    elif len(r['list']) > 1:
+        print("Multiple cache entries found: ")
+        for k in sorted(r['list'], key=lambda x: x.meta.get('alias', '')):
+            print(k.path)
+        x = input("Would you like to copy them all? Y/n: ")
+        if x.lower() == 'n':
+            return {'return': 0}
+
+    import json
+
+    for k in sorted(r['list'], key=lambda x: x.meta.get('alias', '')):
+        path = k.path
+        cacheid = os.path.basename(path)
+
+        copy_cmd = f"rsync -avz --exclude cm-cached-state.json -e 'ssh -p {remote_port}' {path} {remote_user}@{remote_host}:{remote_cm_cache_location}"
+        print(copy_cmd)
+        os.system(copy_cmd)
+
+        cm_cached_state_json_file = os.path.join(path, "cm-cached-state.json")
+        if not os.path.exists(cm_cached_state_json_file):
+            return {'return': 1,
+                    'error': f'cm-cached-state.json file missing in {path}'}
+
+        with open(cm_cached_state_json_file, "r") as f:
+            cm_cached_state = json.load(f)
+
+        new_env = cm_cached_state['new_env']
+        new_state = cm_cached_state['new_state']  # Todo fix new state
+        cm_repos_path = os.environ.get(
+            'CM_REPOS', os.path.join(
+                os.path.expanduser("~"), "CM", "repos"))
+        cm_cache_path = os.path.realpath(
+            os.path.join(cm_repos_path, "local", "cache"))
+
+        # Rewrite local cache paths to the remote cache location in string env values
+        for key, val in new_env.items():
+            if isinstance(val, str):
+                new_env[key] = val.replace(
+                    cm_cache_path, remote_cm_cache_location)
+
+        with open("tmp_remote_cached_state.json", "w") as f:
+            json.dump(cm_cached_state, f, indent=2)
+
+        remote_cached_state_file_location = os.path.join(
+            remote_cm_cache_location, cacheid, "cm-cached-state.json")
+        copy_cmd = f"rsync -avz -e 'ssh -p {remote_port}' tmp_remote_cached_state.json {remote_user}@{remote_host}:{remote_cached_state_file_location}"
+        print(copy_cmd)
+        os.system(copy_cmd)
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cfg/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/cfg/COPYRIGHT.md
new file mode 100644
index 000000000..2a313520b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cfg/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cfg/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/cfg/README-extra.md
new file mode 100644
index 000000000..cc94030ab
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cfg/README-extra.md
@@ -0,0 +1,8 @@
+Examples:
+
+```bash
+cm set cfg default
+cm set cfg default --key.script.silent
+cm set cfg default --key.script.silent-
+
+```
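+
+The trailing `-` in the last example removes the given key from the configuration
+(see `check_to_delete` in `module.py`). A minimal Python sketch of the same call,
+assuming dotted CLI keys such as `--key.script.silent` map to a nested `key` dictionary:
+
+```python
+import cmind
+
+# Equivalent to `cm set cfg default --key.script.silent`;
+# 'set' is mapped to the 'xset' action via action_substitutions in _cm.json
+r = cmind.access({'action': 'set',
+                  'automation': 'cfg',
+                  'artifact': 'default',
+                  'key': {'script': {'silent': True}}})
+
+if r['return'] > 0:
+    print(r['error'])
+```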
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cfg/README.md b/cmx4mlops/cmx4mlops/repo/automation/cfg/README.md
new file mode 100644
index 000000000..3c82852c8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cfg/README.md
@@ -0,0 +1,27 @@
+*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!*
+
+### Automation actions
+
+#### test
+
+  * CM CLI: ```cm test cfg``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cfg/module.py#L15))
+  * CM CLI with UID: ```cm test cfg,88dce9c160324c5d``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cfg/module.py#L15))
+  * CM Python API:
+    ```python
+    import cmind
+
+    r=cmind.access({
+                    'action':'test',
+                    'automation':'cfg,88dce9c160324c5d',
+                    'out':'con',
+    ```
+    [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/cfg/module.py#L15)
+    ```python
+                   })
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cfg/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/cfg/_cm.json
new file mode 100644
index 000000000..9a1dc030e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cfg/_cm.json
@@ -0,0 +1,12 @@
+{
+    "action_substitutions": {
+        "set":"xset"
+    },
+    "alias": "cfg",
+    "automation_alias": "automation",
+    "automation_uid": "bbeb15d8f0a944a4",
+    "tags": [
+        "automation"
+    ],
+    "uid": "88dce9c160324c5d"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/cfg/module.py b/cmx4mlops/cmx4mlops/repo/automation/cfg/module.py
new file mode 100644
index 000000000..4b08f3926
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/cfg/module.py
@@ -0,0 +1,270 @@
+# Universal cfg for CM automations
+#
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+
+class CAutomation(Automation):
+    """
+    Automation actions
+    """
+
+    ############################################################
+    def __init__(self, cmind, automation_file):
+        super().__init__(cmind, __file__)
+
+    ############################################################
+    def test(self, i):
+        """
+        Test automation
+
+        Args:
+          (CM input dict):
+
+          (out) (str): if 'con', output to console
+
+          automation (str): automation as CM string object
+
+          parsed_automation (list): prepared in CM CLI or CM access function
+                                    [ (automation alias, automation UID) ] or
+                                    [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+          (artifact) (str): artifact as CM string object
+
+          (parsed_artifact) (list): prepared in CM CLI or CM access function
+                                    [ (artifact alias, artifact UID) ] or
+                                    [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+          ...
+ + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print(json.dumps(i, indent=2)) + + return {'return': 0} + + ############################################################ + def xset(self, i): + """ + Set keys in configuration + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + (artifact) (str): CM artifact with configuration + (tags) (str): list of tags to find CM artifact with configuration + + (key) (dict): updating config + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + + r = self._find_cfg_artifact(i) + if r['return'] > 0: + return r + + # Path to cfg + path = r['path'] + path_to_config = r['path_to_config'] + config = r['config'] + + # Clean input to leave only keys for the configuration + new_config = i.get('key', {}) + + # If new config is empty, just print existing config + if len(new_config) > 0: + # Check if need to delete some + def check_to_delete(d): + + for k in list(d.keys()): + v = d[k] + if isinstance(v, dict): + check_to_delete(v) + else: + if k.endswith('-'): + if k[:-1] in d: + del (d[k[:-1]]) + del (d[k]) + else: + vsl = str(v).lower() + if vsl == 'none': + v = None + elif vsl == 'false': + v = False + elif vsl == 'true': + v = True + + d[k] = v + + utils.merge_dicts({'dict1': config, + 'dict2': new_config, + 'append_lists': True, + 'append_unique': True}) + + check_to_delete(config) + + r = utils.save_json(path_to_config, config) + if r['return'] > 0: + return r + + # Print config + print('Config:') + print('') + print(json.dumps(config, indent=2)) + + return {'return': 0} + + ############################################################ + def load(self, i): + """ + Load configuration + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + (artifact) (str): CM artifact with configuration + (tags) (str): list of tags to find CM artifact with configuration + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + return self._find_cfg_artifact(i) + + ############################################################ + def _find_cfg_artifact(self, i): + """ + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + (artifact) (str): CM artifact with configuration + (tags) (str): list of tags to find CM artifact with configuration + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + # Clean input to find artifact + ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'] + ['tags']) + + parsed_artifact = i.get('parsed_artifact', []) + + artifact_obj = parsed_artifact[0] if len(parsed_artifact) > 0 else None + artifact_repo = parsed_artifact[1] if len( + parsed_artifact) > 1 else None + + artifact = i.get('artifact', '') + + if artifact == '': + ii['artifact'] = 'default' + + tags = ii.get('tags', '') + + if 'cm-universal-cfg' not in tags: + if tags != '': + tags += ',' + tags += 'cm-universal-cfg' + + ii['tags'] = tags + + automation = ii['automation'] + if automation != '.' 
and ',' not in automation: + ii['automation'] = automation + ',' + self.meta['uid'] + + # Add placeholder (use common action) + + ii['action'] = 'find' + ii['out'] = '' + # Avoid recursion - use internal CM add function to add the script + # artifact + ii['common'] = True + + r = self.cmind.access(ii) + if r['return'] > 0: + return r + + lst = r['list'] + + if len(lst) == 0: + ii['action'] = 'add' + ii['meta'] = {} + + # Tags must be unique for default + r = self.cmind.access(ii) + if r['return'] > 0: + return r + + path = r['path'] + elif len(lst) > 1: + return { + 'return': 1, 'error': 'ambiguity in cfg name - more than 1 CM artifact found'} + else: + path = lst[0].path + + # Check if has config + path_to_cfg = os.path.join(path, 'config.json') + + config = {} + if os.path.isfile(path_to_cfg): + r = utils.load_json(path_to_cfg) + if r['return'] > 0: + return r + + config = r['meta'] + + return {'return': 0, 'path': path, + 'path_to_config': path_to_cfg, 'config': config} diff --git a/cmx4mlops/cmx4mlops/repo/automation/challenge/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/challenge/COPYRIGHT.md new file mode 100644 index 000000000..2a313520b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/challenge/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort. diff --git a/cmx4mlops/cmx4mlops/repo/automation/challenge/README.md b/cmx4mlops/cmx4mlops/repo/automation/challenge/README.md new file mode 100644 index 000000000..2db03e8b1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/challenge/README.md @@ -0,0 +1,27 @@ +*This README is automatically generated - don't edit! 
Use `README-extra.md` for extra notes!*
+
+### Automation actions
+
+#### test
+
+  * CM CLI: ```cm test challenge``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/challenge/module.py#L15))
+  * CM CLI with UID: ```cm test challenge,3d84abd768f34e08``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/challenge/module.py#L15))
+  * CM Python API:
+    ```python
+    import cmind
+
+    r=cmind.access({
+                    'action':'test',
+                    'automation':'challenge,3d84abd768f34e08',
+                    'out':'con',
+    ```
+    [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/challenge/module.py#L15)
+    ```python
+                   })
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/challenge/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/challenge/_cm.json
new file mode 100644
index 000000000..a4f416452
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/challenge/_cm.json
@@ -0,0 +1,9 @@
+{
+    "alias": "challenge",
+    "automation_alias": "automation",
+    "automation_uid": "bbeb15d8f0a944a4",
+    "tags": [
+        "automation"
+    ],
+    "uid": "3d84abd768f34e08"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/challenge/module.py b/cmx4mlops/cmx4mlops/repo/automation/challenge/module.py
new file mode 100644
index 000000000..963ab43b6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/challenge/module.py
@@ -0,0 +1,66 @@
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+
+class CAutomation(Automation):
+    """
+    Automation actions
+    """
+
+    ############################################################
+    def __init__(self, cmind, automation_file):
+        super().__init__(cmind, __file__)
+
+    ############################################################
+    def test(self, i):
+        """
+        Test automation
+
+        Args:
+          (CM input dict):
+
+          (out) (str): if 'con', output to console
+
+          automation (str): automation as CM string object
+
+          parsed_automation (list): prepared in CM CLI or CM access function
+                                    [ (automation alias, automation UID) ] or
+                                    [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+          (artifact) (str): artifact as CM string object
+
+          (parsed_artifact) (list): prepared in CM CLI or CM access function
+                                    [ (artifact alias, artifact UID) ] or
+                                    [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+          ...
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+          * Output from this automation action
+
+        """
+
+        import json
+        print(json.dumps(i, indent=2))
+
+        return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/contributor/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/contributor/COPYRIGHT.md
new file mode 100644
index 000000000..2a313520b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/contributor/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/contributor/README.md b/cmx4mlops/cmx4mlops/repo/automation/contributor/README.md
new file mode 100644
index 000000000..df1f4e3d6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/contributor/README.md
@@ -0,0 +1,47 @@
+*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!*
+
+### Automation actions
+
+#### test
+
+  * CM CLI: ```cm test contributor``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L15))
+  * CM CLI with UID: ```cm test contributor,68eae17b590d4f8f``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L15))
+  * CM Python API:
+    ```python
+    import cmind
+
+    r=cmind.access({
+                    'action':'test',
+                    'automation':'contributor,68eae17b590d4f8f',
+                    'out':'con',
+    ```
+    [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L15)
+    ```python
+                   })
+    if r['return']>0:
+        print(r['error'])
+    ```
+
+#### add
+
+  * CM CLI: ```cm add contributor``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L54))
+  * CM CLI with UID: ```cm add contributor,68eae17b590d4f8f``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L54))
+  * CM Python API:
+    ```python
+    import cmind
+
+    r=cmind.access({
+                    'action':'add',
+                    'automation':'contributor,68eae17b590d4f8f',
+                    'out':'con',
+    ```
+    [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/contributor/module.py#L54)
+    ```python
+                   })
+    if r['return']>0:
+        print(r['error'])
+    ```
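+
+For example, a new contributor could register via the Python API as follows
+(a sketch: `my_name` is a placeholder, and the `add` action implemented in
+`module.py` below interactively prompts for the name, organization, webpage and challenge tags):
+
+```python
+import cmind
+
+# Equivalent to `cm add contributor my_name`;
+# the artifact is added to the mlcommons@ck repository by default
+r = cmind.access({'action': 'add',
+                  'automation': 'contributor',
+                  'artifact': 'my_name',
+                  'out': 'con'})
+
+if r['return'] > 0:
+    print(r['error'])
+```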
+
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/contributor/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/contributor/_cm.json
new file mode 100644
index 000000000..008f7d54c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/contributor/_cm.json
@@ -0,0 +1,9 @@
+{
+    "alias": "contributor",
+    "automation_alias": "automation",
+    "automation_uid": "bbeb15d8f0a944a4",
+    "tags": [
+        "automation"
+    ],
+    "uid": "68eae17b590d4f8f"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/contributor/module.py b/cmx4mlops/cmx4mlops/repo/automation/contributor/module.py
new file mode 100644
index 000000000..a2d6954ad
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/contributor/module.py
@@ -0,0 +1,174 @@
+# Author: Grigori Fursin
+# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+
+class CAutomation(Automation):
+    """
+    Automation actions
+    """
+
+    ############################################################
+    def __init__(self, cmind, automation_file):
+        super().__init__(cmind, __file__)
+
+    ############################################################
+    def test(self, i):
+        """
+        Test automation
+
+        Args:
+          (CM input dict):
+
+          (out) (str): if 'con', output to console
+
+          automation (str): automation as CM string object
+
+          parsed_automation (list): prepared in CM CLI or CM access function
+                                    [ (automation alias, automation UID) ] or
+                                    [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+          (artifact) (str): artifact as CM string object
+
+          (parsed_artifact) (list): prepared in CM CLI or CM access function
+                                    [ (artifact alias, artifact UID) ] or
+                                    [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+          ...
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+          * Output from this automation action
+
+        """
+
+        import json
+        print(json.dumps(i, indent=2))
+
+        return {'return': 0}
+
+    ############################################################
+    def add(self, i):
+        """
+        Add a CM contributor artifact
+
+        Args:
+          (CM input dict):
+
+          (out) (str): if 'con', output to console
+
+          ...
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+        """
+
+        self_automation = self.meta['alias'] + ',' + self.meta['uid']
+
+        console = i.get('out') == 'con'
+
+        artifact = i.get('artifact', '')
+        if ':' not in artifact:
+            artifact = 'mlcommons@ck:' + artifact
+
+        j = artifact.find(':')
+        name = artifact[j + 1:]
+
+        # Check info
+        if name == '':
+            name = input('Enter your name: ').strip()
+            if name == '':
+                return {'return': 1, 'error': 'name can\'t be empty'}
+
+            artifact += name
+
+        # Check that it doesn't already exist
+        r = self.cmind.access({'action': 'find',
+                               'automation': self_automation,
+                               'artifact': artifact})
+        if r['return'] > 0:
+            return r
+        elif r['return'] == 0 and len(r['list']) > 0:
+            return {'return': 1, 'error': 'CM artifact with name {} already exists in {}'.format(
+                name, r['list'][0].path)}
+
+        meta = i.get('meta', {})
+
+        # Prepare meta
+        org = meta.get('organization', '')
+        if org == '':
+            org = input('Enter your organization (optional): ').strip()
+
+        url = input('Enter your webpage (optional): ').strip()
+
+        tags = input(
+            'Enter tags for your challenges separated by commas (you can add them later): ').strip()
+
+        if meta.get('name', '') == '':
+            meta = {'name': name}
+
+        if org != '':
+            meta['organization'] = org
+
+        if url != '':
+            meta['urls'] = [url]
+
+        if tags != '':
+            meta['ongoing'] = tags.split(',')
+
+        # Add placeholder (use common action)
+        i['out'] = 'con'
+        # Avoid recursion - use the internal CM add function to add the artifact
+        i['common'] = True
+
+        i['action'] = 'add'
+        i['automation'] = self_automation
+        i['artifact'] = artifact
+
+        i['meta'] = meta
+
+        print('')
+
+        r = self.cmind.access(i)
+        if r['return'] > 0:
+            return r
+
+        path = r['path']
+
+        path2 = os.path.dirname(path)
+
+        print('')
+        print(
+            'Please go to {}, add your directory to Git, commit and create a PR:'.format(path2))
+        print('')
+        print('cd {}'.format(path2))
+        print('git add "{}"'.format(name))
+        print('git commit "{}"'.format(name))
+        print('')
+        print('Please join https://discord.gg/JjWNWXKxwT to discuss challenges!')
+        print('Looking forward to your contributions!')
+
+        return r
diff --git a/cmx4mlops/cmx4mlops/repo/automation/data/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/data/COPYRIGHT.md
new file mode 100644
index 000000000..2a313520b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/data/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort.
diff --git a/cmx4mlops/cmx4mlops/repo/automation/data/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/data/_cm.json new file mode 100644 index 000000000..7dd9a139f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/data/_cm.json @@ -0,0 +1,9 @@ +{ + "alias": "data", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "tags": [ + "automation" + ], + "uid": "84d8ef6914bf4d78" +} diff --git a/cmx4mlops/cmx4mlops/repo/automation/data/module.py b/cmx4mlops/cmx4mlops/repo/automation/data/module.py new file mode 100644 index 000000000..963ab43b6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/data/module.py @@ -0,0 +1,66 @@ +# Author: Grigori Fursin +# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +import os + +from cmind.automation import Automation +from cmind import utils + + +class CAutomation(Automation): + """ + Automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print(json.dumps(i, indent=2)) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/automation/docker/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/docker/COPYRIGHT.md new file mode 100644 index 000000000..2a313520b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/docker/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort. diff --git a/cmx4mlops/cmx4mlops/repo/automation/docker/README.md b/cmx4mlops/cmx4mlops/repo/automation/docker/README.md new file mode 100644 index 000000000..c6ef9a384 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/docker/README.md @@ -0,0 +1,27 @@ +*This README is automatically generated - don't edit! 
Use `README-extra.md` for extra notes!* + +### Automation actions + +#### test + + * CM CLI: ```cm test docker``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/docker/module.py#L15)) + * CM CLI with UID: ```cm test docker,2d90be7cab6e4d9f``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/docker/module.py#L15)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'test' + 'automation':'docker,2d90be7cab6e4d9f' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/docker/module.py#L15) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/automation/docker/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/docker/_cm.json new file mode 100644 index 000000000..11a5085d0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/docker/_cm.json @@ -0,0 +1,11 @@ +{ + "alias": "docker", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "desc": "Managing modular docker containers (under development)", + "developers": "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)", + "tags": [ + "automation" + ], + "uid": "2d90be7cab6e4d9f" +} diff --git a/cmx4mlops/cmx4mlops/repo/automation/docker/module.py b/cmx4mlops/cmx4mlops/repo/automation/docker/module.py new file mode 100644 index 000000000..4b49bbd3c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/docker/module.py @@ -0,0 +1,65 @@ +# Author: Grigori Fursin +# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +import os + +from cmind.automation import Automation +from cmind import utils + + +class CAutomation(Automation): + """ + CM "docker" automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + (artifact) (str): artifact as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... 
+ + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + """ + + import json + print(json.dumps(i, indent=2)) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/automation/docs/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/docs/COPYRIGHT.md new file mode 100644 index 000000000..2a313520b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/docs/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort. diff --git a/cmx4mlops/cmx4mlops/repo/automation/docs/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/docs/_cm.json new file mode 100644 index 000000000..6945bacca --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/docs/_cm.json @@ -0,0 +1,9 @@ +{ + "alias": "docs", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "tags": [ + "automation" + ], + "uid": "9558c9e6ca124065" +} diff --git a/cmx4mlops/cmx4mlops/repo/automation/docs/module.py b/cmx4mlops/cmx4mlops/repo/automation/docs/module.py new file mode 100644 index 000000000..963ab43b6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/docs/module.py @@ -0,0 +1,66 @@ +# Author: Grigori Fursin +# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +import os + +from cmind.automation import Automation +from cmind import utils + + +class CAutomation(Automation): + """ + Automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print(json.dumps(i, indent=2)) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/experiment/COPYRIGHT.md new file mode 100644 index 000000000..2a313520b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. 
+ +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort. diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/experiment/README-extra.md new file mode 100644 index 000000000..454c8d6ac --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/README-extra.md @@ -0,0 +1,315 @@ +[ [Back to index](../../../docs/README.md) ] + +
+Click here to see the table of contents. + +* [CM "experiment" automation](#cm-"experiment"-automation) + * [Introducing CM experiment automation](#introducing-cm-experiment-automation) + * [Installing CM with ResearchOps/DevOps/MLOps automations](#installing-cm-with-researchops/devops/mlops-automations) + * [Understanding CM experiments](#understanding-cm-experiments) + * [Exploring combinations of parameters (autotuning, design space exploration)](#exploring-combinations-of-parameters-autotuning-design-space-exploration) + * [Aggregating and unifying results](#aggregating-and-unifying-results) + * [Visualizing results](#visualizing-results) + * [Sharing experiments with the community](#sharing-experiments-with-the-community) + * [Running CM experiments with CM scripts](#running-cm-experiments-with-cm-scripts) + * [Further community developments](#further-community-developments) + +
+
+# CM "experiment" automation
+
+*We suggest that you check [CM introduction](https://github.com/mlcommons/ck/blob/master/docs/introduction-cm.md),
+ [CM CLI/API](https://github.com/mlcommons/ck/blob/master/docs/interface.md)
+ and [CM scripts](../script/README-extra.md) to understand CM motivation and concepts.
+ You can also try [CM tutorials](https://github.com/mlcommons/ck/blob/master/docs/tutorials/README.md)
+ to run some applications and benchmarks on your platform using CM scripts.*
+
+## Introducing CM experiment automation
+
+
+Researchers, engineers and students spend a considerable amount of their time experimenting with
+many different settings of applications, tools, compilers, software and hardware
+to find the optimal combination suitable for their use cases.
+
+Based on their feedback, our [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+started developing a CM automation called "experiment".
+The goal is to provide a common interface to run, record, share, visualize and reproduce experiments
+on any platform with any software, hardware and data.
+
+The community helped us test a prototype of our "experiment" automation to record results in a unified CM format
+from [several MLPerf benchmarks](https://github.com/mlcommons/cm4mlperf-results)
+including [MLPerf inference](https://github.com/mlcommons/inference) and [MLPerf Tiny](https://github.com/mlcommons/tiny),
+visualize them at the [MLCommons CM platform](https://access.cknowledge.org/playground/?action=experiments&tags=all),
+and improve them via [public benchmarking, optimization and reproducibility challenges](https://access.cknowledge.org/playground/?action=challenges).
+
+
+
+## Installing CM with ResearchOps/DevOps/MLOps automations
+
+This CM automation is available in the most commonly used `mlcommons@cm4mlops` repository.
+
+First, install the CM automation language as described [here](https://github.com/mlcommons/ck/blob/master/docs/installation.md).
+Then, install or update this repository as follows:
+```bash
+cm pull repo mlcommons@cm4mlops
+```
+
+You can now check that the CM experiment automation is available as follows:
+```bash
+cm run experiment --help
+```
+or using the `cme` shortcut in CM V1.4.1+
+```bash
+cme --help
+```
+
+
+
+## Understanding CM experiments
+
+The CM experiment automation simply wraps any user command line, creates an associated CM `experiment` artifact with a random ID (16 lowercase hex characters)
+and some user tags in `_cm.json`, creates an extra `{date}{time}` subdirectory with a `cm-input.json` file capturing the CM input,
+and executes the user command line inside an extra subdirectory with another random ID, as shown below.
+
+The following command will print "Hello World!" while recording all the provenance in CM format in the local CM repository:
+
+```bash
+cme --tags=my,experiment,hello-world -- echo "Hello World!"
+```
+or
+```bash
+cm run experiment --tags=my,experiment,hello-world -- echo "Hello World!"
+```
+
+You should see output similar to the following:
+```bash
+
+Path to CM experiment artifact: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945
+Path to experiment: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.09-58-02.863466
+================================================================
+Experiment step: 1 out of 1
+
+Path to experiment step: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.09-58-02.863466\7ed0ea0edd6b4dd7
+
+"Hello World!"
+``` + +You can find and explore the newly created CM artifact as follows: +```bash +cm find experiment --tags=my,experiment,hello-world +``` +or using UID +```bash +cm find experiment b83a1fb24dbf4945 +``` + +When running the same experiment again, CM will find existing artifact by tags and create new {date}{time} directory there: +```bash +cme --tags=my,experiment,hello-world -- echo "Hello World!" + +Path to CM experiment artifact: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945 +Path to experiment: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.10-02-08.911210 +================================================================ +Experiment step: 1 out of 1 + +Path to experiment step: C:\Users\gfursin\CM\repos\local\experiment\b83a1fb24dbf4945\2023-06-09.10-02-08.911210\7ed0ea0edd6b4dd7 + +"Hello World!" +``` + +You can now replay this experiment as follows: +```bash +cm replay experiment --tags=my,experiment,hello-world +``` + +Note that you can obtain current directory where you called CM +(rather than the CM experiment artifact directory) via {{CD}} variable as follows: +```bash +cme --tags=my,experiment,hello-world -- echo {{CD}} +``` + +You can also record experiments in another CM repository instead of the `local` one as follows: +```bash +cm list repo +cme {CM repository from above list}: --tags=my,experiment,hello-world -- echo {{CD}} +``` + +Finally, you can force a specific artifact name instead of some random ID as follows: +```bash +cme {my experiment artifact name} --tags=my,experiment,hello-world -- echo {{CD}} +``` +or with given repository +```bash +cme {CM repository from above list}:{my experiment artifact name} --tags=my,experiment,hello-world -- echo {{CD}} +``` + +## Exploring combinations of parameters (autotuning, design space exploration) + +One of the most common tasks is computer engineering (and other sciences) +is to explore various combinations of parameters of some applications +and systems to select the optimal ones to trade off performance, accuracy, +power consumption, memory usage and other characteristics. + +As a starting point, we have implemented a very simple explorer as a Cartesian product +of any number of specified variables that are passed to a user command line via double curly braces `{{VAR}}` similar to GitHub. + +You just need to create a simple JSON file `cm-input.json` to describe sets/ranges for each variable as follows: +```json +{ + "explore": { + "VAR1": [ + 1, + 2, + 3 + ], + "VAR2": [ + "a", + "b" + ], + "VAR3": "[2**i for i in range(0,6)]" + } +} +``` + +or YAML `cm-input.yaml`: + +```yaml +explore: + VAR1: [1,2,3] + VAR2: ["a","b"] + VAR3: "[2**i for i in range(0,6)]" +``` + +You can then run the following example to see all iterations: +```bash +cm run experiment --tags=my,experiment,hello-world @test_input.yaml \ + -- echo %VAR1% --batch_size={{VAR1}} {{VAR2}} {{VAR4{['xx','yy','zz']}}}-%%VAR3%% +``` + +Note that you can also define a Python list of range for other variables +directly in the command line as demonstrated in above example for `VAR4` - `{{VAR4{['xx','yy','zz']}}}`. + +CM will create or reuse experiment artifact with tags `my,experiment,hello-world` +and will then iterate in a Cartesian product of all detected variables. + +For each iteration, CM will create a `{date}{time}` subdirectory in a given experiment artifact +and will then run a user command line with substituted variables there. 
+
+You can then replay any of the exploration experiments as follows:
+```bash
+cm replay experiment --tags={tags} --dir={sub directory}
+```
+
+
+
+## Aggregating and unifying results
+
+Users can expose any information such as measured characteristics of their applications and/or systems (performance,
+hardware or OS state, accuracy, internal parameters, etc.) to CM for further analysis and visualization
+by generating a JSON `cm-output.json` file with any dictionary.
+
+If this file exists after executing a user command, CM will load it after each experiment or exploration step,
+and merge it into a list in a common `cm-result.json` file in the `{date}{time}` directory for this experiment.
+
+
+
+## Visualizing results
+
+Users can now visualize multiple experiments using the CM GUI script as follows:
+```bash
+cm run script "gui _graph" --exp_tags=my,experiment,hello-world
+```
+
+This script will search for all CM experiment entries with these tags, read all `cm-result.json` files,
+detect all keys used in result dictionaries, let users select these keys for X and Y axes
+to prepare a 2D graph using the popular [Streamlit library](https://streamlit.io), add derived metrics and set constraints
+as shown in the following example for one of the official [Tiny MLPerf submissions](https://github.com/mlcommons/tiny):
+
+![](../../script/import-mlperf-tiny-to-experiment/assets/cm-visualization-and-customization-of-tinymlperf-results2.png)
+
+
+
+
+
+## Sharing experiments with the community
+
+It is possible to share experiments with a common automation interface
+via your own GitHub/GitLab repository, container or zip/tar file
+in a non-intrusive way.
+
+You need to go to the root directory of your project and initialize a CM repository there
+with a unique name "my-cool-project" as follows:
+
+```bash
+cm init repo my-cool-project --path=. --prefix=cmr
+```
+
+This command will create a `cmr.yaml` file with a description and unique ID of this repository,
+and will register it in CM. Note that all CM automations and artifacts will be located
+in the `cmr` sub-directory to avoid contaminating your project. They can be deleted
+or moved to another project at any time.
+
+You can now record new experiments in this repository by adding `my-cool-project:` to the CM experiment command line as follows:
+```bash
+cm run experiment my-cool-project: --tags=my,experiment,hello-world -- echo "Hello World!"
+```
+
+You can also move a set of existing experiments from the `local` CM repository to the new one as follows:
+```bash
+cm move experiment my-cool-project: --tags=my,experiment,hello-world
+```
+
+You can continue replaying these experiments in the same way, no matter which CM repository they are in:
+```bash
+cm replay experiment --tags=my,experiment,hello-world
+```
+
+or you can enforce a specific repository as follows:
+```bash
+cm replay experiment my-cool-project: --tags=my,experiment,hello-world
+```
+
+
+
+
+
+## Running CM experiments with CM scripts
+
+User scripts and tools may contain some hardwired local paths that may prevent replaying them on another platform.
+In such cases, we suggest using [CM scripts](../script/README-extra.md).
+
+CM scripts solve this problem by wrapping existing user scripts and tools and detecting/resolving paths
+to specific tools and artifacts on a given user platform.
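+
+For example, a user script wrapped this way only needs to dump its measured characteristics into `cm-output.json` for CM to aggregate them as described above; a minimal sketch (the metric names are purely illustrative):
+
+```python
+import json
+
+# Hypothetical measurements produced by a wrapped user script
+result = {
+    "accuracy": 0.873,
+    "latency_ms": 12.4,
+    "batch_size": 8
+}
+
+# CM loads this file after each experiment step and merges it
+# into the common cm-result.json for the experiment
+with open("cm-output.json", "w") as f:
+    json.dump(result, f, indent=2)
+```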
+
+You can find an example of using CM scripts with CM experiments in [this directory](tests) - see `test3.bat` or `test3.sh`:
+```bash
+cm run experiment --tags=test @test3_input.yaml -- cm run script "print hello-world native" --env.CM_ENV_TEST1={{VAR1}} --const.CM_ENV_TEST2={{VAR2}}
+```
+
+You can use the following environment variables to pass the current path,
+different paths to experiment entries and the experiment step number to your CM script:
+* {{CD}}
+* {{CM_EXPERIMENT_STEP}}
+* {{CM_EXPERIMENT_PATH}}
+* {{CM_EXPERIMENT_PATH2}}
+* {{CM_EXPERIMENT_PATH3}}
+
+
+Feel free to check [this tutorial](../../../docs/tutorials/common-interface-to-reproduce-research-projects.md)
+to add CM scripts for your own applications, tools and native scripts.
+
+We are currently extending CM experiments and CM scripts for MLPerf benchmarks
+to automate benchmarking, optimization and design space exploration of ML/AI systems
+on any software and hardware - please stay tuned via our [Discord server](https://discord.gg/JjWNWXKxwT).
+
+
+
+## Further community developments
+
+We are developing this experiment automation in CM to help the community share, reproduce and reuse experiments
+using a common, simple, human-readable, and portable [automation language](../../../docs/README.md).
+
+Join our [Discord server](https://discord.gg/JjWNWXKxwT) from the [MLCommons task force on automation and reproducibility](../taskforce.md)
+to participate in the unification and extension of this interface and CM scripts for diverse research projects and tools.
+
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/README.md b/cmx4mlops/cmx4mlops/repo/automation/experiment/README.md
new file mode 100644
index 000000000..13ea6ec1a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/README.md
@@ -0,0 +1,87 @@
+*This README is automatically generated - don't edit!
See [extra README](README-extra.md) for extra notes!* + +### Automation actions + +#### test + + * CM CLI: ```cm test experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L22)) + * CM CLI with UID: ```cm test experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L22)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'test' + 'automation':'experiment,a0a2d123ef064bcb' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L22) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### run + + * CM CLI: ```cm run experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L64)) + * CM CLI with UID: ```cm run experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L64)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'run' + 'automation':'experiment,a0a2d123ef064bcb' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L64) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### rerun + + * CM CLI: ```cm rerun experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L428)) + * CM CLI with UID: ```cm rerun experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L428)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'rerun' + 'automation':'experiment,a0a2d123ef064bcb' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L428) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### replay + + * CM CLI: ```cm replay experiment``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L451)) + * CM CLI with UID: ```cm replay experiment,a0a2d123ef064bcb``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L451)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'replay' + 'automation':'experiment,a0a2d123ef064bcb' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/experiment/module.py#L451) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/experiment/_cm.json new file mode 100644 index 000000000..49bb0e616 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/_cm.json @@ -0,0 +1,11 @@ +{ + "alias": "experiment", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "desc": "Managing and reproducing experiments (under development)", + "developers": "[Grigori 
Fursin](https://cKnowledge.org/gfursin)", + "tags": [ + "automation" + ], + "uid": "a0a2d123ef064bcb" +} diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/module.py b/cmx4mlops/cmx4mlops/repo/automation/experiment/module.py new file mode 100644 index 000000000..c83e7c049 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/module.py @@ -0,0 +1,855 @@ +# Universal experiment automation to support universal benchmarking +# and optimization of apps and systems +# +# Author: Grigori Fursin +# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +import os +import itertools +import copy +import json + +from cmind.automation import Automation +from cmind import utils + + +class CAutomation(Automation): + """ + CM "experiment" automation actions + """ + + CM_RESULT_FILE = 'cm-result.json' + CM_INPUT_FILE = 'cm-input.json' + CM_OUTPUT_FILE = 'cm-output.json' + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + """ + + import json + print(json.dumps(i, indent=2)) + + return {'return': 0} + + ############################################################ + + def run(self, i): + """ + Run experiment + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + (artifact) (str): experiment artifact name (can include repository separated by :) + (tags) (str): experiment tags separated by comma + + (dir) (str): force recording into a specific directory + + + (script) (str): find and run CM script by name + (s) + + (script_tags) (str): find and run CM script by tags + (stags) + + (rerun) (bool): if True, rerun experiment in a given entry/directory instead of creating a new one... + + (explore) (dict): exploration dictionary + + ... 
+ + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + """ + + # Copy of original input + ii_copy = copy.deepcopy(i) + cur_dir = os.getcwd() + + # Find or add artifact based on repo/alias/tags + r = self._find_or_add_artifact(i) + if r['return'] > 0: + return r + + experiment = r['experiment'] + + console = i.get('out', '') == 'con' + + # Print experiment folder + experiment_path = experiment.path + + if console: + print('') + print('Path to CM experiment artifact: {}'.format(experiment_path)) + + # Get directory with datetime + datetime = i.get('dir', '') + + if datetime == '' and i.get('rerun', False): + # Check if already some dir exist + + directories = os.listdir(experiment_path) + + datetimes = sorted([f for f in directories if os.path.isfile( + os.path.join(experiment_path, f, self.CM_RESULT_FILE))], reverse=True) + + if len(datetimes) == 1: + datetime = datetimes[0] + elif len(datetimes) > 1: + print('') + print('Select experiment:') + + datetimes = sorted(datetimes) + + num = 0 + print('') + for d in datetimes: + print('{}) {}'.format(num, d.replace('.', ' '))) + num += 1 + + if not console: + return { + 'return': 1, 'error': 'more than 1 experiment found.\nPlease use "cm rerun experiment --dir={date and time}"'} + + print('') + x = input('Make your selection or press Enter for 0: ') + + x = x.strip() + if x == '': + x = '0' + + selection = int(x) + + if selection < 0 or selection >= num: + selection = 0 + + datetime = datetimes[selection] + + if datetime != '': + experiment_path2 = os.path.join(experiment_path, datetime) + else: + num = 0 + found = False + + while not found: + r = utils.get_current_date_time({}) + if r['return'] > 0: + return r + + datetime = r['iso_datetime'].replace( + ':', '-').replace('T', '.') + + if num > 0: + datetime += '.' 
+ str(num)
+
+                experiment_path2 = os.path.join(experiment_path, datetime)
+
+                if not os.path.isdir(experiment_path2):
+                    found = True
+                    break
+
+                num += 1
+
+        # Check/create directory with date_time
+        if not os.path.isdir(experiment_path2):
+            os.makedirs(experiment_path2)
+
+        # Change current path
+        print('Path to experiment: {}'.format(experiment_path2))
+
+        os.chdir(experiment_path2)
+
+        # Record experiment input with possible exploration
+        experiment_input_file = os.path.join(
+            experiment_path2, self.CM_INPUT_FILE)
+        experiment_result_file = os.path.join(
+            experiment_path2, self.CM_RESULT_FILE)
+
+        # Clean original input
+        for k in ['parsed_artifact', 'parsed_automation', 'cmd']:
+            if k in ii_copy:
+                del (ii_copy[k])
+
+        r = utils.save_json(file_name=experiment_input_file, meta=ii_copy)
+        if r['return'] > 0:
+            return r
+
+        # Prepare run command
+        cmd = ''
+
+        unparsed = i.get('unparsed_cmd', [])
+        if len(unparsed) > 0:
+            for u in unparsed:
+                if ' ' in u:
+                    u = '"' + u + '"'
+                cmd += ' ' + u
+
+            cmd = cmd.strip()
+
+        # Prepare script run
+        env = i.get('env', {})
+
+        ii = {'action': 'native-run',
+              'automation': 'script,5b4e0237da074764',
+              'env': env}
+
+        # Prepare exploration
+        # Note that from Python 3.7, dictionaries are ordered so we can define order for exploration in json/yaml
+        # ${{XYZ}} ${{ABC(range(1,2,3))}}
+
+        # Extract exploration expressions from {{VAR{expression}}}
+        explore = i.get('explore', {})
+
+        j = 1
+        k = 0
+        while j >= 0:
+            j = cmd.find('}}}', k)
+            if j >= 0:
+                k = j + 1
+
+                l = cmd.rfind('{{', 0, j)
+
+                if l >= 0:
+                    l2 = cmd.find('{', l + 2, j)
+                    if l2 >= 0:
+                        k = l2 + 1
+
+                        var = cmd[l + 2:l2]
+                        expr = cmd[l2 + 1:j]
+
+                        explore[var] = expr
+
+                        cmd = cmd[:l2] + cmd[j + 1:]
+
+        # Separate Design Space Exploration into var and range
+        explore_keys = []
+        explore_dimensions = []
+
+        for k in explore:
+            v = explore[k]
+
+            explore_keys.append(k)
+
+            if not isinstance(v, list):
+                v = eval(v)
+
+            explore_dimensions.append(v)
+
+        # Next command will run all iterations so we need to redo above command
+        # once again
+        step = 0
+
+        steps = itertools.product(*explore_dimensions)
+
+        num_steps = len(list(steps))
+
+        steps = itertools.product(*explore_dimensions)
+
+        ii_copy = copy.deepcopy(ii)
+
+        for dimensions in steps:
+
+            step += 1
+
+            print('================================================================')
+            print('Experiment step: {} out of {}'.format(step, num_steps))
+
+            print('')
+
+            ii = copy.deepcopy(ii_copy)
+
+            env = ii.get('env', {})
+
+            l_dimensions = len(dimensions)
+            if l_dimensions > 0:
+                print(' Updating ENV variables during exploration:')
+
+                print('')
+                for j in range(l_dimensions):
+                    v = dimensions[j]
+                    k = explore_keys[j]
+                    print(' - Dimension {}: "{}" = {}'.format(j, k, v))
+
+                    env[k] = str(v)
+
+                print('')
+
+            # Generate UID and prepare extra directory:
+            r = utils.gen_uid()
+            if r['return'] > 0:
+                return r
+
+            uid = r['uid']
+
+            experiment_path3 = os.path.join(experiment_path2, uid)
+            if not os.path.isdir(experiment_path3):
+                os.makedirs(experiment_path3)
+
+            # Get date time of experiment
+            r = utils.get_current_date_time({})
+            if r['return'] > 0:
+                return r
+
+            current_datetime = r['iso_datetime']
+
+            # Change current path
+            print('Path to experiment step: {}'.format(experiment_path3))
+            print('')
+            os.chdir(experiment_path3)
+
+            # Prepare and run experiment in a given placeholder directory
+            os.chdir(experiment_path3)
+
+            ii['env'] = env
+
+            # Change only in CMD
+            env_local = {'CD': cur_dir,
+                         'CM_EXPERIMENT_STEP': str(step),
+                         'CM_EXPERIMENT_PATH': experiment_path,
+                         'CM_EXPERIMENT_PATH2': experiment_path2,
+                         'CM_EXPERIMENT_PATH3': experiment_path3}
+
+            # Update {{}} in CMD
+            cmd_step = cmd
+
+            j = 1
+            k = 0
+            while j >= 0:
+                j = cmd_step.find('{{', k)
+                if j >= 0:
+                    k = j
+                    l = cmd_step.find('}}', j + 2)
+                    if l >= 0:
+                        var = cmd_step[j + 2:l]
+
+                        # Such vars must be in env
+                        if var not in env and var not in env_local:
+                            return {
+                                'return': 1, 'error': 'key "{}" is not in env during exploration'.format(var)}
+
+                        if var in env:
+                            value = env[var]
+                        else:
+                            value = env_local[var]
+
+                        cmd_step = cmd_step[:j] + str(value) + cmd_step[l + 2:]
+
+            ii['command'] = cmd_step
+
+            print('Generated CMD:')
+            print('')
+            print(cmd_step)
+            print('')
+
+            # Prepare experiment step input
+            experiment_step_input_file = os.path.join(
+                experiment_path3, self.CM_INPUT_FILE)
+
+            r = utils.save_json(file_name=experiment_step_input_file, meta=ii)
+            if r['return'] > 0:
+                return r
+
+            experiment_step_output_file = os.path.join(
+                experiment_path3, self.CM_OUTPUT_FILE)
+            if os.path.isfile(experiment_step_output_file):
+                os.remove(experiment_step_output_file)
+
+            # Run CMD
+            rr = self.cmind.access(ii)
+            if rr['return'] > 0:
+                return rr
+
+            # Record output
+            result = {}
+
+            if os.path.isfile(experiment_step_output_file):
+                r = utils.load_json(file_name=experiment_step_output_file)
+                if r['return'] > 0:
+                    return r
+
+                result = r['meta']
+
+                # Try to flatten
+                try:
+                    flatten_result = flatten_dict(result)
+                    result = flatten_result
+                except BaseException:
+                    pass
+
+            # Add extra info
+            result['uid'] = uid
+            result['iso_datetime'] = current_datetime
+
+            # Attempt to append to the main file ...
+            all_results = []
+
+            if os.path.isfile(experiment_result_file):
+                r = utils.load_json(file_name=experiment_result_file)
+                if r['return'] > 0:
+                    return r
+
+                all_results = r['meta']
+
+            all_results.append(result)
+
+            r = utils.save_json(
+                file_name=experiment_result_file,
+                meta=all_results)
+            if r['return'] > 0:
+                return r
+
+        rr = {'return': 0,
+              'experiment_path': experiment_path,
+              'experiment_path2': experiment_path2}
+
+        return rr
+
+    ############################################################
+
+    def rerun(self, i):
+        """
+        Rerun experiment
+
+        cm run experiment --rerun=True ...
+        """
+
+        i['rerun'] = True
+
+        return self.run(i)
+
+    ############################################################
+
+    def replay(self, i):
+        """
+        Replay experiment
+
+        Args:
+          (CM input dict):
+
+            (out) (str): if 'con', output to console
+
+            (artifact) (str): experiment artifact
+
+            (tags) (str): experiment tags separated by comma
+
+            (dir) (str): experiment directory (often date time)
+            (uid) (str): unique ID of an experiment
+
+            ...
+ + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + """ + + # Find or add artifact based on repo/alias/tags + i['fail_if_not_found'] = True + r = self._find_or_add_artifact(i) + if r['return'] > 0: + return r + + experiment = r['experiment'] + + console = i.get('out', '') == 'con' + + # Print experiment folder + experiment_path = experiment.path + + if console: + print('') + print('Path to CM experiment artifact: {}'.format(experiment_path)) + + # Check date and time folder + uid = i.get('uid', '') + datetime = i.get('dir', '') + + if datetime != '': + datetimes = [datetime] + else: + directories = os.listdir(experiment_path) + + datetimes = sorted([f for f in directories if os.path.isfile( + os.path.join(experiment_path, f, self.CM_RESULT_FILE))], reverse=True) + + if len(datetimes) == 0: + return {'return': 1, 'error': 'experiment(s) not found in {}'.format( + experiment_path)} + + # Check datetime directory + found_result = {} + + if uid != '': + for d in datetimes: + r = self._find_uid({'path': experiment_path, 'datetime': d, 'uid': uid}) + if r['return'] > 0: + return r + + if len(r.get('result', {})) > 0: + found_result = r['result'] + datetime = d + experiment_path2 = os.path.join(experiment_path, datetime) + break + + if len(found_result) == 0: + return {'return': 1, 'error': 'couldn\'t find result with UID {} in {}'.format( + uid, experiment_path)} + + else: + if len(datetimes) == 1: + datetime = datetimes[0] + else: + print('') + print('Available experiments:') + + datetimes = sorted(datetimes) + + num = 0 + print('') + for d in datetimes: + print('{}) {}'.format(num, d.replace('.', ' '))) + num += 1 + + if not console: + return { + 'return': 1, 'error': 'more than 1 experiment found.\nPlease use "cm run experiment --dir={date and time}"'} + + print('') + x = input('Make your selection or press Enter for 0: ') + + x = x.strip() + if x == '': + x = '0' + + selection = int(x) + + if selection < 0 or selection >= num: + selection = 0 + + datetime = datetimes[selection] + + # Final path to experiment + experiment_path2 = os.path.join(experiment_path, datetime) + + if not os.path.isdir(experiment_path2): + return {'return': 1, 'error': 'experiment path not found {}'.format( + experiment_path2)} + + r = self._find_uid({'path': experiment_path, 'datetime': datetime}) + if r['return'] > 0: + return r + + results = r['meta'] + + if len(results) == 0: + return {'return': 1, 'error': 'results not found in {}'.format( + experiment_path2)} + + elif len(results) == 1: + selection = 0 + + else: + print('') + print('Available Unique IDs of results:') + + results = sorted(results, key=lambda x: x.get('uid', '')) + + num = 0 + print('') + for r in results: + print('{}) {}'.format(num, r.get('uid', ''))) + num += 1 + + if not console: + return { + 'return': 1, 'error': 'more than 1 result found.\nPlease use "cm run experiment --uid={result UID}"'} + + print('') + x = input('Make your selection or press Enter for 0: ') + + x = x.strip() + if x == '': + x = '0' + + selection = int(x) + + if selection < 0 or selection >= num: + selection = 0 + + found_result = results[selection] + uid = found_result['uid'] + + # Final info + if console: + print('') + print('Path to experiment: {}'.format(experiment_path2)) + + print('') + print('Result UID: {}'.format(uid)) + + # Attempt to load cm-input.json + experiment_input_file = os.path.join( + experiment_path2, self.CM_INPUT_FILE) 
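+        # Replay requires the cm-input.json recorded during the original run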
+ + if not os.path.isfile(experiment_input_file): + return { + 'return': 1, 'error': '{} not found - can\'t replay'.format(self.CM_INPUT_FILE)} + + r = utils.load_json(experiment_input_file) + if r['return'] > 0: + return r + + cm_input = r['meta'] + + tags = cm_input.get('tags', '').strip() + if 'replay' not in tags: + if tags != '': + tags += ',' + tags += 'replay' + cm_input['tags'] = tags + + if console: + print('') + print('Experiment input:') + print('') + print(json.dumps(cm_input, indent=2)) + print('') + + # Run experiment again + r = self.cmind.access(cm_input) + if r['return'] > 0: + return r + + # TBA - validate experiment, etc ... + + return {'return': 0} + + ############################################################ + + def _find_or_add_artifact(self, i): + """ + Find or add experiment artifact (reused in run and reply) + + Args: + (CM input dict): + + (fail_if_not_found) (bool) - if True, fail if experiment is not found + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + experiment (CM artifact class): Experiment artifact + + """ + + console = i.get('out', '') == 'con' + + # Try to find experiment artifact by alias and/or tags + ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'] + ['tags']) + ii['action'] = 'find' + + ii_copy = copy.deepcopy(ii) + + # If artifact is specified, remove tags + artifact = ii.get('artifact', '').strip() + if artifact != '' and not artifact.endswith(':') \ + and '*' not in artifact and '?' not in artifact: + if 'tags' in ii: + del (ii['tags']) + + r = self.cmind.access(ii) + if r['return'] > 0: + return r + + lst = r['list'] + + if len(lst) > 1: + print('More than 1 experiment artifact found:') + + lst = sorted(lst, key=lambda x: x.path) + + num = 0 + print('') + for e in lst: + print('{}) {}'.format(num, e.path)) + print( + ' Tags: {}'.format( + ','.join( + e.meta.get( + 'tags', + [])))) + num += 1 + + if not console: + return {'return': 1, 'error': 'more than 1 experiment artifact found.\nPlease use "cm run experiment {name}" or "cm run experiment --tags={tags separated by comma}"'} + + print('') + x = input('Make your selection or press Enter for 0: ') + + x = x.strip() + if x == '': + x = '0' + + selection = int(x) + + if selection < 0 or selection >= num: + selection = 0 + + experiment = lst[selection] + + elif len(lst) == 1: + experiment = lst[0] + else: + # Create new entry + if i.get('fail_if_not_found', False): + return {'return': 1, 'error': 'experiment not found'} + + ii = copy.deepcopy(ii_copy) + ii['action'] = 'add' + r = self.cmind.access(ii) + if r['return'] > 0: + return r + + experiment_uid = r['meta']['uid'] + + r = self.cmind.access({'action': 'find', + 'automation': 'experiment,a0a2d123ef064bcb', + 'artifact': experiment_uid}) + if r['return'] > 0: + return r + + lst = r['list'] + if len(lst) == 0 or len(lst) >1: + return { + 'return': 1, 'error': 'created experiment artifact with UID {} but can\'t find it - weird'.format(experiment_uid)} + + experiment = lst[0] + + return {'return': 0, 'experiment': experiment} + + ############################################################ + def _find_uid(self, i): + """ + Find experiment result with a given UID + + Args: + (CM input dict): + + path (str): path to experiment artifact + datetime (str): sub-path to experiment + (uid) (str): experiment UID + + ... 
+
+        Returns:
+           (CM return dict):
+
+           * return (int): return code == 0 if no error and >0 if error
+           * (error) (str): error string if return>0
+
+           path_to_file (str): path to experiment result file
+           meta (dict): complete list of all results
+           result (dict): result dictionary with a given UID
+
+        """
+
+        path = i['path']
+        datetime = i['datetime']
+        uid = i.get('uid', '').strip()
+
+        path_to_experiment_result_file = os.path.join(
+            path, datetime, self.CM_RESULT_FILE)
+
+        rr = {'return': 0, 'path_to_file': path_to_experiment_result_file}
+
+        if os.path.isfile(path_to_experiment_result_file):
+            r = utils.load_json(file_name=path_to_experiment_result_file)
+            if r['return'] > 0:
+                return r
+
+            meta = r['meta']
+
+            rr['meta'] = meta
+
+            # Searching for UID
+            if uid != '':
+                for result in meta:
+                    ruid = result.get('uid', '').strip()
+                    if ruid != '' and ruid == uid:
+                        rr['result'] = result
+                        break
+
+        return rr
+
+############################################################################
+
+
+def flatten_dict(d, flat_dict=None, prefix=''):
+
+    if flat_dict is None:
+        flat_dict = {}
+
+    for k in d:
+        v = d[k]
+
+        if isinstance(v, dict):
+            flatten_dict(v, flat_dict, prefix + k + '.')
+        else:
+            flat_dict[prefix + k] = v
+
+    return flat_dict
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test2.bat b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test2.bat
new file mode 100644
index 000000000..5ecb3a0d8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test2.bat
@@ -0,0 +1 @@
+cm run experiment --tags=test @test_input.yaml -- echo %VAR1% --batch_size={{VAR1}} {{VAR2}} {{VAR4{['xx','yy','zz']}}}-%%VAR3%%
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test2.sh b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test2.sh
new file mode 100644
index 000000000..40d60a25a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test2.sh
@@ -0,0 +1 @@
+cm run experiment --tags=test @test_input.yaml -- echo "\${VAR1} --batch_size={{VAR1}} {{VAR2}} {{VAR4{['xx','yy','zz']}}}-\${VAR3}"
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3.bat b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3.bat
new file mode 100644
index 000000000..800e36076
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3.bat
@@ -0,0 +1 @@
+cm run experiment --tags=test @test3_input.yaml -- cm run script "print hello-world native" --env.CM_ENV_TEST1={{VAR1}} --const.CM_ENV_TEST2={{VAR2}}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3.sh b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3.sh
new file mode 100644
index 000000000..148e56433
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3.sh
@@ -0,0 +1 @@
+cm run experiment --tags=test @test3_input.yaml -- cm run script "print hello-world native" --env.CM_ENV_TEST1={{VAR1}} --const.CM_ENV_TEST2={{VAR2}}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3_input.yaml b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3_input.yaml
new file mode 100644
index 000000000..1c789f52a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test3_input.yaml
@@ -0,0 +1,4 @@
+explore:
+  VAR1: [1,2,3]
+  VAR2: ["a","b"]
+  CM_ENV_TEST3: "[2**i for i in range(0,6)]"
diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__json.bat b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__json.bat
new file mode 100644
index 000000000..16eb9184b
--- /dev/null
+++ 
b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__json.bat @@ -0,0 +1 @@ +cm run experiment --tags=test @test_input.json -- {{CD}}\test_run.bat diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__json.sh b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__json.sh new file mode 100644 index 000000000..a46cb98f5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__json.sh @@ -0,0 +1 @@ +cm run experiment --tags=test @test_input.json -- {{CD}}/test_run.sh diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__yaml.bat b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__yaml.bat new file mode 100644 index 000000000..e583f209b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__yaml.bat @@ -0,0 +1 @@ +cm run experiment --tags=test @test_input.yaml -- {{CD}}\test_run.bat diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__yaml.sh b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__yaml.sh new file mode 100644 index 000000000..60c2f7a80 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test__yaml.sh @@ -0,0 +1 @@ +cm run experiment --tags=test @test_input.yaml -- {{CD}}/test_run.sh diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_input.json b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_input.json new file mode 100644 index 000000000..f682f5a34 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_input.json @@ -0,0 +1,14 @@ +{ + "explore": { + "VAR1": [ + 1, + 2, + 3 + ], + "VAR2": [ + "a", + "b" + ], + "VAR3": "[2**i for i in range(0,6)]" + } +} diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_input.yaml b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_input.yaml new file mode 100644 index 000000000..a621c5ef9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_input.yaml @@ -0,0 +1,4 @@ +explore: + VAR1: [1,2,3] + VAR2: ["a","b"] + VAR3: "[2**i for i in range(0,6)]" diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_run.bat b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_run.bat new file mode 100644 index 000000000..b3aa91028 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_run.bat @@ -0,0 +1,3 @@ +echo %VAR1% --batch_size=%VAR3% %VAR2% + +echo {"x":%VAR1%, "y":"%VAR2%", "z":%VAR3%} > cm-output.json diff --git a/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_run.sh b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_run.sh new file mode 100644 index 000000000..7ed1b472e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/experiment/tests/test_run.sh @@ -0,0 +1 @@ +echo $VAR1 --batch_size=$VAR3 $VAR2 diff --git a/cmx4mlops/cmx4mlops/repo/automation/project/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/project/COPYRIGHT.md new file mode 100644 index 000000000..2a313520b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/project/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort. 
diff --git a/cmx4mlops/cmx4mlops/repo/automation/project/README.md b/cmx4mlops/cmx4mlops/repo/automation/project/README.md new file mode 100644 index 000000000..e684ac7ad --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/project/README.md @@ -0,0 +1,27 @@ +*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!* + +### Automation actions + +#### test + + * CM CLI: ```cm test project``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/project/module.py#L15)) + * CM CLI with UID: ```cm test project,6882553224164c56``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/project/module.py#L15)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'test' + 'automation':'project,6882553224164c56' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/project/module.py#L15) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/automation/project/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/project/_cm.json new file mode 100644 index 000000000..68042c431 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/project/_cm.json @@ -0,0 +1,10 @@ +{ + "alias": "project", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "developers": "[Grigori Fursin](https://cKnowledge.org/gfursin)", + "tags": [ + "automation" + ], + "uid": "6882553224164c56" +} diff --git a/cmx4mlops/cmx4mlops/repo/automation/project/module.py b/cmx4mlops/cmx4mlops/repo/automation/project/module.py new file mode 100644 index 000000000..963ab43b6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/project/module.py @@ -0,0 +1,66 @@ +# Author: Grigori Fursin +# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +import os + +from cmind.automation import Automation +from cmind import utils + + +class CAutomation(Automation): + """ + Automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... 
+ + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print(json.dumps(i, indent=2)) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/automation/report/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/report/COPYRIGHT.md new file mode 100644 index 000000000..2a313520b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/report/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort. diff --git a/cmx4mlops/cmx4mlops/repo/automation/report/README.md b/cmx4mlops/cmx4mlops/repo/automation/report/README.md new file mode 100644 index 000000000..6f2f96696 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/report/README.md @@ -0,0 +1,27 @@ +*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!* + +### Automation actions + +#### test + + * CM CLI: ```cm test report``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/report/module.py#L15)) + * CM CLI with UID: ```cm test report,6462ecdba2054467``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/report/module.py#L15)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'test' + 'automation':'report,6462ecdba2054467' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/report/module.py#L15) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/automation/report/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/report/_cm.json new file mode 100644 index 000000000..880895757 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/report/_cm.json @@ -0,0 +1,9 @@ +{ + "alias": "report", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "tags": [ + "automation" + ], + "uid": "6462ecdba2054467" +} diff --git a/cmx4mlops/cmx4mlops/repo/automation/report/module.py b/cmx4mlops/cmx4mlops/repo/automation/report/module.py new file mode 100644 index 000000000..963ab43b6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/report/module.py @@ -0,0 +1,66 @@ +# Author: Grigori Fursin +# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +import os + +from cmind.automation import Automation +from cmind import utils + + +class CAutomation(Automation): + """ + Automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + 
############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print(json.dumps(i, indent=2)) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/script/COPYRIGHT.md new file mode 100644 index 000000000..2a313520b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort. diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/script/README-extra.md new file mode 100644 index 000000000..d63c5dc16 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/README-extra.md @@ -0,0 +1,1034 @@ +[ [Back to index](../../../docs/README.md) ] + +# CM "script" automation + +
+<details>
+<summary>Click here to see the table of contents.</summary>
+
+ * [Motivation](#motivation)
+ * [Obtaining shared CM scripts](#obtaining-shared-cm-scripts)
+ * [Getting started with CM scripts](#getting-started-with-cm-scripts)
+ * [Understanding CM scripts](#understanding-cm-scripts)
+ * [Wrapping native scripts](#wrapping-native-scripts)
+ * [Modifying environment variables](#modifying-environment-variables)
+ * [Understanding unified output dictionary](#understanding-unified-output-dictionary)
+ * [Modifying state dictionary](#modifying-state-dictionary)
+ * [Running CM scripts via CM Python API](#running-cm-scripts-via-cm-python-api)
+ * [Assembling pipelines (workflows) of CM scripts](#assembling-pipelines-workflows-of-cm-scripts)
+ * [Customizing CM script execution flow](#customizing-cm-script-execution-flow)
+ * [Caching output of CM scripts](#caching-output-of-cm-scripts)
+ * [Assembling pipeline to compile and run image corner detection](#assembling-pipeline-to-compile-and-run-image-corner-detection)
+ * [Customizing sub-dependencies in a pipeline](#customizing-sub-dependencies-in-a-pipeline)
+ * [Using Python virtual environments](#using-python-virtual-environments)
+ * [Assembling pipelines with other artifacts included](#assembling-pipelines-with-other-artifacts-included)
+ * [Unifying host OS and CPU detection](#unifying-host-os-and-cpu-detection)
+ * [Detecting, installing and caching system dependencies](#detecting-installing-and-caching-system-dependencies)
+ * [Using variations](#using-variations)
+ * [Running CM scripts inside containers](#running-cm-scripts-inside-containers)
+ * [Getting help about other script automation flags](#getting-help-about-other-script-automation-flags)
+ * [Further reading](#further-reading)
+
+</details>
+
+*We suggest you check the [CM introduction](https://github.com/mlcommons/ck/blob/master/docs/introduction-cm.md)
+ and [CM CLI/API](https://github.com/mlcommons/ck/blob/master/docs/interface.md) to understand the CM motivation and concepts.
+ You can also try [CM tutorials](https://github.com/mlcommons/ck/blob/master/docs/tutorials/README.md)
+ to run some applications and benchmarks on your platform using CM scripts.*
+
+## Motivation
+
+While helping the community reproduce [150+ research papers](https://learning.acm.org/techtalks/reproducibility),
+we noticed that researchers kept creating their own ad-hoc scripts, environment variables and files
+to perform *exactly the same steps (actions) across all papers* to prepare, run and reproduce their experiments
+across different software, hardware, models and data.
+
+![](https://raw.githubusercontent.com/ctuning/ck-guide-images/master/cm-ad-hoc-projects.png)
+
+This experience motivated us to create a CM automation called "script" to wrap native scripts
+from research and industrial projects with a common, simple and unified CM Command Line Interface and Python API.
+
+Such non-intrusive wrapping helps make numerous native scripts and tools more reusable, interoperable, portable, findable
+and deterministic across different projects with different artifacts based on [FAIR principles](https://www.go-fair.org/fair-principles).
+
+CM scripts can be embedded into existing projects with minimal or no modifications at all, and they can be connected
+into powerful and portable pipelines and workflows using simple JSON or YAML files
+to prepare, run and reproduce experiments across continuously changing technology.
+
+Importantly, CM scripts can be executed in the same way in a native user environment,
+in Python virtual environments (to avoid messing up the native environment) and in containers,
+while automatically adapting to a given environment!
+
+![](https://raw.githubusercontent.com/ctuning/ck-guide-images/master/cm-unified-projects.png)
+
+
+## Obtaining shared CM scripts
+
+In order to reuse CM scripts embedded into shared projects,
+you need to install these projects via the CM interface.
+
+For example, to use automation scripts developed by the
+[MLCommons task force on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+and shared via GitHub, you just need to pull this repository via CM:
+
+```bash
+cm pull repo --url=https://github.com/mlcommons/cm4mlops --checkout=dev
+```
+
+or
+
+```bash
+cm pull repo mlcommons@cm4mlops --checkout=dev
+```
+
+You can now see all CM scripts available on your system as follows:
+
+```bash
+cm find script
+cm find script install* | sort
+```
+
+
+## Getting started with CM scripts
+
+You can run any of the above CM scripts on any platform as follows:
+```bash
+cm run script "tags separated by space" --keys=values --env.KEY=VALUE
+cm run script --tags="tags separated by comma" --keys=values --env.KEY=VALUE
+```
+or using the `cmr` shortcut available in CM v1.4.0+:
+```bash
+cmr "tags separated by space" --keys=values --env.KEY=VALUE
+```
+
+You can also use the `-j` flag to print JSON output at the end of the script execution
+and the `-v` flag to show extra debug information during script execution.
+ +For example, you can download a RESNET-50 model in ONNX format from Zenodo using the following script: +```bash +cmr "download file" --url=https://zenodo.org/record/4735647/files/resnet50_v1.onnx +``` + +You can also obtain info about your OS (Linux, Windows, MacOS) in a unified way and print JSON output +as well as CM debug info as follows: +```bash +cmr "detect os" -j -v +``` + +You can turn on silent mode using CM cfg automation: +```bash +cm set cfg --key.script.silent +``` +or +```bash +cm set cfg default --key.script.silent +``` + + +## Understanding CM scripts + +CM scripts are treated as standard CM artifacts with the associated CM automation ["script"](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/script), +CM action ["run"](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/script/module.py#L73), +and JSON and/or YAML meta descriptions. + +CM scripts can be invoked by using their alias, unique ID and human-readable tags (preferred method). + +For example, the [CM "Print Hello World" script](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world) +simply wraps 2 native `run.sh` and `run.bat` scripts to print "Hello World" on Linux, MacOs or Windows +together with a few environment variables: + +```bash +ls `cm find script print-hello-world` + +README.md _cm.json run.bat run.sh +``` + +It is described by this [_cm.json meta description file](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/print-hello-world/_cm.json) +with the following alias, UID and tags: + +```json +{ + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + + "alias": "print-hello-world", + "uid": "b9f0acba4aca4baa", + + "default_env": { + "CM_ENV_TEST1": "TEST1" + }, + + "env": { + "CM_ENV_TEST2": "TEST2" + }, + + "input_mapping": { + "test1": "CM_ENV_TEST1" + }, + + "new_env_keys": [ + "CM_ENV_TEST*" + ], + + "new_state_keys": [ + "hello_test*" + ], + + "tags": [ + "print", + "hello-world", + "hello world", + "hello", + "world", + "native-script", + "native", + "script" + ] +} +``` + +The `automation_alias` and `automation_uid` tells CM that this artifact can be used with the CM "script" automation. + +Therefore, this script can be executed from the command line in any of the following ways: + +```bash +cm run script print-hello-world +cm run script b9f0acba4aca4baa +cm run script --tags=print,native-script,hello-world +cm run script "print native-script hello-world" +``` + +The same script can be also executed using CM Python API as follows: +```python +import cmind + +output = cmind.access({'action':'run', 'automation':'script', 'tags':'print,native-script,hello-world'}) +if output['return']>0: + cmind.error(output) + +import json +print (json.dumps(output, indent=2)) +``` + +Normally you should see the following output along with some debug information (that will be removed soon): + +```bash + +... + +CM_ENV_TEST1 = TEST1 +CM_ENV_TEST2 = TEST2 + +HELLO WORLD! +... +``` + +### Wrapping native scripts + +*run.bat* and *run.sh* are native scripts that will be executed by this CM script in a unified way on Linux, MacOS and Windows: + +```bash +echo "" +echo "CM_ENV_TEST1 = ${CM_ENV_TEST1}" +echo "CM_ENV_TEST2 = ${CM_ENV_TEST2}" + +echo "" +echo "HELLO WORLD!" +``` + +The idea to use native scripts is to make it easier for researchers and engineers to reuse their existing automation scripts +while providing a common CM wrapper with a unified CLI, Python API and extensible meta descriptions. 
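+
+You can also locate such wrapped scripts programmatically. Here is a minimal sketch using the core `find` action via the Python API (the `path` attribute of the returned artifact objects is an assumption based on the CM artifact API):
+
+```python
+import cmind
+
+# Find the "print-hello-world" script artifact shown above:
+r = cmind.access({'action': 'find',
+                  'automation': 'script',
+                  'artifact': 'print-hello-world'})
+if r['return'] > 0:
+    cmind.error(r)
+
+# Each artifact directory contains _cm.json plus the wrapped run.sh / run.bat:
+for artifact in r['list']:
+    print(artifact.path)
+```
+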
+### Modifying environment variables
+
+The CM script automation CLI uses the flag `--env.VAR=VALUE` to set an environment variable and pass it to a native script,
+as shown in this example:
+
+```bash
+cm run script "print native-script hello-world" \
+     --env.CM_ENV_TEST1=ABC1 --env.CM_ENV_TEST2=ABC2
+
+...
+
+CM_ENV_TEST1 = ABC1
+CM_ENV_TEST2 = TEST2
+
+HELLO WORLD!
+```
+
+Note that *CM_ENV_TEST2* did not change. This happened because the `env` dictionary in the *_cm.json* forces *CM_ENV_TEST2* to *TEST2*,
+while the `default_env` dictionary allows environment variables to be updated externally.
+
+You can still force an environment variable to a given value externally using the `--const` flag as follows:
+
+```bash
+cm run script "print native-script hello-world" \
+     --env.CM_ENV_TEST1=ABC1 --const.CM_ENV_TEST2=ABC2
+
+...
+
+CM_ENV_TEST1 = ABC1
+CM_ENV_TEST2 = ABC2
+
+HELLO WORLD!
+```
+
+You can also use a JSON file instead of flags. Create *input.json* (or any other filename):
+```json
+{
+  "tags":"print,native-script,hello-world",
+  "env":{
+    "CM_ENV_TEST1":"ABC1"
+  }
+}
+```
+
+and run the CM script with this input file as follows:
+```
+cm run script @input.json
+```
+
+You can use a YAML file instead of the CLI. Create *input.yaml* (or any other filename):
+```yaml
+tags: "print,hello-world,script"
+env:
+  CM_ENV_TEST1: "ABC1"
+```
+
+and run the CM script with this input file as follows:
+```
+cm run script @input.yaml
+```
+
+Finally, you can map any other flag from the script CLI to an environment variable
+using the key `input_mapping` in the `_cm.json` meta description of this script:
+
+```bash
+cm run script "print native-script hello-world" --test1=ABC1
+
+...
+
+CM_ENV_TEST1 = ABC1
+CM_ENV_TEST2 = TEST2
+
+HELLO WORLD!
+```
+
+
+### Understanding unified output dictionary
+
+You can see the output of a given CM script in JSON format by adding the `--out=json` flag as follows:
+
+```bash
+cm run script --tags=print,hello-world,script --env.CM_ENV_TEST1=ABC1 --out=json
+
+...
+
+CM_ENV_TEST1 = ABC1
+CM_ENV_TEST2 = TEST2
+
+HELLO WORLD!
+
+{
+  "deps": [],
+  "env": {
+    "CM_ENV_TEST1": "ABC1",
+    "CM_ENV_TEST2": "TEST2"
+  },
+  "new_env": {
+    "CM_ENV_TEST1": "ABC1",
+    "CM_ENV_TEST2": "TEST2"
+  },
+  "new_state": {},
+  "return": 0,
+  "state": {}
+}
+```
+
+Note that `new_env` shows new environment variables produced and explicitly exposed by this script
+via the `new_env_keys` key in the `_cm.json` meta description of this script.
+
+This is needed to assemble automation pipelines and workflows while avoiding their contamination
+with temporary environments. A CM script must explicitly expose the environment variables that will
+go to the next stage of a pipeline.
+
+In the following example, `CM_ENV_TEST3` will be added to `new_env` while `CM_XYZ` will not,
+since it is not covered by `"new_env_keys":["CM_ENV_TEST*"]`:
+
+```bash
+cm run script --tags=print,hello-world,script --env.CM_ENV_TEST1=ABC1 --out=json --env.CM_ENV_TEST3=ABC3 --env.CM_XYZ=XYZ
+```
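+
+The same filtered dictionary is available programmatically. Here is a minimal sketch using the Python API shown earlier (assuming the demo "print hello-world" script is installed via the repository above):
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'print,hello-world,script',
+                  'env': {'CM_ENV_TEST3': 'ABC3', 'CM_XYZ': 'XYZ'}})
+if r['return'] > 0:
+    cmind.error(r)
+
+# Only variables matching "new_env_keys" (CM_ENV_TEST*) are exposed here:
+print(r['new_env'])
+```
+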
+### Modifying state dictionary
+
+Sometimes we need more complex structures than environment variables in scripts and workflows.
+For this, we use a dictionary `state` that can be updated and exposed by a given script via the `new_state_keys` key
+in the `_cm.json` meta description of this script.
+
+In the following example, the `hello_world` key will be updated in the `new_state` dictionary,
+while the `hello` key will not, because it does not match the wildcard `"new_state_keys":["hello_world*"]`:
+
+```bash
+cm run script --tags=print,hello-world,script --out=json \
+     --state.hello=xyz1 --state.hello_world=xyz2
+
+...
+
+{
+  "deps": [],
+  "env": {
+    "CM_ENV_TEST1": "TEST1",
+    "CM_ENV_TEST2": "TEST2"
+  },
+  "new_env": {
+    "CM_ENV_TEST1": "TEST1",
+    "CM_ENV_TEST2": "TEST2"
+  },
+  "new_state": {
+    "hello_world": "xyz2"
+  },
+  "return": 0,
+  "state": {
+    "hello": "xyz1",
+    "hello_world": "xyz2"
+  }
+}
+```
+
+### Running CM scripts via CM Python API
+
+You can run a given CM script from Python or Jupyter notebooks as follows:
+
+```python
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'print,hello-world,script',
+                  'const':{
+                    'CM_ENV_TEST1':'ABC1',
+                  },
+                  'env':{
+                    'CM_ENV_TEST2':'ABC2'
+                  },
+                  'state': {
+                    'hello':'xyz1',
+                    'hello_world':'xyz2'
+                  }
+                 })
+
+print (r)
+```
+
+```bash
+...
+
+CM_ENV_TEST1 = ABC1
+CM_ENV_TEST2 = ABC2
+
+HELLO WORLD!
+
+{'return': 0,
+ 'env': {'CM_ENV_TEST2': 'TEST2', 'CM_ENV_TEST1': 'ABC1'},
+ 'new_env': {'CM_ENV_TEST2': 'TEST2', 'CM_ENV_TEST1': 'ABC1'},
+ 'state': {'hello': 'xyz1', 'hello_world': 'xyz2'},
+ 'new_state': {'hello_world': 'xyz2'},
+ 'deps': []}
+```
+
+
+### Assembling pipelines (workflows) of CM scripts
+
+We've added a simple mechanism to chain reusable CM scripts into complex pipelines
+without the need for specialized workflow frameworks.
+
+Simply add a "deps" list to the `_cm.json` or `_cm.yaml` of your script as follows:
+
+```json
+{
+  "deps": [
+    {
+      "tags": "a string of tags separated by comma to find and execute the 1st CM script"
+    },
+    {
+      "tags": "a string of tags separated by comma to find and execute the 2nd CM script"
+    },
+    ...
+  ]
+}
+```
+
+This CM script will run all dependent scripts in the above sequence, aggregate their environment variables and `state` dictionaries,
+and will then run its native scripts.
+
+You can also turn on specific dependencies based on the values of specific environment variables or on a min/max version (if supported)
+in this pipeline as follows:
+
+```json
+{
+  "deps": [
+    {
+      "tags": "a string of tags separated by comma to find and execute the 1st CM script",
+      "enable_if_env": { "USE_CUDA" : ["yes", "YES", "true"] }
+    },
+    {
+      "tags": "a string of tags separated by comma to find and execute the 2nd CM script",
+      "enable_if_env": { "USE_CPU" : ["yes", "YES", "true"] },
+      "version_min": "3.10"
+    },
+    ...
+  ]
+}
+```
+
+You can also specify dependencies to be invoked after executing native scripts
+using a dictionary `"post_deps"` with the same format as `"deps"`.
+You can see an example of such dependencies in the [_cm.json](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/print-hello-world-py/_cm.json)
+of the ["print-hello-world-py" CM script](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/print-hello-world-py)
+that detects and unifies OS parameters using the ["detect-os" CM script](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os),
+detects or builds Python using the ["get-python3" CM script](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3)
+and then runs `code.py` with "Hello World" from `run.sh` or `run.bat`:
+
+```bash
+cm run script "print python hello-world"
+```
+
+
+### Customizing CM script execution flow
+
+If a developer adds a `customize.py` file inside a given CM script,
+it can be used to programmatically update environment variables, prepare input scripts
+and even invoke other scripts programmatically using Python.
+
+If a function `preprocess` exists in this file, the CM script will call it before
+invoking a native script.
+
+If this function returns `{"skip":True}` in its output,
+further execution of this script will be skipped.
+
+After executing the `preprocess` function, the CM script automation will record the global state dictionary
+into *tmp-state.json* and the local state dictionary from this CM script into *tmp-state-new.json*.
+
+The CM script automation will then run a native script (*run.sh* on Linux/MacOS or *run.bat* on Windows)
+with all merged environment variables from previous scripts.
+
+Note that native scripts can also create 2 files that will be automatically picked up and processed by the CM script automation:
+* *tmp-run-env.out* - list of environment variables to update the "new_env" of a given CM script
+* *tmp-run-state.json* - the state dictionary to update the "new_state" of a given CM script
+
+If a `postprocess` function exists in the *customize.py* file, the CM script will call it
+to finalize the postprocessing of files, environment variables, and the state dictionary.
+
+You can see an [example of such a `customize.py` module](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-python3/customize.py) in the CM script
+to [detect or install/build a Python interpreter](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3) in a unified way on any machine.
+
+This script exposes a number of environment variables for a detected Python
+in the [`postprocess` function](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-python3/customize.py#L60):
+
+* `CM_PYTHON_BIN` - python3.10 or python.exe or any other name of a Python interpreter on a given system
+* `CM_PYTHON_BIN_PATH` - path to a detected or installed python
+* `CM_PYTHON_BIN_WITH_PATH` - full path to a detected or installed python
+* `LD_LIBRARY_PATH` - updated LD_LIBRARY_PATH to python
+* `PATH` - updated PATH to python
+
+These environment variables can be reused by other CM scripts or external tools
+while decoupling them from specific python versions and paths, even allowing
+multiple versions of tools and artifacts to co-exist on the same system
+and be plugged into CM scripts:
+
+```bash
+cm run script "get python3" --out=json
+```
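+
+To make the above contract concrete, here is a minimal sketch of a `customize.py` file. The input keys and environment variable names (`CM_MY_TOOL_*`) are hypothetical illustrations based only on the behavior described above; see the linked `get-python3` example for a real implementation:
+
+```python
+# Minimal customize.py sketch (assumption: the input dict carries an 'env'
+# dictionary, and the {'return': 0} / {'skip': True} convention applies).
+
+def preprocess(i):
+    env = i['env']
+
+    # Skip the native script if a hypothetical flag is already set:
+    if env.get('CM_MY_TOOL_ALREADY_DETECTED', '') == 'yes':
+        return {'return': 0, 'skip': True}
+
+    # Prepare an input for the native run.sh / run.bat:
+    env['CM_MY_TOOL_INPUT'] = 'some value'
+
+    return {'return': 0}
+
+
+def postprocess(i):
+    env = i['env']
+
+    # Expose a result to the next pipeline stage
+    # (the key must match a pattern in new_env_keys to survive):
+    env['CM_MY_TOOL_VERSION'] = env.get('CM_MY_TOOL_VERSION', 'unknown')
+
+    return {'return': 0}
+```
+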
+### Caching output of CM scripts
+
+By default, CM scripts run wrapped scripts and tools, update environment variables and produce new files in the current directory.
+
+In many cases, we want to cache the output and environment variables when we run the same CM script with the same input again,
+to avoid potentially lengthy detections, downloads, builds and data pre/post processing.
+
+That's why we have developed another CM automation called ["cache"](../cache/README-extra.md)
+to cache the output of scripts in "cache" artifacts in the "local" CM repository
+that can be found by tags or unique IDs like any other CM artifact.
+
+Our convention is to use names *get-{tool or artifact}* for CM scripts that detect already installed artifacts,
+prepare their environment and cache them in the *local* CM repository using the "cache" automation.
+
+If an installed artifact doesn't exist, we either enhance the above scripts to include download, installation and even building
+of a given artifact (if it's a tool), or we create extra CM scripts *install-{tool or artifact}*
+that download and prepare tools and artifacts (install, build, preprocess, etc).
+
+For example, the CM script [*get-python3*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3)
+has a *customize.py* with a *preprocess* function that implements the search for python3 on Linux
+or python.exe on Windows, 2 native scripts *run.sh* and *run.bat* to obtain the version of the detected python installation,
+and a *postprocess* function to prepare the environment variables *CM_PYTHON_BIN* and *CM_PYTHON_BIN_WITH_PATH*
+that can be used by other CM scripts:
+
+```bash
+cm run script "get python" --out=json
+```
+
+If you run it for the first time and the CM script detects multiple versions of python co-existing on your system,
+it will ask you to select one. CM will then cache the output in a *cache* artifact of the CM repository.
+You can see all *cache* CM entries for other tools and artifacts as follows:
+
+```bash
+cm show cache
+```
+or
+```bash
+cm show cache --tags=get,python
+```
+
+You can see the cached files as follows:
+```bash
+ls `cm find cache --tags=get,python`
+```
+
+* _cm.json - CM meta description of this "cache" artifact with its unique ID, tags and other meta information
+* cm-cached-state.json - dictionary with the new environment variables and the new state dictionary
+* tmp-env-all.sh - all environment variables used during CM script execution
+* tmp-env.sh - only the new environment variables produced after CM script execution (it can be used directly by external tools)
+* tmp-run.sh - all environment variables and a call to the native script (useful for reproducibility)
+* tmp-state.json - the state before running the native script - it can be loaded and used by native scripts and tools instead of using environment variables
+* tmp-ver.out - the output of the --version command parsed by the `postprocess` and `detect_version` functions in `customize.py`
+
+If you (or another CM script) run this CM script to get the python tool a second time, the CM script will reuse the cached output:
+```bash
+cm run script "get python" --out=json
+```
+
+This also allows us to install multiple tool versions into different CM cache entries (python virtual environments,
+LLVM compiler, etc) and use them separately without the need to change higher-level CM scripts - these tools
+will be automatically plugged in:
+
+```bash
+cm run script "install prebuilt llvm" --version=14.0.0
+cm run script "install prebuilt llvm" --version=16.0.0
+cm run script "install src llvm"
+```
+
+This approach allows us to "probe" the user environment, detect different tools and artifacts, unify them
+and adapt complex applications to a user environment in an automatic, transparent and non-intrusive way,
+as shown in the next example.
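+
+Before moving on, note that the cache can also be queried programmatically. Here is a minimal sketch using the core `find` action of the "cache" automation (assuming a python entry has already been cached; the `path` attribute is an assumption based on the CM artifact API):
+
+```python
+import cmind
+
+r = cmind.access({'action': 'find',
+                  'automation': 'cache',
+                  'tags': 'get,python'})
+if r['return'] > 0:
+    cmind.error(r)
+
+# Each returned artifact object points to a cached directory on disk:
+for artifact in r['list']:
+    print(artifact.path)
+```
+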
+## Assembling pipeline to compile and run image corner detection
+
+We can use an automatically detected compiler from a CM script to create simple and technology-neutral compilation and execution pipelines
+in CM scripts.
+
+For example, we have implemented a simple [image corner detection CM script]( https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-corner-detection )
+with [this meta description](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-corner-detection/_cm.json).
+
+It uses two other reusable CM scripts to compile a given program using a detected/installed and cached compiler via CM (such as LLVM),
+and then run it with some input image.
+
+First, let's detect an installed LLVM via CM:
+
+```bash
+cm run script "get llvm"
+```
+or install a prebuilt version on Linux, MacOS or Windows:
+```bash
+cm run script "install prebuilt llvm" --version=14.0.0
+```
+
+We can then run this CM script to compile and run image corner detection as follows:
+```bash
+cm run script "app image corner-detection" --input=`cm find script --tags=app,image,corner-detection`/computer_mouse.pgm
+```
+
+This CM script will preset environment variables for a detected/installed compiler,
+compile our C program, run it via `run.sh` (Linux/MacOS) or `run.bat` (Windows)
+and generate an output image *output_image_with_corners.pgm* in the `output` directory of this script:
+
+```bash
+ls `cm find script --tags=app,image,corner-detection`/output
+
+image-corner  output_image_with_corners.pgm
+```
+
+Note that this directory also contains the compiled tool "image-corner" that can now be used independently of CM if necessary.
+
+
+### Customizing sub-dependencies in a pipeline
+
+When running a CM script with many sub-dependencies, similar to the above example,
+we may want to specify some version constraints on sub-dependencies such as LLVM.
+
+One can use the key `"names"` in the "deps" list of any CM script meta description
+to specify multiple names for a given dependency.
+
+For example, the dependency on "get compiler" in the CM script "compile-program"
+has `"names":["compiler"]` as shown [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/compile-program/_cm.json#L15).
+
+We can now use the CM script flag `--add_deps_recursive.{some name}.{some key}={some value}` or
+`--adr.{above name}.{some key}={some value}` to update the dictionary of any sub-dependency
+that has `some name`.
+
+For example, we can now specify to use LLVM 16.0.0 for image corner detection as follows:
+```bash
+cm run script "app image corner-detection" --adr.compiler.tags=llvm --adr.compiler.version=16.0.0
+```
+
+If this compiler was not yet detected or installed by CM, it will find related scripts
+to install either a prebuilt version of LLVM or build it from sources.
+
+
+## Using Python virtual environments
+
+By default, CM scripts will install python dependencies into user space.
+This can influence other existing projects and may not be desirable.
+CM can be used inside virtual Python environments without any changes,
+but a user still needs to do some manual steps to set up such an environment.
+That's why we've developed a [CM script](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-venv) +to automate creation of multiple Python virtual environments with different names: + +```bash +cm run script "install python-venv" --name={some name} +``` + +CM will create a virtual environment using default Python and save it in CM cache. +It is possible to create a python virtual environment with a minimal required version +or a specific one on Linux and MacOS as follows: + +```bash +cm run script "install python-venv" --version_min=3.8 --name=mlperf +cm run script "install python-venv" --version=3.10.8 --name=mlperf2 +``` + +In this case, CM will attempt to detect Python 3.10.8 on a system. +If CM can't detect it, CM will then automatically download and build it +using [this script](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-src). + +Now, when user runs pipelines that install Python dependencies, CM will detect +virtual environment in the CM cache as well as native Python and will ask a user +which one to use. + +It is possible to avoid such questions by using the flag `--adr.python.name=mlperf`. +In such case, CM will propagate the name of a virtual environment to all sub-dependencies +as shown in the next example. + +Instead of adding this flag to all scripts, you can specify it +using `CM_SCRIPT_EXTRA_CMD` environment variable as follows: +```bash +export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf" +``` + +You can even specify min Python version required as follows: +```bash +export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf --adr.python.version_min=3.9" +``` + +## Assembling pipelines with other artifacts included + +We can now use existing CM scripts as "LEGO" blocks to assemble more complex automation pipelines and workflows +while automatically downloading and plugging in +and pre-/post-processing all necessary artifacts (models, data sets, frameworks, compilers, etc) +on any supported platform (Linux, MacOS, Windows). + +For example, we have implemented a simple image classification application automated by the following CM script: +[*app-image-classification-onnx-py*]( https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py ). + +It is described by the following [`_cm.yaml`](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml) meta description: + +```yaml +alias: app-image-classification-onnx-py +uid: 3d5e908e472b417e + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular ML/AI applications" + +tags: +- app +- image-classification +- onnx +- python + +default_env: + CM_BATCH_COUNT: '1' + CM_BATCH_SIZE: '1' + +deps: +- tags: detect,os +- tags: get,sys-utils-cm +- names: + - python + - python3 + tags: get,python3 +- tags: get,cuda + names: + - cuda + enable_if_env: + USE_CUDA: + - yes +- tags: get,dataset,imagenet,image-classification,original +- tags: get,dataset-aux,imagenet-aux,image-classification +- tags: get,ml-model,resnet50,_onnx,image-classification + +- tags: get,generic-python-lib,_onnxruntime + skip_if_env: + USE_CUDA: + - yes +- tags: get,generic-python-lib,_onnxruntime_gpu + enable_if_env: + USE_CUDA: + - yes + +variations: + cuda: + env: + USE_CUDA: yes +``` + + +Its `deps` pipeline runs other CM scripts to detect OS parameters, detect or install Python, +install the latest ONNX run-time, download ResNet-50 model and the minimal ImageNet dataset (500). 
+It also contains [`run.sh`](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/run.sh)
+and [`run.bat`](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/run.bat)
+to install extra Python requirements (not yet unified by CM scripts)
+and run a Python script that classifies an image from ImageNet
+or an image provided by the user.
+
+Before running it, let us install a Python virtual environment via CM to avoid altering
+the native Python installation:
+```bash
+cm run script "install python-venv" --name=my-test
+cm show cache --tags=python
+```
+
+You can run it on any system as follows:
+
+```bash
+cm run script "python app image-classification onnx"
+```
+
+To avoid CM asking which python to use, you can force the use of the Python virtual environment
+as follows:
+
+```bash
+cm run script "python app image-classification onnx" --adr.python.name=my-test
+```
+
+If you run this CM script for the first time, it may take some minutes because it will detect, download, build and cache all dependencies.
+
+When you run it again, it will plug in all cached dependencies:
+
+```bash
+cm run script "python app image-classification onnx" --adr.python.name=my-test
+```
+
+You can then run it with your own image as follows:
+```bash
+cm run script --tags=app,image-classification,onnx,python \
+     --adr.python.name=my-test --input={path to my JPEG image}
+```
+
+
+## Unifying host OS and CPU detection
+
+In order to make experiments more portable and interoperable, we need to unify
+the information about the host OS and CPU across different systems.
+We are gradually improving the following two CM scripts:
+
+* [`detect-os`](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os)
+* [`detect-cpu`](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu)
+
+These two CM scripts have a *customize.py* with preprocess and postprocess functions
+and a native run script to detect OS info and update the environment variables
+and the state dictionary needed by all other CM scripts.
+
+You can run them on your platform as follows:
+
+```bash
+cm run script "detect os" --out=json
+
+...
+
+cm run script "detect cpu" --out=json
+```
+
+If some information is missing or not consistent across different platforms,
+you can improve it in a backwards-compatible way. You can then submit a PR [here](https://github.com/mlcommons/ck/pulls)
+to let the community reuse your knowledge and collaboratively enhance common automation scripts, pipelines and workflows -
+that's why we called our project "Collective Knowledge".
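+
+The unified information is returned like any other CM script output. Here is a minimal sketch via the Python API (the exact `new_env` key names are assumptions - inspect the returned dictionary on your own platform):
+
+```python
+import cmind
+
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'detect,os'})
+if r['return'] > 0:
+    cmind.error(r)
+
+# Print all unified variables exposed by the script
+# (specific key names are platform-dependent and assumed here):
+for key, value in r['new_env'].items():
+    print(f'{key} = {value}')
+```
+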
+## Detecting, installing and caching system dependencies
+
+Many projects require the installation of some system dependencies. Unfortunately, the procedure
+differs across systems.
+
+That's why we have developed two other CM scripts to unify and automate this process on any system:
+
+* [`get-sys-utils-cm`]( https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm )
+* [`get-sys-utils-min`]( https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-min )
+
+They will install (minimal) system dependencies based on the OS and CPU info detected by the CM scripts mentioned above.
+
+The last script is particularly useful to make applications compatible with Windows,
+where many typical tools like "wget", "patch", etc are missing - they will be automatically
+downloaded by that script.
+
+You can use them as follows:
+```bash
+cm run script "get sys-utils-min" --out=json
+cm run script "get sys-utils-cm"
+```
+
+
+## Using variations
+
+In some cases, we want the same CM script to download some artifact in different formats.
+
+For example, we may want to download and cache the ResNet-50 model in ONNX, PyTorch, TensorFlow or TFLite format.
+
+In such cases, we use so-called `variations` in the meta description of a given CM script.
+
+For example, the CM script [`get-ml-model-resnet50`](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-resnet50) has many variations and combinations, separated by commas,
+to download this model in multiple formats:
+
+* `onnx`
+* `onnx,opset-11`
+* `onnx,opset-8`
+* `pytorch`
+* `pytorch,fp32`
+* `pytorch,int8`
+* `tflite`
+* `tflite,argmax`
+* `tflite,no-argmax`
+* `tensorflow`
+* `batch_size.1`
+* `batch_size.#`
+
+These variations simply update environment variables and add more dependencies on other CM scripts
+before running `customize.py` and native scripts, as described in [_cm.json]( https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-ml-model-resnet50/_cm.json#L30 ).
+
+It is possible to specify a required variation or multiple variations when running a given CM script by adding extra tags with the "_" prefix.
+
+For example, you can install the quantized ResNet-50 model in PyTorch int8 format as follows:
+
+```bash
+cm run script "get ml-model resnet50 _pytorch _int8" --out=json
+```
+
+You can install another FP32 variation of this model at the same time:
+```bash
+cm run script "get ml-model resnet50 _pytorch _fp32" --out=json
+```
+
+You can now find them in the cache by tags and variations as follows:
+```bash
+cm show cache --tags=get,ml-model,resnet50
+cm show cache --tags=get,ml-model,resnet50,_pytorch
+cm show cache --tags=get,ml-model,resnet50,_pytorch,_fp32
+```
+
+
+## Running CM scripts inside containers
+
+One of the important ideas behind using a common automation language
+is to use it inside and outside containers, thus avoiding the need to create
+ad-hoc manual containers and README files.
+
+We can just use base containers and let the CM automation language
+detect installed tools and connect external data with the automation pipelines and workflows.
+
+See examples of modular containers with the CM language to automate the MLPerf inference benchmark from MLCommons
+[here](https://github.com/mlcommons/ck/tree/master/docker).
+
+Note that we continue working on CM functionality to automatically generate
+Docker containers and README files when executing CM scripts
+(a prototype was successfully validated in the MLPerf inference v3.0 submission):
+
+* https://github.com/mlcommons/cm4mlops/tree/main/script/build-dockerfile
+* https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image
+
+
+## Getting help about other script automation flags
+
+You can get help about all flags used to customize the execution
+of a given CM script from the command line as follows:
+
+```bash
+cm run script --help
+```
+
+Some flags are useful to make it easier to debug scripts and save output in files.
+
+You can find more info about the CM script execution flow in this [document](README-specs.md).
+
+## Further reading
+
+* [CM "script" automation specification](README-specs.md)
+* [MLCommons CM script sources](https://github.com/mlcommons/cm4mlops/tree/main/script)
+* [List of portable and reusable CM scripts from MLCommons](https://access.cknowledge.org/playground/?action=scripts)
+* [CM "cache" automation](../cache/README-extra.md)
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/README-specs.md b/cmx4mlops/cmx4mlops/repo/automation/script/README-specs.md
new file mode 100644
index 000000000..4b40feeba
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/README-specs.md
@@ -0,0 +1,79 @@
+# CM "script" automation specification
+
+Please check the [CM documentation](https://github.com/mlcommons/ck/tree/master/docs#collective-mind-language-cm)
+for more details about the CM automation language.
+
+See the CM script introduction [here](README-extra.md).
+
+See the [automatically generated catalog](https://github.com/mlcommons/ck/blob/master/docs/list_of_scripts.md) of all CM scripts from MLCommons.
+
+## Getting started with CM scripts
+
+* A CM script is identified by a set of tags and by a unique ID.
+* Each CM script can also have multiple variations, identified by variation tags, which are treated in the same way as tags but use a `_` prefix.
+
+### CM script execution flow
+* When a CM script is invoked (either by tags or by unique ID), its `_cm.json` is processed first; any `deps` scripts listed there are executed in order.
+* Once all the `deps` scripts are executed, the `customize.py` file is checked and, if it exists, the `preprocess` function inside it is executed (if present).
+* Then any `prehook_deps` CM scripts mentioned in `_cm.json` are executed, similar to `deps`.
+* After this, the keys in the `env` dictionary are exported as `ENV` variables, and the `run` file, if it exists, is executed.
+* Once the run file execution is done, any `posthook_deps` CM scripts mentioned in `_cm.json` are executed, similar to `deps`.
+* Then the `postprocess` function inside `customize.py` is executed, if present.
+* After this stage, any `post_deps` CM scripts mentioned in `_cm.json` are executed.
+
+If a script is already cached, the `preprocess`, `run` file and `postprocess` executions won't happen, and only the dependencies marked as `dynamic` will be executed from `deps`, `prehook_deps`, `posthook_deps` and `post_deps`.
+
+### Input flags
+When we run a CM script, we can also pass inputs to it; any input added to the `input_mapping` dictionary inside `_cm.json` gets converted to the corresponding `ENV` variable.
+
+### Conditional execution of any `deps`, `post_deps`
+We can use a `skip_if_env` dictionary inside any `deps`, `prehook_deps`, `posthook_deps` or `post_deps` entry to make its execution conditional.
+
+### Versions
+We can request a specific version of a script using `version`; `version_max` and `version_min` are also possible options.
+* When `version_min` is given, any version above it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version` (if present and above `version_min`) will be used for installation; otherwise `version_min` will be used as `version`.
+* When `version_max` is given, any version below it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version` (if present and below `version_max`) will be used for installation; otherwise `version_max_usable` (an additional required input for `version_max`) will be used as `version`.
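+
+For instance, a version constraint can be passed through the Python API in the same way as the corresponding CLI flags. A minimal sketch (assuming a `get,python` script is available in an installed repository):
+
+```python
+import cmind
+
+# Request any Python >= 3.8 ('version_min' maps to the --version_min CLI flag):
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'get,python',
+                  'version_min': '3.8'})
+if r['return'] > 0:
+    cmind.error(r)
+```
+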
+### Variations
+* Variations are used to customize a CM script, and each unique combination of variations uses a unique cache entry. Each variation can turn on `env` keys as well as any other meta, including dependencies specific to it. Variations are turned on like tags but with a `_` prefix. For example, if a script has the tags `"get,myscript"`, to call the variation `"test"` inside it, we have to use the tags `"get,myscript,_test"`.
+
+#### Variation groups
+`group` is a key that maps variations into a group; at any time, only one variation from a group can be used in the variation tags. For example, both `cpu` and `cuda` can be two variations under the `device` group, but a user can use either `cpu` or `cuda` as a variation tag, not both.
+
+#### Dynamic variations
+Sometimes it is difficult to add all the variations needed for a script - say `batch_size`, which can take many different values. To handle this case, we support dynamic variations using '#', where '#' can be dynamically replaced by any string. For example, `"_batch_size.8"` can be used as a tag to turn on the dynamic variation `"_batch_size.#"`.
+
+### ENV flow during CM script execution
+* [TBD] Issue added [here](https://github.com/mlcommons/ck/issues/382)
+* During a given script execution, the incoming `env` dictionary is saved (`saved_env`) and all updates happen on a copy of it.
+* Once a script execution is over (which includes all the dependent script executions as well), newly created keys and any updated keys are merged with the `saved_env`, provided the keys are mentioned in `new_env_keys`.
+* The same behaviour applies to the `state` dictionary.
+
+#### Special env keys
+* Any env key with the prefix `CM_TMP_*` or `CM_GIT_*` is not passed by default to any dependency. Such keys can be force-passed by adding them to the `force_env_keys` list of the concerned dependency.
+* Similarly, we can prevent any env key from being passed to a given dependency by adding its prefix to the `clean_env_keys` list of the concerned dependency.
+* `--input` is automatically converted to the `CM_INPUT` env key.
+* `version` is converted to `CM_VERSION`, `version_min` to `CM_VERSION_MIN` and `version_max` to `CM_VERSION_MAX`.
+* If `env['CM_GH_TOKEN']=TOKEN_VALUE` is set, then git URLs (specified by `CM_GIT_URL`) are changed to add this token.
+* If `env['CM_GIT_SSH']=yes`, then git URLs are changed from HTTPS to SSH.
+
+### Script Meta
+#### Special keys in script meta
+* TBD: `reuse_version`, `inherit_variation_tags`, `update_env_tags_from_env`
+
+### How does caching work?
+* If `cache=true` is set in a script meta, the result of the script execution is cached for further use.
+* For a cached script, `env` and `state` updates are done using the `new_env` and `new_state` dictionaries, which are stored in the `cm-cached-state.json` file inside the cached folder.
+* By using the `--new` input, a new cache entry can be forced even when an old one exists.
+* By default, no dependencies are run for a cached entry unless the `dynamic` key is set for them.
+
+### Updating ENV from inside the run script
+* [TBD]
+
+### Script workflow (env, deps, native scripts)
+
+© 2022-24 [MLCommons](https://mlcommons.org)
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/README.md b/cmx4mlops/cmx4mlops/repo/automation/script/README.md new file mode 100644 index 000000000..d4a4c62bc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/README.md @@ -0,0 +1,427 @@ +*This README is automatically generated - don't edit! See [extra README](README-extra.md) for extra notes!* + +### Automation actions + +#### run + + * CM CLI: ```cm run script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L77)) + * CM CLI with UID: ```cm run script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L77)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'run' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L77) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### version + + * CM CLI: ```cm version script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2199)) + * CM CLI with UID: ```cm version script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2199)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'version' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2199) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### search + + * CM CLI: ```cm search script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2227)) + * CM CLI with UID: ```cm search script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2227)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'search' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2227) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### test + + * CM CLI: ```cm test script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2346)) + * CM CLI with UID: ```cm test script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2346)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'test' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2346) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### native_run + + * CM CLI: ```cm native_run script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2412)) + * CM CLI with UID: ```cm native_run script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2412)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 
'action':'native_run' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2412) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### add + + * CM CLI: ```cm add script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2485)) + * CM CLI with UID: ```cm add script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2485)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'add' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L2485) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### run_native_script + + * CM CLI: ```cm run_native_script script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3270)) + * CM CLI with UID: ```cm run_native_script script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3270)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'run_native_script' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3270) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### find_file_in_paths + + * CM CLI: ```cm find_file_in_paths script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3314)) + * CM CLI with UID: ```cm find_file_in_paths script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3314)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'find_file_in_paths' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3314) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### detect_version_using_script + + * CM CLI: ```cm detect_version_using_script script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3533)) + * CM CLI with UID: ```cm detect_version_using_script script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3533)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'detect_version_using_script' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3533) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### find_artifact + + * CM CLI: ```cm find_artifact script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3606)) + * CM CLI with UID: ```cm find_artifact script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3606)) + * CM Python 
API: + ```python + import cmind + + r=cm.access({ + 'action':'find_artifact' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3606) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### find_file_deep + + * CM CLI: ```cm find_file_deep script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3764)) + * CM CLI with UID: ```cm find_file_deep script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3764)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'find_file_deep' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3764) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### find_file_back + + * CM CLI: ```cm find_file_back script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3822)) + * CM CLI with UID: ```cm find_file_back script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3822)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'find_file_back' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3822) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### parse_version + + * CM CLI: ```cm parse_version script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3863)) + * CM CLI with UID: ```cm parse_version script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3863)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'parse_version' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3863) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### update_deps + + * CM CLI: ```cm update_deps script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3917)) + * CM CLI with UID: ```cm update_deps script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3917)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'update_deps' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3917) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### get_default_path_list + + * CM CLI: ```cm get_default_path_list script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3937)) + * CM CLI with UID: ```cm get_default_path_list script,5b4e0237da074764``` ([add flags (dict keys) from this 
API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3937)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'get_default_path_list' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3937) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### doc + + * CM CLI: ```cm doc script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3948)) + * CM CLI with UID: ```cm doc script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3948)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'doc' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3948) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### gui + + * CM CLI: ```cm gui script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3976)) + * CM CLI with UID: ```cm gui script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3976)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'gui' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L3976) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### dockerfile + + * CM CLI: ```cm dockerfile script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4013)) + * CM CLI with UID: ```cm dockerfile script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4013)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'dockerfile' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4013) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### docker + + * CM CLI: ```cm docker script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4041)) + * CM CLI with UID: ```cm docker script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4041)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'docker' + 'automation':'script,5b4e0237da074764' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4041) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### prepare + + * CM CLI: ```cm prepare script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4095)) + * CM CLI with UID: ```cm prepare script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4095)) + * CM Python API: + ```python + 
+    import cmind
+
+    r = cmind.access({
+        'action': 'prepare',
+        'automation': 'script,5b4e0237da074764',
+        'out': 'con',
+    ```
+    [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4095)
+    ```python
+    })
+    if r['return'] > 0:
+        print(r['error'])
+    ```
+
+#### clean_some_tmp_files
+
+  * CM CLI: ```cm clean_some_tmp_files script``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4106))
+  * CM CLI with UID: ```cm clean_some_tmp_files script,5b4e0237da074764``` ([add flags (dict keys) from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4106))
+  * CM Python API:
+    ```python
+    import cmind
+
+    r = cmind.access({
+        'action': 'clean_some_tmp_files',
+        'automation': 'script,5b4e0237da074764',
+        'out': 'con',
+    ```
+    [add keys from this API](https://github.com/mlcommons/cm4mlops/tree/master/automation/script/module.py#L4106)
+    ```python
+    })
+    if r['return'] > 0:
+        print(r['error'])
+    ```
+
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/script/_cm.json
new file mode 100644
index 000000000..c1419f8f8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/_cm.json
@@ -0,0 +1,18 @@
+{
+  "alias": "script",
+  "automation_alias": "automation",
+  "automation_uid": "bbeb15d8f0a944a4",
+  "min_cm_version": "2.2.0",
+  "deps": {
+    "cache": "cache,541d6f712a6b464e"
+  },
+  "desc": "Making native scripts more portable, interoperable and deterministic",
+  "developers": "Arjun Suresh and Grigori Fursin",
+  "actions_with_help": ["run", "docker"],
+  "prototype": true,
+  "sort": 1000,
+  "tags": [
+    "automation"
+  ],
+  "uid": "5b4e0237da074764"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/assets/scripts-workflow.png b/cmx4mlops/cmx4mlops/repo/automation/script/assets/scripts-workflow.png
new file mode 100644
index 0000000000000000000000000000000000000000..60d0ef7157eaed67afe1a06b0a3b09a3795b43e2
GIT binary patch
literal 242876
[base85-encoded binary data for scripts-workflow.png omitted]
z{t0w=XTA_LVZw2iC z4BbCN2OEk17umJ{PEGKi1V0$&|KU}9xNZyX)0wbE6B1e_2T;4Fr;RK<>0NmG#*Z)g zc+tW!SgELFr3=5cGJphM;_MQ=$K&i4OTFhSaJY%Y0oWmKf$7;kFv`^+42|+%t&&B9 zFOdA}71aCL(#sEa+@1g;<9OQ-pK6n8tnV1XSl(qh--Y1?kUIddvsKWcR6I+PzxV-z zR^i;q;(lQ8_d_TPQ45&>`uDU#vxw*-JExK&J2=)z-Qg>W-=WH~(0cayedgVhRN-Im z04o_YwNV)qR)qt%&r1q3LQiRUl`Hjx)Wukz83(^P{6ibsDwF46=cSF$>dd(Ds|tV{ z3p-cAxMp?d8l9~ElqTJrOVftYLUkUE8$|bUdNY;ObekGY*#GtNV z(vO2fvgLAwY5CRZTWU8OpvpUV74I;7EtiurJ=jb5QxDs~4l`3f9LX{X0>6yeUARgl zzFo6~A9|#Rgq<5dX2m?=`sY3r<^q5{K?QsGQ}i{^A~S=WnhuN{z)qIpVZ&8MdQ)op zN;EMVVb2d;(Pjz{+pVj*hbT}8I0^`t8b?(vzPVxfSKP|)!M z58m6Vh{5C-5Av#}biQH#g?D2BvhWg>JrFw+=3$V_x$M`^XD0*QQfPhJsa1`OyP z9BVNJyp23~T|MEe_HzNoXfXtf!KDFbSf7WfrVOkKGTZ6rGawF`J3MZ3YEKVaf0{(e z1@JpvXWRu5PN*??g=S{6A5<$_iA&|a*ZG{$paEv!#Lj0`q@VziSsC3~clUQ*2Gf?3 zH9RCAj~6>qCNqEQ%T8tl@@+Xg56mA}mkMZzo8_yb+*XQ zkM%7hwtfxAvrLzB-R`)-G+_}D+w>uIH58ef9Y{#euD}n!L#SkLDtyszb8Ql2D5#3q z*I|GQYy8O)74aBHPJv$E)DSE7us*FB*81GbD3dXgMK zsiV^Y3P)*ug9eED$(9y=3wJzA=o!o*Y=NNCa60_I&A|K69I!9Ul@0luL3O8x^F@^# z!=I)d8GxQ-Gsc|%J+ot58(?NK0A}ZfnGKkc z<>O&DW|g=~0$*T`k_?8C+^LA!hj7Uz`l0H>{)7Dmd{Qpk>LvrE_D^w)G4lUE_O*Zc z2p*qdUYclPa0=A^&&kohM$b?ru5mL)D03HMM`k{Sn_vpf3f8-K5S6v3MrlA3+k0#n z&<=q{O7JR%6LD?7gGNA-1tKh;q5*R`amg$<2)#IrWdMeyE`D&I_^8w;@_TYrbTn?w z%E19J?lv_lK# zCH9qmS#52H7mS2%a210dDHk^PQLH6L_)f4}rXC*ILE`!)ze5QBDG&GjjrZT;(aN%P z-XLJ(rICYy`>daG%_Q-2Z|}6^rpi+!R|;km{}}!TEE}Vh@-~m-5en(Vz^^XEky+^D zY9{tF*yoru8r6E;hHzF#gon^8g&Efy8uuY9<|AaG;(6k{uNYuBx4 z_L?PStVu!h@^VwtvIm0^iz%7PI6WTLgJdmEzU!|SAQzdHwZ2aHt=^d`_M7^{<*}}@ zW@~4idIkjX>rR_pGq*bE1>G(}tG{&{Tu+>ocmMNxq1L^jxEEFN=6%oae*4qd>C1!X ze_rT~`SJKuSZ*KNBl&`@pPyuotBO&QKXQnLUTF!jYAraaD9{jbO()_MWo1N;ZO>G)QCeoB zCqQ1tl)g?8Y4gbGE3|2hE-3*e=I+-F6WpcV{`F?iL4>#S3^mm`u@5udQI4M4n zPR-BAc=0?D<7%I|7Dsv0kpXz!{VJyni@@86fSx4Zwc%?W09MJww83aEWoldE9R^fb ztpWZEcj6}U-}`P-8vfdJuAF1?b*T@VNE#}zcB6bxtE*e{b8mecZ)mi=InmXXZU#Nc za(wPhh;Wc8nL}J;3d_mk?=`9xHz6imy-ND5ToswL>sHxh>-NG6ryjtv8TeM+OO(3@ znL*HpdF?jWa&9v4&XYO}@8zQsCx_2`#oM>gQ`MXmloW*@po5Xi-H2qkb#44o(k07Z zzlI0MUHa}Z@LO_BQ`6==T0&yyt?U?YrIciBxjhm>rGh8&!KS7LZ1tq3-Ynx{^0}_> zSnW3Y-AxfmFf1#`7K#mC6m$=7Z)dC_nJW~+O}OP z1+|?6CZzXZirgyftvj9G#9jf|fD~C5%);=GFME%4cXKP4o6&6J@nP10<7<0!VWGep zzowe-sL{jp*zrI+Cl>{;g(KSw&81xO8zGjb_NC+QaYAPU-oV0%$I!c+9C_0nf z%h8&Yl&BDZqiJ%O=?hO0F8Oab(7Rd8^k>xzLUp;MN-HbPICFzp{oMtm@Zt5h%M7PN zbmoRXC2emGe{x(+?N00U`Z<%EBP(w(BdzH)^xajb%gj%|c;S26`nqP!h@Bpz6#fSm zo{uG7D}z&+4X+dF@%XqzSL|&U?P_vU6WL!{qKKlQJ1d~`G)X3C>fPYW#ZMi`(w`ZM zTwEey)Y^=I^~KH2vBsYKfVCggnK@BpwSR39?}BMpX?uW`OFjPpheyaoL$@{&&O@QO zyatxccCaH{+AWN$Cv{g|~ZAQb)>UaYaa zP!z6Se0k;lh~&|^pw_M2vh<80pPVM{%L@J*%WGbn+>gdlJ?<0o1`|#XQqQT%8#wPS z5b-3byNE6j^PFQn-7(kR54?R#4!{@TJbX5K@QI1@+QRH$_BDPzd&`^kIb`F_u#=y% z9CLhCPuse;*Ek+B)#R0l%;jZuOUm5E&O$`Zdi{8YWx7>6S}2~1t>;njT{w-fyL;Ec z;qI9c6XvsysGI4jo-TGdN#h0xVb&z&QjWoKWqpGnbigP!E>4>C^CYGp2(k+A3cN3#mYXv)IKSv^WmprUOL;^_%CHZcoR zb72zq?a8cZv&e!&^=Q4B8TDIY{_)IxtbCErWPNBggR@p|(6=qod)k{IavkFPJx`-np6-U*x?;d7axD zbpQK25fBp36FqgdU#ABe*j#rmAn`7n%wcS->xuiX5K3bXEtM$nmlm|}`4spOE~zCn z&e90vfX&gxMV@VATTO&{wXe5Z0hQt`{}t79cR7u6pia|bJjyy=0D<)Ovz7X8gx+eY zmOM$uP^U3uiyBk^o2C9jp2v2D`OQ5!LZ_1zfuPq~Jl3in)pf&Hk&cWrx^P@2%#%~I zpkf6wNxo%MjCku+iLT`w8Xq4ga9Gmz`Wfmoju039=?xkGo=%^E*z|Qa{1ue2vQ$m) z%eBEGfQ-@hL`YuDH$^xjW5ilpEpdaL98Q0sb`8#g8altWmR9E~jyWe=R-?c9mbIaC z4Rc&G4q5u(9)CWM*@O3A7Pk2q|fzitYESKGpWdSm%d2MkM4fH3x}Y9hlLSdj66rWl|%`L5|5mBJf(?9t0i zwy4j0@_)3w1$L(BjcT#^Q-;MoFZVsIo4_$+vM=2?9DEt*+hs9vNiz3QOE11s z?}MNp|?xZYUh;>aHFg<&50 zk55lvhd>Ikq!VCjB9KR8p~M8HOBy5;gtp|M4i!q+`O^j@?5w!Dbv+BU0tL^r>UeYJ 
z7z!g-B7XdsNaEq1Z>g&H$+KQucAjgkp76MohNOdb$FJ-zGjfm3(_4es&6f+9ihF+>?nJ>Gx*=r~C0nsdLi+uc0{U5}PI^=7A|kS_ zTnb9q_Ac_iGIS~F@fup3kw_d$xYFO1qp4|>Pf##W2#v-11Av=|i~j4-o~wXdb+)%a zX5_6vqxSEJRgBGylNwiBwH={zf>I-ZI09~KnZfAFl+4E*KY8}gSJ>Mu^|6LVid!C@ zp7mjDh@*D1O>J$f_@RU1F@a64D(SD^$Hw~qSuE&%rlnW9=uBy)*|D0x4B82o^p?5h zfo%P(?lxXx-uD{0>bn?XrvtF+*AL)lM|T8|PM z_Vc*1QcVpyr$da*q>7oZVVx7c?i;J!laPIznv=P^$3v*M(8fAtfOMc(mFM=Uh|rDv zu5PC$+=I=gc!euDvQ&;DgR@mUjj4hKB`1&r6bk@jgG^tJJA{8&jwbIo1_7cY93dew zVq&6dfr4VC>~FC#2$=^z(_(;HxW_;iLhBl0wW^cZX-4(SmsUkZxi_mlfl?@^T^sp| ztJ;qnZA{J3$qTFa?E(DO7C9t_cY4}W(YHI{;yKR?mTpUnW9#dUG4YJZLJ0#;_q&@~ z081lYP!3RWBUXrqe1d-`$WY^l-pF(%w6PE`Lw*AF#bJ!i%lS&x;=C)up5skTO>q(h z^-)AUj!$)Z*9EbLEuRqnvf&fZ>{VI{!8Aga`D2hu)T*j7i0Dw{wXh^Y(&AVrYd)S z(jO(VMLtx=5~?Ru?p^dJaC5lT>}xV$VV(Wy*-&4BBGW11XmXyNg};bZ>Xg<*?rn#F z-WpmRTT&_2)*3hM0cDD6lo?2v$&<{rK{9rAZ*%rtneU~1J}8UI)atJCMcoXl0RRx_ zbXC@^Yqhl4T1ql98nu%<_NX(n{3OP8GxX?Pbh-ubt~zwcB$1`2d7a&d3v(snju40GV$a{2{_x_ZEdUi%MHk85$k6eG{|3QmVD$L_NLG3`C4RL73FX9&;PUNL`w z%#0EWpp_Zry~>KNyvl4By76nuJZ|63%f5eh`M?-k zm5?FXSaDSQ=OzjPdqfeCrh!hth2zZgxs`Pi5_lT1scETodmw^;Jx;C0k3MK8mDDPAvQ+P^-~Otnc|w!kxSrblse zVS{=pzj`NZh#SkroP4V=R`!ES7U6YGQV+W69b35jj9^Vv{%ZG7ZN=9}Yz^UQJJEMF z*17V^u-EE55P+*O8}Dk+fBdJ7AnDrcc@o${jdRRZX#Cnp$7dXPAEqfdd;CXWKejCo z9`>nmS#6!|<{>SeiJSZu{twol>;-AtCwm*^^ok=$2L4CnJtij<6+JTlFB{>{zioGG ziBslJ=l|vdxayPWokaW5wl??HLx-Y{oF5Ome=lw;k49-W)a8>n|C^Cf)MCTzTiyMV zEAP`87>HYNt>B`PmWD{bMpdPvh$lPO)|5_8;4U^DBo}Xguk-driKVKc=BC>I7VU$F zy{()Cjz5+6+gPJ?WaRh^!AG&H&lI%c=y0;;&%axq_%ZVaH8|F=9|z+J848z5mgSed z;~jndXvm>Q#%Idt=m!Yr5dpk-XYA+2be)u3?)|J|Q&+1TI_A^(`7?$rB78!TKc&XI zqY9}V=l5Z=mf~IL7v>WQ2JvDnY{9v^=TEdI^e9*PP-|B65ZDX1tGZ^@)ciL`$AO$G zN$~iyM}|iEq9PVG{T@Q4&V9~x>S+HLXj(1WJ$?CJN!4HsRu+$;yX5I&Q-Gf^!Tm6rjm z>GmeV<#(|^uGNwowEWDXs~4@7yguh~^*1 z4#B6|^5Kz3>MC@1Gz+@s@>aIHvS&uN-IikNas68=qw1WrydVEw^?GvkIfD;sNx*;P2(lg#*#q&}DOY`VlsNiTUZ z@iNat>=?ABs&&1( z;sU^sKGjuIL-@K`Z+xxx3GV8C6|8h)k8HypE2}I%Q+*t-e(l4NRXEq?t?~X* zck8*wT+Da-%r(wMi>9k#jRwrQ@{JWK68(c4aolkFrGG;}oih*NN3KCW&*5D@&T_PI zZ?=_)i^?b5_SW0C-GOFk(_=}R&ii4H%^Y`b>r7QlU4Nh_dpP=7OGvA<+1<PR|4goZfO9Ja6X(!o zR>#jiaF4WV>a0q^-87I6<;WDP@%Z^wQ>?_*a}3{ZYTeB2c$o!?TmFP4pxE~jv$EL> zo?l|&nb{|3V_DR+s1_^0+24q#n`m(mSw9I&o`K5d31}o{5m{~|oclIZ`mYywrT0LbLqL@WuG1GO{Enrjm zQ@nfb^XKp1KP#(5G(7!QNL{h2YWBw!DB7kz^%Ntjt<6GdAnaCwq-l)B_Z@E%MfRuE z5vyS9>fgp|L!egLRO=zP)9I?7T4@WE zch%KgDKJPoI$7*j`lii^-t~+~Z{J2I-~kW3tuNY?^NlS$9QG)b&YDpj`OJ?oe}O!W zZ`xdD22Mp}P8MQeSPaht}UW(ahkI$!T{@x&^LHwj2QM}6065r2cb27pA zx+lKGyKqQFT)@59#a$#6A-_^}dFC@A1K{4B_g&#^7B2e3YKYAdLIiw9C&~d@uWym* z@$D{_^0c}mc%PNVR-EzHVo^F&{{K5KUfRgel_clu<-57?1GSA*5wc2YO^2rt}wTruAt)i1+XgKq-RN>{J zI2Iif&HP5~Pjd2G(g70k7U^mZsHz(H*u)-i>H8K@Wahk?k189m?YS{4)>m_h^8wcH z9&}Jjuf)tgNMlt=tqvSf9jmLJc(wm$N%w_jSH->>zS1PGck#y^jfC=bzWRsMJy89< zK2^P*5XMby{vKjQ?lpHtJ33lfIZh8&Oij5@yhecbk%t~GeoL5uzKwASwt$6HJ5ecO zbegkzn(=FFtqL?TdfgHK1^lL%;@Y(tOZ?_yqZod(a0=?L`lW#gI=xmDwcjuOo+`mn1;MRL+49V~FKVfcxXw9xZAOPFg3^i9-3CRRKL2J)M7*1_^%yW}^{0HvPXuR!m?3`i{gM&1*B)MTUt3sP`|(8@#N30`CYw!Q z6cv0o%i+LHi42VED!q`}Y$r+|R&ONP)A10_LtPvIrXfNdANvp zMe@~xKWy#9EE5wfOhI$HgMe5|EPo2t+IsJiFsvu;kMVI<*75QA!3xZ3OM>I>)Pi>f z9+dS&S!sE{dGo6L$Sn`6K3NC5Vn=^^L)7|ub~Hpw!6Z{aZ6Jipm!DD$oO*FbB{F}wVL^&Z|KZ@arc63`=(e1g)ln5plx z&b5o8=0N!KARF&=Sw**+2%)*|WMjyEM6p~IH*Y$SQeIeSN$3j>Eg%7&T$2h2n2M^?pk@$kvjCzkk5n;>Dq~IG0FV!4PK1eYw;idl zjFSb6B3!by6iJ_UMbDM^%TQ#*Ydqt&IcKiYjY{R?ofx6#EfF}>bUg8vP;Gp-cC`P{ z(pZpDNVyq8GyAovjO<+xz)4}i;H1d^#YyoAhs~-Kv!s{WPmM>*KMo(;9k#S&l-JZ6 z6V6>Kq_Da=WNGfZy*2xjXhNB1&JXoWEGuC=_AmYc*1D2oe7C!Aq!HwpO&V#S^poRP 
zzj$0;{f&6G5n)@<)uluvsFhdN!E*xS;BQu^>(3seMP#9Vki4R0q#}rm>q%ZQxLsJo7>X-;2XsS6{C7R8-^`0bJERzvvj` zpfMusj*Xvq)g|ga@{pxjRRF$!H32Ns4Tp}~Mv`2plYI@Z`m(-MEzLvksx-0lwpv=s zT)*L}CqR$?!X5v~4}x`i66yzIqFo0k4Y@wW{wXsO+Dw>*4N69TPw3hmH*?8uZ~f)> zn3F|G&T+kr2mhM`o-jeZ^TNiqizC@5Ud+L3I|t)r*BDLD<1#*9%+Vz@<8}kjo6T$ZN!+Ogq=(H#!?^ENuHn$(6>NzvMNmsb`8AZKv`6K zR_Ay<4Tj_^DZ+kkG9P;WeIC$O7>^_7_xC33Pj>KxNI}%YPACH9Z31>|tYt_@8Ot0K zS{Z9?4dH|(Yd|sx#4^nTaKy+o=@QPJo3H4j4_cI3Tbka;(^dwqUq|AJ9ywV(-y#87 zjois3FB0n|>9P+OrT-UuZvqW<8~+Wf?zAV#E{dWEA!MgSvJ=W!vdfz6##WM@BxDOI zYh!N9*oVZ}_kA#?Ms`D%u`};AGw%EUJn#Q~pZ7WMInO!I`=0-t4%KYe@4Bwv_xdiM z&*$s8G`_xG>6)fCh^+D5-%(N&z84jAd&RTBNdq=vx4V&0TOf*>HG63rQCtPZFbUg7V{{Ez!h8xG9z- zS5eeKoOe!18K~x!@;pzoJdR!`0WhdC0{)u|0`a(Mz&a=b_C=n&3)+EH-H}S!Og(v* zL3y2%Eh#PB$8LUoZkp3?F7%l#1tJ@0N2#<1MlWKjp_>y(t54oDUGwv4QtIW2)~V9o z>$9zKsP3ecYM^IPxaUea_<)}IWvPKPlu0_=^U9o0^tny4?Iy28@L#hNpO)d_75?Yg zcl{4di|j7HC55VnGG&G&ONHI-MM?Y8NLbGc%{9KfhL18bY{uhaI5Q%8Jh- z?0uVZ`m54z37Xc`(t2Ma=WzoMD5);_mzIcFm+m5CXng~ zo7?eW-@7`EkyW&g9S5}m#M#+{eUv)~W#E)5sJzt%#SYC|Nj6l+2CRIjC z#C5|#974ZR5!&{k)QPn0dndWw`RH^~Q%QZPh0UX_j>eL%MLytA!UxpTKgk8Gn473K zN_pxVERWA=d>4A{5*G(5bP~)liqeIqJfcqdoYvKCYW!(wl18~?#uYUMV@-t!&5v5H ztE<>L*!4crj7F;_THDN!-Jf~ehAicUg&20^&OB)8GheU*ZJhgDXemCdBL)M__rUs3r`kOWksZPaXQ^J}Ot4TJhGBJbzypwuz1JylL@q1vPv2oxr zPrqifVqXYN3`iP{r-{1aM{ziWJ*=q9%}^(#=4-?a(4c}R{Ts~PhJ-1nx^g*)&2jwD zNHZx?{8ChR?jR%KB-alX0KF zUF)=EPHLBuu^#2tssCb^MUfFc74`%&@TZ!h?Tw3dI0<)WnWCI^Y{1OfI()<2%u!J; zOR({+rT9O)YNSCisRjKfC5y8!OINBX9xiFk0LQ_eGAZPL{FitIc^dvdos)k!fGz<= z^KRMA@wINb&2dVDO$s0f-0Tf$QUwK`OO1MOD{y~idr8d-)XUFMk)c)(AD-o7la-eB z-}-DfEgFwzz2_vsatK^-=$$6ufw`6 zQ2m@Exf1U$WHvGBS9rTxO-*+)xp@)yQK(HUw#?DicC0LWjI4J=wf?rE_<{j5=T9bg zHqg23`>RCLXl#lT7u6+_$7K5ilpOD$5*q)rR9#2huUYKr}e3vW#zl$TEy+FKW5j46RaPZ@J!wK&tY`~wN2m2&1erDQm z`MM*LD&2;7(A&Ge3zSX*>xC$8+rm&=8w}#I&C4E>BHv~(7s{oV7;=OvS979h0SN^m z`V#7-S<}}=gHpiG&gO2EU z!+yuV0{J0j4%<$oOb^p7+V%*kn90}*=eyLj$xfHaH%<>RJDZz3JKKC7ME9I1S`UXR zcdEawNK9?{;YQ;aSkO_SQ;3`FZJ}XbIOM=KTbbOdtl9~Y`&QUw{6*-3h5Avd((uWz zi!KAo9DaZ5l{q@QO)G`v(vcqk8?KXA)+37c2bl4Ft~LS7=D{p^(A6`FRaI5J{CuDS z>Adf`4tnuTci6+NIKt*~&NaDn+L<&-Mqd5Mgmb6@IKaG!Q!ShWvz$Tzi_*?st@Nk2 zi0|?Brje`{@*69yt>P0OQ+?NMN$qP%>-v*)xWDuxv9?Cz zk)g+_&Sp1+G&j?5_%Bt?qz5cjQtH_bRS*8)0sZhYrX0Y;L6p#CP_@76>fz43SMhIDFw8_en zcj)FnhJ!Xxfuu#CRdg5WeEFoT_v%Pd91n%2enpfL?(~Cd)PA-mgKGindc$q$W;WZt*1fT37ESKJ2%=$RJD z34c62-5DRBA`V%0!Is!qT6TXLnj@F3uN(*3Pw#-B-^shcff$WC8cpR!a+~C>DDkqB zpAk}tv{Q9}D>~WKrvonS3jaSIfY-#L9u+we5qSC%HfO}glz;b1|9ZV5|Swt|KMnFRCA(WU4)v(fvCWV65(>&~o;n+5^9}>9)4c z6j(r1eiXUvMn!c{Sz=>j-Sr8;_^7@J!0jv)f2*sWllLR-G|Bhk^zLf+kT%^dE#;VY zvV~O#vU>TBN3@hy2qMa=3&yfD7@$xg5V!*@(z0)=ndv`13zWsNpL~U!3 zM?@fYHz#93YL8vU`z0xc@mRV=Y%8M15iIN`_XvNJ0w8>?tb2RG=m-aa#bOtT@>xv{ z!A2fAQ!K4BBnl7uJ^lJ+Tur$h1haoH#UQjpCh+tTi8soUMhwGqxKskdihUV-AU;JQ zMTtjlE};D5^;bHJSCB6@mzpFrRtpvI^n&Ia9 z5LdWIj3)J3%NLBvodn4t`9;s?8=sMV1&s+}6pOj%;L|De#5|^g60OuAEoQoY{YsM( zk51chyjA2GbIp%tdM^H;&agWtz4hofnKc@dqIRAMtZAfvGXeBqqD zv2vPoj#&1X-)SSwD%zgwuhYU=L($(MMHd%JL+f*uq%l=gQURO8z#G`c;}#)j)+ zqUZc49*xQ7rt|D#nj2sf^e}d!TYSf{coYL&#ecCpfiSu-Ttw*8#)-Jl-Gv z#Ka>d?9&&7W;->AlTPwaom+dbYf<4rihuI*md={)2xQ288E+VAXLE@K0LRI@Ao8-> zs5dW)8g%L969_SYmL|Qd%QtwwNwk*hTr{;{p7au3ZV_2lf^V@++W1ad7G#J*Clr{A z^emkVZ$xwbzPZGjrTK&suC8&#bz-J_{j9ne?l1W8)#eEgHH6WTqH??_uz8Zvi zhA}ufrRET%oLq6xFT@{qzw^ql?NWC0T}AdhI#WZuE5)8T|f zLtpUl7#Q&ISTOM|38@p!)%1J{3>GcJ+8lVU8nWMar&(<%ZMHUtdtjenvpX%6^OQ8G{8EqmHly85%JIkPYd3Ts3MhLj*$~c?# z^xi}d8<{WIg3_|T=~B)}Abj=-1j4ZC@}B#{r|ZUmcs|!R<~&0vDC%zNeH?k&ia2W6 zdmB3N;heuH@|&h9%^Zh{^>F2Ky>Q<$u{vw3T1v?P7`a>$PMV&NT{fb>W_9JmUH+K# 
zm1OiIIhW5+im!sLlauiy`rF-S?fnrNfl_(y@dx*OP*F))oZ46OpG3ISja1`fkTRpn z;-aQxlO=`NG`14GolNA2e{mukxDgvJOj(4oHZYhvBVj>KLU-vsJwO*K9By}cbJKWt zf0)mj=YDG$Z^GxjgmmPvw11Pmb$Qd`23-(LJBJ6M>WT7Y)Ji{CFcWJwp?>_JZdB7S zhQAOel}%YV8TY z2JH~_g+5_-Dj(B_o(Q}}r<3xIB|{j- zI1hggv$LlijP!{!I8nKs*&rI(o!opa<8QAjL~8d`03~yWJNYL2fGxfKK}&4Vi5RxL z4gdO@n=$=K?3|hot?(LQ*@HGd?}gh#5X7-y*fQ%Fetw>VSW#u@_=G?BRNgM!zyTj@ zt_k<}Q|fg^&?o*R%7f?UY<1urf-a4Ppljm;$tsCFQct4qM!B4iC@#K-qrSPL!Z`L@zU!J5q1gjxG$m?CL`Ik4vE3d z+v=_Jz{<1U*mb|YU$?<)RmP_q@jk~38^bQ>q>1<44cqj=BSdUWp#1*b?B!o>Uv-ze ztU`?q@?IX}*bkUlZJL^$D;v;ETDlYnBe6|cG!q)!<96`?8jTHQPL zwby3o<;GClM!4=|M8bW%|i>Ot{{skt*uYG9;#-|pMjRYp&X z0JyVY*9Nb5y5S7+kw@{5MN%U;A?+E(T>Q{yE7Cl&W9&Xjii2nYnxDV{b{IKYfYLf0e1G$Svh)@3(O6C zkNKq;!2qbX#vh9f_jiotb$6;m(5e0yaPkV-8pa5H?hd!2S! z&ix?W6ABjykBbS$pE)A5EHH!eCt#*g-`!lC(VZ6g4)BZnDl_&{wTwEshDb^lxwGum z5?|q!C$OlkpZyDZ$|v(i_c@(RJEQ^x`^N@;k?h?Xbl@<&9`#d4p0-5!e<)XWn;FSN zdv>staV~F^&_zsso)^ z-mqsI2mPHWf%{yC;ulA&{wNYWjLR|2Z-UV9guIhuX`iH{c}w(Ofs%) z^*Bt;U1QJNiFe%POmw*==^*}kak`0lf9W@m++#)!!f+p97(#}@D=jwL^8~#cw}$qS z7Pn)rwa=0ll$qj^PJkGW=@N)vN?{#0U6NGR^L2c83?29X7N*X8tDbZuIXCyQDXY0u zq*?N+uK4+6rWZHQ%P4w%j(+vtYihN>=MqipSJFkTLLMTz;VAE)YIV5 zb!E{ix?)z2)~3V6DAJD4%C7LS)w|t6;n=LHsfFPMyk4QHsbRS450~+gv@^dtHOkKs z0EYZRd9a(TsHF>P_O6Dl*cqHubY>O8@C=$3aXK;2jH$&eqKH6kl zj2f(5)j18C5(&!6$u(|xQ`{7rg2FCxVguXxvDhD>_Te_GZJ~vCNk5l9bo_}g0|R4b z30=l8&+fUe-Cvxnq8aYF+%9^uehLNEIvr=dG%g{X8ZbR#C|dT>1k`2h#;e77%j?9b z_a6$YyZh426OUXGB`sEFho3)mb7=yD$?U+rYTprP{SANRbapyh>rY*WRg0v?DuicX z_TY`O)!U*f^e27R`x}~daTQBFN;L>zX=Chc(3x%fVrtHR~ zW!icQD=OIOjplQYLb%l~xZmfpgo&6D9J~6NVK!^Hi5;D*C3tTTr_G<80 zVOwdYFEQKTm9PynrgG}m@#t&1`uNR^0}d)3Anzd0+Sd4`iesL4EgDmE-{r+pI-jd| zb+oEEdN|T-;SzO^uCD*z*-}AHMd^pAkjJxOSg7BRWQ7HKE|Ze7jaD9ed!cU(b@0N2 zUo-^;>iDqqfZE!F?HNn});ws=fCwa3zpfzP>_MZD18|(0a|6Y{3qw3C} zRxPIkLba;KcT#Cz?^%0ey@e_h^ESF0uze15oAXkG8mWw9+dq9Fz29V@fGc8HXLvx} zqHpDsd8uqexV*OUlZotGu!V1<^o_H{d{5Iusw*|Z(?w1yzki-TNwP9wca-gq%o>B` z{h)L3SH+e_{lU<}RhS2=M`cB)`I!si*p2)X@h&r6nCdBNEuCB;bi{i2+}Q#>DWj1} zgv``fmlK7(Z6u;L*(W4=j3G1KPepP zYvagjiv3N=DmNXU`}He*&(dp)QvaJ=sp6S?+Z7+ffPijXg7_y9%g65gy|8Syp*Q z;mu-ez-8T!ggD_Kr+ZOpJ$(Athb+h2S)rSx(*n!(vziizXG2b7vO+TBxdi$c>k%K5 z@nZ)*q}CBhKCj$+;AP=0F9+gVp&IMFR=h)(+TXHd#pFB7G!N0!ZJ;i_!q?vWODL<( z5$|5&vi9+H$*yj}{esn^A0AoM<<3|nbl0Q5-2gdM(k1OfPpzBef~v2a>WH=6ncrd` zASTkP3Y|1lry|J$%HOogPCw~FM2|VsoI~z=^uw}QarX>-m%PVRjM~Cag-TVvs|4kD zpM7viA;QN_S9_eJSsop9Q-#Kx6xA@*R8vwPO!ASrpUnhYC|9Jz0I_y?C{Vk}!-Ms* zc=?ioTXHJXsvG5n3W*BVUWjR4f$$JUis{CcwW^oCMx1GgVNa7I&T5;Cd$^jbeoBG< zdFJHA)g^W3js!E(t$NTe!_W$M96#i>G@P%B{}eT^$8=}EYS}rOtJOri8Qe*APmD}& zH=U8kfJp1c_5HXw+^*t-ig|{daa5wn-Iz!x`1<}UO9Flhqkp})(2s?zYUpw{Q_|2pR2YXJ0VXK+#9E_W`)F2HNgW>K>W5nPy zGu@^YJp(%Ti)QKD7kiOjK~LF`1H&(or}sN6%=whx@8|4oK1(>Lt65)AKiUH!6G{U8 zFC<=@T$f#zaJg4oDYgBW=Ji0MiG(nq<$>bFn_IBN29O@WiQ3zz`xcim1iADYF#SGi zo5&@gbHV*lac0vYKB{7b0WueS#u*uJ`b$vYdhKELSY7ZONcS6MckbHzn$W(uMNH1q zdlls<8vUT!)rdv81<7>A;=&ixSGV@R_7_mC7i!erTqjyB{*zRs04B7;efU4PP`tl? 
zVJ@q1X2ynM-H)KoBKaZUS5$qH<(H>HXuhUkJd>7`|K32}Wb@kFqm$>SV`O)J2F1vZ z8bxt1Sq7mSv_5eOB-1w^>(xbh`q{(j88^cDA{tSZxt=`MhVe)2?34fPKh)F{V)R~+_*&a=z!Ua=!%EBMhOv*qEKUI;}crGzuD-%i9`3) ze4|m?10H-otQSNRPFdz$DI0u3Q$%PI!cT&lfUJKi@_EkQ)SuaqYPw&+-EyX;#Jx2; z(-PC}xvCN)A9|lyCJtml^m2V?FY%Z>f^@%r#pnVuxoTW^j>s8QZ*GrdFq)Ke*VR=Q zG=^?`o0%({OYet@TL#>FEZvn} zD|_1!j6i8lG;?90f4#tpj25G(H_Ms{tG(X!sBPU~E&_IAeyQ>J+z4ZvgE79T)6BI^ zO%LrfnCp)myc)bMK%HX{cBRX;TZXvj+I?zjJ=`C&Dc&4w3D2E`{(U~Ehq!Q`ik#8; z`Y$Br>GWUk5A>}iOBUBHtv=BOo!>X)&p2ZaP2<$dhmWUBcp!hc-hD_N%L;_r{X)^9*^FzO<4c1ue1 zheK`5dzeJ`etnrK%gKt@^+QtEVv6oZ8RYAS;)mF^2~%CBCf)!!m(wu(gP<)sc+4X zgomTlWl`-=(rMmRliQ3R^IV>&HJ(m_ZY)k1|P)=8knbUNK-Zyrd*iP>z3q=}7P;Zr7V!OXbn&paSx#unx+UFWjPRwEmd_ z42j?prl41zHltqd+cEdjTnbR~PV_2?Z5D>>maksT$yapmtC?;1sVF~N3n|yvcf#<{n=L^u7#fkc#Vc*37pzl>WN*c86g@oavX{cZmGCRh)iqt_3Ux!&Djj_#Ct zh#l6r&QQr$3B~A_C4&yM_*P`=D&5zam%HXhRI{$NEgNSmn-oFn*MzO{WrKB_IR!4>0 zBYZ4e3{%n+1D$xq)6m0=-wg%de|@2HOS5fL{Vg3&^kpfR&s!2jF?Xb9JvJ6QI?yF6 zm77Boqj048Hq4xY_g--FJ{|O<$8UeMK`Z0{IOD{|>s22=OD;?FrZ;Mv>n#_U^262A zLH_#8ndKU~x@MLLi?UFK52x;fQ%XzeV zDKu_l@sy+I5Ajl7k@2{+vRk{*v&oDGSM7!hjT;)OJrJ^=x&pP0!~}FLit`l8hkoUl zj6+UT8hnFh*uCHudo3L_-k@a?z~Ryh2bu{r_)aq94uW3^^hZPruQWPH)k)C1^J>r8 zLL{5xd<^H`onxK}n|#w%{(aaq{iD}D0XK7p{@@1Pw?$u>byK*`G`(a@f%Wjn^!(S} z55iFOjg7b2V`FLVxQD|0m~HmOx`R4bZ~ObXw1JYK;@DGmSK_9wMOKF*mAlt2UNv#T zU&?>k^79c%M*G^Wm(a848i(Fr{%g5Qu{^CsN?S9>&f$M;ho*k2!Di-1Yvt^J5D#mz;u5QZsLc?SDoG&Tx~O|iOFl2?6Xd4{Z=i= zTJ4Z9Z@UlMbqNuc(OZnCV9$oPrJC>;P;nMwh-Uhj=Z~w|ck&EwikQuk48dTOOIXeN zXIvH{+bJn}vr|8N`MmObZ|YyrzteY!T~qWx2>J#;l{$b}Jd4A<`P)lAe{`;)EwsJV zfk2WT=(K}z?&-wq)(;y|DK1|7r!flMW>QDQ?$^V)Ojajx~A&av** zi2^UFoMCsl7M<*vsu#atDvdp5fRIE>B9jYHV#)6t*}e*5dV3-aOH3(Y%SDt5*M5BSQ452#D%g^(Mn`0yDoLfZDV7bQ~a-cxh|T8cJ968{D;PJ-y~Hxm&($76AfTVlWIAGBfg;daB$2o{Z=2Wdfg1NM`i zFYGTWNsz0QKVF=(;>{>x{=H}vI%@t5Hgct1l6BeW(~4$Cet&#&Qd*44PSPiNEP0FcFRC7 z?sEA&*i2i|ds_IvVtc0-scaptu0U6U-<^D#ux+1o3bM8(aOL=ZZ^Q4RME63|n2KBo z8;m|6{q4sWe~)^`n0)oM6&`DMXHTA6_D8rDX@oDp-Cp_*st+nzl;G$h*U}azt)g@1 zXk5xJ%9V`W*Q>7`aVja{C=JKse*~}12pY{!>&&2h455`>SRtvhcjbbft}^S&@QX6O zE^@?;u|xq7zHSdk-qrY)G$J-|W|_w7XtetxqtI){$p^hOjSPC(vPP*AL!LpYT&=fl z!M5uK+iv1%icb{Pz>E}`ko#MuzjL+>ka2M#K(}Ga#`K&2_EUg3niXf&?W|pC`fX|A za03!tfT8;orP^Wu_g_1x5%yF`@a|lkbYWp8c#Xo;FXGV@hR_gQS1z$ z2TgSm1-`2Dy1t_v_i2!#PGC4AE?rpL5mC=90SgBFv7gl70om$1(7@tR7VR-(FWXFgZcR*Y zQgAyJ-C-MwoiP<7yTMv8l z%-nril-4K~hk2V7gST#p+_~gze6{+4Rre>!IU(~1I~jwvaF}>Lj~ImB*+O!FL`_=b zR2+=vA3(BG!whb^HW_E1LqixQ-Oba!Xg!a#L(Q;e381tmJMoO(e$PB2ct!G);yn*4 zD0HN>bPmEo?C%Uen)8N4EPz^&(mEA{lplAG$z#fDw~=)Yerrh)a$CJ3j44j4&kviV=+|h{2-cKWydPeW*f^Xorj#h6(p)tg$#sfyMlub(*;BjW2 zER}m5hVyLN34tjA}KZDjTU`}fdFA6hv&4n#>V9}H#cJs z>Du`Q3m{FRmsH>Q4%YmR@n&d(^JA9^5>$9dNqP%uB$9fLv_9%u59GSSr)RSHfNFz&#jh8TlSpWEWiNmVN#%M3DD8_?uE@_ z8%r2tWJz~6#Uf|Kepy1!l!GMU(_kP4;DQx zwqHcZNjV2>d^!N1ISgIi;MIyZSvH$hBwlt=I89w?+DUycVZyi4GnzvwGo) zH=rrDWjlt7(W}#D&w3geinXDHg_X~X8~u_hZrgrWqg9Hz?q#~4iR3C8(r#PYEJS*d zrvjwBiZl!-E0My^^zv{z$4ev>puD}~7eLJ;^Yf@ZjsTStQR%(7^s_Y0U!UrwGqdSE zkfh3B+-AR4=-G&fS)8Q3XKcJb$zfms-Q3_0G!bg)Z6X)G0}o<7ovqaF_5pk^6_ps5 z)Yaz$DYO8m9;k2uAqL@CCM2&@lyC#v8cU^MG0l%J0Zb+p)iaSpq8fIs%Lm|D4&oRc z4>v4;NcIpx=7Nfsj-X1WoTMb~e6CWNxB$LFKKYQ)%FP+6Y}oBddMn+`(G}3+^6`t9 zf~({bBMI=SRa8?=f5^uqqfq6aQt*VRWgs3k#RjyaUN2V44=uno)oKoW{sTgI3l4h0 zN?{&?wVp)n(Td2$kjPaloq-lcS84)?^+CIvWCU_g0jHZ6rE*k0WE-i>&C{Kl zlsZ0+ZWhn^tWRplvtw$4Z2I`Mty6=_4NKO<>FEY7Z73kejn_Cz^8T>q1g@f%!TnRR z6Y5!|hoj5NC?Xqj1d|amCo7{Cz$drj_U2N%!IRdBX!>#oRB%UjVg+`VzFHH0a;{(8 zBfXZkCEOEMQcx-P&R_K$X=W|>SIln{6EqZnJWW0aL%`0Cf-)Q zd+`Rv^!nzu@}hu5oB_gDplAB$#l+O_j}ImDTNvv!0KYO(B#X11f`Q4GJkxmgEGquD 
zx#spE4*g>ja!&u4yr?hDdD7;T#zj(H5rts)CQQMx+kxWPn@x&PFaJ8G$Vx(m7lvzD z{KZznc*SBi$5)0P+zPoRplHh^I(gW=I8=_adEHMxIZWXzd#;Jg>5eYKXb$JwO} zTK$p;(x0hgw77!u)UM~-$@l}AuE}O~L2)VvG!FWXAHoTc9u~a3wTYDvPJ;sXZ`(=o zzP%mVM8+{f*Jt;(m1A(img5}EX3Z5m6oDG#RRMV=X`%ev)&Wh9k5`5lt=cajp5W-! zuAKDdT&?!o?djRT!vgTuH~s>hM>n4l!wN2V5uLpL;n7 z&R=i>UpWOk(oDh2N8J4VFaEvY4a*Uq;Va6lb!5dSk{a{rN{4qV6r^W~8E7g39)YW$ zqoGDFp~~RZ6lK{{0H`aJ@wekxPfwYn3yqb>X$i_J%@3LRlswlV2Y-V_5g4+-911IR zYX%qHN?FmgG^E>e508J6ON`{t=urhU4GNd7m_m_LFF#4uw$nwTYDs(dxrtQa0cT?C z7tqn!Ml%+e`VYB5$nPZm_?iBnVUhnOltZ4L|5>4m7V^dhZ(09DTr~wkw1UJb4&q90 zR={2}&SnWKCs`E`(2oZU2f)ucY(d^r7C>AM*c}5uohOW*R(h9gC4u%UzFS)^|Gtvk z&V)RY|J_?L{#Uz9uuT7(x6Azh`5L`DY%fGL5=o)j95N1;>UINmoi~^E%Zd8`km@1$ zYLY(LKMG}2GBy;vKQNabi@#f!;AHKS3_22_1MptsD5;5st(D;5$>x8k;S7G&6sk)# zvGyNkd>@%XmtscI8JkrB9qUFid4H8Nvr2iraB%p?S2{!b*nhi$0=iwwCs|Jc^Jw4` zc#oBxJLfpYu<(WCl7L4Fh2~5pvrNhH*jCYZMzAEj{zQ7RXMyBkgEM*t^}Ah&*0ta9 zxzazq69W$p6%YM5U;6=3&VI)^F?T-ltJ5h=clq?twqVLLpK|q>-GQG)9+}oTZ~%hG zET49@5P4K4+Ub4Q$i&;8fWvR3IrrnQPVMxU7w)B!UVGd2pZ`bw)wwSz;r{P1{100z z`tkh;5OM*-$m_c@@P*8HS2z?(^aH90y1I09pqN?ym4!mCeW2cUsD(Fh*l=>kNRgGI zdXqC(=3vrzM-WJ|@Do_bmxQG>Qu!_)+M1W*gqnUJUqa*_Cev^X|9wg&^>Bp&TliD1 zI9jvcj=W%pf*j8R74c0I`FSfZJ?;m=6(nYqODzog_V#$FP}2+-vh(LK3Vk5WRI;Pa<8NL zbgjvwaHsIc179&9So`y0KEC8vA$>uH6lG?gWjx3g3g!*dmJ_y95kmrI8W- z1n#qDiihkls#VIhQdQ2MyrN`VEJbHRzDOj=*9VUl1c}G=Nk&gy!x&#cnjNZrN)t3H zukif&TP8JL2YaXpfKebl^$w&(NWXvHr{^+094ZY_?y`0wJoG{G z8y3>r4qJaw+1_`-Hw6&j2g5{y>3+e`jHhf8Nq(5>_T9q^7i;{|=dH&?+zlWGfM)gw z*mOTq$6!CPc_}4Wb2fxLn)Qp6(PTIRS%L!8R$|>=9dX(8K*tD6zP2kdl=y62Mg)gHVKFh?uV`n=hNQfe!9wW~xVa36VE{uHxOPm6L_*yGmbe+P4=3)J z5@(gcPqO^vPX7-1(L;IwVN=f~Hui~15#9)Kpwbv<2NHt+*Q&G`EEGN|mLfkzqL@B= zvENMGBF)l@oES;9k-C>6k`e(JeCsFvCl$Xax?&kD0R#iepN0m(n%mOjJH!77Mb`eK zUztEwW&yjD7EFJj?20CFo&2Y?Y=egQciDe9wa?bn9+DS|G~@rnzN6$bBXAn|=SSUM zqF~M6_QS-DIy}H`k^SSqdcrK|`y0cKLGI4wc!Ep`1K`q<^fJCDG=vaih?9SQ&CQt- zeSE48jY1G0xlP6x=r=WMrn7T6lbW~PX{9I@kjL`Bgnxe^x3R=iTDaE5>Xps(whYc6 zl9U$t`^T-+*Kdwp=RSUJDb->D%=XnByV%g?-=Pd;{j`v-uhm6p~Vg?9yCQH7{rifcn-hnmY))zpk??h0KUeOL~8QsHiIcC~9S z)U;T79G$q zTdUnHlZR@ssF;}O=$K8yWyyhI0}s_qf_Yozn6gl58%dF;pa69rc_CFoKE9GX_;|mZ zMc^vPBN~4jJ+jmy%I!gel$3~w_I7O9tfK{D)E+))r>#=7J=KcCSUm4VAi8BC zkcjJjKWOxx)Y{Y^QNO;#3jbozK?i6YYO^$xt+}Afg-lwyXv^5Y)TT~1< zwr!6lyU*n3tt4?-Qmu;(%imCqL>QQOuXP%jS~cugCQ*UrLkVDz*m_j`6@kJ9u3hv8kbUr<}KA5 zZ6S>}K)c8dyrx`KUPTQ)i+gHdV10eyv9w2aT8}rax${*k;fR^{N-omD4!{#waT z!pk4V$DX{ap&9U}{-1`kU^CbXk{o~=PhlIQ_6WmRev79!=(lc|+uK8-_63V1&#XsX zNeMJo6^z&7KMk1TrDTub!>#Od;hOAf>d4(fRG#Wt@O6f~*S`HzdEQek42aQXbCREX2QNYNb)<9@KP0FHckB3UrGs{7}&0OHS| zeSM3IeL8zJb&g;V$(|XcZB3;qgV?;DK0Q4>Gc(=PZ0hUcTeDae$DpbW3`0-k6Kl{l z@?dKcoMVpoweH~|T+A$P8#z%A-Z?@7TTnf`r$%!LD{Ciiox{*JaZD%Ejte{oyQrY# z0-nuSyG1%}jc?iUe88OUE3WbZvR9PG;NVq(`y{%1pgn9BF@pst#$l)fre$M zY3G5i@1&G9=%h}05sd7Osf^eon{ErQj=OSkS(T@~RLpg;v-1Huk>n=i;Fa=xmv2;q z1^lk7DMJ{b*hN%12M1`yzOqtVdp|@Ham9V$xRGgHK!BeN`|yw~(btjRdAGj);Gn)f zGt=f1GfVi%X>>`BMw-l6s~BTZWhD|>Sy{G$wz0QoWu;+vaIoAIS|)}gnb>7L9K1lk zcamLQS?TR{mf}QHnX@61k_;WKtpi|DCl@JZP&xq5sU(th-zWO-vZKh*k>m+3%)JM>{K_;u1sPqN+}jkC}A=Q$-b|JaQ!aYb0gOv zSo__6dFxVdz@IAt0TvtZv4Bxfb3(%T1V5GNWy(>jfDHwDu! zsX~ek-uK)M)LZUI(#`2geQpXZrmYGNo|*~{u2D(cX^R!wrY)bI6v}4CiTRXDJ7p*8 zxeFVL%Iw#l=Azn9@DF-u$=I9Olr_7{`6 z9JdLZkFzxstLVS0_KxTEac?Y#Ayjrz@ZNy;9LK)bzOBs$W^+d}D#kyQ{bNc>dU{I1 z57Bzj+f2uw>M*&u(z@oAZ8kY2;^R4gdahBgrdr zY<;+J&~CWU-CB}&LjbaTdqUht6#hfOqDT<^D10}i)^5DZdQmV{TM&|U{W8Iio=(o? 
z4wtRyU9aCQOOLhGep~X4dZuG1CdNCcr*Rz)o{<$>vKyo=mS*tfedj?VqI7Ye&vYHX zxCmrkx~2)P9-k#&UjYaHFk72RT5r4 zqvODQ2``Jo_ZV~WX_k*C0-JM*6u!#0;;emQ$2x`ZnCRyOaIgFBHfj9ld{1y*u4lcFW=sA7XfuTHn4nuH^XTeY;Y!8lRI68Deu@HZb33tfxY$vXoQ@5 zWRazJq^e6lSo`hk&qc*f)q+IL;vi@f4{Y7z;ic94a=YlIbL-MBK~mXEhL9K@?iLHl zFaE{SPT1y4+H38Xn?8J}xGo-o#r@RYx(b~mu=R~jKJe_Kf&FfYUO)SaN0^B8lMo6ad8L-Cfd2;Rs? z`&xw~{iej7$(Vrs1*FC+8@@J0A~3gt0oy1>m-rj9Y2&K6J>P@vHK4mwE89aS!{EoB zJ_(hvn56gU?sI;9#zmuv>1H)eEiK)Kx9(i;vCdSUsGu{1^exnspMKLr^a2mG^eSt8 zSZ+ONGFQ7lmjgX#*_@;2+c$-{21T~L6_Lz?#`v$lnCNw$X7yr=HQ8^vXmqS3^0g0A z<}_}Ul_=J_uszbxbiC8ozB4*_>rPnFJI4l-#En8Z+zPT#4_gHJ3RTvh99AKcruU0H z$?QG4pnG0B1{DUDxq_C?K7%!vKwgdW&UA#Roh1`KH7CPOtZWn%M)ip`Pn zu6Ans`R>7re8sH{z1}}wor~qKnFOsKpo>{zVll@VHW@5i>EH>&1-}4Gh1#xW0tUC9~ArKwf(aMZ7q1YvVxe_ z6Qc6ntJT9WAM9-RNf!$7J!v>5i*BDylzR*l7e#1lc}cSR$>f9fGJW}vtU{HEOAkXK z^Jt&5FuVKXPr4({SuuJ!n#wh@Pa#zcI28%~hAxmgk--xdu?T=VaaVkDtt%YUTBff- zCuUM5FyRSz*9tG_0yYS9JsaGw#(Cv~&BMi<41a!w04&;;nbhZWv>Y@voi!lKw!CRD z5+_oAnXBC8H)zj;@q}DnWCczZP6f+Ryni8G7tbX1;0Igpdk^K>eAo2|K^Io+2G@`O z=0ZprYnH!+Tl^pFy$4j2Y1=N!jCFJ@NEZYJ6{QLYC|#vE1A+*l8z;sEzz~smWV-AuNlnKwSPm~~T zcbPTj@(RpjdR>HCPhI1CotWJe8w>Cv&5GN9|9yGw!0$dz^P+dIjs-g9^Tb%ss_KUu zNOhx)^RJ~CXNz1jv4x+^uo7$I=c+HTxAgj!Zgp))DaEp0E>geR+!7bPuW;18vnEWt z<5R#d@zs0&Na^TTxfbApF}DrArkF8tzxtqof)GuOGib8gQ>x&;rS65)A>kbamlpv8 zrDAu@t3~Z|U0_ezb_u~f9V%pjf78lLPQFTm!Gtx32%*V8S~cq*c^TZTwb$mSQjyE) zbb8jr7yk#xJd~Z&kI(i^-?D}Zjn5E(5l@}nl%B7LD6e8)+8LZLtuoW&{Pd|I5>r5O zQk0gGl9iRJl|%9CskVI;#Tc_a6Dw*>w?HiAn4<9xaqGR}83iLTS(xqz@IQpss@vly zJ{dmYP+>$O34$mWKHm&?ui($rS+>o70-8`vO==mj-#MX zl<%ZFdh+i;<^ur@GI1Aj4M+B3SK+4Bo@7^lEMw!!&o>sZi>2>gq-%Wb_(QIn^K~NA z-CAYr3yz-pl}{!j5?cOcV<==2opsR|*{ONbNp#gz?bI{nSe{=x{o~te-;gY@Wp{PE z9iq%D^hsr~q<1dBe|RGns9I&qFxYFqj1V8us=r#GFiEbB%PtKGw7ulGmP z&mWSG$hzX(-?@ zPG$KUuHOX5dL~?K_>N7jVsiQ~Sn8K_MC|@0elW~TUSj3axDFtH?R0%{)u@J z{?AnR+9zgapjF87^fg90Ik%4cqFd*ji>R2GxM4&@tt!lu7QwA;R7SlHzBw3P4)F#$ zuULDp{rUccGm)+x7*`BnrWsV-82m#g?ns`Y5pQUN&SqK+IuPZ`nV%oLYFVf!C}^J8 z8m6^D|Fty|!HHXD%88S}4xQzpQ>TjiFIDM%0OY|q%J--C_FP={_5r;c)}J7lq~}!8hD$#7Mm^Fy?ua|uL#ww zqF;icLe|q;&oWvvLZC-1SsvmNEBpaBeEjnF!qsqtdpcJlNmy#Q)MMUGQ^3+w;SGLl z1^%Y;x7=w_2_b*JMpv23u0Uh;-XbOPG1Y`=)!KvznP=T%{vuw=p^6D{B>d5ebVY zbv8m0j>`y#-?kYGSv{>$Kk`imitFx>O^au3FRAobAwz;Y*wGg^;VXXMTIQau&Gm$t z+h5K@p{Q%&S3dc~vy)Py*7e)9q_Et3)n-?4cBEq&)>Gow?e-kw9S*;(wzv9g0#Mrh zpl7Bh|M6psuLX2$(WYNlMD|6Fw~#Oc>>4ye%!vHVBw9i*ACeO0Wi<5^rz04SFHX0; z!cxwPOj#ww=-$6!sgQG~nB}25u{_jvm)Wm6lYQSdr*d&p8;7wg&Q2~MAgun*_&9);O`75#Y0IQhb(5GkOGN5wHUswDNFubiyJr%Lq9ye5mHzvSU2nW0{ ziK%o#dq$@H9)(m-q1$28*W5(R>x!vT*JsxwCSW?GBV#T$>IEttF@2pq!m@gJ7n^O< z0y`k~+&;S9x~whu&YWdpODc=n{`}_RY1^EB?V^EUm)-*_$&}+o?0FHwVYBH7k9O#q=r6xfzY6$SeH5Eis^;scIT8B zxUYrTTfKQ6lWwb6aDOG@L5|!WmGmUGRGgYqbyNl(@~~h^HsPSjx|cO0To$p>EL#(T zwtw*Yh`wUo1C!H_(ry%fEqrqgg9xF>5U!4}lQjm{bWK5vp4DtWrrJE@^ypTQaT@M< zPyVg9TT;eH%rmQ9yV&UoDZ@d*y_=Y@P*`PeTiZy( z$31631}<=gDlu5cG+-G{$`>aYW0*SKlvTi8&LhS2Bq!SjV=63R@NkK{ z$p~+6INVzy%|Biizwu+NdDa863^Ncv3*D}im-nrrr1)ziLTm>_7%22_X5B7eoF#Y6 zovXJcYw#Kj-g#}*Cg1SY&07iiFTV^OL!ik^CHYmU~eWbG2J2|JBE5l@#|ru<8E$h{_l!9 z8qJ~~NJKHiq^uub&%HT>u~tF08d#Fg1QVoZA7$A<-XLOkL^l@~c`*=0o_tK*%oF~6 zyBlC$@Qh;nuK}#hHPV)#qQFH5D;};ttf<;oSC8#rKCnv~(`lty5pc zyC+dy+>A1l(LFZp9fgNkZbLW!juhjRt+;%z9U4Zv^64ovhdw>S zeeu!LFrz2NM}VN6(zl5(xSrcxaJ_lpWZFyin}@*Q&3v8jbtIHl>V4pxVC-|9^mwmM zVESdL^1(FT$v(g=Z*LFTJ;S?E#?_r1*43InK4Fe>y`i4|%EgYGdt)iKO|NH4=lXk# z_42Anf8YO`t{JgAoI!Rl_!cSax0wEU(ZIaGrrLk(Pbw6Dq#vg>`(6vlc&|0S7Vg9k zWWh!bBcN|O?#|V@^fMI+Z}wy6b7C?Ea!X#?K2kd@>%qU}>0mM5-b+i~-d-A+@7@Pf 
zK_1|9cX8r$>|e%!=RB#SeQf7EDcL$m)TfuTu67Odd5tCLGLxn+1enDhJhiyD_ckIr+YLWk*v7TOim5}Clsq=r`%}fdt(g0K zlUzsY#S^dFP95@lNRn8edf;J0C+a zj*7Q2iXZr`{IceuMuPBL;sTkftDgx=${}$q2iN-il;5KESJZU=bmBq0(x$ z+uRlNgXrqz6;KfooY$M<63#R{8mDjd^@knN;XUzYH?UYQB>OcL0CTs30ty>67OPAI zKc=>uEuNjrE&g>Mxn}R)=4PWX48|;omphwg+4E}(7>x=(t176eT(W%$>Z_4=^$pq@ z&b;GB#WT&PvNA-G&pOu!?5Pgh0Mcn~u0nC(JTn|;=ju&Avwg<^Wnwx)t7~G)08Tn0 z*el@j?FTMwZEBQM7Qn+LxT_*8d=gzx>)fO}WkyyO(e;@6_G*D>$JYte(P^>6QG;#F;p7 z`_QdjY2$`i5wb)?-cS`Y6q{vPTAoAL;rKWw=lHnA46JI!<}K4-o2l*whcP2&aB=aA zQ+zF9r;B=IYzO?kN}0}zvQm;KGMe8JI~_zjz>G$YW;_%0)46tutlcB!hTOqr7L}SrXMs$jI}NtWXK_jiV~A+_c^&1sti7IlZaS!~jl#Gx3FvfwX7N(UXiVcf?welb;sE4h~-WQ8iB5r(L{M1%DO% zOUgu1Und18DZf!^E?2ksrvI2oaA|ywl3Y?65@dU(xbxiu$ba(87Brc>IibUQ+T)3_ zGB z>dzn;<7a+5bp`v^@)dQpOW$~*KG9C!6|y(*J*o07S1dQ45O*!}{@7UC~u6K44tSOr>|$Lyb~Xbk;i`wy(vt#Q188@xic{PxEO#R>vJ)iWj0s zHYUb#J9*Rhb+i-cQj56K5Dy{C?rPZ%37s0hvGW0U9Y#kR0Xeu8WqqW0Z5zic#r@SQ zSftuQj~|gyg=iOV%cLN!8O5nF^?a4td-6(2cfbxftlZbC$3h{&J{3{j?zn-*C7llfz z(m;-Ju%S)p!>vmrI==RJQOX%sWudgQzuy_1pU+MWIk@T24$^0*5mz{BWi)iP zwrJV_{fIxmiYLVA<$9U7lk)b>*7a6;nF7%%Tu-n>I$KkPVD!C@NV$9kW>kW14wz5) z;ag=LiBKVOeZAdCFW)(bF?wM9%t?P$ykA65ZJuaH@XTjAQ2X{dGIV$kBVAEFx;sA5 zu5YLk+ClU(w;Bz3Og+@fbl*Pb0A>y?!ah)2NNWny^EvtCd1N_GIEwRl*p6!>Km5PB z5Q%nw$O#B^K1(V55u2hG;tUhp(XOs6|2+R}XSu{Rd%_%7GJb&ccCZnGau<7sjyl~Cxd8RoC(RX{rvdV??`X4bPToRa7pZFQO+5qQCsHWqT z_#2b(Ld8z;qAN-kSF4>#E_cS7>cY(n-UQ9|=O6JU)qeiWtl7-UgjV6nU$6Jh4xOV+ z1{qz>JP}~CZLMFt2xa^QCEduysN5WlK{GaEj7TART~hE?A`#o;KNcDa22JhK2G?{~ zd)r?t5O-GjIY7o)rWNor0RVRn^)&NbrRR}Zj}Jm1KYzU|*T{{zSxim1eoCn-ij1tQHByVK+CB=hIH6Zu_kyoOls~Lue1@}~9?&?~ zPK)%X-7IcZx3c$6a`M1Hazi=qVM1Ug>B&i zVlgYFlRraaUhSUGR<~+w8LLSEQ0y=pjT9L!oOdG3o8ecOHWTD84cPbe>RHyuK6VkG z=4s6n$!rc0Y`HQQ{n-pW5?a;#;z{EyUub@)dZ{_6+>yXgTGw1WYjwJm*YEZ)}tro-rC zK4!889A8|woNsmNIFe3eP|=JER{xvQ@ilyubIs;m@11NrHME!Ufn{uDYiH;8+()GgecW2<$^iNM! zRS$^#SbP8FOG*3O$_gtiwHouRp;M*{@$||WC!ck^!Xfv5bfj$=slR}qTW zQ1GTv=yAvQqwJ$O1HQSge6YAk=}4cQ)r%^Qnx4kfS15s{0mx<%y@@k`rTp4vI|Jl* z7@O_lj}PN0CpOxM-ksHBj0FKiy;zS3|CDjtWGS!C#X zHZWq{+v!%U=m@e!5giYPXrXy$NM3N%@)*e0mRm#hlWcFORNe3yvABubZK*2=v%SgS zY&^&#up}B>74FmC8f=|ankXDHY)`&7!hX|r=}+N24KzNyg%zbvwpicaxvomDT0T%? z-i;;1Va!|`2+3T}7R2&L7duHP{s{eSHRLHa+?TT^QS-&SRsaC|ZrY*-2>o%j#-C-Z z1|>M%-A*PEtU8`khC;^0*86JwLEGNXA~2Y2I4U^OJn(}^cef)E>G?eSTi0B5$F?fZhuCqByJzx7iV zn_)8}qoc8sMpki^m~cDgxO}6;>sr+c^DGW|2zwq`>#H5-R0w&@-J8CdsIc++liBTR zE$`Ah0Rg@kn(uadxrq>kIKg)SVLzBQgo9Pzt=%Qq07-98PjjwXI}3LRua#j1m#pp* z5J{AZ-uxgM&WHVTe;ggn2R-G79FQg@qoO=tiW7FETTidf#>Ng# zI6OM68c@T$xRxS2PxLHHl;N!Q!8Gg7#f>OOMR$yJ>?vv7yH2AXrJ zlvG5e0{lhh<>02>=gL-tr}+f%K9e6GLknMkW&%Lc$WF8!HP(qsi_Af#xIv=*W(;QQ zr@Hn_9XeGV;O$k^-BCZ|s=Ly3*VTe4vs%8)C?7oiOH#-CdPQz{cVi4_IWvkI3~LKL zcY6dExhFh-Uu;e5`Ro$h% zZ}mq;;$`YM+f^@l4B1$e2P~!dxnTT;ZaaOL1I+JcFtO)gK)Y>`$UN1vU)kBT=Uf1D z)^Je`CGrU9deLx3R@8j_gz_?B^3(1 zt`E@>*7(-zx4tkjK`7bydsx5=OmRJjr{)~wU z1?~upBXe*-wy5oy&mbjilfHHq8mFTDzKE=3oGn+yy~5l~F)@zwEX{1XMMd8juu1@9 zIcVwPF}5L&9fUS{O@4j6H(o|@2oRIj)rso}mJGa9J1>8C(P1R;8S_$FRa+waa(5h0 zXnc0Rg`?M#vjv0vJOcBq?2WQ#vKy`*6rNt5N7l@!Geg|ik&5aUgG`DUwdpRiMVqN^?hEGqal>H(#KDUE5|A;-x+N7}m;RKO8vQ4rE{g-}x z{Med2-iAV@sPY+C+#N7E!g=4_W<(*1^vod3$sl4_-i=9wM8Q^wR? 
zRj=+?4#7P==5T9v-$#ganCmnaFO5})m>l0N_)Ek3c~U0fZB}?SOR;}pH8z9Io_~2* z%Cq8oTgW;4vTGwH?fVaCQr#?Pvol0F`;wLU`25mQ1hi4AkihwXVy{6KMAEAmwl8_P zpDjg3hK2?PhCkf7zJK(e3EGRKbP@(|DxV<>s7G z^xL;3`cL<=c&oLN*pD=E*ce0JvQLY@M$Y4uRjEU8wxALsmDGaDOO_VC0|#xfsadw6 zoij7M@_EYSQyWsj(V%wMZ++SQ;U8fY zY#e*$Vi+iim01K`7|)tjJ|8gFH0nT|+V30W220$W*jTzC&>%FHK7Yzwep<|#X|O<+t67gtOSp!BMeW`6xW=niCswS1~< zBbYU80kdx6QJ=vHLw??$`CueGHvD3R5Oq&=Idz z*#7n_=Q_LAqC4IM_uHrtH?x}+?#%0E$!}fP3ujs-xB3>#$8LDp`c6aL$JRmU%#;e)9JKp#6fM{4M^QO(P9YpGJwu!sl7% zHf75k2l6J{#kM$J89aY&?hqSjj)mF-#Ld%-MrmnTnVyByvi1r1iZ&0Kv(7HPb$0A6 zTS#5jE&+%d1lzH?tQCG=k>(-R z%X_7|^!14M*v$+@2Q9H$gnS(q!4vqk~Qfgj7>CS zJ*wE6mEQ1SfwKS~7QM)u9GMhpOe~sV?Q@PnR>M8OR>{n2JIC5^0`1yI8UUL#Y?}FS z_dK>X@7WV+C{FK|+!5ARzWR;b=A~81?l@Uvx1TV}KZ7^@*Bdm2g#jK9#|zIJ!=Iz? z-_Oqn@KOCa4EI{nPhG^kF|9Du|K+ZdyR`3MjXsIcufin3ZO)ZoR=Ulttyz6GTQ{v$ zFpp$_C8w`7d$167LFS2xuKx4*58Az+vBSQ5LuXkiCts{RY18%rNJs^ngsLom7xd9ywk3Xe7##fRQ@fginhh+RwNUq<>{R^` zMO9TzO;wQYu_DkFoDEY{yieLij*O(IkBoq_-u{)9SOCh%JYHE_K4WHOZ*8HM8KV}T zYHnsCDc=vs+9H-(o8#HILisa2EZYne0ISVyB5QV*xakD1qHnNO4s+PT!j{OK7HtKI z%(1b$x|J2`T5oSS9>E5)GQB1wG&3(ERFcs^dc&?)~6I*;fuKaY?r04d1t{)tmXX9wS39a;IJ%UvFQ zI4!5o8ZxY6f5Kf$1Gef!AN z(p%$cqC5h8tiIQScN_|t=nVpbTLXpZ`){$J-oWBtXX)a5afyl9-A+!$%@*UK_eQqn zCkI(sERLgWd!-f5p84Y;+=amq2$4sAjM>_KC`zXc z6eQQKhnp-N|JicOy7PO6Y=hr#$DAv$1Ewag?ohWQHi;Y581VQSC{sa#V?v0fpPX+UpQcV#1MJdgF|EVg<(dJHhgB5TUbV3L;s zsR0uoZ>zKMUK_i%afprk_dmmTsjw~0t#me?6=}daFd>gESS(yKU=A|bnQ`}JCO^Od z5ndwV&cf$toc6Iv2teIpk3?@dKYx9;w0B1a$aV|QOiNaEzwivCJUoz*`BUGIu~+7h zyxL}q!lzG;*az}DZ|j+C6K3XHei_HsXZr05R}O#r{Mox574c#a*2MX`K0AAIGCTX{ z1rxWn?kixXJWCH?7gPv`<}f+M!eYQXp3bctPLW>9?0Kqm#Pk*>LQe+ zcm|}h0o~Xbe*=B~3peb=s{8^N=A}q>S9EkVm$k?wW{^5(25PVY=6yQZ(9IJkI#hLbb_n3B;C_=a8#IFZC=ph%D@v9D{&M+_= zlaewU5v21j0;w<8A(yA=G$7TsUCxJ#bv7!xV)#R*#oi*t+P8_9LufDYbxR4EUUzOYKGgm+JlHKHyo(1tyB-Dh|s?;Pqy{8_aK_< zGt+3CVr>W&GJKUQLORIlhdDkeF&ogEcGb<~LhB63Iy^7y8x>rgyI$Xu^IvBgORc z-pLV}jPgL83}{EB8)zRTX>eZ6N{U>o~L^{Q^k&xnhYH;M8Ak(3Fv;kwHFxTX=7U*T@w+fF!x6$b>{{Q+?k2>^^ECGda35?a#!9; z@MgKiSN(XzQnG$LC^W%+04N5}&GM`mb4YIgVD1Y({Uf+9p+H!(y`0C$5pA>Ne3SeVB&zQgmk^r~x*csK4W&5c)z&^|)!v(VxEwp; z>m$VdvldME{}mrA5Ylq&$&3g>JUd0&%?Zm+|J70(x=51cbPSm=nD@ntzB}|icBa_D zUNQg-YFZZZti|_6T)rx0jG0!5D_C2LzMg zK;X7j@Yuz)eVl5ou9wJ}W>6`39;#1PR3gd!61EdFKKc7sN5_YCf^b^j#YO1r5_4k0 zg-zJ4X_ljdV+UOl2fsg89$H^1B>JbHyLIUGqXf}11N9OQozYXWi`Rr?CUv&U+Eu79 z4!=$uvX>-;KEYSmG(>g_G*2iwDZAOVzKHs)-Hq#$AVu#sgz$W8G=sIk4c&AIN~V&l zbGI&SA8gtzoY`JeWK@^b2uEyG2IEW}UHuBLL6xp10C2$)8G2VGPwZj)kJB?$U3g$& zlV$bEUU3bXs(pOn=FNK{diqLAIJj!fk^UmU5X7McVH??0?cHJ-e@I6P=p=|)2juq0 zX&)DrMR2byOofOzI*!<+AZOLXe8{AaAM3&idZJ++?kgWq=K~feI%3495%Cw4HRwxd z&H0kg9@{v7&6PM83px3mM>;#_fjwR2bXBmfeot;?%~IPE$!jeAvfy6neV4#3n})Yc zk2h-@SMfCSbY2?2^65xB!D6jHplyJx64>}M1)_^+F)X$A91#wa@r(5HwaUaH#TOZVF_*1kXHe1 zDCy}m%YxmF-D|(?Pj<=uFqW9O(OsBoj*@bDGp5rJXzr98>Tjw2Ta-{@!nwO|4EW_f zKCWw@bj%^#oPaci?7rcp_(4VV-k793%zE-ns;%1VfZ*6)?FZTh0+%HCcwHN?msI1J zwkWIVM+Pg2VLCz{$uttt9O~1KBd)pt41dJSeNDCVW44;2zZ!4Lp8G>s3>L#Teg4j{ zCu8;fefm=#(X!{*deJZdkTH)$j_4+uhresCmxzJ5y3L4d3JB)9=nERw`L5^0=HKJL zFk0%{h29|Jlvyv_M z;zE*~X^C;ACXcK{%?^2`(H|k2OtC6K{&pb9q!-47P_?^@+RAmedx>g5P6mRGn}zTF zR^ua__|NqfQ_zC-ygH4*4s;_#l_-b!5ezHhspGYtE&W($X3p3gUpY8XpOaB1Yzi^; z@ehoZZ!$20HuO)x1Wz$GEz;IsETx)gh!Vm{exba&3q|XR{|~pPPd9eYn)3^li16CL zXCLxiJttd=2*V-C>3wx_}qA3ez`ol=`Rjt9{kZP$t6B_@X z2g^^?($~%+Pj!d$6++!BrD+Fm#*-vseRruh^B)P`80+Y$#_MLe25iI_#l@!!57a}& z?L^2gNxhSU>+9`E4GrP>KGQd`sM7@-@jT=jM32YswCuQe=f1#LJ4Y9eURnTc&JREj z&Lcp1cdBdDVTf1I`gS6}hm^6`Re5M8THV?U^*%NhNy!}V-tYFeDE9azGZ7Fu-s{>_ zEOzLb7jSE45&&1?02rMlk(T1#8Vvtfo+J=DGr?%-N}%>UT|ha5XG8oH73lQ 
z!oH`df>wwW)oJTA!@B4idG&z5wGM`@b?%#8fndAMe`E=^L@}T^x z3z_B{KNx}Zvpo+XgYOJz_%o{10b*NQWJC~y`jF6!85sa*K}<(yd9LaTD}#RTN^)gX z^pW$6en`L3&u{gC_`cO&(UAl2Ki@xo{P=h;b(OXlPhG_*ZdY>KDVbDX?=z75B8p>w zoczsJC!HN1s?mdU`G0S891C8Dga6D!e$|lE_oi; zxv{e-&_**e0JXw}$tdQ@6iwsQ>}9>;aAH+Tv6AcE#jfneMoqfc?a3&lP%St~n#zsN z{H)%i9ys4+3HOu?wX*L}N+nRdKTXLgc8Z+;%s?4#bFs|*3f68RPV%FAfE>nc`zU3_ zNTULQGr;8RHm^JW+W1l~!S`|iVB-qzFD!SbEKAHn4{-l∓oMW0HoPxzTTRKARAD zn{(HB->HcXlT43q>$G_moGYm=*Sy(uriv{IbIy)UL#=J}RcuKK=*w0{E-f~b;hoY` zSoxw5Zfj0Jikr3zrAt6No6XHOn$jP3N3H1nC5mU<*c<*A6Ah1*ttjRS z7Mn-9SNCw&0TB)VgT~cd;t1L_m-v*2QQmmSJqK}GOG))+qj+;O&DPULvh1yV`?s=L zJ9?0u+H)$hGIwL)w!dPDSRcTgo;l_Jyi%f%9 z4EI!6-EzER>UpN4GX6BTxcN}b?N#92i?)##8V0M7^rgWnnj4QGdvDIgu(Zo*zTJX8 z;A-53DCV9%dZX*LlnLtYL2(z)zur$)94jqy=GlIs^#Q#4omcDUPj~T1Dm-@Fo>Q5! zi4CgGmQYZ@Q@UFWplY|?Q@<$7?_c1I=Nqt=!inaC;68L^Mo#_WyWWVgg|bvHMe%DHjj{tUTgo@n8~ z_~e|D_R-ehrj3xAdeHhjeQx19$voWG`lOl_wmDvtGO;+;*cbykC#&{#mHoYK*#p)5 zmpQ9W?O(@PT5qKzq>=W%lqz;dExIRoOsM$ z!uKGL2%G83CXJuvi8{q~KkV6+A@I_mnkkbeSa)hiRHbbg3THUa_0XwzV4>?|gvuSJ zcb4q-^yVC-I1P5)>x;rg)Ri5-sHRui+XL#$J_|B~<(>p!1d}O2mudy7t{3F}t@@$t zddM~@JLS4gJt*5eF5}&ey(Q-hIWv=)-l3CIU_iX2IWJ$bMWrDGVlBiBxY2xPXUd*f z78MnNCfDS?tMe3*p3rxutvdmkd}pVjU}f1&&RDdJjfI89f$%Qrp`!AOP_N|XaKFj% z<#K@D6P|u!OlKj>zje8$n(*ZsmZu}ImoaQ=&~Lr@GN-H&#{UC)Lh?as-G%q`LR6c2 z*1&J&&Q5gtyt7l7eXHYYl8X-JvqizW5IoVM`meC;hW(bj@o_%OHJ7z14^))x;VMd| zn>NEoPyn_VrMyT#eC)N$r9-rcVIRY85NEn%o{>r2xdsd$4OqXS0o#+hmgvq*#&;1z zbmZyaX?T2}QW$nqrK}N{kH)a0H^@)|8pBD|pHU?^5-k|O4z;vYcn6?8rSy@3y&-cr z+Um^AfMJ<+hg{kxw%NXH!`#Q%9Yx##i_XbU3N~+)4#{m zO^*k=$YE|hdXy7P46)-j_nE#9&Htbhcd7?6WGl9_v?%8(fzfT6AS)_g#2pipX?Vr3 zhb&X1&fWVp*E6}LDfdCotT)9o0Um1@FGKsG94|8lQHfAI$k{^a(S5|QBc4{k_!Rj+ zmj9)P64=|e!h4}IN2Bv^f!cBe(?ku8HLAPO&#q8J^WyjbL+6*8V8BgL6AXdyg=2zA zwS~xFk~VZf^rN!zvAi0`C?kC2f?i&(MSa_tL~56>z38{4Y6&lqpf9c4%ZrrjD536R zQ)!TaMR^d>qY1v&-Km1j`~#PUyeTDD>JMG7C@u#GUStJXDCn*BFG6>=#VNPo{xmCl z?l@EGSk)6!`Y7TGTEq+T-Q6mRMK5#5eg)Au(xpzPc0AU6 z|B=A4*fw=hjZTXp(miu$w-aC=U(q>YHxObZ(c`@dby_4E79s2eKj(H?G$;wcFO*h$ zr^L^<+8qF+6kvqCPPRvU4=AkZ>3*H8dXjJ6SR#C<+8=o1>$=n5hNi3q3I2|l`z15u zg6h!m+FFA3ex)*W{k1KR*rC@tkrFNVxApjvKd%N~ogtJs1Li7X4T3y*0FNN&wxbIW zD*;RfDUQp%t;v#-%AUe%=gtQ#+`uxFWmff;Ur&x*Zf=%kv=XZ~?{?;|hp$Ie?sNV< zE#z}c8+}Bxtqry_UzFc#_&t$6dw*Mcx-4EzrC%V)@<1wm(kM5V^U3GjGrjsR#El-^ zJ_#65H-C)QaZZLG)OFStvrfJfv^lPcgdVB&kDrlunl7lzb&N02L|-p`?QbaOvxD1X3U$ZP)Eq@folAtwg=Mu%~iU(X+2hQ5^`Pc|sM#6ETEt#3;KdbBNtKr3zO4{28+wb3lMbv&^s zsBS-@(YYc~0vukfJ8D{vh?g>7Elk0EsPEYlr6(q`#9uo*!5%Y0tE(z_uex@6kuS&HP^F$@g6?h-FJf?;FE6TPF+`tn$BeFOIJ#_a z6MbN2d!nMPPfuwfI@oEuhNwq3wKjz?e7U43#|`q4#BZBAB&H3>1^N?cc)1Rk?9C5! zx0u)Gx~{nspWh`ahT#4To<4Tb9=8iRvIOt|`-)65uZ0wut!(JxgV{;EReB@a9Ll|9 z>6)pncHZ~cgYFLe+R_DCvwpyLX9TUyJ>11ulszD{hQ8b9B*$ z474Q-`?9l;lfMV~gzf&kxG0X6l68)kQN3~pHc3M294THWbiKg}S{Edwo$Y}H$ruX& zBOMz0>1l3CjgpaUM}1yfrCqjr%&5Q%=v(ykvDMZ#>GN^H>>PkKgC?tPlytw%Lz4(X zWA!OBGr?50muGj56^nMcC#0j2*@2%{jOa3;?W;}DN636=7>Hcp%tOECq zGum#T4D$UG1_QX{uT{g>jF6RZticUA3DvoQwPxG70cEAt7<2>*g`zEI+S{i99`g+< z9-VkQu!0%Xa8RzWE5?6TAZRg01-7=98}p1{$G27fVm~fzVB0GKNAHk6c~=#wLx~mh?NKy=~IQe|!StKmrcIFmMU11$?& z#_~I_w>bSc&RqUi=>*x=0}FkxEC3K`*#?p8MCc2e;42Osze zg~V~S2!w=-U>wB8n!4DG!L;EK^Kiy@&7gVs)+ZBLsTH;u|3^(0TbgRJztY#iVY&wg zZ)jMXo86oSo+PEv2n_}p0K*~}6YiAW@rnqZbrZ=TGBz|Bv%ror{|iHJ^dfQg4Ymwl zKtX*}dH@?0=|LRwVHTP%_xgc4C)1c`BzJh);@6j-f*;-EM&@`*HG84n51;qHb9Zt0 z1Dk$V3h^UG9E7ESf1CwZb7R2`F%Gx&K#V_LLoST)4FPsUniHESm-TG_8EWvK3$Z#H zS%bflPz1Wq(rNyq>*Pp$chty}kKlT3U6a<~KL|i0Vj!yT^5!OQw(6 zC9yr3)cnu<)3ZJKv9Z=zJ9P({_4ZvkfV8-dit}_kG`jnUZkF7|Ff<}? 
zq&^d^A=|Y+D;+Tvu`$h<#p3gIor)~b0nJ11rf2&FniGOC2{}~*nBI-a+UeTm9!I)o z-uSvLVbv|~1oKsBv8#z3S;mhJFWEijjG9(zi5G`TlRN^Xvt3*!XC8Ib6@4`1#6-m9 z;Xl@kJwGNP|6>QWCv&RCpFEKER?vENw?^E7b23skm8I{lY6SM>uYc2|uTOPHX|@>- zrSQxPlbbBY1FZKlulAFK-o@BPQclznXX?_z6^#L<-SlCic9H3Db5@`TVXL@Tb_5RFA@8BU>8@<-P|nC;x}7cb zv+PMn84v4VQ40aNDi2rnIOB|+-`U)vkoj`q6KZ#c1HQMp5U~ci#ZiAlbX;ZQkKPO8 zG1rY)zdp$Ii-?NP@cY-70+1#PUQ-gD4#KmTd|arsVG8H~U`xb$1rj(pJ!JZv?;NMk9OXql=v+zj{CGPYatuITvcT z9H)0G>9-`R$%Qkn?Vui|4o|v1MbW-fvc80xJ+6Aw2NM?;7vGy6!|5_JzRFU4raJ6r z+4Ca%UO9XXXg9yL63EszTk9nqbvgo@7Z%J#?sFSdkuUaj)r^gxT^#6Nq#~nT)i)EVyxF)k0{G&QC%&8jhrz(D8+T^C0O#eQ@89_6#;IGNw*|3 zjc{*>-BbD{X0hWv>G1!>-g`$i)xB+lQ6DQWQ@jGN#c&{S;(p2{rtxNc zUj}-flgqHVsdDH5Dxc4Xj(ke{&J_LieNa&DC^2dA#o;)#g7~iXXSGMLNU(2q3s_oM z_9nYw;GaM5Oj*|t4=wMmgM2$ngqpZL8Q|i3xOp9|AQKWQETIX7(ATfs+$3%SSqqc+ z-3AS*JZ(&+({e}0NXeZO1bdUteteC$3TFi;gKr|DSTSRKqV;BqgSIw#=R|sb8rJd( z$L%B&dJz*X_3CnawI%z~o4>#R{Q2gq0*9`c1F<5w`>NicHDOeJgh(ja^t-b~LJFQu z==2-5S7ln}k8x^R1X~wtq3&U#l2TXK^3#rUg0XwXtJSc_ z@S<|hgM|g`s!AC#_cTr_;rgUTmyPc(^v|?MFbhr^B~qMdud5@YotxW8r99D&4m9#( zt?R#)h6ASw2gcN2xmJi3~hCInuXkrguBG@ZE5_P{lA$95yLZtR2`XIFUL{K{1BD zmbSj=Xm^)5jGq-V6bRq`N|=-=W4~fj#s9v7fXm)K|1pVQ*kv94%Rt$~12Z)~4u}4% zwj7H-|6SoLpI5~0YiIT){xZVR5OVJ0!!_=_i4~rF<1LrWr(fW(vj&4rHY8c~zG0_) zPq2vQ5+GabiJ4koheJb_C5h(A_g`zbGA<}oZ9TfRRREy*ZoNToOb%DP$xu+t_m4ni z3kAfJNHgYVahVDAawAdTV@wqYd|W0i&Rjr^0 z<(tqKzlMe;?A25dtP~N}%h_!zTCFSE2?aS78prA$U;iGjCmJw`XClgsaJw^&J~O|5 zeIz5G?g{SCXum(nNn6++1v**Vqcb~!gYt$yyh_7>xCamiDw%8;b>uig6WqJ5(==UD zl!QB@b0j_Dp?eH(x{G>gqb}*_f8#5B*tEaYp2K?;TE$*st zvyNAgOcg)|mpx7TBG9IzsDw6jUZG=}gk^asEnQh3Y5Y11+O2{zhdpVj`~RGCY1)~c zb1`zopOx~O#^p{7*k^{3ZuDZ{D>F^{Zn8h}W85dhq1ey}403QU`5YF}+f)08G*RHn zQcRSOSx9vu?~(2*0=&3kgnk*QdN_6H*9T^n5?LvF-myfvq5;0{{Jb4{J=T+LY+#3)Jx?b9m4M(z)^iY4J!0|3AVs$enw7nbt5o4a#;-&VBJ+!$ z5Z5_xvZk#jk$!vq28DIVH7|nIfX=ImM_!zEQq5{487G3BQ&x5qo4vo-0aQJDT}-A2 zaz36;Bl@>3uj7k^g26xHiXc3rPst|XLS8kb_<`^T2;`7dwDJ0Z@|Vpylo1dMpTt@D;Y2z?k5;;+Jfn^7N#L~qWvRnSjO3v@Nh%657T@DzHm?$cy`U+yAd(N1y3o`hVl7i!6WYyz$?BfulVS#2(Wyjd7KmCp}V0xp8X1 zQ}u5@Je=*?+^n%HDgISY9an~4uRWjb5fd)*%*l-o;Nn1}KZU>1&}f-DlYhgV-oyAD#RgQ9}6nuDe#juf8?PSQYpWXdZY zar*<$_Vi_d$Hx|Km|;h8cpM{>yvd*0D5r?gXV|xdl%ZovfW*|k0{z|PI=$;wZ zTWbvAGJd(obE6VPGMRi%Q1Ppu(dWd3CPg7*m4pB`-5y=q-Xrrq-fx`hvU#iA04M=^ z?EZOtXkdtIVbRu^;U1t1gxxs-5k;&&!-^~os@ebA-5UJ(Q7MjwraZ13T;_8`dnw<; zF-L|BiOiLU&IS?}?sF1T9%GKn5ISdqMfcp8OUhGg-m}1dj`tdWVZf6)XI|Vw_aW#; zPwvEN$fx>vAI^ti+*eOQK9#S)tp8zQYIvw+8Y*$&>`N9vemht#C?M^d`HMm)c^e5u zV=LO2Ygtxi;#Xay6aDHh`3s(a2+bWkRs?_UuLp&*O+9rVGlgwvJos2f+c^=!ZTl0v z1nIDwJ#>S@FIgho!*W$iBYeQ0YRYM0U}8Fw++`m47O1$u9Z5??rRH4IWU2)4_ptXT zFT6?uB5eS6D|zrpS!tEwgikIZU>dr&Bxu3%Bk31uNvE?tc2`UH2#t3p}U@7nwvw?TZ8b-fdWoKSi83I5q@47PjJ-tPJg zsPsxrfrklRzfl6zf{^FOn;MOEAo`L@%_Scbf>4}#0;#UE(%9}fS_*?1Hi+ig6AtpB z0*!p7wSn{hJKj^=O8A1v$vzgsyohE2?xj<;N z>pkJQ3f&2hz)ViKjT3WXZ0c@gKBjK744Q|h)9nZsLfw%RENWj)j9g!7gJeTez(0`h zrG{cIYvaRvwO}lr2*i_x$I9}aq$!c&9ab1dEWbe@M{v^Od<`aEHg}noncN>0rHj%; zDMI&1VQMGkhakZ>HdZs)MP#9?1U(5Wv^}A&u)ci>bq6v;U`VFlC3p`9wmYnAQxZbe zP?y0HLQvz=9H>4+G8rm0r+s81U4~ihwK*UQlLY3&*Ed08HJSH44>={LgT%g~U%nnK zO%Zch4bI$g@Ya>mXD-2^L)s&ncW*KW@gMA9%1S1iSt1xgXbseYCu-~`sy(55I^lv> z%2+fQ=F0-Wv6waNc7eXpaGUdxb?EBmaW0GlP6|vKZ3U})nZy(nT~@{_EImAWQ{y8W zn5?rd`A%@q@I18t~zO+G2_#)g*Ho`TXa{HSkxQPQ~xX zFas;{XLEFdxNPm#zJIK%+eL0An$@;4JCI@&H)uO{H7jePri?|b%9hjNO~ftF%X9S6 zN6mULL1)QS+0{fE)~JM?|0<&n>yOY~o=9RDz>QUs^V=*Px)Kim>~(Zt;OYdkg6_degQfKN+@g>mbmza~2#a(&rjl*&GvB-M;0XCcwtZ_w% zQsS{?TI|KfHdYF~PKLP9|0S3yjN07ju~mvP3@W9DU1x*&8)h90K% z(!TUwIO`v_HHIuTUD7h%VT}r#oa^^C@1c>Mcjss-;2&v$nJI=dij8#bJmmqHmmt6@ 
zv9n3e+Z3}iLqoGYO$0YICor>2g4^?BRkUm@k1g~UpkEqg7V*m(9xB;LiA(8|Ld~0S z9^vNA?*~%5>tq<=$=D_yD5z|tr=VX7s=`Ip#%Hp*PjVo3uC@J`KFeazbolKoAPAny zn(u8sSlio#1MXQieC@-YLwypQLv%!zT%lFIc4g77xF+8tR^qWyv!?OvwZJEj)~&Wn z-Wz#r3g|sAKnv2+piylIh}@L%O!$CTSwUE^t&P>vYE545hJUQ7{?8H9ZiZ*wc5o#0 zOr9c(ig{3{&D|S%%(6}tQk_O}5#~Y7ZPs=xAKcxV$mk4w z2Z+x==AK55g&F$c3rv~7JzSwtPG}G2<8Gs=^^c;@8k~|yo7oC0d-ANbrRC42(7@E} z&FzY++H(CDTUU_+%PW9_UBB}DnN`gLgjXD`WoWD-Z;r?UjS|zr=;2`u$L1zd5pB7Z z)*pwpl*mREIaYt6WgvUtnHEZ{}0D0%P-KvWLSkr?aMsF89RLNzdBW{#l);}~~US)I)`BngwF^@xQ zXH*rc$%s#k{zgqU^orPa;ToR{U_ULhs`8MC%^*Kz|Dg0jB6vYgTO}T60$LE)? zP&?^I%^IQeE?(SPr_m>_cliz7$v%^>Jb&Iun_PHHQc!pPwFeXCs;RCJ^n#)I(}$KY z+)8vU9HmI-dvZB$7!P;-sU`(?m>E zyOTCVQWkG)w8kEeTSrBe!DDMKnZdQh)W`OkV3Vb#N9z$qMZ^BX&*DD;BBQA#4;9qC z$~O;j3NSw9?o~^jP(+>1E>MvOg?CfXE-uVxdm(PbizB--VY9S4LEL4YII9}?6KC|I zC>HI7Wlw33a$>>HGkweTwtJLnx0AIST|r%lUN6kNY>vJ(>2FbZ73!G0=)#N`?k`+W z6SY@7Kb(3x01%zct2s16BJ*5936jT7ZPoK2Xvh87L-y)ruF2-T;gl49%NO&lamQ8> zk7{Uy?d(Pa-HHPn+Y;dTRsQ5vdxRqZxTG1V(&{bAHg=AT)~(!pDcmR-ZIqq&zl-3w z@!m63_n|=&XK3A<19&h4+xscSv^?4_I>yEC?Xr~kUTGOKHSKP(jz!b51-e9x$rSJH zbN4t&roNS^%Y7KOtO0ApVTUxao)s~yNk+n2R>o!Va55?8AUG%X8~Vz#nqZ!M%YdFD z_jc1JvPUWh?sU)uR&IZdY8ssc%Qhhz+8?IgLGY?I^_kz;B3Nvv@vkr(ya_HcQS`^t z3ckaR{BfZoqIr9yK*LGZZ`_tkbYOI~W^-U*ds#^d$*XA7qNSC>Q=!-%n`SFJxzA!n zjy%fVEJMKEYqr??*sx{s5jpQr5%!z4VReNw!7H|w1vkB1vblct(=z(hxtPFy+nQKBm<+Swi~V%g$7dB+>#6tRb?_e$C}b-RGIzG zwspoy?VUlRzXr2=VBr}zKtkRc)PkGYL^-fnPPN|OwynX9ju^o<1%gZNRp*m}t)iuY zrwO-%T`+hmt|qcq+WOFVPQrL}1WVlfU^hI1y3o>B)+VYvPkvk9rDAh^3L4Y1;Km8{ z{xHfnY%z&>h+idndF$`WB_1FcZ*I~1Z$j1as1Ts_tYeR|0}fdwLH3Lx z8lIfIlf1w7B4n_mQK`qxWoTQGoTc^elbV=-&U^1R8bQ(W(Dp>RQL*uuB*?qI_Z6Lj z*J>dZ-4lGu8)d{F^o~?@LRFbsY-i6#7SH9g865VZ;PCV~g5nmR?+X5x;qC>ZBU$=~u#ny?8579g37iOwiqH4|B z7Z$>qaL%X;_<$h2QG39wZnZU5DR-)h!?rmxhASEANd zs`sDUxrTv*)%)DvV+vk4;E%@jB=W*%xr4UtZw{^Sx;u+@c{SPR@w=2`H4^rnVN-=A z^;8)BrcA907rmX*Nx12rh`qzPa(#3EA4?4eHGD094W;o_4FQWIyesD6wRC5ms@>K{ z-QI_t6C5$T=s31Nd`_8mCl)83+lgiGPups$o`K%@SU5~7fKS>mEHtApd>Gb?xGdRh z z>m|?m_9oB~lTZd9_EZ*0##toWYFpCyT=;% z8A0<*0obgL-~*?GCoh~rxuTyMOtOa~>kTivQ=db;pHnyf_}vtT67%1DR30kid3g!n zHAVvLff(`Y7m!9sw>DP|>Qc_si9rvJS}g~}@R1AJbdvYU$>;2kUzso=AB;|`cNlB+ zQF2(g@tx9%Ji3)5F3msJ^%1u@xT#pw!@|kfP8+G$XLkLk8+?xF{FyDx{7t}Al|KL( ze5eCoVM%mB-Ln{zHJvWGa6)l!c~vVmj+S;N@(rUfcK)Gj#h|UMXD1_9tL+WA+QsJv ze4^o$i5E?%cBWv=g~8biT6Aa3PyRHw>$Hk8k&INGjz;sS0 zGgm!nCX$slJQ}Q)xv%l#PLWg;g|L{efrqe1zH>*d1~yV3e^;E{sFW0OET}03d;a|f zkjNhDhspJ)KC*c2HlRFrP1f@$X)C${t`G8w!_kCPM{iMom61%zK0A|dtu1+r%Lylj z)c@R}SS!|zRGqgdDkW8;`+DQcy{eR>c^O`nnz>aBvT8$r>3Y!6&}LLnrjOn|xR}$M zZD$@%^U#48rn_|>0ad>cWnqVs6ixJ=8+$HT9dC5HQ`t(qR+!zTS;!yvh+oCY13b19 z*9C!-DAzZ`GrJfbfe#Q?Q{S5Q}*7Ad@fi0Z*#YKhp6g?*xjK8yq}`EKWrbF zs=6$T?QM>#UF%b5JObK9wd;#{6cHgZy}J^*OpL!e^;-rx+VW^_4E1DGj?Eoc&`Zq@ z)b4T{O#StsRZp8~$qD}h%v%$JdMYRd5XQSjmMbS=@tl6$r~RhN;@kv zJn*?X@A)n1adoqtr^9b)Xs-gB!vc8*G^?a(J6p5&S(ey0!*PwW4z6oy9Piv3!j(*s zqoF3tRA>4$G`zm#O8MHs(o#ue1Gvs2g%ZY9JYUW)1mWsFsXwxh59=0|sF*z;Bb)h_O?z)(st#0=A$|Ch3y>^jv?H898 ztf4g17B9lzmasXz2;#cSE@;6Mpomxl9?1T%DuUK}rlN@b_t>5lAwi?6HVQL<<-|xS zgt7iOP6Lt+WMxSmAQu+C`9?*Zz1#-&?tzPN4i>H@>FHP8@|2 zAGwJNQD{_}%{({loC6*S#e{`6?idZ0UgZVO*}T}3_c$St@~70S=nfSr?Oa8 zw~#hz5AW}15=*pOOdsHgrS)|=PVb%yq|my*a@uW+*YRzue5Yv_=cONrog-51kBf+1 z>FcFg(h)P~$(=*cqSr;dmUEc(oj)9$vl@jvI+*PCXT6(emgo$buv51AvAKDfxx2Tl z-Az#;H#akGt-AiQW|4AZ`oy(f+miaBRb;-=N0Y%3Pj55E$CrXdo6c_I5$OF7d`1vJ zia96EwwRVog}TfBU8_c9mb)pLK5xN6ZwgfQXVPid`I=k}D^wro`(H7auvdoGG*{n^d88K1)sIuJ;rZ$F9tU*(wr5dIR(luB% z_rQ6j*NgdRSwFEWd49<&?~UuvX}0)ZYzY zIa+gJ@rwWcr;(gt{li9mSoJ^f3oT!3P4l)6v@92pw@YR=2k`UAu^l#uqf#Y>&$G_7 
zLiv>V>4Z-o)Gb6SYHwA;$X1b}EDro>8*H8xb|@o5+Zo5F1^pZr^64gaYiLAW9r{Eo z-G$J8g|W@hChiV`W;#$Z&w*X=^im7~9h|%@<0&o=DRC3i(#Mz(M}@gku*4h(APcBh_cx zS&ZTP>+KFOuIho!d6cJc^LJx|Cmy(M;VD<#c5a-%O410=;2cg+vh18ltoP5M!mn-B z*V;3254giv|Hy=X!4GBid8x}$J$wwf=FrOu!oehja4nt=_6V(`eFRY~8Fq!Z^57j{{l)A={Q@ zC$Go&xLQpstpIE-`vhc1zH)++HlK>@Uew>58~{@w+jIz$`1pPsJslA?{J8LmXdLR#7$|K**#-^q@5ACtv-utYl^753B~PAbK$hx1exBm*uC_wdc==EcFG*48^~f|CO>f;Mnd zziT0Rl0hdLYFXJ`EgqNKsc*j%t1yJ`;lK?r^eykSUkUmmb1QKVM&c>HZKbCcZ;2( z)on2+&e&`5c1E&H|NmfYmU)3UAo-7v6=5IiT$=>x?kU5$1asBvuWR@R6U_7Oa%?8k zIli*4@)3%indx)poS1-0CHLj9wkslZei#E;EVlT zbVAQcl198l^M^-L$Km9Zu?AfmySJ3Xg%Tz?;PJM3T^p36k=iV&QB48)uiOc;RX%bF zayWxOEBE-V!bmFPI}u|BK*}uGtE$n+bTGwJH9@ID>&F?+rLo{(go-4h3p(F3+0fIN z-@ZEGf|+Up!vxa4zmPvQEBLC_bFl-2O;JUWAtb`BJXq1`12F5_l1CGcN(bM-c2$T2 z+tn#)g4W`~!-Ut_1arsba_w;gF(6aO69~ECOLslw#Fm%Q)n&sAExNOJJu7;#nUiqz zuV09eUNS)i=^Gz^7<2|6l+0KT+Pzl#0loH(huItxBAo&a?OW#NSZ3a^j){n1r zh7OO5(^RThe--i_@HC3wSwGhUY6M+W7HmeQJk0;7nH)q28e4p6I_y*PM5A9R|K-z# zS~oxu{833;c};N{CjJ@?P`bi_HSjp@p1ggWxdm7tTPLffEuh9|g)(f`Q}9qg(0XP` zAbre=VXxka5A+t~Z*Jk0Ej?Fb!hEH*I_0?C&ojn7z;GDz6-k=k)Mg;7B5X^nJv^O5 zc8sYM42qF#MF)fgLBXyV7z2`}VTG)M@|85Lsd`rT$6o&T@%8d>m`KS_)tk~%F-biu zJhWd$c_MS(JmysE+)?p2YY%69;9R`>02XmyY{7Z1du1g-V7>10$kaGUj~r^lpgUt@ zYm-hj;vZ#97MiQEyh9n2iUyTx2ALsjg`26AxW|mQwYQu0LD#LV^DG{3_BPJ4v|?aw z=y)#EE9T#?I4$G<`5+T>ZJ$lww{ACq2wB>xM|5f~M+n^)V}52)hB23@EYm?Yc{*;n zQLgZJcYfUSH#apmq-%Efx8OHvonQ=;8))5%j>Ednr#KlF3V z{u|?-t@8Tvl`z+dq@;#7CU&LcGbeyuGDim?oxR?%FKK2D^jABE-`%bpy1~HTL)m^F zZvWmM?zA+dZ%-|iSMXOc;e|P7Il~kFBtd6EqBPOajG-t_L8Eg;5IMn+XlL&|62$tW za%&#?#S`a^*%?-Z1l8~AvKQ(bbWwzx+;)@QMF*8)p! zi=CTF3$lZLcF9at9d1*f+mQX$HG!`-4@f)XVn56=l2<@x_UV!Yxq0P5U`VxITJrv{ zy2Bp@Euf?2dfRfDj?)i=W<8^XkR3iJSA%g*fdnVKxmn!LXG|p^73XqqPy=D)hF$6I zMpnR^&3;{aD*nJi18T4yuTZs|p#tvcyetM~tY2gos`SVF3N=fKpi`G)qkT2Mr?*IC zw*WcH*LDdHjnny+G5J&vk_a@`zMesxH(B@5tQri6p@@4u<0qtQ08M!1Qy5+Mk~yvw zoe53o+GM+@fyiDIvKz1T+RJe5;~Mc8LI4`OD7Y(6ds~e%-MI~geCY4;!dEkYhc2g= zj1RS_r&TjM^xK(#;D;f!C%P7Oy6(MuAPyBzv-m3U@G`Cu$BBUl*StGpi1+0*-_Iww z9Xp46Ry&>O(G~pIclXt?OjixkO?h0VDNC4&yWe0O*hrqqKNC#Bvo=4bv?ZVth82li zxqp0D6bFVz6VCz-PxeUyhhyu{@!r)K)e_jaHGf}HH77TB$_r}6&Rk&iJ>`B5WfDhL zw_E@+YFehQ)kwVmov4PUADttI-8&|YBd6f$h(K+?^3G*tq*`5|?lPD(HM?_gy4D& zshQI$gX*!1X|DrBdP~HtC@0>nCqR>BhbcS*CI77{G#*t*nUURKKfroRR2Jy4F|*Zs z9)Ox<#iE_Ly*8&*k94A`J$vf6Tq)SxSm{1}M$gQn@pV1u3Nl6q zYR~RhTjU74nmYXK^vJ4fy?MyA? 
[... base85-encoded GIT binary patch payload for the new binary file
cmx4mlops/cmx4mlops/repo/automation/script/assets/scripts-workflow.png
omitted here: the encoded data is not human-readable ...]

literal 0
HcmV?d00001

diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile
new file mode 100644
index 000000000..a93507dc2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile
@@ -0,0 +1,34 @@
+FROM ubuntu:23.04
+
+# Automatically generated by the CM workflow 
automation meta-framework +# https://github.com/mlcommons/ck + +LABEL github="" +LABEL maintainer="" +LABEL license="" + +SHELL ["/bin/bash", "-c"] + +ARG CM_GH_TOKEN +ARG CM_ADD_DOCKER_GROUP_ID="" + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ="US/Pacific" +ENV PATH="${PATH}:/home/cmuser/.local/bin" +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd ${CM_ADD_DOCKER_GROUP_ID} cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Install python packages +RUN python3 -m pip install --user cmind requests giturlparse tabulate --break-system-packages diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.build.bat b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.build.bat new file mode 100644 index 000000000..bd4ea665d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.build.bat @@ -0,0 +1 @@ +docker build -f "ubuntu-23.04.Dockerfile" -t "cknowledge/test-cm-script:ubuntu-23.04-cm-dev" . diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.build.sh b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.build.sh new file mode 100644 index 000000000..92ec69ba1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.build.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +docker build -f "ubuntu-23.04.Dockerfile" -t "cknowledge/test-cm-script:ubuntu-23.04-cm-dev" . diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.run.bat b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.run.bat new file mode 100644 index 000000000..c7c3fd198 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.run.bat @@ -0,0 +1 @@ +docker run -it --entrypoint "" cknowledge/test-cm-script:ubuntu-23.04-cm-dev bash diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.run.sh b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.run.sh new file mode 100644 index 000000000..69425443a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/docker_repro_example/ubuntu-23.04.Dockerfile.run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +docker run -it --entrypoint "" cknowledge/test-cm-script:ubuntu-23.04-cm-dev bash diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/module.py b/cmx4mlops/cmx4mlops/repo/automation/script/module.py new file mode 100644 index 000000000..b08875892 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/module.py @@ -0,0 +1,6587 @@ +# +# CM "script" automation helps users to encode their MLOps, DevOps and other knowledge +# as portable and reusable automation recipes with simple tags, native scripts +# and a unified CLI, Python API and JSON/YAML meta descriptions. 
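+#
+# Illustrative usage (an added note with a minimal sketch, not part of the
+# original header; it assumes the cmind package is installed and the
+# mlcommons@ck repository with this automation is pulled):
+#
+#   import cmind
+#
+#   # Run a CM script selected by tags and print its output to the console
+#   r = cmind.access({'action': 'run',
+#                     'automation': 'script',
+#                     'tags': 'detect,os',
+#                     'out': 'con'})
+#   if r['return'] > 0:
+#       cmind.error(r)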
+
+#
+# This is a stable prototype of the CM script automation being developed by Grigori Fursin and Arjun Suresh
+#
+# TBD: when we have bandwidth and resources, we should refactor it
+# and make it cleaner and simpler while keeping full backwards compatibility.
+#
+# Author: Grigori Fursin
+# Contributors: Arjun Suresh, Anandhu Sooraj
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+import logging
+
+from cmind.automation import Automation
+from cmind import utils
+from cmind import __version__ as current_cm_version
+
+
+class CAutomation(Automation):
+    """
+    CM "script" automation actions
+    (making native scripts more portable, deterministic, reusable and reproducible)
+    """
+
+    ############################################################
+    def __init__(self, cmind, automation_file):
+        super().__init__(cmind, __file__)
+        logging.basicConfig(level=logging.INFO)
+        self.os_info = {}
+        self.run_state = {}
+        self.run_state['deps'] = []
+        self.run_state['fake_deps'] = False
+        self.run_state['parent'] = None
+        self.run_state['version_info'] = []
+
+        self.file_with_cached_state = 'cm-cached-state.json'
+
+        self.tmp_file_env = 'tmp-env'
+        self.tmp_file_env_all = 'tmp-env-all'
+        self.tmp_file_run = 'tmp-run'
+        self.tmp_file_state = 'tmp-state.json'
+
+        self.tmp_file_run_state = 'tmp-run-state.json'
+        self.tmp_file_run_env = 'tmp-run-env.out'
+        self.tmp_file_ver = 'tmp-ver.out'
+
+        self.__version__ = "1.3.2"
+
+        self.local_env_keys = ['CM_VERSION',
+                               'CM_VERSION_MIN',
+                               'CM_VERSION_MAX',
+                               'CM_VERSION_MAX_USABLE',
+                               'CM_DETECTED_VERSION',
+                               'CM_INPUT',
+                               'CM_OUTPUT',
+                               'CM_OUTBASENAME',
+                               'CM_OUTDIRNAME',
+                               'CM_NAME',
+                               'CM_EXTRA_CACHE_TAGS',
+                               'CM_TMP_*',
+                               'CM_GIT_*',
+                               'CM_RENEW_CACHE_ENTRY']
+
+        self.input_flags_converted_to_tmp_env = ['path']
+
+        self.input_flags_converted_to_env = ['input',
+                                             'output',
+                                             'outdirname',
+                                             'outbasename',
+                                             'name',
+                                             'extra_cache_tags',
+                                             'skip_compile',
+                                             'skip_run',
+                                             'accept_license',
+                                             'skip_system_deps',
+                                             'git_ssh',
+                                             'gh_token']
+
+    ############################################################
+
+    def run(self, i):
+        """
+        Run CM script
+
+        Args:
+          (CM input dict):
+
+          (out) (str): if 'con', output to console
+
+          (artifact) (str): specify CM script (CM artifact) explicitly
+
+          (tags) (str): tags to find a CM script (CM artifact)
+
+          (env) (dict): global environment variables (can/will be updated by a given script and dependencies)
+          (const) (dict): constant environment variables (will be preserved and persistent for a given script and dependencies)
+
+          (state) (dict): global state dictionary (can/will be updated by a given script and dependencies)
+          (const_state) (dict): constant state (will be preserved and persistent for a given script and dependencies)
+
+          (add_deps) (dict): {"name": {"tag": "tag(s)"}, "name": {"version": "version_no"}, ...}
+          (add_deps_recursive) (dict): same as add_deps but is passed recursively onto dependencies as well
+
+          (version) (str): version to be added to env.CM_VERSION to specialize this flow
+          (version_min) (str): min version to be added to env.CM_VERSION_MIN to specialize this flow
+          (version_max) (str): max 
version to be added to env.CM_VERSION_MAX to specialize this flow + (version_max_usable) (str): max USABLE version to be added to env.CM_VERSION_MAX_USABLE + + (path) (str): list of paths to be added to env.CM_TMP_PATH to specialize this flow + + (input) (str): converted to env.CM_INPUT (local env) + (output) (str): converted to env.CM_OUTPUT (local env) + + (outbasename) (str): converted to env.CM_OUTBASENAME (local env) + (outdirname) (str): converted to env.CM_OUTDIRNAME (local env) + + (extra_cache_tags) (str): converted to env.CM_EXTRA_CACHE_TAGS and used to add to caching (local env) + + (name) (str): taken from env.CM_NAME and/or converted to env.CM_NAME (local env) + Added to extra_cache_tags with "name-" prefix . + Useful for python virtual env (to create multiple entries) + + (quiet) (bool): if True, set env.CM_QUIET to "yes" and attempt to skip questions + (the developers have to support it in pre/post processing and scripts) + + (skip_cache) (bool): if True, skip caching and run in current directory + (force_cache) (bool): if True, force caching if can_force_cache=true in script meta + + (skip_remembered_selections) (bool): if True, skip remembered selections + (uses or sets env.CM_TMP_SKIP_REMEMBERED_SELECTIONS to "yes") + + (new) (bool): if True, skip search for cached and run again + (renew) (bool): if True, rewrite cache entry if exists + + (dirty) (bool): if True, do not clean files + + (save_env) (bool): if True, save env and state to tmp-env.sh/bat and tmp-state.json + (shell) (bool): if True, save env with cmd/bash and run it + + (recursion) (bool): True if recursive call. + Useful when preparing the global bat file or Docker container + to save/run it in the end. + + (recursion_spaces) (str, internal): adding ' ' during recursion for debugging + + (remembered_selections) (list): remember selections of cached outputs + + (print_env) (bool): if True, print aggregated env before each run of a native script + + (fake_run) (bool): if True, will run the dependent scripts but will skip the main run script + (prepare) (bool): the same as fake_run + (fake_deps) (bool): if True, will fake run the dependent scripts + (run_state) (dict): Internal run state + + (debug_script_tags) (str): if !='', run cmd/bash before executing a native command + inside a script specified by these tags + + (debug_script) (bool): if True, debug current script (set debug_script_tags to the tags of a current script) + (debug_uid) (str): if True, set CM_TMP_DEBUG_UID to this number to enable + remote python debugging of scripts and wrapped apps/tools + (detected_versions) (dict): All the used scripts and their detected_versions + + (verbose) (bool): if True, prints all tech. info about script execution (False by default) + (v) (bool): the same as verbose + + (time) (bool): if True, print script execution time (or if verbose == True) + (space) (bool): if True, print used disk space for this script (or if verbose == True) + + (ignore_script_error) (bool): if True, ignore error code in native tools and scripts + and finish a given CM script. 
Useful to test/debug partial installations
+
+          (json) (bool): if True, print output as JSON
+          (j) (bool): if True, print output as JSON
+
+          (pause) (bool): if True, pause at the end of the main script (Press Enter to continue)
+
+          (repro) (bool): if True, dump cm-run-script-input.json, cm-run-script-output.json,
+                          cm-run-script-state.json, cm-run-script-info.json
+                          to improve the reproducibility of results
+
+          (repro_prefix) (str): if !='', use it to record above files {repro_prefix}-input.json ...
+          (repro_dir) (str): if !='', use this directory to dump info (default = 'cm-repro')
+
+          (dump_version_info) (bool): dump info about resolved versions of tools in dependencies
+
+          (print_deps) (bool): if True, will print the CM run commands of the direct dependent scripts
+
+          (print_readme) (bool): if True, will print README with all CM steps (deps) to run a given script
+
+          (script_call_prefix) (str): how to call script in logs and READMEs (cm run script)
+
+          (skip_sys_utils) (bool): if True, set env['CM_SKIP_SYS_UTILS']='yes'
+                                   to skip CM sys installation
+          (skip_sudo) (bool): if True, set env['CM_TMP_SKIP_SUDO']='yes'
+                              to let scripts deal with that
+
+          (silent) (bool): if True, attempt to suppress all info if supported
+                           (sets CM_TMP_SILENT=yes)
+          (s) (bool): the same as 'silent'
+          ...
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+          * (skipped) (bool): if true, this script was skipped
+
+          * new_env (dict): new environment (delta from a collective script)
+          * new_state (dict): new state (delta from a collective script)
+
+          * env (dict): global env (updated by this script - includes new_env)
+          * state (dict): global state (updated by this script - includes new_state)
+
+        """
+
+        r = self._run(i)
+
+        return r
+
+    ############################################################
+
+    def _run(self, i):
+
+        from cmind import utils
+        import copy
+        import time
+        import shutil
+
+        # Check if we should save input/output to file
+        repro = i.get('repro', False)
+        repro_prefix = ''
+
+        if repro:
+            repro_prefix = i.get('repro_prefix', '')
+            if repro_prefix == '':
+                repro_prefix = 'cm-run-script'
+
+            repro_dir = i.get('repro_dir', '')
+            if repro_dir == '':
+                repro_dir = os.path.join(os.getcwd(), 'cm-repro')
+                if not os.path.isdir(repro_dir):
+                    os.makedirs(repro_dir)
+
+            repro_prefix = os.path.join(repro_dir, repro_prefix)
+
+        if repro_prefix != '':
+            dump_repro_start(repro_prefix, i)
+
+        recursion = i.get('recursion', False)
+
+        # If this is the first script run, check if we can write to the current directory
+        if not recursion and not i.get('skip_write_test', False):
+            if not can_write_to_current_directory():
+                return {
+                    'return': 1, 'error': 'Current directory "{}" is not writable - please change it'.format(os.getcwd())}
+
+        # Check if there is a default config
+        r = self.cmind.access({'action': 'load',
+                               'automation': 'cfg,88dce9c160324c5d',
+                               'artifact': 'default'})
+        if r['return'] == 0:
+            config = r['config']
+
+            script_input = config.get('script', {})
+
+            if len(script_input) > 0:
+                utils.merge_dicts({'dict1': i, 'dict2': script_input})
+
+        recursion_int = int(i.get('recursion_int', 0)) + 1
+
+        start_time = time.time()
+
+        # Check extra input from environment variable CM_SCRIPT_EXTRA_CMD
+        # Useful to set up default flags such as the name of the virtual environment
+        extra_cli = os.environ.get('CM_SCRIPT_EXTRA_CMD', '').strip()
+        if extra_cli != '':
+            from cmind import cli
+            r = cli.parse(extra_cli)
+            if r['return'] > 0:
+                return r
+
+            cm_input = 
r['cm_input'] + + utils.merge_dicts({'dict1': i, + 'dict2': cm_input, + 'append_lists': True, + 'append_unique': True}) + + # Check simplified CMD: cm run script "get compiler" + # If artifact has spaces, treat them as tags! + artifact = i.get('artifact', '') + if ' ' in artifact: # or ',' in artifact: + del (i['artifact']) + if 'parsed_artifact' in i: + del (i['parsed_artifact']) + # Force substitute tags + i['tags'] = artifact.replace(' ', ',') + + # Check if has extra tags as a second artifact + # Example: cmr . "_python _tiny" + + parsed_artifacts = i.get('parsed_artifacts', []) + if len(parsed_artifacts) > 0: + extra_tags = parsed_artifacts[0][0][0] + if ' ' in extra_tags or ',' in extra_tags: + # Add tags + x = i.get('tags', '') + if x != '': + x += ',' + i['tags'] = x + extra_tags.replace(' ', ',') + + # Recursion spaces needed to format log and print + recursion_spaces = i.get('recursion_spaces', '') + # Caching selections to avoid asking users again + remembered_selections = i.get('remembered_selections', []) + + # Get current env and state before running this script and sub-scripts + env = i.get('env', {}) + state = i.get('state', {}) + const = i.get('const', {}) + const_state = i.get('const_state', {}) + + # Save current env and state to detect new env and state after running + # a given script + saved_env = copy.deepcopy(env) + saved_state = copy.deepcopy(state) + + for key in ["env", "state", "const", "const_state"]: + if i.get("local_" + key): + if not i.get(key, {}): + i[key] = {} + utils.merge_dicts({'dict1': i[key], + 'dict2': i['local_' + key], + 'append_lists': True, + 'append_unique': True}) + + add_deps = i.get('ad', {}) + if not add_deps: + add_deps = i.get('add_deps', {}) + else: + utils.merge_dicts({'dict1': add_deps, 'dict2': i.get( + 'add_deps', {}), 'append_lists': True, 'append_unique': True}) + + add_deps_recursive = i.get('adr', {}) + if not add_deps_recursive: + add_deps_recursive = i.get('add_deps_recursive', {}) + else: + utils.merge_dicts({'dict1': add_deps_recursive, 'dict2': i.get( + 'add_deps_recursive', {}), 'append_lists': True, 'append_unique': True}) + + save_env = i.get('save_env', False) + + print_env = i.get('print_env', False) + + show_time = i.get('time', False) + show_space = i.get('space', False) + + if not recursion and show_space: + start_disk_stats = shutil.disk_usage("/") + + extra_recursion_spaces = ' ' # if verbose else '' + + skip_cache = i.get('skip_cache', False) + force_cache = i.get('force_cache', False) + + fake_run = i.get('fake_run', False) + fake_run = i.get( + 'fake_run', + False) if 'fake_run' in i else i.get( + 'prepare', + False) + if fake_run: + env['CM_TMP_FAKE_RUN'] = 'yes' + + debug_uid = i.get('debug_uid', '') + if debug_uid != '': + r = _update_env(env, 'CM_TMP_DEBUG_UID', debug_uid) + if r['return'] > 0: + return r + + fake_deps = i.get('fake_deps', False) + if fake_deps: + env['CM_TMP_FAKE_DEPS'] = 'yes' + + if str(i.get('skip_sys_utils', '')).lower() in ['true', 'yes']: + env['CM_SKIP_SYS_UTILS'] = 'yes' + if str(i.get('skip_sudo', '')).lower() in ['true', 'yes']: + env['CM_TMP_SKIP_SUDO'] = 'yes' + + run_state = i.get('run_state', self.run_state) + if not run_state.get('version_info', []): + run_state['version_info'] = [] + if run_state.get('parent', '') == '': + run_state['parent'] = None + if fake_deps: + run_state['fake_deps'] = True + + # Check verbose and silent + verbose = False + + silent = True if str(i.get('silent', '')).lower() in [ + 'true', 'yes', 'on'] else False + + if not silent: + silent = True if 
str(i.get('s', '')).lower() in [ + 'true', 'yes', 'on'] else False + + if silent: + if 'verbose' in i: + del (i['verbose']) + if 'v' in i: + del (i['v']) + env['CM_TMP_SILENT'] = 'yes' + run_state['tmp_silent'] = True + + if 'verbose' in i: + verbose = i['verbose'] + elif 'v' in i: + verbose = i['v'] + + if verbose: + env['CM_VERBOSE'] = 'yes' + run_state['tmp_verbose'] = True + logging.getLogger().setLevel(logging.DEBUG) + + print_deps = i.get('print_deps', False) + print_versions = i.get('print_versions', False) + print_readme = i.get('print_readme', False) + dump_version_info = i.get('dump_version_info', False) + + new_cache_entry = i.get('new', False) + renew = i.get('renew', False) + + cmd = i.get('cmd', '') + # Capturing the input command if it is coming from an access function + if not cmd and 'cmd' in i.get('input', ''): + i['cmd'] = i['input']['cmd'] + cmd = i['cmd'] + + debug_script_tags = i.get('debug_script_tags', '') + + detected_versions = i.get('detected_versions', {}) + + ignore_script_error = i.get('ignore_script_error', False) + + # Detect current path and record in env for further use in native + # scripts + current_path = os.path.abspath(os.getcwd()) + r = _update_env(env, 'CM_TMP_CURRENT_PATH', current_path) + if r['return'] > 0: + return r + + # Check if quiet mode + quiet = i.get( + 'quiet', + False) if 'quiet' in i else ( + env.get( + 'CM_QUIET', + '').lower() == 'yes') + if quiet: + env['CM_QUIET'] = 'yes' + + skip_remembered_selections = i.get('skip_remembered_selections', False) if 'skip_remembered_selections' in i \ + else (env.get('CM_SKIP_REMEMBERED_SELECTIONS', '').lower() == 'yes') + if skip_remembered_selections: + env['CM_SKIP_REMEMBERED_SELECTIONS'] = 'yes' + + # Prepare debug info + parsed_script = i.get('parsed_artifact') + parsed_script_alias = parsed_script[0][0] if parsed_script is not None else '' + + # Get and cache minimal host OS info to be able to run scripts and + # manage OS environment + if len(self.os_info) == 0: + r = self.cmind.access({'action': 'get_host_os_info', + 'automation': 'utils,dc2743f8450541e3'}) + if r['return'] > 0: + return r + + self.os_info = r['info'] + + os_info = self.os_info + + # Bat extension for this host OS + bat_ext = os_info['bat_ext'] + + # Add permanent env from OS (such as CM_WINDOWS:"yes" on Windows) + env_from_os_info = os_info.get('env', {}) + if len(env_from_os_info) > 0: + env.update(env_from_os_info) + + # take some env from the user environment + keys = [ + "GH_TOKEN", + "ftp_proxy", + "FTP_PROXY", + "http_proxy", + "HTTP_PROXY", + "https_proxy", + "HTTPS_PROXY", + "no_proxy", + "NO_PROXY", + "socks_proxy", + "SOCKS_PROXY"] + for key in keys: + if os.environ.get(key, '') != '' and env.get(key, '') == '': + env[key] = os.environ[key] + + # Check path/input/output in input and pass to env + for key in self.input_flags_converted_to_tmp_env: + value = i.get(key, '').strip() + if value != '': + env['CM_TMP_' + key.upper()] = value + + for key in self.input_flags_converted_to_env: + value = i.get( + key, + '').strip() if isinstance( + i.get( + key, + ''), + str) else i.get( + key, + '') + if value: + env[f"CM_{key.upper()}"] = value + + r = update_env_with_values(env) + if r['return'] > 0: + return r + + ####################################################################### + # Check if we want to skip cache (either by skip_cache or by fake_run) + force_skip_cache = True if skip_cache else False + force_skip_cache = True if fake_run else force_skip_cache + + 
#######################################################################
+        # Find CM script(s) based on their tags and variations to get their meta and customize this workflow.
+        # We will need to decide how to select if more than 1 (such as "get compiler")
+        #
+        # Note: this local search function will separate tags and variations
+        #
+        # STEP 100 Input: Search scripts by i['tags'] (includes variations starting from _) and/or i['parsed_artifact']
+        #                 tags_string = i['tags']
+
+        tags_string = i.get('tags', '').strip()
+
+        ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'])
+
+        ii['tags'] = tags_string
+        ii['out'] = None
+
+        # if cm run script is called without tags/artifact and with --help
+        if len(ii.get('parsed_artifact', [])) == 0 and ii.get(
+                'tags', '') == '' and i.get('help', False):
+            return utils.call_internal_module(
+                self, __file__, 'module_help', 'print_help', {'meta': {}, 'path': ''})
+
+        r = self.search(ii)
+        if r['return'] > 0:
+            return r
+
+        # The search function will return the following:
+
+        list_of_found_scripts = r['list']
+
+        script_tags = r['script_tags']
+        script_tags_string = ','.join(script_tags)
+
+        variation_tags = r['variation_tags']
+
+#        # Print what was searched!
+#        cm_script_info = 'CM script'
+#
+#        x = 'with'
+#        if parsed_script_alias !='' :
+#            cm_script_info += ' '+x+' alias "{}"'.format(parsed_script_alias)
+#            x = 'and'
+#
+#        if len(script_tags)>0:
+#            cm_script_info += ' '+x+' tags "{}"'.format(script_tags_string.replace(',',' '))
+#            x = 'and'
+#
+#        if len(variation_tags)>0:
+#            x_variation_tags = ['_'+v for v in variation_tags]
+#            cm_script_info += ' '+x+' variations "{}"'.format(" ".join(x_variation_tags))
+#
+#        if verbose:
+#            logging.info('')
+#            logging.info(recursion_spaces + '* Searching for ' + cm_script_info)
+#        else:
+#            logging.info(recursion_spaces + '* Running ' + cm_script_info)
+
+        cm_script_info = i.get('script_call_prefix', '').strip()
+        if cm_script_info == '':
+            cm_script_info = 'cm run script'
+        if not cm_script_info.endswith(' '):
+            cm_script_info += ' '
+
+        x = '"'
+        y = ' '
+        if parsed_script_alias != '':
+            cm_script_info += parsed_script_alias
+            x = ' --tags="'
+            y = ','
+
+        if len(script_tags) > 0 or len(variation_tags) > 0:
+            cm_script_info += x
+
+            if len(script_tags) > 0:
+                cm_script_info += script_tags_string.replace(',', y)
+
+            if len(variation_tags) > 0:
+                if len(script_tags) > 0:
+                    cm_script_info += ' '
+
+                x_variation_tags = ['_' + v for v in variation_tags]
+                cm_script_info += y.join(x_variation_tags)
+
+            cm_script_info += '"'
+
+#        if verbose:
+#            logging.info('')
+
+        if not run_state.get('tmp_silent', False):
+            logging.info(recursion_spaces + '* ' + cm_script_info)
+
+        #######################################################################
+        # Report if scripts were not found or there is an ambiguity with UIDs
+        if not r['found_scripts']:
+            return {
+                'return': 1, 'error': 'no scripts were found with above tags (when variations ignored)'}
+
+        if len(list_of_found_scripts) == 0:
+            return {
+                'return': 16, 'error': 'no scripts were found with above tags and variations\n' + r.get('warning', '')}
+
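+        # Added note (illustrative, hypothetical tags): a search string such
+        # as "app,image,_blas" is split by the search above into
+        #   script_tags    = ['app', 'image']
+        #   variation_tags = ['blas']   # variation tags carry a "_" prefix
+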
+        # Sometimes there is an ambiguity when someone adds a script
+        # while duplicating a UID. In such a case, we will return >1 script
+        # and will start searching in the cache ...
+        # We are detecting such cases here:
+        if len(list_of_found_scripts) > 1 and script_tags_string == '' and parsed_script_alias != '' and '?' not in parsed_script_alias and '*' not in parsed_script_alias:
+            x = 'Ambiguity: the following scripts have the same UID - please change that in _cm.json or _cm.yaml:\n'
+            for y in list_of_found_scripts:
+                x += ' * ' + y.path + '\n'
+
+            return {'return': 1, 'error': x}
+
+        # STEP 100 Output: list_of_found_scripts based on tags (with variations) and/or parsed_artifact
+        #                  script_tags [] - contains tags without variations (starting from _ such as _cuda)
+        #                  variation_tags [] - contains only variation tags (without _)
+        #                  script_tags_string [str] (joined script_tags)
+
+        #######################################################################
+        # Sort scripts for better determinism
+        list_of_found_scripts = sorted(list_of_found_scripts, key=lambda a: (a.meta.get('sort', 0),
+                                                                             a.path))
+        logging.debug(recursion_spaces +
+                      ' - Number of scripts found: {}'.format(len(list_of_found_scripts)))
+
+        # Check if script selection is remembered
+        if not skip_remembered_selections and len(list_of_found_scripts) > 1:
+            for selection in remembered_selections:
+                if selection['type'] == 'script' and set(
+                        selection['tags'].split(',')) == set(script_tags_string.split(',')):
+                    # Leave 1 entry in the found list
+                    list_of_found_scripts = [selection['cached_script']]
+                    logging.debug(
+                        recursion_spaces +
+                        ' - Found remembered selection with tags: {}'.format(script_tags_string))
+                    break
+
+        # STEP 200 Output: potentially pruned list_of_found_scripts if
+        # a selection of multiple scripts was remembered
+
+        # STEP 300: If more than one CM script found (example: "get compiler"),
+        #           first, check if selection was already remembered!
+        #           second, check in cache to prune scripts
+
+        # STEP 300 input: list_of_found_scripts
+
+        select_script = 0
+
+        # If 1 script is found and script_tags == '', pick the tags from its meta
+        if script_tags_string == '' and len(list_of_found_scripts) == 1:
+            script_tags_string = ','.join(
+                list_of_found_scripts[0].meta.get('tags', []))
+
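+        # Added note (illustrative, hypothetical values): for
+        # script_tags_string == 'get,compiler' and variation_tags == ['llvm'],
+        # the cache query assembled below becomes
+        #   cache_tags_without_tmp_string == '-tmp,get,compiler,_llvm'
+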
+        # Found 1 or more scripts. Scan cache tags to find at least one with
+        # cache==True
+        preload_cached_scripts = False
+        for script in list_of_found_scripts:
+            if script.meta.get('cache', False) == True or (
+                    script.meta.get('can_force_cache', False) and force_cache):
+                preload_cached_scripts = True
+                break
+
+        # STEP 300 Output: preload_cached_scripts = True if at least one of the
+        # list_of_found_scripts must be cached
+
+        # STEP 400: If not force_skip_cache and at least one script can be cached, find (preload) related cache entries for found scripts
+        # STEP 400 input: script_tags and -tmp (to avoid unfinished scripts,
+        # particularly when installation fails)
+
+        cache_list = []
+
+        if not force_skip_cache and preload_cached_scripts:
+            cache_tags_without_tmp_string = '-tmp'
+            if script_tags_string != '':
+                cache_tags_without_tmp_string += ',' + script_tags_string
+            if variation_tags:
+                cache_tags_without_tmp_string += ',_' + \
+                    ",_".join(variation_tags)
+            # variation_tags are prefixed with "_" but the CM search function knows only tags, so we need to change "_-" to "-_" for excluding any variations
+            # This change can later be moved to a search function specific to
+            # cache
+            cache_tags_without_tmp_string = cache_tags_without_tmp_string.replace(
+                ",_-", ",-_")
+
+            logging.debug(
+                recursion_spaces +
+                ' - Searching for cached script outputs with the following tags: {}'.format(cache_tags_without_tmp_string))
+
+            search_cache = {'action': 'find',
+                            'automation': self.meta['deps']['cache'],
+                            'tags': cache_tags_without_tmp_string}
+            rc = self.cmind.access(search_cache)
+            if rc['return'] > 0:
+                return rc
+
+            cache_list = rc['list']
+
+            logging.debug(
+                recursion_spaces +
+                ' - Number of cached script outputs found: {}'.format(
+                    len(cache_list)))
+
+            # STEP 400 output: cache_list
+
+        # STEP 500: At this stage we have cache_list related to either 1 or more scripts (in case of get,compiler)
+        #           If more than 1: Check if in cache and reuse it or ask user to select
+        # STEP 500 input: list_of_found_scripts
+
+        if len(list_of_found_scripts) > 0:
+            # If only tags are used, check if there are no cached scripts with tags - then we will reuse them
+            # The use case: cm run script --tags=get,compiler
+            # CM script will always ask to select gcc,llvm,etc even if any of
+            # them will be already cached
+            if len(cache_list) > 0:
+                new_list_of_found_scripts = []
+
+                for cache_entry in cache_list:
+                    # Find the associated script and add it to the
+                    # list_of_found_scripts
+                    associated_script_artifact = cache_entry.meta['associated_script_artifact']
+
+                    x = associated_script_artifact.find(',')
+                    if x < 0:
+                        return {'return': 1, 'error': 'CM artifact format is wrong "{}" - no comma found'.format(
+                            associated_script_artifact)}
+
+                    associated_script_artifact_uid = associated_script_artifact[x + 1:]
+
+                    cache_entry.meta['associated_script_artifact_uid'] = associated_script_artifact_uid
+
+                    for script in list_of_found_scripts:
+                        script_uid = script.meta['uid']
+
+                        if associated_script_artifact_uid == script_uid:
+                            if script not in new_list_of_found_scripts:
+                                new_list_of_found_scripts.append(script)
+
+                # Avoid the case when all scripts are pruned due to just 1
+                # variation used
+                if len(new_list_of_found_scripts) > 0:
+                    list_of_found_scripts = new_list_of_found_scripts
+
+            # Select scripts
+            if len(list_of_found_scripts) > 1:
+                select_script = select_script_artifact(
+                    list_of_found_scripts,
+                    'script',
+                    recursion_spaces,
+                    False,
+                    script_tags_string,
+                    quiet,
+                    verbose)
+
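+                # Added note: each remembered selection appended below is a
+                # dict of the shape (illustrative, hypothetical tags)
+                #   {'type': 'script', 'tags': 'get,compiler',
+                #    'cached_script': <entry of list_of_found_scripts>}
+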
+                # Remember selection
+                if not skip_remembered_selections:
+                    remembered_selections.append({'type': 'script',
+                                                  'tags': script_tags_string,
+                                                  'cached_script': list_of_found_scripts[select_script]})
+            else:
+                select_script = 0
+
+        # Prune cache list with the selected script
+        if len(list_of_found_scripts) > 0:
+            script_artifact_uid = list_of_found_scripts[select_script].meta['uid']
+
+            new_cache_list = []
+            for cache_entry in cache_list:
+                if cache_entry.meta['associated_script_artifact_uid'] == script_artifact_uid:
+                    new_cache_list.append(cache_entry)
+
+            cache_list = new_cache_list
+
+        # Here a specific script is found and its meta obtained
+        # Set some useful local variables
+        script_artifact = list_of_found_scripts[select_script]
+
+        meta = script_artifact.meta
+        path = script_artifact.path
+
+        # Check min CM version requirement
+        min_cm_version = meta.get('min_cm_version', '').strip()
+        if min_cm_version != '':
+            # Compare versions while avoiding crashes with older CM versions
+            if 'compare_versions' in dir(utils):
+                comparison = utils.compare_versions(
+                    current_cm_version, min_cm_version)
+                if comparison < 0:
+                    return {'return': 1, 'error': 'CM script requires CM version >= {} while current CM version is {} - please update using "pip install cmind -U"'.format(
+                        min_cm_version, current_cm_version)}
+
+        # Check path to repo
+        script_repo_path = script_artifact.repo_path
+
+        script_repo_path_with_prefix = script_artifact.repo_path
+        if script_artifact.repo_meta.get('prefix', '') != '':
+            script_repo_path_with_prefix = os.path.join(
+                script_repo_path, script_artifact.repo_meta['prefix'])
+
+        env['CM_TMP_CURRENT_SCRIPT_REPO_PATH'] = script_repo_path
+        env['CM_TMP_CURRENT_SCRIPT_REPO_PATH_WITH_PREFIX'] = script_repo_path_with_prefix
+
+        # Check if has --help
+        if i.get('help', False):
+            return utils.call_internal_module(self, __file__, 'module_help', 'print_help', {
+                                              'meta': meta, 'path': path})
+
+        run_state['script_id'] = meta['alias'] + "," + meta['uid']
+        run_state['script_tags'] = script_tags
+        run_state['script_variation_tags'] = variation_tags
+        run_state['script_repo_alias'] = script_artifact.repo_meta.get(
+            'alias', '')
+        run_state['script_repo_git'] = script_artifact.repo_meta.get(
+            'git', False)
+
+        if not recursion:
+            run_state['script_entry_repo_to_report_errors'] = meta.get(
+                'repo_to_report_errors', '')
+            run_state['script_entry_repo_alias'] = script_artifact.repo_meta.get(
+                'alias', '')
+            run_state['script_entry_repo_git'] = script_artifact.repo_meta.get(
+                'git', False)
+
+        deps = meta.get('deps', [])
+        post_deps = meta.get('post_deps', [])
+        prehook_deps = meta.get('prehook_deps', [])
+        posthook_deps = meta.get('posthook_deps', [])
+        input_mapping = meta.get('input_mapping', {})
+        docker_settings = meta.get('docker')
+        docker_input_mapping = {}
+        if docker_settings:
+            docker_input_mapping = docker_settings.get(
+                'docker_input_mapping', {})
+        new_env_keys_from_meta = meta.get('new_env_keys', [])
+        new_state_keys_from_meta = meta.get('new_state_keys', [])
+
+        found_script_artifact = utils.assemble_cm_object(
+            meta['alias'], meta['uid'])
+
+        found_script_tags = meta.get('tags', [])
+
+        if i.get('debug_script', False):
+            debug_script_tags = ','.join(found_script_tags)
+
+        logging.debug(recursion_spaces +
+                      ' - Found script::{} in {}'.format(found_script_artifact, path))
+
+        # STEP 500 output: script_artifact - unique selected script artifact
+        #                  (cache_list) pruned for the unique script if cache is used
+        #                  meta - script meta
+        #                  path - script path
+        #                  found_script_tags [] - all tags of the found script
+
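+        # Added note: illustrative fragment of a script meta (_cm.yaml) feeding
+        # the variables above (field names are the ones read here, values are
+        # hypothetical):
+        #
+        #   alias: get-ml-model-example
+        #   uid: 0123456789abcdef
+        #   tags: [get, ml-model, example]
+        #   deps: [{tags: "detect,os"}]
+        #   input_mapping: {model_path: CM_ML_MODEL_PATH}
+        #   new_env_keys: ['CM_ML_MODEL_*']
+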
ENV + + # STEP 600: Continue updating env + # Add default env from meta to new env if not empty + # (env NO OVERWRITE) + script_artifact_default_env = meta.get('default_env', {}) + for key in script_artifact_default_env: + env.setdefault(key, script_artifact_default_env[key]) + + # Force env from meta['env'] as a CONST + # (env OVERWRITE) + script_artifact_env = meta.get('env', {}) + env.update(script_artifact_env) + + script_artifact_state = meta.get('state', {}) + utils.merge_dicts({'dict1': state, + 'dict2': script_artifact_state, + 'append_lists': True, + 'append_unique': True}) + + # Store the default_version in run_state -> may be overridden by + # variations + default_version = meta.get( + 'default_version', + '') # not used if version is given + run_state['default_version'] = default_version + + # STEP 700: Overwrite env with keys from the script input (to allow user friendly CLI) + # IT HAS THE PRIORITY OVER meta['default_env'] and meta['env'] but not over the meta from versions/variations + # (env OVERWRITE - user enforces it from CLI) + # (it becomes const) + if input_mapping: + update_env_from_input_mapping(env, i, input_mapping) + update_env_from_input_mapping(const, i, input_mapping) + + # This mapping is done in module_misc + # if docker_input_mapping: + # update_env_from_input_mapping(env, i, docker_input_mapping) + # update_env_from_input_mapping(const, i, docker_input_mapping) + + # Update env/state with cost + env.update(const) + utils.merge_dicts({'dict1': state, + 'dict2': const_state, + 'append_lists': True, + 'append_unique': True}) + + # STEP 800: Process variations and update env (overwrite from env and update form default_env) + # VARIATIONS HAS THE PRIORITY OVER + # MULTIPLE VARIATIONS (THAT CAN BE TURNED ON AT THE SAME TIME) SHOULD + # NOT HAVE CONFLICTING ENV + + # VARIATIONS OVERWRITE current ENV but not input keys (they become + # const) + + variations = script_artifact.meta.get('variations', {}) + state['docker'] = meta.get('docker', {}) + + r = self._update_state_from_variations( + i, + meta, + variation_tags, + variations, + env, + state, + const, + const_state, + deps, + post_deps, + prehook_deps, + posthook_deps, + new_env_keys_from_meta, + new_state_keys_from_meta, + add_deps_recursive, + run_state, + recursion_spaces, + verbose) + if r['return'] > 0: + return r + + warnings = meta.get('warnings', []) + if len(r.get('warnings', [])) > 0: + warnings += r['warnings'] + + variation_tags_string = r['variation_tags_string'] + explicit_variation_tags = r['explicit_variation_tags'] + + # USE CASE: + # HERE we may have versions in script input and env['CM_VERSION_*'] + + # STEP 900: Get version, min, max, usable from env (priority if passed from another script to force version), + # then script input, then script meta + + # VERSIONS SHOULD NOT BE USED INSIDE VARIATIONS (in meta)! 
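+        # A minimal sketch of the three-level resolution implemented below
+        # (hypothetical values): an explicit script input wins over env,
+        # which wins over the script meta, e.g.
+        #
+        #   i = {'version_min': ''}            # nothing forced via CLI
+        #   env = {'CM_VERSION_MIN': '3.8'}    # forced by a calling script
+        #   meta = {'version_min': '3.6'}      # default from the script meta
+        #
+        #   version_min = i.get('version_min', '') or \
+        #       env.get('CM_VERSION_MIN', '') or \
+        #       meta.get('version_min', '')     # -> '3.8'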
+ + # First, take version from input + version = i.get('version', '').strip() + version_min = i.get('version_min', '').strip() + version_max = i.get('version_max', '').strip() + version_max_usable = i.get('version_max_usable', '').strip() + + # Second, take from env + if version == '': + version = env.get('CM_VERSION', '') + if version_min == '': + version_min = env.get('CM_VERSION_MIN', '') + if version_max == '': + version_max = env.get('CM_VERSION_MAX', '') + if version_max_usable == '': + version_max_usable = env.get( + 'CM_VERSION_MAX_USABLE', '') + + # Third, take from meta + if version == '': + version = meta.get('version', '') + if version_min == '': + version_min = meta.get('version_min', '') + if version_max == '': + version_max = meta.get('version_max', '') + if version_max_usable == '': + version_max_usable = meta.get( + 'version_max_usable', '') + + # Update env with resolved versions + notes = [] + for version_index in [(version, 'CM_VERSION', ' == {}'), + (version_min, 'CM_VERSION_MIN', ' >= {}'), + (version_max, 'CM_VERSION_MAX', ' <= {}'), + (version_max_usable, 'CM_VERSION_MAX_USABLE', '({})')]: + version_value = version_index[0] + key = version_index[1] + note = version_index[2] + + if version_value != '': + env[key] = version_value + + notes.append(note.format(version_value)) +# elif key in env: +# # If version_X is "", remove related key from ENV ... +# del(env[key]) + + if len(notes) > 0: + logging.debug( + recursion_spaces + + ' - Requested version: ' + + ' '.join(notes)) + + # STEP 900 output: version* set + # env['CM_VERSION*] set + + # STEP 1000: Update version only if in "versions" (not obligatory) + # can be useful when handling complex Git revisions + versions = script_artifact.meta.get('versions', {}) + + if version != '' and version in versions: + versions_meta = versions[version] + r = update_state_from_meta( + versions_meta, + env, + state, + const, + const_state, + deps, + post_deps, + prehook_deps, + posthook_deps, + new_env_keys_from_meta, + new_state_keys_from_meta, + run_state, + i) + if r['return'] > 0: + return r + adr = get_adr(versions_meta) + if adr: + self._merge_dicts_with_tags(add_deps_recursive, adr) + # Processing them again using updated deps for + # add_deps_recursive + r = update_adr_from_meta( + deps, + post_deps, + prehook_deps, + posthook_deps, + add_deps_recursive, + env) + + # STEP 1100: Update deps from input + r = update_deps_from_input( + deps, post_deps, prehook_deps, posthook_deps, i) + if r['return'] > 0: + return r + + r = update_env_with_values(env) + if r['return'] > 0: + return r + + if str(env.get('CM_RUN_STATE_DOCKER', False) + ).lower() in ['true', '1', 'yes']: + if state.get('docker'): + if str(state['docker'].get('run', True) + ).lower() in ['false', '0', 'no']: + logging.info( + recursion_spaces + + ' - Skipping script::{} run as we are inside docker'.format(found_script_artifact)) + + # restore env and state + for k in list(env.keys()): + del (env[k]) + for k in list(state.keys()): + del (state[k]) + + env.update(saved_env) + state.update(saved_state) + + rr = { + 'return': 0, + 'env': env, + 'new_env': {}, + 'state': state, + 'new_state': {}, + 'deps': []} + return rr + + elif str(state['docker'].get('real_run', True)).lower() in ['false', '0', 'no']: + logging.info( + recursion_spaces + + ' - Doing fake run for script::{} as we are inside docker'.format(found_script_artifact)) + fake_run = True + env['CM_TMP_FAKE_RUN'] = 'yes' + + ####################################################################### + # Check 
extra cache tags + x = env.get('CM_EXTRA_CACHE_TAGS', '').strip() + extra_cache_tags = [] if x == '' else x.split(',') + + if i.get('extra_cache_tags', '') != '': + for x in i['extra_cache_tags'].strip().split(','): + if x != '': + if '<<<' in x: + import re + tmp_values = re.findall(r'<<<(.*?)>>>', str(x)) + for tmp_value in tmp_values: + xx = str(env.get(tmp_value, '')) + x = x.replace("<<<" + tmp_value + ">>>", xx) + if x not in extra_cache_tags: + extra_cache_tags.append(x) + + if env.get('CM_NAME', '') != '': + extra_cache_tags.append('name-' + env['CM_NAME'].strip().lower()) + + ####################################################################### + # Check if need to clean output files + clean_output_files = meta.get('clean_output_files', []) + + if len(clean_output_files) > 0: + clean_tmp_files(clean_output_files, recursion_spaces) + + ####################################################################### + # Check if the output of a selected script should be cached + cache = False if i.get( + 'skip_cache', + False) else meta.get( + 'cache', + False) + cache = cache or ( + i.get( + 'force_cache', + False) and meta.get( + 'can_force_cache', + False)) + # fake run skips run script - should not pollute cache + cache = False if fake_run else cache + + cached_uid = '' + cached_tags = [] + cached_meta = {} + + remove_tmp_tag = False + reuse_cached = False + + found_cached = False + cached_path = '' + + local_env_keys_from_meta = meta.get('local_env_keys', []) + + # Check if has customize.py + path_to_customize_py = os.path.join(path, 'customize.py') + customize_code = None + customize_common_input = {} + + if os.path.isfile(path_to_customize_py) and cache: + r = utils.load_python_module( + {'path': path, 'name': 'customize'}) + if r['return'] > 0: + return r + + customize_code = r['code'] + + customize_common_input = { + 'input': i, + 'automation': self, + 'artifact': script_artifact, + 'customize': script_artifact.meta.get('customize', {}), + 'os_info': os_info, + 'recursion_spaces': recursion_spaces, + 'script_tags': script_tags, + 'variation_tags': variation_tags + } + + ####################################################################### + # Check if script is cached if we need to skip deps from cached entries + this_script_cached = False + + ####################################################################### + # Check if the output of a selected script should be cached + if cache: + # TBD - need to reuse and prune cache_list instead of a new CM + # search inside find_cached_script + + r = find_cached_script({'self': self, + 'recursion_spaces': recursion_spaces, + 'extra_recursion_spaces': extra_recursion_spaces, + 'add_deps_recursive': add_deps_recursive, + 'script_tags': script_tags, + 'found_script_tags': found_script_tags, + 'found_script_path': path, + 'customize_code': customize_code, + 'customize_common_input': customize_common_input, + 'variation_tags': variation_tags, + 'variation_tags_string': variation_tags_string, + 'explicit_variation_tags': explicit_variation_tags, + 'version': version, + 'version_min': version_min, + 'version_max': version_max, + 'extra_cache_tags': extra_cache_tags, + 'new_cache_entry': new_cache_entry, + 'meta': meta, + 'env': env, + 'state': state, + 'const': const, + 'const_state': const_state, + 'skip_remembered_selections': skip_remembered_selections, + 'remembered_selections': remembered_selections, + 'quiet': quiet, + 'verbose': verbose, + 'show_time': show_time + }) + if r['return'] > 0: + return r + + # Sort by tags to ensure 
determinism in order (and later add + # versions) + found_cached_scripts = sorted( + r['found_cached_scripts'], + key=lambda x: sorted( + x.meta['tags'])) + + cached_tags = r['cached_tags'] + search_tags = r['search_tags'] + + num_found_cached_scripts = len(found_cached_scripts) + + if num_found_cached_scripts > 0: + selection = 0 + + # Check if quiet mode + if num_found_cached_scripts > 1: + if quiet: + num_found_cached_scripts = 1 + + if num_found_cached_scripts > 1: + selection = select_script_artifact( + found_cached_scripts, + 'cached script output', + recursion_spaces, + True, + script_tags_string, + quiet, + verbose) + + if selection >= 0: + if not skip_remembered_selections: + # Remember selection + remembered_selections.append({'type': 'cache', + 'tags': search_tags, + 'cached_script': found_cached_scripts[selection]}) + else: + num_found_cached_scripts = 0 + + elif num_found_cached_scripts == 1: + logging.debug( + recursion_spaces + + ' - Found cached script output: {}'.format( + found_cached_scripts[0].path)) + + if num_found_cached_scripts > 0: + found_cached = True + + # Check chain of dynamic dependencies on other CM scripts + if len(deps) > 0: + logging.debug( + recursion_spaces + + ' - Checking dynamic dependencies on other CM scripts:') + + r = self._call_run_deps(deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive, + recursion_spaces + extra_recursion_spaces, + remembered_selections, variation_tags_string, True, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return'] > 0: + return r + + logging.debug( + recursion_spaces + + ' - Processing env after dependencies ...') + + r = update_env_with_values(env) + if r['return'] > 0: + return r + + # Check chain of prehook dependencies on other CM scripts. + # (No execution of customize.py for cached scripts) + logging.debug( + recursion_spaces + + ' - Checking prehook dependencies on other CM scripts:') + + r = self._call_run_deps(prehook_deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive, + recursion_spaces + extra_recursion_spaces, + remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return'] > 0: + return r + + # Continue with the selected cached script + cached_script = found_cached_scripts[selection] + + logging.debug( + recursion_spaces + + ' - Loading state from cached entry ...') + + path_to_cached_state_file = os.path.join(cached_script.path, + self.file_with_cached_state) + + r = utils.load_json(file_name=path_to_cached_state_file) + if r['return'] > 0: + return r + version = r['meta'].get('version') + + if not run_state.get('tmp_silent', False): + logging.info( + recursion_spaces + + ' ! load {}'.format(path_to_cached_state_file)) + + ########################################################### + # IF REUSE FROM CACHE - update env and state from cache! 
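+                    # A minimal sketch of the merge semantics used when reusing a
+                    # cached entry below (hypothetical values; this assumes
+                    # utils.merge_dicts mutates 'dict1' in place and, with
+                    # append_lists/append_unique set, appends only new list elements):
+                    #
+                    #   d1 = {'PATHS': ['/a'], 'X': '1'}
+                    #   d2 = {'PATHS': ['/a', '/b'], 'Y': '2'}
+                    #   utils.merge_dicts({'dict1': d1, 'dict2': d2,
+                    #                      'append_lists': True, 'append_unique': True})
+                    #   # d1 -> {'PATHS': ['/a', '/b'], 'X': '1', 'Y': '2'}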
+ cached_state = r['meta'] + + r = self._fix_cache_paths(cached_state['new_env']) + if r['return'] > 0: + return r + new_env = r['new_env'] + + utils.merge_dicts( + {'dict1': env, 'dict2': new_env, 'append_lists': True, 'append_unique': True}) + + new_state = cached_state['new_state'] + utils.merge_dicts({'dict1': state, + 'dict2': new_state, + 'append_lists': True, + 'append_unique': True}) + + utils.merge_dicts( + {'dict1': new_env, 'dict2': const, 'append_lists': True, 'append_unique': True}) + utils.merge_dicts({'dict1': new_state, + 'dict2': const_state, + 'append_lists': True, + 'append_unique': True}) + + if not fake_run: + # Check chain of posthook dependencies on other CM scripts. We consider them same as postdeps when + # script is in cache + logging.debug( + recursion_spaces + + ' - Checking posthook dependencies on other CM scripts:') + + clean_env_keys_post_deps = meta.get( + 'clean_env_keys_post_deps', []) + + r = self._call_run_deps(posthook_deps, self.local_env_keys, clean_env_keys_post_deps, env, state, const, const_state, add_deps_recursive, + recursion_spaces + extra_recursion_spaces, + remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return'] > 0: + return r + + logging.debug( + recursion_spaces + + ' - Checking post dependencies on other CM scripts:') + + # Check chain of post dependencies on other CM scripts + r = self._call_run_deps(post_deps, self.local_env_keys, clean_env_keys_post_deps, env, state, const, const_state, add_deps_recursive, + recursion_spaces + extra_recursion_spaces, + remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return'] > 0: + return r + + if renew or (not found_cached and num_found_cached_scripts == 0): + # Add more tags to cached tags + # based on meta information of the found script + x = 'script-artifact-' + meta['uid'] + if x not in cached_tags: + cached_tags.append(x) + + # Add all tags from the original CM script + for x in meta.get('tags', []): + if x not in cached_tags: + cached_tags.append(x) + + if not found_cached and num_found_cached_scripts == 0: + if i.get('only_execute_from_cache'): + # useful to check valid cache entries for a script (cm show + # cache can return invalid cache entries for a script too) + return { + 'return': 1, 'error': f'No valid cache entry found for {cached_tags}'} + + # If not cached, create cached script artifact and mark as tmp + # (remove if cache successful) + tmp_tags = ['tmp'] + + # Finalize tmp tags + tmp_tags += [t for t in cached_tags if not t.startswith("-")] + + # Check if some variations are missing + # though it should not happen! 
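+            # Example of the tag finalization above and below (hypothetical tags):
+            #
+            #   cached_tags    = ['script-artifact-abc123', 'get', 'gcc']
+            #   variation_tags = ['with-cuda', '-exclude-me']
+            #
+            # produces the temporary cache entry tags
+            #
+            #   ['tmp', 'script-artifact-abc123', 'get', 'gcc', '_with-cuda']
+            #
+            # i.e. '-' prefixed (excluded) tags are dropped, variations are
+            # re-prefixed with '_', and the 'tmp' tag is removed later once
+            # the run succeeds.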
+ for t in variation_tags: + if t.startswith("-"): + continue + x = '_' + t + if x not in tmp_tags: + tmp_tags.append(x) + + # Use update to update the tmp one if already exists + logging.debug( + recursion_spaces + + ' - Creating new "cache" script artifact in the CM local repository ...') + logging.debug(recursion_spaces + + ' - Tags: {}'.format(','.join(tmp_tags))) + + if version != '': + cached_meta['version'] = version + + ii = {'action': 'update', + 'automation': self.meta['deps']['cache'], + 'search_tags': tmp_tags, + 'tags': ','.join(tmp_tags), + 'meta': cached_meta, + 'force': True} + + r = self.cmind.access(ii) + if r['return'] > 0: + return r + + remove_tmp_tag = True + + cached_script = r['list'][0] + + cached_path = cached_script.path + cached_meta = cached_script.meta + + cached_uid = cached_meta['uid'] + + # Changing path to CM script artifact for cached output + # to record data and files there + logging.debug( + recursion_spaces + + ' - Changing to {}'.format(cached_path)) + + os.chdir(cached_path) + + # If found cached and we want to renew it + if found_cached and renew: + cached_path = cached_script.path + cached_meta = cached_script.meta + + cached_uid = cached_meta['uid'] + + # Changing path to CM script artifact for cached output + # to record data and files there + logging.debug( + recursion_spaces + + ' - Changing to {}'.format(cached_path)) + + os.chdir(cached_path) + + # Force to finalize script inside cached entry + found_cached = False + remove_tmp_tag = True + + env['CM_RENEW_CACHE_ENTRY'] = 'yes' + + # Prepare files to be cleaned + clean_files = [self.tmp_file_run_state, + self.tmp_file_run_env, + self.tmp_file_ver, + self.tmp_file_env + bat_ext, + self.tmp_file_env_all + bat_ext, + self.tmp_file_state, + self.tmp_file_run + bat_ext] + + if not found_cached and len(meta.get('clean_files', [])) > 0: + clean_files = meta['clean_files'] + clean_files + + ################################ + if not found_cached: + if len(warnings) > 0: + logging.warn( + '=================================================') + logging.warn('WARNINGS:') + for w in warnings: + logging.warn(' ' + w) + logging.warn( + '=================================================') + + # Update default version meta if version is not set + if version == '': + default_version = run_state.get('default_version', '') + if default_version != '': + version = default_version + + if version_min != '': + ry = self.cmind.access({'action': 'compare_versions', + 'automation': 'utils,dc2743f8450541e3', + 'version1': version, + 'version2': version_min}) + if ry['return'] > 0: + return ry + + if ry['comparison'] < 0: + version = version_min + + if version_max != '': + ry = self.cmind.access({'action': 'compare_versions', + 'automation': 'utils,dc2743f8450541e3', + 'version1': version, + 'version2': version_max}) + if ry['return'] > 0: + return ry + + if ry['comparison'] > 0: + if version_max_usable != '': + version = version_max_usable + else: + version = version_max + + logging.debug( + recursion_spaces + + ' - Version is not specified - use either default_version from meta or min/max/usable: {}'.format(version)) + + r = _update_env(env, 'CM_VERSION', version) + if r['return'] > 0: + return r + + if 'version-' + version not in cached_tags: + cached_tags.append('version-' + version) + + if default_version in versions: + versions_meta = versions[default_version] + r = update_state_from_meta( + versions_meta, + env, + state, + const, + const_state, + deps, + post_deps, + prehook_deps, + posthook_deps, + 
new_env_keys_from_meta, + new_state_keys_from_meta, + run_state, + i) + if r['return'] > 0: + return r + + if "add_deps_recursive" in versions_meta: + self._merge_dicts_with_tags( + add_deps_recursive, versions_meta['add_deps_recursive']) + + r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_PATH', path) + if r['return'] > 0: + return r + + # Run chain of docker dependencies if current run cmd is from + # inside a docker container + docker_deps = [] + if i.get('docker_run_deps'): + docker_meta = meta.get('docker') + if docker_meta: + docker_deps = docker_meta.get('deps', []) + if docker_deps: + docker_deps = [ + dep for dep in docker_deps if not dep.get( + 'skip_inside_docker', False)] + + if len(docker_deps) > 0: + + logging.debug( + recursion_spaces + + ' - Checking docker run dependencies on other CM scripts:') + + r = self._call_run_deps(docker_deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive, + recursion_spaces + extra_recursion_spaces, + remembered_selections, variation_tags_string, False, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return'] > 0: + return r + + logging.debug( + recursion_spaces + + ' - Processing env after docker run dependencies ...') + + r = update_env_with_values(env) + if r['return'] > 0: + return r + + # Check chain of dependencies on other CM scripts + if len(deps) > 0: + logging.debug(recursion_spaces + + ' - Checking dependencies on other CM scripts:') + + r = self._call_run_deps(deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive, + recursion_spaces + extra_recursion_spaces, + remembered_selections, variation_tags_string, False, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return'] > 0: + return r + + logging.debug(recursion_spaces + + ' - Processing env after dependencies ...') + + r = update_env_with_values(env) + if r['return'] > 0: + return r + + # Clean some output files + clean_tmp_files(clean_files, recursion_spaces) + + # Prepare common input to prepare and run script + run_script_input = { + 'path': path, + 'bat_ext': bat_ext, + 'os_info': os_info, + 'const': const, + 'state': state, + 'const_state': const_state, + 'reuse_cached': reuse_cached, + 'recursion': recursion, + 'recursion_spaces': recursion_spaces, + 'remembered_selections': remembered_selections, + 'tmp_file_run_state': self.tmp_file_run_state, + 'tmp_file_run_env': self.tmp_file_run_env, + 'tmp_file_state': self.tmp_file_state, + 'tmp_file_run': self.tmp_file_run, + 'local_env_keys': self.local_env_keys, + 'local_env_keys_from_meta': local_env_keys_from_meta, + 'posthook_deps': posthook_deps, + 'add_deps_recursive': add_deps_recursive, + 'remembered_selections': remembered_selections, + 'found_script_tags': found_script_tags, + 'variation_tags_string': variation_tags_string, + 'found_cached': False, + 'debug_script_tags': debug_script_tags, + 'verbose': verbose, + 'meta': meta, + 'self': self + } + if os.path.isfile( + path_to_customize_py): # possible duplicate execution - needs fix + r = utils.load_python_module( + {'path': path, 'name': 'customize'}) + if r['return'] > 0: + return r + + customize_code = r['code'] + + customize_common_input = { + 'input': i, + 'automation': self, + 'artifact': script_artifact, + 'customize': script_artifact.meta.get('customize', {}), + 'os_info': os_info, + 'recursion_spaces': recursion_spaces, + 'script_tags': script_tags, + 'variation_tags': variation_tags + } + 
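+        # For reference, a minimal customize.py preprocess hook consuming the
+        # common input assembled above could look as follows (hypothetical env
+        # keys; 'skip' and 'script' are the optional controls checked further
+        # below):
+        #
+        #   def preprocess(i):
+        #       env = i['env']
+        #       if env.get('CM_TMP_SKIP_THIS_SCRIPT', '') == 'yes':
+        #           return {'return': 0, 'skip': True}
+        #       env['CM_TMP_DETECTED_SOMETHING'] = 'yes'
+        #       return {'return': 0}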
run_script_input['customize_code'] = customize_code + run_script_input['customize_common_input'] = customize_common_input + + if repro_prefix != '': + run_script_input['repro_prefix'] = repro_prefix + if ignore_script_error: + run_script_input['ignore_script_error'] = True + + # Assemble PIP versions + pip_version_string = '' + + pip_version = env.get('CM_VERSION', '') + pip_version_min = env.get('CM_VERSION_MIN', '') + pip_version_max = env.get('CM_VERSION_MAX', '') + + if pip_version != '': + pip_version_string = '==' + pip_version + elif pip_version_min != '' and pip_version_max != '': + pip_version_string = '>=' + pip_version_min + ',<=' + pip_version_max + elif pip_version_min != '': + pip_version_string = '>=' + pip_version_min + elif pip_version_max != '': + pip_version_string = '<=' + pip_version_max + + env.update(const) + utils.merge_dicts({'dict1': state, + 'dict2': const_state, + 'append_lists': True, + 'append_unique': True}) + + r = _update_env( + env, + 'CM_TMP_PIP_VERSION_STRING', + pip_version_string) + if r['return'] > 0: + return r + + if pip_version_string != '': + logging.debug( + recursion_spaces + + ' # potential PIP version string (if needed): ' + + pip_version_string) + + tmp_curdir = os.getcwd() + if env.get('CM_OUTDIRNAME', '') != '': + if not os.path.exists(env['CM_OUTDIRNAME']): + os.makedirs(env['CM_OUTDIRNAME']) + os.chdir(env['CM_OUTDIRNAME']) + + # Check if pre-process and detect + if 'preprocess' in dir(customize_code) and not fake_run: + + logging.debug(recursion_spaces + ' - Running preprocess ...') + + run_script_input['run_state'] = run_state + + ii = copy.deepcopy(customize_common_input) + ii['env'] = env + ii['state'] = state + ii['meta'] = meta + # may need to detect versions in multiple paths + ii['run_script_input'] = run_script_input + + r = customize_code.preprocess(ii) + if r['return'] > 0: + return r + + # Check if preprocess says to skip this component + skip = r.get('skip', False) + + if skip: + logging.debug( + recursion_spaces + + ' - this script is skipped!') + + # Check if script asks to run other dependencies instead of + # the skipped one + another_script = r.get('script', {}) + + if len(another_script) == 0: + return {'return': 0, 'skipped': True} + + logging.debug( + recursion_spaces + + ' - another script is executed instead!') + + ii = { + 'action': 'run', + 'automation': utils.assemble_cm_object(self.meta['alias'], self.meta['uid']), + 'recursion_spaces': recursion_spaces + extra_recursion_spaces, + 'recursion': True, + 'remembered_selections': remembered_selections, + 'env': env, + 'state': state, + 'const': const, + 'const_state': const_state, + 'save_env': save_env, + 'add_deps_recursive': add_deps_recursive + } + + ii.update(another_script) + + # Return to current path + os.chdir(current_path) + + ########################################################### + return self.cmind.access(ii) + + # If return version + if cache: + if r.get('version', '') != '': + cached_tags = [ + x for x in cached_tags if not x.startswith('version-')] + cached_tags.append('version-' + r['version']) + + if len(r.get('add_extra_cache_tags', [])) > 0: + for t in r['add_extra_cache_tags']: + if t not in cached_tags: + cached_tags.append(t) + + if print_env: + import json + logging.debug(json.dumps(env, indent=2, sort_keys=True)) + + # Check chain of pre hook dependencies on other CM scripts + if len(prehook_deps) > 0: + logging.debug( + recursion_spaces + + ' - Checking prehook dependencies on other CM scripts:') + + r = 
self._call_run_deps(prehook_deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive, + recursion_spaces + extra_recursion_spaces, + remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return'] > 0: + return r + + if not fake_run: + env_key_mappings = meta.get("env_key_mappings", {}) + if env_key_mappings: + update_env_keys(env, env_key_mappings) + + run_script_input['meta'] = meta + run_script_input['env'] = env + run_script_input['run_state'] = run_state + run_script_input['recursion'] = recursion + + r = prepare_and_run_script_with_postprocessing( + run_script_input) + if r['return'] > 0: + return r + + # If return version + if r.get('version', '') != '': + version = r.get('version') + if cache: + cached_tags = [ + x for x in cached_tags if not x.startswith('version-')] + cached_tags.append('version-' + r['version']) + + if len(r.get('add_extra_cache_tags', [])) > 0 and cache: + for t in r['add_extra_cache_tags']: + if t not in cached_tags: + cached_tags.append(t) + + # Check chain of post dependencies on other CM scripts + clean_env_keys_post_deps = meta.get( + 'clean_env_keys_post_deps', []) + + r = self._run_deps(post_deps, clean_env_keys_post_deps, env, state, const, const_state, add_deps_recursive, recursion_spaces, + remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, extra_recursion_spaces, run_state) + if r['return'] > 0: + return r + + # Add extra tags from env updated by deps (such as python version + # and compiler version, etc) + extra_cache_tags_from_env = meta.get( + 'extra_cache_tags_from_env', []) + for extra_cache_tags in extra_cache_tags_from_env: + key = extra_cache_tags['env'] + prefix = extra_cache_tags.get('prefix', '') + + v = env.get(key, '').strip() + if v != '': + for t in v.split(','): + x = 'deps-' + prefix + t + if x not in cached_tags: + cached_tags.append(x) + + if env.get('CM_OUTDIRNAME', '') != '': + os.chdir(tmp_curdir) + + detected_version = env.get( + 'CM_DETECTED_VERSION', env.get( + 'CM_VERSION', '')) + dependent_cached_path = env.get('CM_GET_DEPENDENT_CACHED_PATH', '') + + ####################################################################### + # Finalize script + + # Force consts in the final new env and state + utils.merge_dicts({'dict1': env, 'dict2': const, + 'append_lists': True, 'append_unique': True}) + utils.merge_dicts({'dict1': state, + 'dict2': const_state, + 'append_lists': True, + 'append_unique': True}) + + if i.get('force_new_env_keys', []): + new_env_keys = i['force_new_env_keys'] + else: + new_env_keys = new_env_keys_from_meta + + if i.get('force_new_state_keys', []): + new_state_keys = i['force_new_state_keys'] + else: + new_state_keys = new_state_keys_from_meta + + r = detect_state_diff( + env, + saved_env, + new_env_keys, + new_state_keys, + state, + saved_state) + if r['return'] > 0: + return r + + new_env = r['new_env'] + new_state = r['new_state'] + + utils.merge_dicts({'dict1': saved_env, + 'dict2': new_env, + 'append_lists': True, + 'append_unique': True}) + utils.merge_dicts({'dict1': saved_state, + 'dict2': new_state, + 'append_lists': True, + 'append_unique': True}) + + # Restore original env/state and merge env/state + # This is needed since we want to keep original env/state outside this script + # If we delete env and create a new dict, the original one outside this script will be detached + # That's why we just clean all keys in 
original env/state (used outside)
+        # And then copy saved_env (with new_env merged) and saved_state (with new_state merged)
+        # while getting rid of all temporary updates in env and state inside
+        # this script
+
+        for k in list(env.keys()):
+            del (env[k])
+        for k in list(state.keys()):
+            del (state[k])
+
+        env.update(saved_env)
+        state.update(saved_state)
+
+        # Prepare env script content (to be saved in cache and in the current
+        # path if needed)
+        env_script = convert_env_to_script(
+            new_env, os_info, start_script=os_info['start_script'])
+
+        # If using cached script artifact, return to default path and then
+        # update the cache script artifact
+        if cache and cached_path != '':
+            # Check if the tmp tag needs to be removed
+            if remove_tmp_tag:
+                # Save state, env and deps for reuse
+                r = utils.save_json(file_name=os.path.join(cached_path, self.file_with_cached_state),
+                                    meta={'new_state': new_state, 'new_env': new_env, 'deps': deps, 'version': version})
+                if r['return'] > 0:
+                    return r
+
+                # Save all env
+                env_all_script = convert_env_to_script(
+                    env, os_info, start_script=os_info['start_script'])
+
+                r = record_script(os.path.join(cached_path, self.tmp_file_env_all + bat_ext),
+                                  env_all_script, os_info)
+                if r['return'] > 0:
+                    return r
+
+                # Save env
+                r = record_script(os.path.join(cached_path, self.tmp_file_env + bat_ext),
+                                  env_script, os_info)
+                if r['return'] > 0:
+                    return r
+
+                # Remove tmp tag from the "cached" artifact to finalize caching
+                logging.debug(
+                    recursion_spaces +
+                    ' - Removing tmp tag in the script cached output {} ...'.format(cached_uid))
+
+                # Check if version was detected and record it in meta
+                if detected_version != '':
+                    cached_meta['version'] = detected_version
+
+                if found_script_artifact != '':
+                    cached_meta['associated_script_artifact'] = found_script_artifact
+
+                    x = found_script_artifact.find(',')
+                    if x < 0:
+                        return {
+                            'return': 1, 'error': 'CM artifact format is wrong "{}" - no comma found'.format(found_script_artifact)}
+
+                    cached_meta['associated_script_artifact_uid'] = found_script_artifact[x + 1:]
+
+                # Check if the cached entry is dependent on any path
+                if dependent_cached_path != '':
+                    if os.path.isdir(cached_path) and os.path.exists(
+                            dependent_cached_path):
+                        if not os.path.samefile(
+                                cached_path, dependent_cached_path):
+                            cached_meta['dependent_cached_path'] = dependent_cached_path
+
+                ii = {'action': 'update',
+                      'automation': self.meta['deps']['cache'],
+                      'artifact': cached_uid,
+                      'meta': cached_meta,
+                      'replace_lists': True,  # To replace tags
+                      'tags': ','.join(cached_tags)}
+
+                r = self.cmind.access(ii)
+                if r['return'] > 0:
+                    return r
+
+        # Clean tmp files only in the current path (do not touch cache - we keep
+        # all info there)
+        script_path = os.getcwd()
+        os.chdir(current_path)
+
+        shell = i.get('shell', False)
+#        if not shell:
+#            shell = i.get('debug', False)
+
+        if not shell and not i.get('dirty', False) and not cache:
+            clean_tmp_files(clean_files, recursion_spaces)
+
+        # Record new env and new state in the current dir if needed
+        if save_env or shell:
+            # Check if script_prefix is in the state from other components
+            where_to_add = len(os_info['start_script'])
+
+            script_prefix = state.get('script_prefix', [])
+            if len(script_prefix) > 0:
+                env_script.insert(where_to_add, '\n')
+                for x in reversed(script_prefix):
+                    env_script.insert(where_to_add, x)
+
+            if shell:
+                x = [
+                    'cmd',
+                    '.',
+                    '',
+                    '.bat',
+                    ''] if os_info['platform'] == 'windows' else [
+                    'bash',
+                    ' ""',
+                    '"',
+                    '.sh',
+                    '. 
./'] + + env_script.append('\n') + env_script.append('echo{}\n'.format(x[1])) + env_script.append( + 'echo {}Working path: {}{}'.format( + x[2], script_path, x[2])) + xtmp_run_file = '' + tmp_run_file = 'tmp-run{}'.format(x[3]) + if os.path.isfile(tmp_run_file): + xtmp_run_file = 'Change and run "{}". '.format( + tmp_run_file) + + env_script.append( + 'echo {}Running debug shell. {}Type exit to quit ...{}\n'.format( + x[2], xtmp_run_file, x[2])) + env_script.append('echo{}\n'.format(x[1])) + env_script.append('\n') + env_script.append(x[0]) + + env_file = self.tmp_file_env + bat_ext + + r = record_script(env_file, env_script, os_info) + if r['return'] > 0: + return r + + if shell: + x = env_file if os_info['platform'] == 'windows' else '. ./' + env_file + os.system(x) + + if not version and detected_version: + version = detected_version + + # Add detected or forced version to the CM script run time state + # to aggregate all resolved versions and dump them at the end + # if requested (for better reproducibility/replicability) + + script_uid = script_artifact.meta.get('uid') + script_alias = script_artifact.meta.get('alias') + + # we should use user-friendly tags here + # script_tags = script_artifact.meta.get('tags') + + version_info_tags = ",".join(script_tags) + + if len(variation_tags) > 0: + for vt in variation_tags: + version_info_tags += ',_' + vt + + version_info = {} + version_info[version_info_tags] = { + 'script_uid': script_uid, + 'script_alias': script_alias, + 'script_tags': ','.join(found_script_tags), + 'script_variations': ','.join(variation_tags), + 'version': version, + 'parent': run_state['parent'] + } + + run_state['version_info'].append(version_info) + + script_versions = detected_versions.get(meta['uid'], []) + + if not script_versions: + detected_versions[meta['uid']] = [version] + else: + script_versions.append(version) + + # RETURN + elapsed_time = time.time() - start_time + + if verbose and cached_uid != '': + logging.info( + recursion_spaces + + ' - cache UID: {}'.format(cached_uid)) + + if print_deps: + print_deps_data = self._print_deps(run_state['deps']) + new_state['print_deps'] = print_deps_data + + if print_readme or repro_prefix != '': + readme = self._get_readme(cmd, run_state) + + # Copy Docker sample + if repro_prefix != '' and repro_dir != '': + docker_template_path = os.path.join( + self.path, 'docker_repro_example') + if os.path.isdir(docker_template_path): + try: + + shutil.copytree( + docker_template_path, + repro_dir, + dirs_exist_ok=True) + except Exception as e: + pass + + docker_container = self._get_docker_container(cmd, run_state) + + try: + + with open(os.path.join(repro_dir, 'ubuntu-23.04.Dockerfile'), 'a+') as f: + f.write(docker_container) + except BaseException: + pass + + if print_readme: + with open('README-cm.md', 'w') as f: + f.write(readme) + + if dump_version_info: + r = self._dump_version_info_for_script(quiet=quiet, silent=silent) + if r['return'] > 0: + return r + + rr = { + 'return': 0, + 'env': env, + 'new_env': new_env, + 'state': state, + 'new_state': new_state, + 'deps': run_state.get('deps')} + + # Print output as json to console + if i.get('json', False) or i.get('j', False): + import json + logging.info(json.dumps(rr, indent=2)) + + # Check if save json to file + if repro_prefix != '': + + with open(repro_prefix + '-README-cm.md', 'w', encoding='utf-8') as f: + f.write(readme) + + dump_repro(repro_prefix, rr, run_state) + + if verbose or show_time: + logging.info( + recursion_spaces + + ' - running time of script "{}": 
{:.2f} sec.'.format( + ','.join(found_script_tags), + elapsed_time)) + + if not recursion and show_space: + stop_disk_stats = shutil.disk_usage("/") + + used_disk_space_in_mb = int( + (start_disk_stats.free - stop_disk_stats.free) / (1024 * 1024)) + + if used_disk_space_in_mb > 0: + logging.info( + recursion_spaces + + ' - used disk space: {} MB'.format(used_disk_space_in_mb)) + + # Check if need to print some final info such as path to model, etc + if not run_state.get('tmp_silent', False): + print_env_at_the_end = meta.get('print_env_at_the_end', {}) + if len(print_env_at_the_end) > 0: + for p in sorted(print_env_at_the_end): + t = print_env_at_the_end[p] + if t == '': + t = 'ENV[{}]'.format(p) + + v = new_env.get(p, None) + + logging.info('{}: {}'.format(t, str(v))) + + # Check if print nice versions + if print_versions: + self._print_versions(run_state) + + # Check if pause (useful if running a given script in a new terminal + # that may close automatically) + if i.get('pause', False): + input('Press Enter to continue ...') + + return rr + + ########################################################################## + def _fix_cache_paths(self, env): + cm_repos_path = os.environ.get( + 'CM_REPOS', os.path.join( + os.path.expanduser("~"), "CM", "repos")) + current_cache_path = os.path.realpath( + os.path.join(cm_repos_path, "local", "cache")) + + new_env = env # just a reference + + for key, val in new_env.items(): + # Check for a path separator in a string and determine the + # separator + if isinstance(val, str) and any(sep in val for sep in [ + "/local/cache/", "\\local\\cache\\"]): + sep = "/" if "/local/cache/" in val else "\\" + + path_split = val.split(sep) + repo_entry_index = path_split.index("local") + loaded_cache_path = sep.join( + path_split[0:repo_entry_index + 2]) + if loaded_cache_path != current_cache_path and os.path.exists( + current_cache_path): + new_env[key] = val.replace( + loaded_cache_path, current_cache_path) + + elif isinstance(val, list): + for i, val2 in enumerate(val): + if isinstance(val2, str) and any(sep in val2 for sep in [ + "/local/cache/", "\\local\\cache\\"]): + sep = "/" if "/local/cache/" in val2 else "\\" + + path_split = val2.split(sep) + repo_entry_index = path_split.index("local") + loaded_cache_path = sep.join( + path_split[0:repo_entry_index + 2]) + if loaded_cache_path != current_cache_path and os.path.exists( + current_cache_path): + new_env[key][i] = val2.replace( + loaded_cache_path, current_cache_path) + + return {'return': 0, 'new_env': new_env} + + ########################################################################## + def _dump_version_info_for_script( + self, output_dir=os.getcwd(), quiet=False, silent=False): + + if not quiet and not silent: + pass + for f in ['cm-run-script-versions.json', 'version_info.json']: + if not quiet and not silent: + logging.info('Dumping versions to {}'.format(f)) + r = utils.save_json(f, self.run_state.get('version_info', [])) + if r['return'] > 0: + return r + + return {'return': 0} + + ########################################################################## + def _update_state_from_variations(self, i, meta, variation_tags, variations, env, state, const, const_state, deps, post_deps, prehook_deps, + posthook_deps, new_env_keys_from_meta, new_state_keys_from_meta, add_deps_recursive, run_state, recursion_spaces, verbose): + + # Save current explicit variations + import copy + explicit_variation_tags = copy.deepcopy(variation_tags) + + # Calculate space + required_disk_space = {} + + # Check if 
warning + warnings = [] + + # variation_tags get appended by any aliases + r = self._get_variations_with_aliases(variation_tags, variations) + if r['return'] > 0: + return r + variation_tags = r['variation_tags'] + excluded_variation_tags = r['excluded_variation_tags'] + + # Get a dictionary of variation groups + r = self._get_variation_groups(variations) + if r['return'] > 0: + return r + + variation_groups = r['variation_groups'] + + run_state['variation_groups'] = variation_groups + + # Add variation(s) if specified in the "tags" input prefixed by _ + + # If there is only 1 default variation, then just use it or + # substitute from CMD + + default_variation = meta.get('default_variation', '') + + if default_variation and default_variation not in variations: + return {'return': 1, 'error': 'Default variation "{}" is not in the list of variations: "{}" '.format( + default_variation, variations.keys())} + + if len(variation_tags) == 0: + if default_variation != '' and default_variation not in excluded_variation_tags: + variation_tags = [default_variation] + + r = self._update_variation_tags_from_variations( + variation_tags, variations, variation_groups, excluded_variation_tags) + if r['return'] > 0: + return r + + # variation_tags get appended by any default on variation in groups + r = self._process_variation_tags_in_groups( + variation_tags, variation_groups, excluded_variation_tags, variations) + if r['return'] > 0: + return r + if variation_tags != r['variation_tags']: + variation_tags = r['variation_tags'] + + # we need to again process variation tags if any new default + # variation is added + r = self._update_variation_tags_from_variations( + variation_tags, variations, variation_groups, excluded_variation_tags) + if r['return'] > 0: + return r + + valid_variation_combinations = meta.get( + 'valid_variation_combinations', []) + if valid_variation_combinations: + if not any(all(t in variation_tags for t in s) + for s in valid_variation_combinations): + return {'return': 1, 'error': 'Invalid variation combination "{}" prepared. Valid combinations: "{}" '.format( + variation_tags, valid_variation_combinations)} + + invalid_variation_combinations = meta.get( + 'invalid_variation_combinations', []) + if invalid_variation_combinations: + if any(all(t in variation_tags for t in s) + for s in invalid_variation_combinations): + return {'return': 1, 'error': 'Invalid variation combination "{}" prepared. Invalid combinations: "{}" '.format( + variation_tags, invalid_variation_combinations)} + + variation_tags_string = '' + if len(variation_tags) > 0: + for t in variation_tags: + if variation_tags_string != '': + variation_tags_string += ',' + + x = '_' + t + variation_tags_string += x + + logging.debug( + recursion_spaces + + ' Prepared variations: {}'.format(variation_tags_string)) + + # Update env and other keys if variations + if len(variation_tags) > 0: + for variation_tag in variation_tags: + if variation_tag.startswith('~'): + # ignore such tag (needed for caching only to differentiate + # variations) + continue + + if variation_tag.startswith('-'): + # ignore such tag (needed for caching only to eliminate + # variations) + continue + + variation_tag_dynamic_suffix = None + if variation_tag not in variations: + if '.' 
in variation_tag and variation_tag[-1] != '.':
+                        variation_tag_dynamic_suffix = variation_tag[variation_tag.index(
+                            ".") + 1:]
+                        if not variation_tag_dynamic_suffix:
+                            return {'return': 1, 'error': 'tag {} is not in variations {}'.format(
+                                variation_tag, variations.keys())}
+                        variation_tag = self._get_name_for_dynamic_variation_tag(
+                            variation_tag)
+                    if variation_tag not in variations:
+                        return {'return': 1, 'error': 'tag {} is not in variations {}'.format(
+                            variation_tag, variations.keys())}
+
+                variation_meta = variations[variation_tag]
+                if variation_tag_dynamic_suffix:
+                    self._update_variation_meta_with_dynamic_suffix(
+                        variation_meta, variation_tag_dynamic_suffix)
+
+                r = update_state_from_meta(
+                    variation_meta,
+                    env,
+                    state,
+                    const,
+                    const_state,
+                    deps,
+                    post_deps,
+                    prehook_deps,
+                    posthook_deps,
+                    new_env_keys_from_meta,
+                    new_state_keys_from_meta,
+                    run_state,
+                    i)
+                if r['return'] > 0:
+                    return r
+
+                if variation_meta.get('script_name', '') != '':
+                    meta['script_name'] = variation_meta['script_name']
+
+                if variation_meta.get('default_version', '') != '':
+                    run_state['default_version'] = variation_meta['default_version']
+
+                if variation_meta.get(
+                        'required_disk_space', 0) > 0 and variation_tag not in required_disk_space:
+                    required_disk_space[variation_tag] = variation_meta['required_disk_space']
+
+                if variation_meta.get('warning', '') != '':
+                    x = variation_meta['warning']
+                    if x not in warnings:
+                        warnings.append(x)
+
+                adr = get_adr(variation_meta)
+                if adr:
+                    self._merge_dicts_with_tags(add_deps_recursive, adr)
+
+            combined_variations = [t for t in variations if ',' in t]
+
+            combined_variations.sort(key=lambda x: x.count(','))
+            ''' By sorting based on the number of variations, users can safely override
+                env and state in a larger combined variation
+            '''
+
+            for combined_variation in combined_variations:
+                v = combined_variation.split(",")
+                all_present = set(v).issubset(set(variation_tags))
+                if all_present:
+
+                    combined_variation_meta = variations[combined_variation]
+
+                    r = update_state_from_meta(
+                        combined_variation_meta,
+                        env,
+                        state,
+                        const,
+                        const_state,
+                        deps,
+                        post_deps,
+                        prehook_deps,
+                        posthook_deps,
+                        new_env_keys_from_meta,
+                        new_state_keys_from_meta,
+                        run_state,
+                        i)
+                    if r['return'] > 0:
+                        return r
+
+                    adr = get_adr(combined_variation_meta)
+                    if adr:
+                        self._merge_dicts_with_tags(
+                            add_deps_recursive, adr)
+
+                    if combined_variation_meta.get(
+                            'script_name', '') != '':
+                        meta['script_name'] = combined_variation_meta['script_name']
+
+                    if combined_variation_meta.get('default_version', '') != '':
+                        run_state['default_version'] = combined_variation_meta['default_version']
+
+                    if combined_variation_meta.get(
+                            'required_disk_space', 0) > 0 and combined_variation not in required_disk_space:
+                        required_disk_space[combined_variation] = combined_variation_meta['required_disk_space']
+
+                    if combined_variation_meta.get('warning', '') != '':
+                        x = combined_variation_meta['warning']
+                        if x not in warnings:
+                            warnings.append(x)
+
+            # Process them again using deps updated for add_deps_recursive
+            r = update_adr_from_meta(
+                deps,
+                post_deps,
+                prehook_deps,
+                posthook_deps,
+                add_deps_recursive,
+                env)
+            if r['return'] > 0:
+                return r
+
+        if len(required_disk_space) > 0:
+            required_disk_space_sum_mb = sum(
+                list(required_disk_space.values()))
+
+            warnings.append(
+                'Required disk space: {} MB'.format(required_disk_space_sum_mb))
+
+        return {'return': 0, 'variation_tags_string': variation_tags_string,
+                'explicit_variation_tags': explicit_variation_tags,
+                'warnings': warnings}
+
+    ##########################################################################
+    def _update_variation_tags_from_variations(
+            self, variation_tags, variations, variation_groups, excluded_variation_tags):
+
+        import copy
+        tmp_variation_tags_static = copy.deepcopy(variation_tags)
+        for v_i in range(len(tmp_variation_tags_static)):
+            v = tmp_variation_tags_static[v_i]
+
+            if v not in variations:
+                v_static = self._get_name_for_dynamic_variation_tag(v)
+                tmp_variation_tags_static[v_i] = v_static
+
+        combined_variations = [t for t in variations if ',' in t]
+        # We support default_variations in the meta of combined_variations
+        combined_variations.sort(key=lambda x: x.count(','))
+        ''' By sorting based on the number of variations, users can safely override
+            env and state in a larger combined variation
+        '''
+        tmp_combined_variations = {k: False for k in combined_variations}
+
+        # Recursively add any base variations specified
+        if len(variation_tags) > 0:
+            tmp_variations = {k: False for k in variation_tags}
+            while True:
+                for variation_name in variation_tags:
+                    tag_to_append = None
+
+                    # ignore the excluded variations
+                    if variation_name.startswith(
+                            "~") or variation_name.startswith("-"):
+                        tmp_variations[variation_name] = True
+                        continue
+
+                    if variation_name not in variations:
+                        variation_name = self._get_name_for_dynamic_variation_tag(
+                            variation_name)
+
+                    # base variations are automatically turned on. Only
+                    # variations outside of any variation group can be added as
+                    # a base_variation
+                    if "base" in variations[variation_name]:
+                        base_variations = variations[variation_name]["base"]
+                        for base_variation in base_variations:
+                            dynamic_base_variation = False
+                            dynamic_base_variation_already_added = False
+                            if base_variation not in variations:
+                                base_variation_dynamic = self._get_name_for_dynamic_variation_tag(
+                                    base_variation)
+                                if not base_variation_dynamic or base_variation_dynamic not in variations:
+                                    return {'return': 1, 'error': 'Variation "{}" specified as a base variation of "{}" does not exist'.format(
+                                        base_variation, variation_name)}
+                                else:
+                                    dynamic_base_variation = True
+                                    base_prefix = base_variation_dynamic.split(".")[
+                                        0] + "."
+                                    for x in variation_tags:
+                                        if x.startswith(base_prefix):
+                                            dynamic_base_variation_already_added = True
+
+                            if base_variation not in variation_tags and not dynamic_base_variation_already_added:
+                                tag_to_append = base_variation
+
+                            if tag_to_append:
+                                if tag_to_append in excluded_variation_tags:
+                                    return {'return': 1, 'error': 'Variation "{}" specified as a base variation of "{}" is in the excluded list'.format(
+                                        tag_to_append, variation_name)}
+                                variation_tags.append(tag_to_append)
+                                tmp_variations[tag_to_append] = False
+
+                            tag_to_append = None
+
+                    # default_variations dictionary specifies the
+                    # default_variation for each variation group.
A default + # variation in a group is turned on if no other variation + # from that group is turned on and it is not excluded using + # the '-' prefix + r = self._get_variation_tags_from_default_variations( + variations[variation_name], + variations, + variation_groups, + tmp_variation_tags_static, + excluded_variation_tags) + if r['return'] > 0: + return r + + variations_to_add = r['variations_to_add'] + for t in variations_to_add: + tmp_variations[t] = False + variation_tags.append(t) + + tmp_variations[variation_name] = True + + for combined_variation in combined_variations: + if tmp_combined_variations[combined_variation]: + continue + v = combined_variation.split(",") + all_present = set(v).issubset(set(variation_tags)) + if all_present: + combined_variation_meta = variations[combined_variation] + tmp_combined_variations[combined_variation] = True + + r = self._get_variation_tags_from_default_variations( + combined_variation_meta, + variations, + variation_groups, + tmp_variation_tags_static, + excluded_variation_tags) + if r['return'] > 0: + return r + + variations_to_add = r['variations_to_add'] + for t in variations_to_add: + tmp_variations[t] = False + variation_tags.append(t) + + all_base_processed = True + for variation_name in variation_tags: + if variation_name.startswith("-"): + continue + if variation_name not in variations: + variation_name = self._get_name_for_dynamic_variation_tag( + variation_name) + if tmp_variations[variation_name] == False: + all_base_processed = False + break + if all_base_processed: + break + return {'return': 0} + + ########################################################################## + def _get_variation_tags_from_default_variations( + self, variation_meta, variations, variation_groups, tmp_variation_tags_static, excluded_variation_tags): + # default_variations dictionary specifies the default_variation for + # each variation group. A default variation in a group is turned on if + # no other variation from that group is turned on and it is not + # excluded using the '-' prefix + + tmp_variation_tags = [] + if "default_variations" in variation_meta: + default_base_variations = variation_meta["default_variations"] + for default_base_variation in default_base_variations: + tag_to_append = None + + if default_base_variation not in variation_groups: + return {'return': 1, 'error': 'Default variation "{}" is not a valid group. 
Valid groups are "{}" '.format( + default_base_variation, variation_groups)} + + unique_allowed_variations = variation_groups[default_base_variation]['variations'] + # add the default only if none of the variations from the + # current group is selected and it is not being excluded with - + # prefix + if len(set(unique_allowed_variations) & set(tmp_variation_tags_static)) == 0 and default_base_variations[ + default_base_variation] not in excluded_variation_tags and default_base_variations[default_base_variation] not in tmp_variation_tags_static: + tag_to_append = default_base_variations[default_base_variation] + + if tag_to_append: + if tag_to_append not in variations: + variation_tag_static = self._get_name_for_dynamic_variation_tag( + tag_to_append) + if not variation_tag_static or variation_tag_static not in variations: + return {'return': 1, 'error': 'Invalid variation "{}" specified in default variations for the variation "{}" '.format( + tag_to_append, variation_meta)} + tmp_variation_tags.append(tag_to_append) + + return {'return': 0, 'variations_to_add': tmp_variation_tags} + + ############################################################ + def version(self, i): + """ + Print version + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + console = i.get('out') == 'con' + + version = self.__version__ + + if console: + logging.info(version) + + return {'return': 0, 'version': version} + + ############################################################ + + def search(self, i): + """ + Overriding the automation search function to filter out scripts not matching the given variation tags + + TBD: add input/output description + """ + + console = i.get('out') == 'con' + + # Check simplified CMD: cm run script "get compiler" + # If artifact has spaces, treat them as tags! 
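+        # Example of the simplified CMD handling below (hypothetical tags):
+        #
+        #   cm run script "get generic-python-lib _package.numpy"
+        #
+        # turns the artifact string into tags 'get,generic-python-lib,_package.numpy',
+        # which are then split into script_tags = ['get', 'generic-python-lib']
+        # and variation_tags = ['package.numpy'] ('_' marks a variation and
+        # '-_' excludes one).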
+ artifact = i.get('artifact', '') + if ' ' in artifact: # or ',' in artifact: + del (i['artifact']) + if 'parsed_artifact' in i: + del (i['parsed_artifact']) + # Force substitute tags + i['tags'] = artifact.replace(' ', ',') + + ####################################################################### + # Process tags to find script(s) and separate variations + # (not needed to find scripts) + tags_string = i.get('tags', '').strip() + + tags = [] if tags_string == '' else tags_string.split(',') + + script_tags = [] + variation_tags = [] + + for t in tags: + t = t.strip() + if t != '': + if t.startswith('_'): + tx = t[1:] + if tx not in variation_tags: + variation_tags.append(tx) + elif t.startswith('-_'): + tx = '-' + t[2:] + if tx not in variation_tags: + variation_tags.append(tx) + else: + script_tags.append(t) + + excluded_tags = [v[1:] for v in script_tags if v.startswith("-")] + common = set(script_tags).intersection(set(excluded_tags)) + if common: + return { + 'return': 1, 'error': 'There is common tags {} in the included and excluded lists'.format(common)} + + excluded_variation_tags = [v[1:] + for v in variation_tags if v.startswith("-")] + common = set(variation_tags).intersection(set(excluded_variation_tags)) + if common: + return { + 'return': 1, 'error': 'There is common variation tags {} in the included and excluded lists'.format(common)} + + ####################################################################### + # Find CM script(s) based on thier tags to get their meta (can be more than 1) + # Then check if variations exists inside meta + + i['tags'] = ','.join(script_tags) + + i['out'] = None + i['common'] = True + + r = super(CAutomation, self).search(i) + if r['return'] > 0: + return r + + lst = r['list'] + + r['unfiltered_list'] = lst + + found_scripts = False if len(lst) == 0 else True + + if found_scripts and len(variation_tags) > 0: + filtered = [] + + for script_artifact in lst: + meta = script_artifact.meta + variations = meta.get('variations', {}) + + matched = True + for t in variation_tags: + if t.startswith('-'): + t = t[1:] + if t in variations: + continue + matched = False + for s in variations: + if s.endswith('.#'): + if t.startswith(s[:-1]) and t[-1] != '.': + matched = True + break + if not matched: + break + if not matched: + continue + + filtered.append(script_artifact) + + if len(lst) > 0 and not filtered: + warning = [""] + for script in lst: + meta = script.meta + variations = meta.get('variations', {}) + warning.append( + 'variation tags {} are not matching for the found script {} with variations {}\n'.format( + variation_tags, meta.get('alias'), variations.keys())) + r['warning'] = "\n".join(warning) + + r['list'] = filtered + + # Print filtered paths if console + if console: + for script in r['list']: + + # This should not be logging since the output can be consumed by other external tools and scripts + # logging.info(script.path) + print(script.path) + + # Finalize output + r['script_tags'] = script_tags + r['variation_tags'] = variation_tags + r['found_scripts'] = found_scripts + + return r + + ############################################################ + def test(self, i): + """ + Test automation (TBD) + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) 
(str): artifact as CM string object
+
+          (parsed_artifact) (list): prepared in CM CLI or CM access function
+                                    [ (artifact alias, artifact UID) ] or
+                                    [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+          ...
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+          * Output from this automation action
+
+        """
+
+        import json
+
+        # Check parsed automation
+        if 'parsed_automation' not in i:
+            return {'return': 1, 'error': 'automation is not specified'}
+
+        console = i.get('out') == 'con'
+
+        # Find CM artifact(s)
+        i['out'] = None
+        r = self.search(i)
+
+        if r['return'] > 0:
+            return r
+
+        lst = r['list']
+        for script_artifact in lst:
+            path = script_artifact.path
+            meta = script_artifact.meta
+            original_meta = script_artifact.original_meta
+
+            alias = meta.get('alias', '')
+            uid = meta.get('uid', '')
+            if console:
+                logging.info(path)
+            test_config = meta.get('tests', '')
+            if test_config:
+                logging.info(test_config)
+                variations = meta.get("variations")
+                tags_string = ",".join(meta.get("tags"))
+                test_input_index = i.get('test_input_index')
+                test_input_id = i.get('test_input_id')
+                run_inputs = i.get("run_inputs", test_config.get(
+                    'run_inputs', [{"docker_os": "ubuntu", "docker_os_version": "22.04"}]))
+                if test_input_index:
+                    index_plus = False
+                    try:
+                        if test_input_index.endswith("+"):
+                            input_index = int(test_input_index[:-1])
+                            index_plus = True
+                        else:
+                            input_index = int(test_input_index)
+                    except ValueError as e:
+                        print(e)
+                        return {
+                            'return': 1, 'error': f'Invalid test_input_index: {test_input_index}. Must be an integer or an integer followed by a +'}
+                    if input_index > len(run_inputs):
+                        run_inputs = []
+                    else:
+                        if index_plus:
+                            run_inputs = run_inputs[input_index - 1:]
+                        else:
+                            run_inputs = [run_inputs[input_index - 1]]
+
+                for run_input in run_inputs:
+                    if test_input_id:
+                        if run_input.get('id', '') != test_input_id:
+                            continue
+
+                    ii = {'action': 'run',
+                          'automation': 'script',
+                          'quiet': i.get('quiet'),
+                          }
+                    test_all_variations = run_input.get(
+                        'test-all-variations', False)
+                    if test_all_variations:
+                        run_variations = [
+                            f"_{v}" for v in variations if variations[v].get(
+                                'group',
+                                '') == '' and str(
+                                variations[v].get(
+                                    'exclude-in-test',
+                                    '')).lower() not in [
+                                "1",
+                                "true",
+                                "yes"]]
+                    else:
+                        given_variations = run_input.get(
+                            'variations_list', [])
+                        if given_variations:
+                            v_split = []
+                            run_variations = []
+                            # use idx to avoid shadowing the input dict i
+                            for idx, v in enumerate(given_variations):
+                                v_split = v.split(",")
+                                for t in v_split:
+                                    if not t.startswith("_"):
+                                        # variations must begin with _. We
+                                        # support both with and without _
+                                        # in the meta
+                                        given_variations[idx] = f"_{t}"
+                                if v_split:
+                                    run_variations.append(
+                                        ",".join(v_split))
+                        else:
+                            # run the test without any variations
+                            run_variations = [""]
+                    use_docker = run_input.get('docker', False)
+                    for key in run_input:  # override meta with any user inputs like for docker_cm_repo
+                        if i.get(key):
+                            if isinstance(run_input[key], dict):
+                                utils.merge_dicts({
+                                    'dict1': run_input[key],
+                                    'dict2': i[key],
+                                    'append_lists': True,
+                                    'append_unique': True
+                                })
+                            else:
+                                run_input[key] = i[key]
+
+                    ii = {**ii, **run_input}
+                    i_env = ii.get('env', i.get('env', {}))
+                    if use_docker:
+                        ii['action'] = "docker"
+                        for key in i:
+                            if key.startswith("docker_"):
+                                ii[key] = i[key]
+
+                        if ii.get('docker_image_name', '') == '':
+                            ii['docker_image_name'] = alias
+
+                    for variation_tags in run_variations:
+                        run_tags = f"{tags_string},{variation_tags}"
+                        ii['tags'] = run_tags
+                        if i_env:
+                            import copy
+                            ii['env'] = copy.deepcopy(i_env)
+                        logging.info(ii)
+                        r = self.cmind.access(ii)
+                        if r['return'] > 0:
+                            return r
+
+        return {'return': 0, 'list': lst}
+
+    ############################################################
+
+    def native_run(self, i):
+        """
+        Run a native script or command via a temporary shell/batch wrapper
+
+        Args:
+          (CM input dict):
+
+            env (dict): environment
+            command (str): command to run
+            ...
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+        """
+
+        env = i.get('env', {})
+        cmd = i.get('command', '')
+
+        script = i.get('script', [])
+
+        # Create temporary script name
+        script_name = i.get('script_name', '')
+        if script_name == '':
+            script_name = 'tmp-native-run.'
+
+            if os.name == 'nt':
+                script_name += 'bat'
+            else:
+                script_name += 'sh'
+
+        if os.name == 'nt':
+            xcmd = 'call ' + script_name
+
+            if len(script) == 0:
+                script.append('@echo off')
+                script.append('')
+        else:
+            xcmd = 'chmod 755 ' + script_name + ' ; ./' + script_name
+
+            if len(script) == 0:
+                script.append('#!/bin/bash')
+                script.append('')
+
+        # Assemble env
+        if len(env) > 0:
+            for k in env:
+                v = env[k]
+
+                if os.name == 'nt':
+                    script.append('set ' + k + '=' + v)
+                else:
+                    if ' ' in v:
+                        v = '"' + v + '"'
+                    script.append('export ' + k + '=' + v)
+
+            script.append('')
+
+        # Add CMD
+        script.append(cmd)
+
+        # Record script
+        r = utils.save_txt(file_name=script_name, string='\n'.join(script))
+        if r['return'] > 0:
+            return r
+
+        # Run script
+        rc = os.system(xcmd)
+
+        return {'return': 0, 'return_code': rc}
+
+    ############################################################
+    def add(self, i):
+        """
+        Add CM script
+
+        Args:
+          (CM input dict):
+
+          (out) (str): if 'con', output to console
+
+          parsed_artifact (list): prepared in CM CLI or CM access function
+                                  [ (artifact alias, artifact UID) ] or
+                                  [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+          (tags) (str): tags to find a CM script (CM artifact)
+
+          (script_name) (str): name of script (it will be copied to the new entry and added to the meta)
+
+          (tags) (string or list): tags to be added to meta
+
+          (new_tags) (string or list): new tags to be added to meta (the same as tags)
+
+          (json) (bool): if True, record JSON meta instead of YAML
+
+          (meta) (dict): preloaded meta
+
+          (template) (string): template to use (python)
+          (python) (bool): template=python
+          (pytorch) (bool): template=pytorch
+          ...
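+
+          Example (illustrative; exact flags may vary across CM versions):
+            cm add script my-new-script --tags=my,new,tags --template=python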
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+        """
+
+        import shutil
+
+        console = i.get('out') == 'con'
+
+        # Try to find script artifact by alias and/or tags
+        ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'])
+
+        parsed_artifact = i.get('parsed_artifact', [])
+
+        artifact_obj = parsed_artifact[0] if len(parsed_artifact) > 0 else None
+        artifact_repo = parsed_artifact[1] if len(
+            parsed_artifact) > 1 else None
+
+        script_name = ''
+        if 'script_name' in i:
+            script_name = i.get('script_name', '').strip()
+            del (i['script_name'])
+
+            if script_name != '' and not os.path.isfile(script_name):
+                return {'return': 1,
+                        'error': 'file {} not found'.format(script_name)}
+
+        # Move tags from input to meta of the newly created script artifact
+        tags_list = utils.convert_tags_to_list(i)
+        if 'tags' in i:
+            del (i['tags'])
+
+        if len(tags_list) == 0:
+            if console:
+                x = input(
+                    'Please specify a combination of unique tags separated by comma for this script: ')
+                x = x.strip()
+                if x != '':
+                    tags_list = x.split(',')
+
+            if len(tags_list) == 0:
+                return {
+                    'return': 1, 'error': 'you must specify a combination of unique tags separated by comma using "--new_tags"'}
+
+        # Add placeholder (use common action)
+        ii['out'] = 'con'
+        # Avoid recursion - use internal CM add function to add the script
+        # artifact
+        ii['common'] = True
+
+        # Check template path
+        template_dir = 'template'
+
+        template = i.get('template', '')
+
+        if template == '':
+            if i.get('python', False):
+                template = 'python'
+            elif i.get('pytorch', False):
+                template = 'pytorch'
+
+        if template != '':
+            template_dir += '-' + template
+
+        template_path = os.path.join(self.path, template_dir)
+
+        if not os.path.isdir(template_path):
+            return {'return': 1, 'error': 'template path {} not found'.format(
+                template_path)}
+
+        # Check if preloaded meta exists
+        meta = {
+            'cache': False
+            # 20240127: Grigori commented these keys out because the newly created script meta looked ugly
+            # 'new_env_keys':[],
+            # 'new_state_keys':[],
+            # 'input_mapping':{},
+            # 'docker_input_mapping':{},
+            # 'deps':[],
+            # 'prehook_deps':[],
+            # 'posthook_deps':[],
+            # 'post_deps':[],
+            # 'versions':{},
+            # 'variations':{},
+            # 'input_description':{}
+        }
+
+        fmeta = os.path.join(template_path, self.cmind.cfg['file_cmeta'])
+
+        r = utils.load_yaml_and_json(fmeta)
+        if r['return'] == 0:
+            utils.merge_dicts({'dict1': meta,
+                               'dict2': r['meta'],
+                               'append_lists': True,
+                               'append_unique': True})
+
+        # Check meta from CMD
+        xmeta = i.get('meta', {})
+
+        if len(xmeta) > 0:
+            utils.merge_dicts({'dict1': meta, 'dict2': xmeta,
+                               'append_lists': True, 'append_unique': True})
+
+        meta['automation_alias'] = self.meta['alias']
+        meta['automation_uid'] = self.meta['uid']
+        meta['tags'] = tags_list
+
+        script_name_base = script_name
+        script_name_ext = ''
+        if script_name != '':
+            # separate name and extension
+            j = script_name.rfind('.')
+            if j >= 0:
+                script_name_base = script_name[:j]
+                script_name_ext = script_name[j:]
+
+            meta['script_name'] = script_name_base
+
+        ii['meta'] = meta
+        ii['action'] = 'add'
+
+        use_yaml = not i.get('json', False)
+
+        if use_yaml:
+            ii['yaml'] = True
+
+        ii['automation'] = 'script,5b4e0237da074764'
+
+        for k in ['parsed_automation', 'parsed_artifact']:
+            if k in ii:
+                del ii[k]
+
+        if artifact_repo is not None:
+            ii['artifact'] = utils.assemble_cm_object2(
+                artifact_repo) + ':' + utils.assemble_cm_object2(artifact_obj)
+
+        r_obj =
self.cmind.access(ii) + if r_obj['return'] > 0: + return r_obj + + new_script_path = r_obj['path'] + + if console: + logging.info('Created script in {}'.format(new_script_path)) + + # Copy files from template (only if exist) + files = [ + (template_path, 'README-extra.md', ''), + (template_path, 'customize.py', ''), + (template_path, 'main.py', ''), + (template_path, 'requirements.txt', ''), + (template_path, 'install_deps.bat', ''), + (template_path, 'install_deps.sh', ''), + (template_path, 'plot.bat', ''), + (template_path, 'plot.sh', ''), + (template_path, 'analyze.bat', ''), + (template_path, 'analyze.sh', ''), + (template_path, 'validate.bat', ''), + (template_path, 'validate.sh', '') + ] + + if script_name == '': + files += [(template_path, 'run.bat', ''), + (template_path, 'run.sh', '')] + else: + if script_name_ext == '.bat': + files += [(template_path, 'run.sh', script_name_base + '.sh')] + files += [('', script_name, script_name)] + + else: + files += [(template_path, 'run.bat', + script_name_base + '.bat')] + files += [('', script_name, script_name_base + '.sh')] + + for x in files: + path = x[0] + f1 = x[1] + f2 = x[2] + + if f2 == '': + f2 = f1 + + if path != '': + f1 = os.path.join(path, f1) + + if os.path.isfile(f1): + f2 = os.path.join(new_script_path, f2) + + if console: + logging.info(' * Copying {} to {}'.format(f1, f2)) + + shutil.copyfile(f1, f2) + + return r_obj + + ########################################################################## + def _get_name_for_dynamic_variation_tag(script, variation_tag): + ''' + Returns the variation name in meta for the dynamic_variation_tag + ''' + if "." not in variation_tag or variation_tag[-1] == ".": + return None + return variation_tag[:variation_tag.index(".") + 1] + "#" + + ########################################################################## + + def _update_variation_meta_with_dynamic_suffix( + script, variation_meta, variation_tag_dynamic_suffix): + ''' + Updates the variation meta with dynamic suffix + ''' + for key in variation_meta: + value = variation_meta[key] + + if isinstance(value, list): # deps,pre_deps... + for item in value: + if isinstance(item, dict): + for item_key in item: + item_value = item[item_key] + if isinstance( + item_value, dict): # env,default_env inside deps + for item_key2 in item_value: + item_value[item_key2] = item_value[item_key2].replace( + "#", variation_tag_dynamic_suffix) + elif isinstance(item_value, list): # names for example + for i, l_item in enumerate(item_value): + if isinstance(l_item, str): + item_value[i] = l_item.replace( + "#", variation_tag_dynamic_suffix) + else: + item[item_key] = item[item_key].replace( + "#", variation_tag_dynamic_suffix) + + elif isinstance(value, dict): # add_deps, env, .. + for item in value: + item_value = value[item] + if isinstance(item_value, dict): # deps + for item_key in item_value: + item_value2 = item_value[item_key] + if isinstance( + item_value2, dict): # env,default_env inside deps + for item_key2 in item_value2: + item_value2[item_key2] = item_value2[item_key2].replace( + "#", variation_tag_dynamic_suffix) + else: + item_value[item_key] = item_value[item_key].replace( + "#", variation_tag_dynamic_suffix) + else: + if isinstance(item_value, list): # lists inside env... 
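+                            # For example (illustrative), with dynamic suffix
+                            # "4", a list entry "prefix-#" becomes "prefix-4".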
+                            for i, l_item in enumerate(item_value):
+                                if isinstance(l_item, str):
+                                    item_value[i] = l_item.replace(
+                                        "#", variation_tag_dynamic_suffix)
+                        else:
+                            value[item] = value[item].replace(
+                                "#", variation_tag_dynamic_suffix)
+
+            else:  # scalar value
+                pass  # no dynamic update for now
+
+    ##########################################################################
+
+    def _get_variations_with_aliases(script, variation_tags, variations):
+        '''
+        Automatically turn on variation tags which are aliased by any given tag
+        '''
+        import copy
+        tmp_variation_tags = copy.deepcopy(variation_tags)
+
+        excluded_variations = [k[1:]
+                               for k in variation_tags if k.startswith("-")]
+        for i, e in enumerate(excluded_variations):
+            if e not in variations:
+                dynamic_tag = script._get_name_for_dynamic_variation_tag(e)
+                if dynamic_tag and dynamic_tag in variations:
+                    excluded_variations[i] = dynamic_tag
+
+        for k in variation_tags:
+            if k.startswith("-"):
+                continue
+            if k in variations:
+                variation = variations[k]
+            else:
+                variation = variations[script._get_name_for_dynamic_variation_tag(
+                    k)]
+            if 'alias' in variation:
+
+                if variation['alias'] in excluded_variations:
+                    return {'return': 1, 'error': 'Alias "{}" specified for the variation "{}" conflicts with the excluded variation "-{}" '.format(
+                        variation['alias'], k, variation['alias'])}
+
+                if variation['alias'] not in variations:
+                    return {'return': 1, 'error': 'Alias "{}" specified for the variation "{}" does not exist '.format(
+                        variation['alias'], k)}
+
+                if 'group' in variation:
+                    return {
+                        'return': 1, 'error': 'Incompatible combinations: (alias, group) specified for the variation "{}" '.format(k)}
+
+                if 'default' in variation:
+                    return {
+                        'return': 1, 'error': 'Incompatible combinations: (alias, default) specified for the variation "{}" '.format(k)}
+
+                if variation['alias'] not in tmp_variation_tags:
+                    tmp_variation_tags.append(variation['alias'])
+
+        return {'return': 0, 'variation_tags': tmp_variation_tags,
+                'excluded_variation_tags': excluded_variations}
+
+    ##########################################################################
+
+    def _get_variation_groups(script, variations):
+
+        groups = {}
+
+        for k in variations:
+            variation = variations[k]
+            if not variation:
+                continue
+            if 'group' in variation:
+                if variation['group'] not in groups:
+                    groups[variation['group']] = {}
+                    groups[variation['group']]['variations'] = []
+                groups[variation['group']]['variations'].append(k)
+                if 'default' in variation:
+                    if 'default' in groups[variation['group']]:
+                        return {'return': 1, 'error': 'Multiple defaults specified for the variation group "{}": "{},{}" '.format(
+                            variation['group'], k, groups[variation['group']]['default'])}
+                    groups[variation['group']]['default'] = k
+
+        return {'return': 0, 'variation_groups': groups}
+
+    ##########################################################################
+
+    def _process_variation_tags_in_groups(
+            script, variation_tags, groups, excluded_variations, variations):
+        import copy
+        tmp_variation_tags = copy.deepcopy(variation_tags)
+        tmp_variation_tags_static = copy.deepcopy(variation_tags)
+
+        for v_i in range(len(tmp_variation_tags_static)):
+            v = tmp_variation_tags_static[v_i]
+
+            if v not in variations:
+                v_static = script._get_name_for_dynamic_variation_tag(v)
+                tmp_variation_tags_static[v_i] = v_static
+
+        for k in groups:
+            group = groups[k]
+            unique_allowed_variations = group['variations']
+
+            if len(set(unique_allowed_variations) &
+                   set(tmp_variation_tags_static)) > 1:
+                return {'return':
1, 'error': 'Multiple variation tags selected for the variation group "{}": {} '.format( + k, str(set(unique_allowed_variations) & set(tmp_variation_tags_static)))} + if len(set(unique_allowed_variations) & + set(tmp_variation_tags_static)) == 0: + if 'default' in group and group['default'] not in excluded_variations: + tmp_variation_tags.append(group['default']) + + return {'return': 0, 'variation_tags': tmp_variation_tags} + + ########################################################################## + + def _call_run_deps(script, deps, local_env_keys, local_env_keys_from_meta, env, state, const, const_state, + add_deps_recursive, recursion_spaces, remembered_selections, variation_tags_string, found_cached, debug_script_tags='', + verbose=False, show_time=False, extra_recursion_spaces=' ', run_state={'deps': [], 'fake_deps': [], 'parent': None}): + if len(deps) == 0: + return {'return': 0} + + # Check chain of post hook dependencies on other CM scripts + import copy + + # Get local env keys + local_env_keys = copy.deepcopy(local_env_keys) + + if len(local_env_keys_from_meta) > 0: + local_env_keys += local_env_keys_from_meta + + r = script._run_deps(deps, local_env_keys, env, state, const, const_state, add_deps_recursive, recursion_spaces, + remembered_selections, variation_tags_string, found_cached, debug_script_tags, + verbose, show_time, extra_recursion_spaces, run_state) + if r['return'] > 0: + return r + + return {'return': 0} + + ########################################################################## + def _run_deps(self, deps, clean_env_keys_deps, env, state, const, const_state, add_deps_recursive, recursion_spaces, + remembered_selections, variation_tags_string='', from_cache=False, debug_script_tags='', + verbose=False, show_time=False, extra_recursion_spaces=' ', run_state={'deps': [], 'fake_deps': [], 'parent': None}): + """ + Runs all the enabled dependencies and pass them env minus local env + """ + + if len(deps) > 0: + # Preserve local env + tmp_env = {} + + variation_groups = run_state.get('variation_groups') + + for d in deps: + + if not d.get('tags'): + continue + + if is_dep_tobe_skipped(d, env): + continue + + if from_cache and not d.get("dynamic", None): + continue + + if d.get('env'): + # to update env local to a dependency + r = update_env_with_values(d['env'], False, env) + if r['return'] > 0: + return r + + update_tags_from_env_with_prefix = d.get( + "update_tags_from_env_with_prefix", {}) + for t in update_tags_from_env_with_prefix: + for key in update_tags_from_env_with_prefix[t]: + if str(d.get('env', {}).get(key, '')).strip() != '': + d['tags'] += "," + t + str(d.get('env')[key]) + elif str(env.get(key, '')).strip() != '': + d['tags'] += "," + t + str(env[key]) + + for key in clean_env_keys_deps: + if '?' in key or '*' in key: + import fnmatch + for kk in list(env.keys()): + if fnmatch.fnmatch(kk, key): + tmp_env[kk] = env[kk] + del (env[kk]) + elif key in env: + tmp_env[key] = env[key] + del (env[key]) + + import re + for key in list(env.keys()): + value = env[key] + tmp_values = re.findall(r'<<<(.*?)>>>', str(value)) + if tmp_values == []: + continue + tmp_env[key] = env[key] + del (env[key]) + + force_env_keys_deps = d.get("force_env_keys", []) + for key in force_env_keys_deps: + if '?' 
in key or '*' in key: + import fnmatch + for kk in list(tmp_env.keys()): + if fnmatch.fnmatch(kk, key): + env[kk] = tmp_env[kk] + elif key in tmp_env: + env[key] = tmp_env[key] + + if d.get("reuse_version", False): + for k in tmp_env: + if k.startswith('CM_VERSION'): + env[k] = tmp_env[k] + + update_tags_from_env = d.get("update_tags_from_env", []) + for t in update_tags_from_env: + if env.get(t, '').strip() != '': + d['tags'] += "," + env[t] + + inherit_variation_tags = d.get("inherit_variation_tags", False) + skip_inherit_variation_groups = d.get( + "skip_inherit_variation_groups", []) + variation_tags_to_be_skipped = [] + if inherit_variation_tags: + if skip_inherit_variation_groups: # skips inheriting variations belonging to given groups + for group in variation_groups: + if group in skip_inherit_variation_groups: + variation_tags_to_be_skipped += variation_groups[group]['variations'] + + variation_tags = variation_tags_string.split(",") + variation_tags = [x for x in variation_tags if not x.startswith( + "_") or x[1:] not in set(variation_tags_to_be_skipped)] + + # handle group in case of dynamic variations + for t_variation in variation_tags_to_be_skipped: + if t_variation.endswith(".#"): + beg = t_variation[:-1] + for m_tag in variation_tags: + if m_tag.startswith("_" + beg): + variation_tags.remove(m_tag) + + deps_tags = d['tags'].split(",") + for tag in deps_tags: + if tag.startswith("-_") or tag.startswith("_-"): + variation_tag = "_" + tag[2:] + if variation_tag in variation_tags: + variation_tags.remove(variation_tag) + new_variation_tags_string = ",".join(variation_tags) + # deps should have non-empty tags + d['tags'] += "," + new_variation_tags_string + + if run_state: + run_state['deps'].append(d['tags']) + + if not run_state.get('fake_deps'): + import copy + if not run_state: + run_state_copy = {} + else: + run_state_copy = copy.deepcopy(run_state) + run_state_copy['deps'] = [] + + run_state_copy['parent'] = run_state['script_id'] + + if len(run_state['script_variation_tags']) > 0: + run_state_copy['parent'] += " ( " + ',_'.join( + run_state['script_variation_tags']) + " )" + + # Run collective script via CM API: + # Not very efficient but allows logging - can be optimized + # later + + ii = { + 'action': 'run', + 'automation': utils.assemble_cm_object(self.meta['alias'], self.meta['uid']), + 'recursion_spaces': recursion_spaces, # + extra_recursion_spaces, + 'recursion': True, + 'remembered_selections': remembered_selections, + 'env': env, + 'state': state, + 'const': copy.deepcopy(const), + 'const_state': copy.deepcopy(const_state), + 'add_deps_recursive': add_deps_recursive, + 'debug_script_tags': debug_script_tags, + 'verbose': verbose, + 'silent': run_state.get('tmp_silent', False), + 'time': show_time, + 'run_state': run_state_copy + + } + + for key in ["env", "state", "const", "const_state"]: + ii['local_' + key] = d.get(key, {}) + if d.get(key): + d[key] = {} + + utils.merge_dicts( + {'dict1': ii, 'dict2': d, 'append_lists': True, 'append_unique': True}) + + r = self.cmind.access(ii) + if r['return'] > 0: + return r + + run_state['version_info'] = run_state_copy.get( + 'version_info') + + # Restore local env + env.update(tmp_env) + r = update_env_with_values(env) + if r['return'] > 0: + return r + + # Update env/state with cost + env.update(const) + utils.merge_dicts({'dict1': state, + 'dict2': const_state, + 'append_lists': True, + 'append_unique': True}) + + return {'return': 0} + + ########################################################################## + def 
_merge_dicts_with_tags(self, dict1, dict2): + """ + Merges two dictionaries and append any tag strings in them + """ + if dict1 == dict2: + return {'return': 0} + for dep in dict1: + if 'tags' in dict1[dep]: + dict1[dep]['tags_list'] = utils.convert_tags_to_list( + dict1[dep]) + for dep in dict2: + if 'tags' in dict2[dep]: + dict2[dep]['tags_list'] = utils.convert_tags_to_list( + dict2[dep]) + utils.merge_dicts({'dict1': dict1, 'dict2': dict2, + 'append_lists': True, 'append_unique': True}) + for dep in dict1: + if 'tags_list' in dict1[dep]: + dict1[dep]['tags'] = ",".join(dict1[dep]['tags_list']) + del (dict1[dep]['tags_list']) + for dep in dict2: + if 'tags_list' in dict2[dep]: + del (dict2[dep]['tags_list']) + + ########################################################################## + def _get_readme(self, cmd_parts, run_state): + """ + Outputs a Markdown README file listing the CM run commands for the dependencies + """ + + deps = run_state['deps'] + + version_info = run_state.get('version_info', []) + version_info_dict = {} + + for v in version_info: + k = list(v.keys())[0] + version_info_dict[k] = v[k] + + content = '' + + content += """ +*This README was automatically generated by the [CM framework](https://github.com/mlcommons/ck).* + +## Install CM + +```bash +pip install cmind -U +``` + +Check [this readme](https://github.com/mlcommons/ck/blob/master/docs/installation.md) +with more details about installing CM and dependencies across different platforms +(Ubuntu, MacOS, Windows, RHEL, ...). + +## Install CM automation repositories + +```bash +cm pull repo mlcommons@cm4mlops --checkout=dev +""" + + current_cm_repo = run_state['script_repo_alias'] + if current_cm_repo not in ['mlcommons@ck', 'mlcommons@cm4mlops']: + content += '\ncm pull repo ' + \ + run_state['script_repo_alias'] + '\n' + + content += """``` + +## Run CM script + +```bash +""" + + cmd = "cm run script " + + for cmd_part in cmd_parts: + x = '"' if ' ' in cmd_part and not cmd_part.startswith('-') else '' + cmd = cmd + " " + x + cmd_part + x + + content += cmd + '\n' + + content += """``` + +## Run individual CM scripts to customize dependencies (optional) + +""" + deps_ = '' + + for dep_tags in deps: + + xversion = '' + version = version_info_dict.get(dep_tags, {}).get('version', '') + if version != '': + xversion = ' --version={}\n'.format(version) + + content += "```bash\n" + content += "cm run script --tags=" + \ + dep_tags + "{}\n".format(xversion) + content += "```\n\n" + + return content + + ########################################################################## + def _get_docker_container(self, cmd_parts, run_state): + """ + Outputs a Markdown README file listing the CM run commands for the dependencies + """ + + deps = run_state['deps'] + + version_info = run_state.get('version_info', []) + version_info_dict = {} + + for v in version_info: + k = list(v.keys())[0] + version_info_dict[k] = v[k] + + content = '' + + content += """ + +# The following CM commands were automatically generated (prototype) + +cm pull repo mlcommons@cm4mlops --checkout=dev + +""" + current_cm_repo = run_state['script_repo_alias'] + if current_cm_repo not in ['mlcommons@ck', 'mlcommons@cm4mlops']: + content += '\ncm pull repo ' + \ + run_state['script_repo_alias'] + '\n\n' + + deps_ = '' + + for dep_tags in deps: + + xversion = '' + version = version_info_dict.get(dep_tags, {}).get('version', '') + if version != '': + xversion = ' --version={}\n'.format(version) + + content += "# cm run script --tags=" + \ + dep_tags + 
"{}\n\n".format(xversion) + + cmd = "cm run script " + + for cmd_part in cmd_parts: + x = '"' if ' ' in cmd_part and not cmd_part.startswith('-') else '' + cmd = cmd + " " + x + cmd_part + x + + content += cmd + '\n' + + return content + + ########################################################################## + + def _print_versions(self, run_state): + """ + Print versions in the nice format + """ + + version_info = run_state.get('version_info', []) + + logging.info('=========================') + logging.info('Versions of dependencies:') + for v in version_info: + k = list(v.keys())[0] + version_info_dict = v[k] + + version = version_info_dict.get('version', '') + + if version != '': + logging.info('* {}: {}'.format(k, version)) + + logging.info('=========================') + + return {} + + ########################################################################## + def _markdown_cmd(self, cmd): + """ + Returns a CM command in markdown format + """ + + return '```bash\n ' + cmd + ' \n ```' + + ########################################################################## + + def _print_deps(self, deps): + """ + Prints the CM run commands for the list of CM script dependencies + """ + + print_deps_data = [] + run_cmds = self._get_deps_run_cmds(deps) + for cmd in run_cmds: + print_deps_data.append(cmd) + logging.info(cmd) + + return print_deps_data + + ########################################################################## + + def _get_deps_run_cmds(self, deps): + """ + Returns the CM run commands for the list of CM script dependencies + """ + + run_cmds = [] + + for dep_tags in deps: + run_cmds.append("cm run script --tags=" + dep_tags) + + return run_cmds + + ########################################################################## + + def run_native_script(self, i): + """ + Run native script in a CM script entry + (wrapper around "prepare_and_run_script_with_postprocessing" function) + + Args: + (dict): + + run_script_input (dict): saved input for "prepare_and_run_script_with_postprocessing" function + env (dict): the latest environment for the script + script_name (str): native script name + + Returns: + (dict): Output from "prepare_and_run_script_with_postprocessing" function + + + """ + + import copy + + run_script_input = i['run_script_input'] + script_name = i['script_name'] + env = i.get('env', '') + detect_version = i.get('detect_version', '') + + if detect_version: + postprocess = "detect_version" + else: + postprocess = "" + + # Create and work on a copy to avoid contamination + env_copy = copy.deepcopy(run_script_input.get('env', {})) + run_script_input_state_copy = copy.deepcopy( + run_script_input.get('state', {})) + script_name_copy = run_script_input.get('script_name', '') + + run_script_input['script_name'] = script_name + run_script_input['env'] = env + + r = prepare_and_run_script_with_postprocessing( + run_script_input, postprocess=postprocess) + + env_tmp = copy.deepcopy(run_script_input['env']) + r['env_tmp'] = env_tmp + + run_script_input['state'] = run_script_input_state_copy + run_script_input['env'] = env_copy + run_script_input['script_name'] = script_name_copy + + return r + + ########################################################################## + def find_file_in_paths(self, i): + """ + Find file name in a list of paths + + Args: + (CM input dict): + + paths (list): list of paths + file_name (str): filename pattern to find + (select) (bool): if True and more than 1 path found, select + (select_default) (bool): if True, select the default one + 
(recursion_spaces) (str): add space to print + (run_script_input) (dict): prepared dict to run script and detect version + + (detect_version) (bool): if True, attempt to detect version + (env_path) (str): env key to pass path to the script to detect version + (run_script_input) (dict): use this input to run script to detect version + (env) (dict): env to check/force version + + (hook) (func): call this func to skip some artifacts + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + (found_files) (list): paths to files when found + + """ + import copy + + paths = i['paths'] + select = i.get('select', False) + select_default = i.get('select_default', False) + recursion_spaces = i.get('recursion_spaces', '') + + hook = i.get('hook', None) + + verbose = i.get('verbose', False) + if not verbose: + verbose = i.get('v', False) + + file_name = i.get('file_name', '') + file_name_re = i.get('file_name_re', '') + file_is_re = False + + if file_name_re != '': + file_name = file_name_re + file_is_re = True + + if file_name == '': + raise Exception( + 'file_name or file_name_re not specified in find_artifact') + + found_files = [] + + import glob + import re + + for path in paths: + # May happen that path is in variable but it doesn't exist anymore + if os.path.isdir(path): + if file_is_re: + file_list = [ + os.path.join( + path, + f) for f in os.listdir(path) if re.match( + file_name, + f)] + + for f in file_list: + duplicate = False + for existing in found_files: + if os.path.samefile(existing, f): + duplicate = True + break + if not duplicate: + skip = False + if hook is not None: + r = hook({'file': f}) + if r['return'] > 0: + return r + skip = r['skip'] + if not skip: + found_files.append(f) + + else: + path_to_file = os.path.join(path, file_name) + + file_pattern_suffixes = [ + "", + ".[0-9]", + ".[0-9][0-9]", + "-[0-9]", + "-[0-9][0-9]", + "[0-9]", + "[0-9][0-9]", + "[0-9].[0-9]", + "[0-9][0-9].[0-9]", + "[0-9][0-9].[0-9][0-9]" + ] + + for suff in file_pattern_suffixes: + file_list = glob.glob(path_to_file + suff) + for f in file_list: + duplicate = False + + for existing in found_files: + try: + if os.path.samefile(existing, f): + duplicate = True + break + except Exception as e: + # This function fails on Windows sometimes + # because some files can't be accessed + pass + + if not duplicate: + skip = False + if hook is not None: + r = hook({'file': f}) + if r['return'] > 0: + return r + skip = r['skip'] + if not skip: + found_files.append(f) + + if select: + # Check and prune versions + if i.get('detect_version', False): + found_paths_with_good_version = [] + found_files_with_good_version = [] + + env = i.get('env', {}) + + run_script_input = i['run_script_input'] + env_path_key = i['env_path_key'] + + version = env.get('CM_VERSION', '') + version_min = env.get('CM_VERSION_MIN', '') + version_max = env.get('CM_VERSION_MAX', '') + + x = '' + + if version != '': + x += ' == {}'.format(version) + if version_min != '': + x += ' >= {}'.format(version_min) + if version_max != '': + x += ' <= {}'.format(version_max) + + if x != '': + logging.info( + recursion_spaces + + ' - Searching for versions: {}'.format(x)) + + new_recursion_spaces = recursion_spaces + ' ' + + for path_to_file in found_files: + logging.info(recursion_spaces + ' * ' + path_to_file) + + run_script_input['env'] = env + run_script_input['env'][env_path_key] = path_to_file + run_script_input['recursion_spaces'] = new_recursion_spaces + + rx = 
prepare_and_run_script_with_postprocessing( + run_script_input, postprocess="detect_version") + + run_script_input['recursion_spaces'] = recursion_spaces + + if rx['return'] > 0: + if rx['return'] != 2: + return rx + else: + # Version was detected + + detected_version = rx.get('version', '') + + if detected_version != '': + if detected_version == -1: + logging.info( + recursion_spaces + ' SKIPPED due to incompatibility ...') + else: + ry = check_version_constraints({'detected_version': detected_version, + 'version': version, + 'version_min': version_min, + 'version_max': version_max, + 'cmind': self.cmind}) + if ry['return'] > 0: + return ry + + if not ry['skip']: + found_files_with_good_version.append( + path_to_file) + else: + logging.info( + recursion_spaces + ' SKIPPED due to version constraints ...') + + found_files = found_files_with_good_version + + # Continue with selection + if len(found_files) > 1: + if len(found_files) == 1 or select_default: + selection = 0 + else: + # Select 1 and proceed + logging.info( + recursion_spaces + + ' - More than 1 path found:') + num = 0 + + for file in found_files: + logging.info( + recursion_spaces + + ' {}) {}'.format( + num, + file)) + num += 1 + x = input(recursion_spaces + + ' Make your selection or press Enter for 0: ') + + x = x.strip() + if x == '': + x = '0' + + selection = int(x) + + if selection < 0 or selection >= num: + selection = 0 + logging.info( + recursion_spaces + + ' Selected {}: {}'.format( + selection, + found_files[selection])) + + found_files = [found_files[selection]] + + return {'return': 0, 'found_files': found_files} + + ########################################################################## + def detect_version_using_script(self, i): + """ + Detect version using script + + Args: + (CM input dict): + + (recursion_spaces) (str): add space to print + + run_script_input (dict): use this input to run script to detect version + (env) (dict): env to check/force version + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + 16 if not detected + * (error) (str): error string if return>0 + + (detected_version) (str): detected version + + """ + recursion_spaces = i.get('recursion_spaces', '') + + import copy + + detected = False + + env = i.get('env', {}) + + run_script_input = i['run_script_input'] + + version = env.get('CM_VERSION', '') + version_min = env.get('CM_VERSION_MIN', '') + version_max = env.get('CM_VERSION_MAX', '') + + x = '' + + if version != '': + x += ' == {}'.format(version) + if version_min != '': + x += ' >= {}'.format(version_min) + if version_max != '': + x += ' <= {}'.format(version_max) + + if x != '': + logging.info( + recursion_spaces + + ' - Searching for versions: {}'.format(x)) + + new_recursion_spaces = recursion_spaces + ' ' + + run_script_input['recursion_spaces'] = new_recursion_spaces + run_script_input['env'] = env + + # Prepare run script + rx = prepare_and_run_script_with_postprocessing( + run_script_input, postprocess="detect_version") + + run_script_input['recursion_spaces'] = recursion_spaces + + if rx['return'] == 0: + # Version was detected + detected_version = rx.get('version', '') + + if detected_version != '': + ry = check_version_constraints({'detected_version': detected_version, + 'version': version, + 'version_min': version_min, + 'version_max': version_max, + 'cmind': self.cmind}) + if ry['return'] > 0: + return ry + + if not ry['skip']: + return {'return': 0, 'detected_version': detected_version} + + return {'return': 16, 'error': 
'version was not detected'}
+
+    ##########################################################################
+    def find_artifact(self, i):
+        """
+        Find some artifact (file) by name
+
+        Args:
+          (CM input dict):
+
+          file_name (str): filename to find
+
+          env (dict): global env
+          os_info (dict): OS info
+
+          (detect_version) (bool): if True, attempt to detect version
+          (env_path) (str): env key to pass path to the script to detect version
+          (run_script_input) (dict): use this input to run script to detect version
+
+          (default_path_env_key) (str): check in default paths from global env
+                                        (PATH, PYTHONPATH, LD_LIBRARY_PATH ...)
+
+          (recursion_spaces) (str): add space to print
+
+          (hook) (func): call this func to skip some artifacts
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+                           error = 16 if artifact not found but no problem
+
+          found_path (str): found path to an artifact
+          full_path (str): full path to a found artifact
+          default_path_list (list): list of default paths
+
+        """
+
+        import copy
+
+        file_name = i['file_name']
+
+        os_info = i['os_info']
+
+        env = i['env']
+
+        env_path_key = i.get('env_path_key', '')
+
+        run_script_input = i.get('run_script_input', {})
+        extra_paths = i.get('extra_paths', {})
+
+        # Create and work on a copy to avoid contamination
+        env_copy = copy.deepcopy(env)
+        run_script_input_state_copy = copy.deepcopy(
+            run_script_input.get('state', {}))
+
+        default_path_env_key = i.get('default_path_env_key', '')
+        recursion_spaces = i.get('recursion_spaces', '')
+
+        hook = i.get('hook', None)
+
+        # Check if forced to search in a specific path or multiple paths
+        # separated by OS var separator (usually : or ;)
+        path = env.get('CM_TMP_PATH', '')
+
+        if path != '' and env.get(
+                'CM_TMP_PATH_IGNORE_NON_EXISTANT', '') != 'yes':
+            # Can be a list of paths
+            path_list_tmp = path.split(os_info['env_separator'])
+            for path_tmp in path_list_tmp:
+                if path_tmp.strip() != '' and not os.path.isdir(path_tmp):
+                    return {'return': 1,
+                            'error': 'path {} doesn\'t exist'.format(path_tmp)}
+
+        # Check if forced path and file name from --input (CM_INPUT - local env
+        # - will not be visible for higher-level script)
+        forced_file = env.get('CM_INPUT', '').strip()
+        if forced_file != '':
+            if not os.path.isfile(forced_file):
+                return {'return': 1,
+                        'error': 'file {} doesn\'t exist'.format(forced_file)}
+
+            file_name = os.path.basename(forced_file)
+            path = os.path.dirname(forced_file)
+
+        default_path_list = self.get_default_path_list(i)
+        # [] if default_path_env_key == '' else \
+        # os.environ.get(default_path_env_key,'').split(os_info['env_separator'])
+
+        if path == '':
+            path_list_tmp = default_path_list
+        else:
+            logging.info(
+                recursion_spaces +
+                '    # Requested paths: {}'.format(path))
+            path_list_tmp = path.split(os_info['env_separator'])
+
+        # Check soft links
+        path_list_tmp2 = []
+        for path_tmp in path_list_tmp:
+            # path_tmp_abs = os.path.realpath(os.path.join(path_tmp, file_name))
+            # GF: I commented out the code above because it doesn't work correctly
+            # for virtual python - it unsoftlinks virtual python and picks up
+            # the native one from /usr/bin, thus making workflows work incorrectly
+            # ...
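+            # For example (illustrative), if path_tmp is /venv/bin and
+            # file_name is "python", the candidate stays /venv/bin/python
+            # instead of being resolved to the native /usr/bin/python.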
+ path_tmp_abs = os.path.join(path_tmp, file_name) + + if not path_tmp_abs in path_list_tmp2: + path_list_tmp2.append(path_tmp_abs) + + path_list = [] + for path_tmp in path_list_tmp2: + path_list.append(os.path.dirname(path_tmp)) + + # Check if quiet + select_default = True if env.get('CM_QUIET', '') == 'yes' else False + + # Prepare paths to search + r = self.find_file_in_paths({'paths': path_list, + 'file_name': file_name, + 'select': True, + 'select_default': select_default, + 'detect_version': i.get('detect_version', False), + 'env_path_key': env_path_key, + 'env': env_copy, + 'hook': hook, + 'run_script_input': run_script_input, + 'recursion_spaces': recursion_spaces}) + + run_script_input['state'] = run_script_input_state_copy + + if r['return'] > 0: + return r + + found_files = r['found_files'] + + if len(found_files) == 0: + return {'return': 16, 'error': '{} not found'.format(file_name)} + + # Finalize output + file_path = found_files[0] + found_path = os.path.dirname(file_path) + + if found_path not in default_path_list: + env_key = '+' + default_path_env_key + + paths = env.get(env_key, []) + if found_path not in paths: + paths.insert(0, found_path) + env[env_key] = paths + for extra_path in extra_paths: + epath = os.path.normpath( + os.path.join(found_path, "..", extra_path)) + if os.path.exists(epath): + if extra_paths[extra_path] not in env: + env[extra_paths[extra_path]] = [] + env[extra_paths[extra_path]].append(epath) + logging.info( + recursion_spaces + + ' # Found artifact in {}'.format(file_path)) + + if env_path_key != '': + env[env_path_key] = file_path + + return {'return': 0, 'found_path': found_path, + 'found_file_path': file_path, + 'found_file_name': os.path.basename(file_path), + 'default_path_list': default_path_list} + + ########################################################################## + def find_file_deep(self, i): + """ + Find file name in a list of paths + + Args: + (CM input dict): + + paths (list): list of paths + file_name (str): filename pattern to find + (restrict_paths) (list): restrict found paths to these combinations + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + (found_paths) (list): paths to files when found + + """ + + paths = i['paths'] + file_name = i['file_name'] + + restrict_paths = i.get('restrict_paths', []) + + found_paths = [] + + for p in paths: + if os.path.isdir(p): + p1 = os.listdir(p) + for f in p1: + p2 = os.path.join(p, f) + + if os.path.isdir(p2): + r = self.find_file_deep( + {'paths': [p2], 'file_name': file_name, 'restrict_paths': restrict_paths}) + if r['return'] > 0: + return r + + found_paths += r['found_paths'] + else: + if f == file_name: + found_paths.append(p) + break + + if len(found_paths) > 0 and len(restrict_paths) > 0: + filtered_found_paths = [] + + for p in found_paths: + for f in restrict_paths: + if f in p: + filtered_found_paths.append(p) + break + + found_paths = filtered_found_paths + + return {'return': 0, 'found_paths': found_paths} + + ########################################################################## + def find_file_back(self, i): + """ + Find file name backwards + + Args: + (CM input dict): + + path (str): path to start with + file_name (str): filename or directory to find + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + (found_path) (str): path if found or empty + + """ + + path = i['path'] + 
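+        # For example (illustrative), starting from /a/b/c the loop below
+        # checks /a/b/c, then /a/b, then /a, then / for file_name and stops
+        # at the first directory that contains it (found_path is '' otherwise).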
file_name = i['file_name'] + + found_path = '' + + while path != '': + path_to_file = os.path.join(path, file_name) + if os.path.isfile(path_to_file): + break + + path2 = os.path.dirname(path) + + if path2 == path: + path = '' + break + else: + path = path2 + + return {'return': 0, 'found_path': path} + + ########################################################################## + def parse_version(self, i): + """ + Parse version (used in post processing functions) + + Args: + (CM input dict): + + (file_name) (str): filename to get version from (tmp-ver.out by default) + match_text (str): RE match text string + group_number (int): RE group number to get version from + env_key (str): which env key to update + which_env (dict): which env to update + (debug) (boolean): if True, print some debug info + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + version (str): detected version + string (str): full file string + + """ + + file_name = i.get('file_name', '') + if file_name == '': + file_name = self.tmp_file_ver + + match_text = i['match_text'] + group_number = i['group_number'] + env_key = i['env_key'] + which_env = i['which_env'] + debug = i.get('debug', False) + + r = utils.load_txt(file_name=file_name, + check_if_exists=True, + split=True, + match_text=match_text, + fail_if_no_match='version was not detected') + if r['return'] > 0: + if r.get('string', '') != '': + r['error'] += ' ({})'.format(r['string']) + return r + + string = r['string'] + + if r['match'].lastindex and r['match'].lastindex >= group_number: + version = r['match'].group(group_number) + else: + return {'return': 1, 'error': 'Invalid version detection group number. Version was not detected. Last index of match = {}. 
Given group number = {}'.format( + r['match'].lastindex, group_number)} + + which_env[env_key] = version + # to be recorded in the cache meta + which_env['CM_DETECTED_VERSION'] = version + + return {'return': 0, 'version': version, 'string': string} + + ########################################################################## + def update_deps(self, i): + """ + Update deps from pre/post processing + Args: + (CM input dict): + deps (dict): deps dict + update_deps (dict): key matches "names" in deps + Returns: + (CM return dict): + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + deps = i['deps'] + add_deps = i['update_deps'] + env = i.get('env', {}) + update_deps(deps, add_deps, False, env) + + return {'return': 0} + + ########################################################################## + def update_state_from_meta(self, meta, env, state, const, const_state, deps, post_deps, + prehook_deps, posthook_deps, new_env_keys, new_state_keys, run_state, i): + """ + Updates state and env from meta + Args: + """ + + r = update_state_from_meta( + meta, + env, + state, + const, + const_state, + deps, + post_deps, + prehook_deps, + posthook_deps, + new_env_keys, + new_state_keys, + run_state, + i) + if r['return'] > 0: + return r + + return {'return': 0} + + ########################################################################## + def get_default_path_list(self, i): + default_path_env_key = i.get('default_path_env_key', '') + os_info = i['os_info'] + default_path_list = [] if default_path_env_key == '' else \ + os.environ.get( + default_path_env_key, + '').split( + os_info['env_separator']) + + return default_path_list + + ############################################################ + + def doc(self, i): + """ + Document CM script. + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + parsed_artifact (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + (repos) (str): list of repositories to search for automations + + (output_dir) (str): output directory (../docs by default) + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + return utils.call_internal_module( + self, __file__, 'module_misc', 'doc', i) + + ############################################################ + def gui(self, i): + """ + Run GUI for CM script. + + Args: + (CM input dict): + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + artifact = i.get('artifact', '') + tags = '' + if artifact != '': + if ' ' in artifact: + tags = artifact.replace(' ', ',') + + if tags == '': + tags = i.get('tags', '') + + if 'tags' in i: + del (i['tags']) + + i['action'] = 'run' + i['artifact'] = 'gui' + i['parsed_artifact'] = [('gui', '605cac42514a4c69')] + i['script'] = tags.replace(',', ' ') + + return self.cmind.access(i) + + ############################################################ + + def dockerfile(self, i): + """ + Generate Dockerfile for CM script. 
+
+        Args:
+          (CM input dict):
+
+          (out) (str): if 'con', output to console
+
+          parsed_artifact (list): prepared in CM CLI or CM access function
+                                  [ (artifact alias, artifact UID) ] or
+                                  [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+          (repos) (str): list of repositories to search for automations
+
+          (output_dir) (str): output directory (./ by default)
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+        """
+
+        return utils.call_internal_module(
+            self, __file__, 'module_misc', 'dockerfile', i)
+
+    ############################################################
+    def docker(self, i):
+        """
+        Run CM script in an automatically-generated container.
+
+        Args:
+          (CM input dict):
+
+          (out) (str): if 'con', output to console
+
+          (repos) (str): list of repositories to search for automations
+
+          (output_dir) (str): output directory (./ by default)
+
+          (docker) (dict): convert keys into docker_{key} strings for CM >= 2.3.8.1
+
+
+          (docker_skip_build) (bool): do not generate Dockerfiles and do not recreate Docker image (must exist)
+          (docker_noregenerate) (bool): do not generate Dockerfiles
+          (docker_norecreate) (bool): do not recreate Docker image
+
+          (docker_cfg) (str): if True, show all available basic docker configurations, otherwise pre-select one
+          (docker_cfg_uid) (str): if True, select docker configuration with this UID
+
+          (docker_path) (str): where to create or find Dockerfile
+          (docker_gh_token) (str): GitHub token for private repositories
+          (docker_save_script) (str): if !='' name of script to save docker command
+          (docker_interactive) (bool): if True, run in interactive mode
+          (docker_it) (bool): the same as `docker_interactive`
+          (docker_detached) (bool): detach Docker
+          (docker_dt) (bool): the same as `docker_detached`
+
+          (docker_base_image) (str): force base image
+          (docker_os) (str): force docker OS (default: ubuntu)
+          (docker_os_version) (str): force docker OS version (default: 22.04)
+          (docker_image_tag_extra) (str): add extra tag (default:-latest)
+
+          (docker_cm_repo) (str): force CM automation repository when building Docker (default: cm4mlops)
+          (docker_cm_repos)
+          (docker_cm_repo_flags)
+
+          (dockerfile_env)
+
+          (docker_skip_cm_sys_upgrade) (bool): if True, do not install CM sys deps
+
+          (docker_extra_sys_deps)
+
+          (fake_run_deps)
+          (docker_run_final_cmds)
+
+          (all_gpus)
+          (num_gpus)
+
+          (docker_device)
+
+          (docker_port_maps)
+
+          (docker_shm_size)
+
+          (docker_extra_run_args)
+
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+          * (error) (str): error string if return>0
+
+        """
+
+        return utils.call_internal_module(
+            self, __file__, 'module_misc', 'docker', i)
+
+    ##########################################################################
+
+    def _available_variations(self, i):
+        """
+        Return an error listing the available variations
+
+        Args:
+          (CM input dict):
+
+          meta (dict): meta of the script
+
+        Returns:
+          (CM return dict):
+
+          * return (int): return code == 0 if no error and >0 if error
+                          16 if not detected
+          * (error) (str): error string if return>0
+
+        """
+
+        meta = i['meta']
+
+        list_of_variations = sorted(
+            ['_' + v for v in list(meta.get('variations', {}).keys())])
+
+        return {'return': 1, 'error': 'python package variation is not defined in "{}".
Available: {}'.format( + meta['alias'], ' '.join(list_of_variations))} + + ############################################################ + def prepare(self, i): + """ + Run CM script with --fake_run only to resolve deps + """ + + i['fake_run'] = True + + return self.run(i) + + ############################################################ + # Reusable blocks for some scripts + def clean_some_tmp_files(self, i): + """ + Clean tmp files + """ + + env = i.get('env', {}) + + cur_work_dir = env.get('CM_TMP_CURRENT_SCRIPT_WORK_PATH', '') + if cur_work_dir != '' and os.path.isdir(cur_work_dir): + for x in ['tmp-run.bat', 'tmp-state.json']: + xx = os.path.join(cur_work_dir, x) + if os.path.isfile(xx): + os.remove(xx) + + return {'return': 0} + + +############################################################################## +def find_cached_script(i): + """ + Internal automation function: find cached script + + Args: + (CM input dict): + + deps (dict): deps dict + update_deps (dict): key matches "names" in deps + + Returns: + (CM return dict): + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + import copy + + recursion_spaces = i['recursion_spaces'] + extra_recursion_spaces = i['extra_recursion_spaces'] + script_tags = i['script_tags'] + cached_tags = [] + customize_code = i.get('customize_code') + customize_common_input = i.get('customize_common_input', {}) + found_script_tags = i['found_script_tags'] + variation_tags = i['variation_tags'] + variation_tags_string = i['variation_tags_string'] + explicit_variation_tags = i['explicit_variation_tags'] + version = i['version'] + version_min = i['version_min'] + version_max = i['version_max'] + extra_cache_tags = i['extra_cache_tags'] + add_deps_recursive = i['add_deps_recursive'] + new_cache_entry = i['new_cache_entry'] + meta = i['meta'] + env = i['env'] + state = i['state'] + const = i['const'] + const_state = i['const_state'] + self_obj = i['self'] + skip_remembered_selections = i['skip_remembered_selections'] + remembered_selections = i['remembered_selections'] + quiet = i['quiet'] + show_time = i.get('show_time', False) + search_tags = '' + + verbose = i.get('verbose', False) + if not verbose: + verbose = i.get('v', False) + + found_cached_scripts = [] + + logging.debug( + recursion_spaces + + ' - Checking if script execution is already cached ...') + + # Create a search query to find that we already ran this script with the same or similar input + # It will be gradually enhanced with more "knowledge" ... 
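+    # For example (illustrative), a script with tags "get,python", explicit
+    # variation "_conda" and version 3.10 is searched in the cache with tags
+    # such as "-tmp,get,python,_conda,version-3.10" (assembled below).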
+ if len(script_tags) > 0: + for x in script_tags: + if x not in cached_tags: + cached_tags.append(x) + + if len(found_script_tags) > 0: + for x in found_script_tags: + if x not in cached_tags: + cached_tags.append(x) + + explicit_cached_tags = copy.deepcopy(cached_tags) + + if len(explicit_variation_tags) > 0: + explicit_variation_tags_string = '' + + for t in explicit_variation_tags: + if explicit_variation_tags_string != '': + explicit_variation_tags_string += ',' + if t.startswith("-"): + x = "-_" + t[1:] + else: + x = '_' + t + explicit_variation_tags_string += x + + if x not in explicit_cached_tags: + explicit_cached_tags.append(x) + + logging.debug( + recursion_spaces + + ' - Prepared explicit variations: {}'.format(explicit_variation_tags_string)) + + if len(variation_tags) > 0: + variation_tags_string = '' + + for t in variation_tags: + if variation_tags_string != '': + variation_tags_string += ',' + if t.startswith("-"): + x = "-_" + t[1:] + else: + x = '_' + t + variation_tags_string += x + + if x not in cached_tags: + cached_tags.append(x) + + logging.debug( + recursion_spaces + + ' - Prepared variations: {}'.format(variation_tags_string)) + + # Add version + if version != '': + if 'version-' + version not in cached_tags: + cached_tags.append('version-' + version) + explicit_cached_tags.append('version-' + version) + + # Add extra cache tags (such as "virtual" for python) + if len(extra_cache_tags) > 0: + for t in extra_cache_tags: + if t not in cached_tags: + cached_tags.append(t) + explicit_cached_tags.append(t) + + # Add tags from deps (will be also duplicated when creating new cache + # entry) + extra_cache_tags_from_env = meta.get('extra_cache_tags_from_env', []) + for extra_cache_tags in extra_cache_tags_from_env: + key = extra_cache_tags['env'] + prefix = extra_cache_tags.get('prefix', '') + + v = env.get(key, '').strip() + if v != '': + for t in v.split(','): + x = 'deps-' + prefix + t + if x not in cached_tags: + cached_tags.append(x) + explicit_cached_tags.append(x) + + # Check if already cached + if not new_cache_entry: + search_tags = '-tmp' + if len(cached_tags) > 0: + search_tags += ',' + ','.join(explicit_cached_tags) + + logging.debug( + recursion_spaces + + ' - Searching for cached script outputs with the following tags: {}'.format(search_tags)) + + r = self_obj.cmind.access({'action': 'find', + 'automation': self_obj.meta['deps']['cache'], + 'tags': search_tags}) + if r['return'] > 0: + return r + + found_cached_scripts = r['list'] + + # Check if selection is remembered + if not skip_remembered_selections and len(found_cached_scripts) > 1: + # Need to add extra cached tags here (since recorded later) + for selection in remembered_selections: + if selection['type'] == 'cache' and set( + selection['tags'].split(',')) == set(search_tags.split(',')): + tmp_version_in_cached_script = selection['cached_script'].meta.get( + 'version', '') + + skip_cached_script = check_versions( + self_obj.cmind, tmp_version_in_cached_script, version_min, version_max) + + if skip_cached_script: + return {'return': 2, 'error': 'The version of the previously remembered selection for a given script ({}) mismatches the newly requested one'.format( + tmp_version_in_cached_script)} + else: + found_cached_scripts = [selection['cached_script']] + logging.debug( + recursion_spaces + + ' - Found remembered selection with tags "{}"!'.format(search_tags)) + break + + if len(found_cached_scripts) > 0: + selection = 0 + + # Check version ranges ... 
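+        # For example (illustrative), with version_min="3.8" a cached entry
+        # recorded with version "3.7" is skipped by check_versions() below,
+        # while an entry recorded with version "3.9" is kept.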
+ new_found_cached_scripts = [] + + for cached_script in found_cached_scripts: + skip_cached_script = False + dependent_cached_path = cached_script.meta.get( + 'dependent_cached_path', '') + if dependent_cached_path: + if not os.path.exists(dependent_cached_path): + # TODO Need to restrict the below check to within container + # env + i['tmp_dep_cached_path'] = dependent_cached_path + r = utils.call_internal_module( + self_obj, __file__, 'module_misc', 'get_container_path_script', i) + if not os.path.exists(r['value_env']): + # Need to rm this cache entry + skip_cached_script = True + continue + + os_info = self_obj.os_info + + # Bat extension for this host OS + bat_ext = os_info['bat_ext'] + script_path = i['found_script_path'] + detected_version = None + + if os.path.exists(os.path.join(script_path, + f"validate_cache{bat_ext}")): + run_script_input = { + 'path': script_path, + 'bat_ext': bat_ext, + 'os_info': os_info, + 'recursion_spaces': recursion_spaces, + 'tmp_file_run': self_obj.tmp_file_run, + 'self': self_obj, + 'meta': meta, + 'customize_code': customize_code, + 'customize_common_input': customize_common_input + } + + deps = meta.get('deps') + if deps: + r = self_obj._call_run_deps(deps, self_obj.local_env_keys, meta.get('local_env_keys', []), env, state, const, const_state, add_deps_recursive, + recursion_spaces + extra_recursion_spaces, + remembered_selections, variation_tags_string, True, '', False, show_time, extra_recursion_spaces, {}) + if r['return'] > 0: + return r + + # Check if pre-process and detect + # if 'preprocess' in dir(customize_code): + + # logging.debug(recursion_spaces + ' - Running preprocess ...') + + # ii = copy.deepcopy(customize_common_input) + # ii['env'] = env + # ii['meta'] = meta + # # may need to detect versions in multiple paths + # ii['run_script_input'] = run_script_input + + # r = customize_code.preprocess(ii) + # if r['return'] > 0: + # return r + + ii = { + 'run_script_input': run_script_input, + 'env': env, + 'script_name': 'validate_cache', + 'detect_version': True + } + r = self_obj.run_native_script(ii) + # print(r) + if r['return'] > 0: + # return r + continue + if r.get('version'): + detected_version = r['version'] + + if not skip_cached_script: + cached_script_version = cached_script.meta.get('version', '') + if cached_script_version and detected_version and cached_script_version != detected_version: + continue + + skip_cached_script = check_versions( + self_obj.cmind, cached_script_version, version_min, version_max) + + if not skip_cached_script: + new_found_cached_scripts.append(cached_script) + + found_cached_scripts = new_found_cached_scripts + + return {'return': 0, 'cached_tags': cached_tags, + 'search_tags': search_tags, 'found_cached_scripts': found_cached_scripts} + + +############################################################################## +def enable_or_skip_script(meta, env): + """ + Internal: enable a dependency based on enable_if_env and skip_if_env meta information + (AND function) + """ + + if not isinstance(meta, dict): + logging.info( + "The meta entry is not a dictionary for skip/enable if_env: %s", + meta) + + for key in meta: + meta_key = [str(v).lower() for v in meta[key]] + if key in env: + value = str(env[key]).lower().strip() + if set(meta_key) & set(["yes", "on", "true", "1"]): + # Any set value other than false is taken as set + if value not in ["no", "off", "false", "0", ""]: + continue + elif set(meta_key) & set(["no", "off", "false", "0"]): + if value in ["no", "off", "false", "0", ""]: + continue + 
elif value in meta_key:
+                continue
+        else:
+            if set(meta_key) & set(["no", "off", "false", "0", ""]):
+                # If the key is missing in env and the expected value is False,
+                # consider it a match
+                continue
+
+        return False
+
+    return True
+
+##############################################################################
+
+
+def any_enable_or_skip_script(meta, env):
+    """
+    Internal: enable a dependency based on enable_if_env and skip_if_env meta information
+    (OR function)
+    """
+    for key in meta:
+        found = False
+        if key in env:
+            value = str(env[key]).lower().strip()
+
+            meta_key = [str(v).lower() for v in meta[key]]
+
+            if set(meta_key) & set(["yes", "on", "true", "1"]):
+                if value not in ["no", "off", "false", "0", ""]:
+                    found = True
+            elif set(meta_key) & set(["no", "off", "false", "0", ""]):
+                if value in ["no", "off", "false", "0", ""]:
+                    found = True
+            elif value in meta_key:
+                found = True
+
+        # If any condition from the list matched (OR), return True
+        if found:
+            return True
+
+    return False
+
+##########################################################################
+
+
+def _update_env(env, key=None, value=None):
+
+    if key is None or value is None:
+        return {
+            'return': 1, 'error': 'None is not expected in the key and value arguments of _update_env.'}
+    if not isinstance(key, str):
+        return {'return': 1, 'error': 'String value expected in the key argument.'}
+
+    env[key] = value
+
+    r = update_env_with_values(env)
+    if r['return'] > 0:
+        return r
+
+    return {'return': 0}
+
+
+##########################################################################
+def update_env_with_values(env, fail_on_not_found=False, extra_env=None):
+    """
+    Update env values that reference other env keys via <<<...>>> placeholders
+    """
+    import re
+
+    extra_env = extra_env or {}  # Default to an empty dictionary if not provided
+
+    for key, value in env.items():
+        # Check for keys starting with "+" and ensure their values are lists
+        if key.startswith("+") and not isinstance(value, list):
+            return {'return': 1, 'error': f'List value expected for {key} in env'}
+
+        # Handle boolean values directly
+        if isinstance(value, bool):
+            env[key] = value
+            continue
+
+        # Search for placeholders like <<<...>>>
+        placeholders = re.findall(r'<<<(.*?)>>>', str(value))
+
+        # No placeholders found
+        if not placeholders:
+            # Special handling for CM_GIT_URL
+            if key == 'CM_GIT_URL' and env.get('CM_GIT_AUTH', "no") == "yes":
+                if env.get('CM_GH_TOKEN', '') and '@' not in env['CM_GIT_URL']:
+                    params = {"token": env['CM_GH_TOKEN']}
+                    value = get_git_url("token", value, params)
+                elif 'CM_GIT_SSH' in env:
+                    value = get_git_url("ssh", value)
+                env[key] = value
+            continue
+
+        # Process each placeholder
+        for placeholder in placeholders:
+            if placeholder not in env and placeholder not in extra_env and fail_on_not_found:
+                return {'return': 1,
+                        'error': f'Variable {placeholder} is not in env'}
+
+            # Determine the source of the value
+            found_env = env if placeholder in env else extra_env if placeholder in extra_env else None
+            if found_env:
+                if isinstance(value, str):
+                    value = value.replace(
+                        f"<<<{placeholder}>>>", str(
+                            found_env[placeholder]))
+                elif isinstance(value, list):
+                    value = [
+                        v.replace(
+                            f"<<<{placeholder}>>>", str(
+                                found_env[placeholder])) if isinstance(
+                            v, str) else v for v in value]
+
+        env[key] = value
+
+    return {'return': 0}
+
+##############################################################################
+
+
+def check_version_constraints(i):
+    """
+    Internal: check version constraints and skip the script artifact if the constraints are not
met + """ + + detected_version = i['detected_version'] + + version = i.get('version', '') + version_min = i.get('version_min', '') + version_max = i.get('version_max', '') + + cmind = i['cmind'] + + skip = False + + if version != '' and version != detected_version: + skip = True + + if not skip and detected_version != '' and version_min != '': + ry = cmind.access({'action': 'compare_versions', + 'automation': 'utils,dc2743f8450541e3', + 'version1': detected_version, + 'version2': version_min}) + if ry['return'] > 0: + return ry + + if ry['comparison'] < 0: + skip = True + + if not skip and detected_version != '' and version_max != '': + ry = cmind.access({'action': 'compare_versions', + 'automation': 'utils,dc2743f8450541e3', + 'version1': detected_version, + 'version2': version_max}) + if ry['return'] > 0: + return ry + + if ry['comparison'] > 0: + skip = True + + return {'return': 0, 'skip': skip} + + +############################################################################## +def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"): + """ + Internal: prepare and run script with postprocessing that can be reused for version check + """ + + path = i['path'] + bat_ext = i['bat_ext'] + os_info = i['os_info'] + customize_code = i.get('customize_code', None) + customize_common_input = i.get('customize_common_input', {}) + + env = i.get('env', {}) + const = i.get('const', {}) + state = i.get('state', {}) + const_state = i.get('const_state', {}) + run_state = i.get('run_state', {}) + verbose = i.get('verbose', False) + if not verbose: + verbose = i.get('v', False) + + show_time = i.get('time', False) + + recursion = i.get('recursion', False) + found_script_tags = i.get('found_script_tags', []) + debug_script_tags = i.get('debug_script_tags', '') + + meta = i.get('meta', {}) + + reuse_cached = i.get('reused_cached', False) + recursion_spaces = i.get('recursion_spaces', '') + + tmp_file_run_state = i.get('tmp_file_run_state', '') + tmp_file_run_env = i.get('tmp_file_run_env', '') + tmp_file_state = i.get('tmp_file_state', '') + tmp_file_run = i['tmp_file_run'] + local_env_keys = i.get('local_env_keys', []) + local_env_keys_from_meta = i.get('local_env_keys_from_meta', []) + posthook_deps = i.get('posthook_deps', []) + add_deps_recursive = i.get('add_deps_recursive', {}) + recursion_spaces = i['recursion_spaces'] + remembered_selections = i.get('remembered_selections', []) + variation_tags_string = i.get('variation_tags_string', '') + found_cached = i.get('found_cached', False) + script_automation = i['self'] + + repro_prefix = i.get('repro_prefix', '') + + # Prepare script name + check_if_run_script_exists = False + script_name = i.get('script_name', '').strip() + if script_name == '': + script_name = meta.get('script_name', '').strip() + if script_name != '': + # Script name was added by user - we need to check that it really + # exists (on Linux or Windows) + check_if_run_script_exists = True + if script_name == '': + # Here is the default script name - if it doesn't exist, we skip it. + # However, if it's explicitly specified, we check it and report + # if it's missing ... 
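+        # Sketch of the resolution order implemented in get_script_name()
+        # below (OS flavor/version/platform values are illustrative): for
+        # script_name 'run' on Linux, the most specific existing file wins:
+        #   run-ubuntu-20.04-x86_64.sh -> run-ubuntu-x86_64.sh ->
+        #   run-ubuntu-20.04.sh -> run-ubuntu.sh -> run-x86_64.sh -> run.sh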
+        script_name = 'run'
+
+    if bat_ext == '.sh':
+        run_script = get_script_name(env, path, script_name)
+    else:
+        run_script = script_name + bat_ext
+
+    path_to_run_script = os.path.join(path, run_script)
+
+    if check_if_run_script_exists and not os.path.isfile(path_to_run_script):
+        return {
+            'return': 16, 'error': 'script {} not found - please add one'.format(path_to_run_script)}
+
+    # Update env and state with const
+    utils.merge_dicts({'dict1': env,
+                       'dict2': const,
+                       'append_lists': True,
+                       'append_unique': True})
+    utils.merge_dicts({'dict1': state, 'dict2': const_state,
+                       'append_lists': True, 'append_unique': True})
+
+    # Update env with the current path
+    if os_info['platform'] == 'windows' and ' ' in path:
+        path = '"' + path + '"'
+
+    cur_dir = os.getcwd()
+
+    r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_PATH', path)
+    if r['return'] > 0:
+        return r
+
+    r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_WORK_PATH', cur_dir)
+    if r['return'] > 0:
+        return r
+
+    # Record state
+    if tmp_file_state != '':
+        r = utils.save_json(file_name=tmp_file_state, meta=state)
+        if r['return'] > 0:
+            return r
+
+    rr = {'return': 0}
+
+    # If the batch file exists, run it with the current env and state
+    if os.path.isfile(path_to_run_script) and not reuse_cached:
+        if tmp_file_run_state != '' and os.path.isfile(tmp_file_run_state):
+            os.remove(tmp_file_run_state)
+        if tmp_file_run_env != '' and os.path.isfile(tmp_file_run_env):
+            os.remove(tmp_file_run_env)
+
+        run_script = tmp_file_run + bat_ext
+        run_script_without_cm = tmp_file_run + '-without-cm' + bat_ext
+
+        logging.debug(
+            recursion_spaces +
+            ' - Running native script "{}" from temporary script "{}" in "{}" ...'.format(
+                path_to_run_script,
+                run_script,
+                cur_dir))
+        if not run_state.get('tmp_silent', False):
+            logging.info(recursion_spaces + ' ! cd {}'.format(cur_dir))
+            logging.info(
+                recursion_spaces +
+                ' ! call {} from {}'.format(
+                    path_to_run_script,
+                    run_script))
+
+        # Prepare env variables
+        import copy
+        script = copy.deepcopy(os_info['start_script'])
+
+        # Check if script_prefix is set in the state by other components
+        script_prefix = state.get('script_prefix', [])
+        if len(script_prefix) > 0:
+            # script = script_prefix + ['\n'] + script
+            script += script_prefix + ['\n']
+
+        script += convert_env_to_script(env, os_info)
+
+#        # Check if run bash/cmd before running the command (for debugging)
+#        if debug_script_tags !='' and all(item in found_script_tags for item in debug_script_tags.split(',')):
+#            # Copy the original run script to be able to run it outside ...
+#            x=['cmd', '.', '','.bat'] if os_info['platform'] == 'windows' else ['bash', ' ""', '"','.sh']
+#
+#            script.append('\n')
+#            script.append('echo{}\n'.format(x[1]))
+#            script.append('echo {}Running debug shell.
Type exit to resume script execution ...{}\n'.format(x[2],x[2])) +# script.append('echo{}\n'.format(x[1])) +# script.append('\n') +# script.append(x[0]) + + # Append batch file to the tmp script + script.append('\n') + script.append( + os_info['run_bat'].replace( + '${bat_file}', + '"' + + path_to_run_script + + '"') + + '\n') + + # Prepare and run script + r = record_script(run_script, script, os_info) + if r['return'] > 0: + return r + + # Save file to run without CM + if debug_script_tags != '' and all( + item in found_script_tags for item in debug_script_tags.split(',')): + + import shutil + shutil.copy(run_script, run_script_without_cm) + + logging.info( + '================================================================================') + logging.info( + 'Debug script to run without CM was recorded: {}'.format(run_script_without_cm)) + logging.info( + '================================================================================') + + # Run final command + cmd = os_info['run_local_bat_from_python'].replace( + '${bat_file}', run_script) + + rc = os.system(cmd) + + if rc > 0 and not i.get('ignore_script_error', False): + # Check if print files when error + print_files = meta.get('print_files_if_script_error', []) + + if len(print_files) > 0: + for pr in print_files: + if os.path.isfile(pr): + r = utils.load_txt(file_name=pr) + if r['return'] == 0: + logging.info( + "========================================================") + logging.info("Print file {}:".format(pr)) + logging.info("") + logging.info(r['string']) + logging.info("") + + # Check where to report errors and failures + repo_to_report = run_state.get( + 'script_entry_repo_to_report_errors', '') + + if repo_to_report == '': + script_repo_alias = run_state.get('script_repo_alias', '') + script_repo_git = run_state.get('script_repo_git', False) + + if script_repo_git and script_repo_alias != '': + repo_to_report = 'https://github.com/' + \ + script_repo_alias.replace('@', '/') + '/issues' + + if repo_to_report == '': + repo_to_report = 'https://github.com/mlcommons/cm4mlops/issues' + + note = ''' +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Note that it is often a portability issue of a third-party tool or a native script +wrapped and unified by this CM script (automation recipe). Please re-run +this script with --repro flag and report this issue with the original +command line, cm-repro directory and full log here: + +{} + +The CM concept is to collaboratively fix such issues inside portable CM scripts +to make existing tools and native scripts more portable, interoperable +and deterministic. 
Thank you'''.format(repo_to_report) + + rr = { + 'return': 2, + 'error': 'Portable CM script failed (name = {}, return code = {})\n\n{}'.format( + meta['alias'], + rc, + note)} + + if repro_prefix != '': + dump_repro(repro_prefix, rr, run_state) + + return rr + + # Load updated state if exists + if tmp_file_run_state != '' and os.path.isfile(tmp_file_run_state): + r = utils.load_json(file_name=tmp_file_run_state) + if r['return'] > 0: + return r + + updated_state = r['meta'] + + utils.merge_dicts({'dict1': state, + 'dict2': updated_state, + 'append_lists': True, + 'append_unique': True}) + + # Load updated env if exists + if tmp_file_run_env != '' and os.path.isfile(tmp_file_run_env): + r = utils.load_txt(file_name=tmp_file_run_env) + if r['return'] > 0: + return r + + r = utils.convert_env_to_dict(r['string']) + if r['return'] > 0: + return r + + updated_env = r['dict'] + + utils.merge_dicts({'dict1': env, + 'dict2': updated_env, + 'append_lists': True, + 'append_unique': True}) + + if postprocess != '' and customize_code is not None and postprocess in dir( + customize_code): + if not run_state.get('tmp_silent', False): + logging.info( + recursion_spaces + + ' ! call "{}" from {}'.format( + postprocess, + customize_code.__file__)) + + if len(posthook_deps) > 0 and (postprocess == "postprocess"): + r = script_automation._call_run_deps(posthook_deps, local_env_keys, local_env_keys_from_meta, env, state, const, const_state, + add_deps_recursive, recursion_spaces, remembered_selections, variation_tags_string, found_cached, debug_script_tags, verbose, show_time, ' ', run_state) + if r['return'] > 0: + return r + + if (postprocess == "postprocess") and customize_code is not None and 'postprocess' in dir( + customize_code): + rr = run_postprocess(customize_code, customize_common_input, recursion_spaces, env, state, const, + const_state, meta, verbose, i) # i as run_script_input + elif (postprocess == "detect_version") and customize_code is not None and 'detect_version' in dir(customize_code): + rr = run_detect_version(customize_code, customize_common_input, recursion_spaces, env, state, const, + const_state, meta, verbose) + + return rr + +############################################################################## + + +def run_detect_version(customize_code, customize_common_input, + recursion_spaces, env, state, const, const_state, meta, verbose=False): + + if customize_code is not None and 'detect_version' in dir(customize_code): + import copy + + logging.debug(recursion_spaces + ' - Running detect_version ...') + + # Update env and state with const + utils.merge_dicts({'dict1': env, 'dict2': const, + 'append_lists': True, 'append_unique': True}) + utils.merge_dicts({'dict1': state, + 'dict2': const_state, + 'append_lists': True, + 'append_unique': True}) + + ii = copy.deepcopy(customize_common_input) + ii['env'] = env + ii['state'] = state + ii['meta'] = meta + + r = customize_code.detect_version(ii) + return r + + return {'return': 0} + +############################################################################## + + +def run_postprocess(customize_code, customize_common_input, recursion_spaces, + env, state, const, const_state, meta, verbose=False, run_script_input=None): + + if customize_code is not None and 'postprocess' in dir(customize_code): + import copy + + logging.debug(recursion_spaces + ' - Running postprocess ...') + + # Update env and state with const + utils.merge_dicts({'dict1': env, 'dict2': const, + 'append_lists': True, 'append_unique': True}) + 
utils.merge_dicts({'dict1': state, + 'dict2': const_state, + 'append_lists': True, + 'append_unique': True}) + + ii = copy.deepcopy(customize_common_input) + ii['env'] = env + ii['state'] = state + ii['meta'] = meta + + if run_script_input is not None: + ii['run_script_input'] = run_script_input + + r = customize_code.postprocess(ii) + return r + + return {'return': 0} + +############################################################################## + + +def get_script_name(env, path, script_name='run'): + """ + Internal: find the most appropriate run script name for the detected OS + """ + + from os.path import exists + + tmp_suff1 = env.get('CM_HOST_OS_FLAVOR', '') + tmp_suff2 = env.get('CM_HOST_OS_VERSION', '') + tmp_suff3 = env.get('CM_HOST_PLATFORM_FLAVOR', '') + + if exists(os.path.join(path, script_name + '-' + tmp_suff1 + + '-' + tmp_suff2 + '-' + tmp_suff3 + '.sh')): + return script_name + '-' + tmp_suff1 + '-' + tmp_suff2 + '-' + tmp_suff3 + '.sh' + elif exists(os.path.join(path, script_name + '-' + tmp_suff1 + '-' + tmp_suff3 + '.sh')): + return script_name + '-' + tmp_suff1 + '-' + tmp_suff3 + '.sh' + elif exists(os.path.join(path, script_name + '-' + tmp_suff1 + '-' + tmp_suff2 + '.sh')): + return script_name + '-' + tmp_suff1 + '-' + tmp_suff2 + '.sh' + elif exists(os.path.join(path, script_name + '-' + tmp_suff1 + '.sh')): + return script_name + '-' + tmp_suff1 + '.sh' + elif exists(os.path.join(path, script_name + '-' + tmp_suff3 + '.sh')): + return script_name + '-' + tmp_suff3 + '.sh' + else: + return script_name + '.sh' + +############################################################################## + + +def update_env_keys(env, env_key_mappings): + """ + Internal: convert env keys as per the given mapping + """ + + for key_prefix in env_key_mappings: + for key in list(env): + if key.startswith(key_prefix): + new_key = key.replace(key_prefix, env_key_mappings[key_prefix]) + env[new_key] = env[key] + # del(env[key]) + +############################################################################## + + +def convert_env_to_script(env, os_info, start_script=None): + """ + Internal: Convert env to script for a given platform. 
+ """ + import copy + + # Initialize script with a deep copy of the start_script or an empty list + script = copy.deepcopy(start_script) if start_script else [] + + # Determine if the platform is Windows + is_windows = os_info['platform'] == 'windows' + + for k in sorted(env): + env_value = env[k] + + # Handle Windows-specific value processing + if is_windows: + if not isinstance(env_value, list): + env_value = [env_value] + + processed_values = [] + for v in env_value: + v_str = str(v) + if '"' not in v_str: + # Add quotes if special characters are present + if any(char in v_str for char in ['|', '&', '>', '<']): + v_str = f'"{v_str}"' + processed_values.append(v_str) + + env_value = processed_values if isinstance( + env[k], list) else processed_values[0] + + # Process special keys + key = k + if k.startswith('+'): + key = k[1:] + env_separator = os_info.get('env_separator', ';') + + # Custom separator if key starts with a non-alphanumeric character + if not key[0].isalnum(): + env_separator = key[0] + key = key[1:] + + # Append the existing environment variable to the new value + env_value = f"{env_separator.join(env_value)}{env_separator}{os_info['env_var'].replace('env_var', key)}" + + # Replace placeholders in the platform-specific environment command + env_command = os_info['set_env'].replace( + '${key}', key).replace( + '${value}', str(env_value)) + script.append(env_command) + + return script + +############################################################################## + + +def record_script(run_script, script, os_info): + """ + Internal: record script and chmod 755 on Linux + """ + + final_script = '\n'.join(script) + + if not final_script.endswith('\n'): + final_script += '\n' + + r = utils.save_txt(file_name=run_script, string=final_script) + if r['return'] > 0: + return r + + if os_info.get('set_exec_file', '') != '': + cmd = os_info['set_exec_file'].replace('${file_name}', run_script) + rc = os.system(cmd) + + return {'return': 0} + +############################################################################## + + +def clean_tmp_files(clean_files, recursion_spaces): + """ + Internal: clean tmp files + """ + +# logging.info('') +# logging.info(recursion_spaces+' - cleaning files {} ...'.format(clean_files)) + + for tmp_file in clean_files: + if os.path.isfile(tmp_file): + os.remove(tmp_file) + + return {'return': 0} + +############################################################################## + + +def update_dynamic_env_values(mydict, env): + """ + Internal: update the dynamic value in given dict identified by <<<...>>> with the value from env dictionary if set + """ + import re + # Regular expression to match <<>> + pattern = re.compile(r'<<<(.*?)>>>') + + def replace_variables(value): + # Function to replace the <<>> with corresponding value from + # env + if isinstance(value, str): # Only process if the value is a string + matches = pattern.findall(value) + for match in matches: + if match in env: # Replace only if the variable is in env + value = value.replace(f'<<<{match}>>>', str(env[match])) + return value + + # Recursively update the dictionary + for key, val in mydict.items(): + if isinstance(val, dict): + # If the value is a dictionary, recurse into it + update_dynamic_env_values(val, env) + else: + # Replace variables in the current value + mydict[key] = replace_variables(val) + + return + + +############################################################################## +def update_dep_info(dep, new_info): + """ + Internal: Add additional info to a 
dependency. + """ + for info, value in new_info.items(): + + if info == "tags": + # Process tags + existing_tags = dep.get('tags', '').split(",") + new_tags = value.split(",") + # Filter and combine unique tags + filtered_new_tags = [tag for tag in new_tags if "<<<" not in tag] + combined_tags = existing_tags + \ + list(set(filtered_new_tags) - set(existing_tags)) + dep['tags'] = ",".join(combined_tags) + + elif "enable_if_" in info or "skip_if_" in info: + # Skip special cases meant for conditions + continue + + elif isinstance(value, dict): + # Merge dictionaries + dep.setdefault(info, {}) + if isinstance(dep[info], dict): + utils.merge_dicts({ + 'dict1': dep[info], + 'dict2': value, + 'append_lists': True, + 'append_unique': True + }) + # Optional: Throw an error if types are mismatched + # else: + # raise ValueError(f"Cannot merge non-dict type into dict for key '{info}'") + + elif isinstance(value, list): + # Merge lists + dep.setdefault(info, []) + if isinstance(dep[info], list): + dep[info].extend(value) + # Optional: Throw an error if types are mismatched + # else: + # raise ValueError(f"Cannot append non-list type into list for key '{info}'") + + else: + # Overwrite or set other types of values + dep[info] = value + + +############################################################################## + +def update_deps(deps, add_deps, fail_error=False, env={}): + """ + Internal: add deps tags, version etc. by name + """ + # deps_info_to_add = [ "version", "version_min", "version_max", + # "version_max_usable", "path", "tags", .... ] + new_deps_info = {} + for new_dep_name in add_deps: + if is_dep_tobe_skipped(add_deps[new_dep_name], env): + continue + dep_found = False + for dep in deps: + names = dep.get('names', []) + if new_dep_name in names: + update_dynamic_env_values(add_deps[new_dep_name], env) + update_dep_info(dep, add_deps[new_dep_name]) + dep_found = True + if not dep_found and fail_error: + return {'return': 1, 'error': new_dep_name + + ' is not one of the dependency'} + + return {'return': 0} + + +############################################################################## +def append_deps(deps, new_deps): + """ + Internal: add deps from meta + """ + + for new_dep in new_deps: + existing = False + new_dep_names = new_dep.get('names', []) + if len(new_dep_names) > 0: + for i in range(len(deps)): + dep = deps[i] + dep_names = dep.get('names', []) + if len(dep_names) > 0: + if set(new_dep_names) == set(dep_names): + deps[i] = new_dep + existing = True + break + else: # when no name, check for tags + new_dep_tags = new_dep.get('tags') + new_dep_tags_list = new_dep_tags.split(",") + for i in range(len(deps)): + dep = deps[i] + dep_tags_list = dep.get('tags').split(",") + if set(new_dep_tags_list) == set(dep_tags_list): + deps[i] = new_dep + existing = True + break + + if not existing: + deps.append(new_dep) + + return {'return': 0} + +############################################################################## + + +def is_dep_tobe_skipped(d, env): + """ + Internal: check if this dependency is to be skipped + """ + if d.get('skip_if_fake_run', False) and env.get( + 'CM_TMP_FAKE_RUN', '') == 'yes': + return True + + if "enable_if_env" in d: + if not enable_or_skip_script(d["enable_if_env"], env): + return True + + if "enable_if_any_env" in d: + if not any_enable_or_skip_script(d["enable_if_any_env"], env): + return True + + if "skip_if_env" in d: + if enable_or_skip_script(d["skip_if_env"], env): + return True + + if "skip_if_any_env" in d: + if 
any_enable_or_skip_script(d["skip_if_any_env"], env): + return True + + return False + +############################################################################## + + +def update_deps_from_input(deps, post_deps, prehook_deps, posthook_deps, i): + """ + Internal: update deps from meta + """ + add_deps_info_from_input = i.get('ad', {}) + if not add_deps_info_from_input: + add_deps_info_from_input = i.get('add_deps', {}) + else: + utils.merge_dicts({'dict1': add_deps_info_from_input, 'dict2': i.get( + 'add_deps', {}), 'append_lists': True, 'append_unique': True}) + + add_deps_recursive_info_from_input = i.get('adr', {}) + if not add_deps_recursive_info_from_input: + add_deps_recursive_info_from_input = i.get('add_deps_recursive', {}) + else: + utils.merge_dicts({'dict1': add_deps_recursive_info_from_input, 'dict2': i.get( + 'add_deps_recursive', {}), 'append_lists': True, 'append_unique': True}) + + env = i.get('env', {}) + + if add_deps_info_from_input: + r1 = update_deps(deps, add_deps_info_from_input, True, env) + r2 = update_deps(post_deps, add_deps_info_from_input, True, env) + r3 = update_deps(prehook_deps, add_deps_info_from_input, True, env) + r4 = update_deps(posthook_deps, add_deps_info_from_input, True, env) + if r1['return'] > 0 and r2['return'] > 0 and r3['return'] > 0 and r4['return'] > 0: + return r1 + if add_deps_recursive_info_from_input: + update_deps(deps, add_deps_recursive_info_from_input, False, env) + update_deps(post_deps, add_deps_recursive_info_from_input, False, env) + update_deps( + prehook_deps, + add_deps_recursive_info_from_input, + False, + env) + update_deps( + posthook_deps, + add_deps_recursive_info_from_input, + False, + env) + + return {'return': 0} + + +############################################################################## +def update_env_from_input_mapping(env, inp, input_mapping): + """ + Internal: update env from input and input_mapping + """ + for key in input_mapping: + if key in inp: + env[input_mapping[key]] = inp[key] + +############################################################################## + + +def update_state_from_meta(meta, env, state, const, const_state, deps, post_deps, + prehook_deps, posthook_deps, new_env_keys, new_state_keys, run_state, i): + """ + Internal: update env and state from meta + """ + + default_env = meta.get('default_env', {}) + for key in default_env: + env.setdefault(key, default_env[key]) + + update_env = meta.get('env', {}) + env.update(update_env) + + update_meta_if_env = meta.get('update_meta_if_env', []) + update_meta_if_env_from_state = run_state.get('update_meta_if_env', []) + run_state['update_meta_if_env'] = update_meta_if_env + \ + update_meta_if_env_from_state + + for c_meta in run_state['update_meta_if_env']: + if is_dep_tobe_skipped(c_meta, env): + continue + utils.merge_dicts({'dict1': env, 'dict2': c_meta.get( + 'env', {}), 'append_lists': True, 'append_unique': True}) + utils.merge_dicts({'dict1': state, 'dict2': c_meta.get( + 'state', {}), 'append_lists': True, 'append_unique': True}) + if c_meta.get('docker', {}): + if not state.get('docker', {}): + state['docker'] = {} + utils.merge_dicts({'dict1': state['docker'], + 'dict2': c_meta['docker'], + 'append_lists': True, + 'append_unique': True}) + + update_const = meta.get('const', {}) + if update_const: + const.update(update_const) + env.update(const) + + update_state = meta.get('state', {}) + utils.merge_dicts({'dict1': state, 'dict2': update_state, + 'append_lists': True, 'append_unique': True}) + + update_const_state = 
meta.get('const_state', {}) + if const_state: + utils.merge_dicts({'dict1': const_state, + 'dict2': update_const_state, + 'append_lists': True, + 'append_unique': True}) + utils.merge_dicts({'dict1': state, + 'dict2': const_state, + 'append_lists': True, + 'append_unique': True}) + + new_deps = meta.get('deps', []) + if len(new_deps) > 0: + append_deps(deps, new_deps) + + new_post_deps = meta.get("post_deps", []) + if len(new_post_deps) > 0: + append_deps(post_deps, new_post_deps) + + new_prehook_deps = meta.get("prehook_deps", []) + if len(new_prehook_deps) > 0: + append_deps(prehook_deps, new_prehook_deps) + + new_posthook_deps = meta.get("posthook_deps", []) + if len(new_posthook_deps) > 0: + append_deps(posthook_deps, new_posthook_deps) + + add_deps_info = meta.get('ad', {}) + if not add_deps_info: + add_deps_info = meta.get('add_deps', {}) + else: + utils.merge_dicts({'dict1': add_deps_info, 'dict2': meta.get( + 'add_deps', {}), 'append_lists': True, 'append_unique': True}) + if add_deps_info: + r1 = update_deps(deps, add_deps_info, True, env) + r2 = update_deps(post_deps, add_deps_info, True, env) + r3 = update_deps(prehook_deps, add_deps_info, True, env) + r4 = update_deps(posthook_deps, add_deps_info, True, env) + if r1['return'] > 0 and r2['return'] > 0 and r3['return'] > 0 and r4['return'] > 0: + return r1 + + # i would have 'input' when called through cm.access + input_update_env = i.get('input', i) + + input_mapping = meta.get('input_mapping', {}) + if input_mapping: + update_env_from_input_mapping(env, input_update_env, input_mapping) + + # handle dynamic env values + r = update_env_with_values(env) + if r['return'] > 0: + return r + + # Possibly restrict this to within docker environment + # we need to see input here + add_deps_info = meta.get('ad', i.get('ad', {})) + if not add_deps_info: + add_deps_info = meta.get('add_deps', i.get('add_deps_recursive', {})) + else: + utils.merge_dicts({'dict1': add_deps_info, 'dict2': meta.get( + 'add_deps', {}), 'append_lists': True, 'append_unique': True}) + + new_docker_settings = meta.get('docker') + if new_docker_settings: + docker_settings = state.get('docker', {}) + # docker_input_mapping = docker_settings.get('docker_input_mapping', {}) + # new_docker_input_mapping = new_docker_settings.get('docker_input_mapping', {}) + # if new_docker_input_mapping: + # # update_env_from_input_mapping(env, i['input'], docker_input_mapping) + # utils.merge_dicts({'dict1':docker_input_mapping, 'dict2':new_docker_input_mapping, 'append_lists':True, 'append_unique':True}) + utils.merge_dicts({'dict1': docker_settings, + 'dict2': new_docker_settings, + 'append_lists': True, + 'append_unique': True}) + if docker_settings.get('deps', []): + update_deps(docker_settings['deps'], add_deps_info, False, env) + state['docker'] = docker_settings + + new_env_keys_from_meta = meta.get('new_env_keys', []) + if new_env_keys_from_meta: + new_env_keys += new_env_keys_from_meta + + new_state_keys_from_meta = meta.get('new_state_keys', []) + if new_state_keys_from_meta: + new_state_keys += new_state_keys_from_meta + + return {'return': 0} + +############################################################################## + + +def update_adr_from_meta(deps, post_deps, prehook_deps, + posthook_deps, add_deps_recursive_info, env={}): + """ + Internal: update add_deps_recursive from meta + """ + if add_deps_recursive_info: + update_deps(deps, add_deps_recursive_info, False, env) + update_deps(post_deps, add_deps_recursive_info, False, env) + update_deps(prehook_deps, 
add_deps_recursive_info, False, env) + update_deps(posthook_deps, add_deps_recursive_info, False, env) + + return {'return': 0} + +############################################################################## + + +def get_adr(meta): + add_deps_recursive_info = meta.get('adr', {}) + if not add_deps_recursive_info: + add_deps_recursive_info = meta.get('add_deps_recursive', {}) + else: + utils.merge_dicts({'dict1': add_deps_recursive_info, 'dict2': meta.get( + 'add_deps_recursive', {}), 'append_lists': True, 'append_unique': True}) + return add_deps_recursive_info + +############################################################################## + + +def detect_state_diff(env, saved_env, new_env_keys, + new_state_keys, state, saved_state): + """ + Internal: detect diff in env and state + """ + + new_env = {} + new_state = {} + + # Check if leave only specific keys or detect diff automatically + for k in new_env_keys: + if '?' in k or '*' in k: + import fnmatch + for kk in env: + if fnmatch.fnmatch(kk, k): + new_env[kk] = env[kk] + elif k in env: + new_env[k] = env[k] + elif "<<<" in k: + import re + tmp_values = re.findall(r'<<<(.*?)>>>', k) + for tmp_value in tmp_values: + if tmp_value in env: + value = env[tmp_value] + if value in env: + new_env[value] = env[value] + + for k in new_state_keys: + if '?' in k or '*' in k: + import fnmatch + for kk in state: + if fnmatch.fnmatch(kk, k): + new_state[kk] = state[kk] + elif k in state: + new_state[k] = state[k] + elif "<<<" in k: + import re + tmp_values = re.findall(r'<<<(.*?)>>>', k) + for tmp_value in tmp_values: + if tmp_value in state: + value = state[tmp_value] + if value in state: + new_state[value] = state[value] + + return {'return': 0, 'env': env, 'new_env': new_env, + 'state': state, 'new_state': new_state} + +############################################################################## + + +def select_script_artifact(lst, text, recursion_spaces, + can_skip, script_tags_string, quiet, verbose): + """ + Internal: select script + """ + + string1 = recursion_spaces + \ + ' - More than 1 {} found for "{}":'.format(text, script_tags_string) + + # If quiet, select 0 (can be sorted for determinism) + if quiet: + logging.debug(string1) + logging.debug('Selected default due to "quiet" mode') + + return 0 + + # Select 1 and proceed + logging.info(string1) + num = 0 + + for a in lst: + meta = a.meta + + name = meta.get('name', '') + + s = a.path + if name != '': + s = '"' + name + '" ' + s + + x = recursion_spaces + \ + ' {}) {} ({})'.format(num, s, ','.join(meta['tags'])) + + version = meta.get('version', '') + if version != '': + x += ' (Version {})'.format(version) + + logging.info(x) + num += 1 + + s = 'Make your selection or press Enter for 0' + if can_skip: + s += ' or use -1 to skip' + + x = input(recursion_spaces + ' ' + s + ': ') + x = x.strip() + if x == '': + x = '0' + + selection = int(x) + + if selection < 0 and not can_skip: + selection = 0 + + if selection < 0: + logging.info(recursion_spaces + ' Skipped') + else: + if selection >= num: + selection = 0 + logging.info( + recursion_spaces + + ' Selected {}: {}'.format( + selection, + lst[selection].path)) + + return selection + +############################################################################## + + +def check_versions(cmind, cached_script_version, version_min, version_max): + """ + Internal: check versions of the cached script + """ + skip_cached_script = False + + if cached_script_version != '': + if version_min != '': + ry = cmind.access({'action': 'compare_versions', 
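+                           # (illustrative note: the 'compare_versions' utils
+                           # action returns {'comparison': <0 | 0 | >0};
+                           # a negative value means version1 < version2)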
+ 'automation': 'utils,dc2743f8450541e3', + 'version1': cached_script_version, + 'version2': version_min}) + if ry['return'] > 0: + return ry + + if ry['comparison'] < 0: + skip_cached_script = True + + if not skip_cached_script and version_max != '': + ry = cmind.access({'action': 'compare_versions', + 'automation': 'utils,dc2743f8450541e3', + 'version1': cached_script_version, + 'version2': version_max}) + if ry['return'] > 0: + return ry + + if ry['comparison'] > 0: + skip_cached_script = True + + return skip_cached_script + +############################################################################## + + +def get_git_url(get_type, url, params={}): + from giturlparse import parse + p = parse(url) + if get_type == "ssh": + return p.url2ssh + elif get_type == "token": + token = params['token'] + return "https://git:" + token + "@" + p.host + "/" + p.owner + "/" + p.repo + return url + +############################################################################## + + +def can_write_to_current_directory(): + + import tempfile + + cur_dir = os.getcwd() + +# try: +# tmp_file = tempfile.NamedTemporaryFile(dir = cur_dir) +# except Exception as e: +# return False + + tmp_file_name = next(tempfile._get_candidate_names()) + '.tmp' + + tmp_path = os.path.join(cur_dir, tmp_file_name) + + try: + tmp_file = open(tmp_file_name, 'w') + except Exception as e: + return False + + tmp_file.close() + + os.remove(tmp_file_name) + + return True + +########################################################################## + + +def dump_repro_start(repro_prefix, ii): + import json + + # Clean reproducibility and experiment files + for f in ['cm-output.json', 'version_info.json', '-input.json', + '-info.json', '-output.json', '-run-state.json']: + ff = repro_prefix + f if f.startswith('-') else f + if os.path.isfile(ff): + try: + os.remove(ff) + except BaseException: + pass + + try: + with open(repro_prefix + '-input.json', 'w', encoding='utf-8') as f: + json.dump(ii, f, ensure_ascii=False, indent=2) + except BaseException: + pass + + # Get some info + info = {} + + try: + import platform + import sys + + info['host_os_name'] = os.name + info['host_system'] = platform.system() + info['host_os_release'] = platform.release() + info['host_machine'] = platform.machine() + info['host_architecture'] = platform.architecture() + info['host_python_version'] = platform.python_version() + info['host_sys_version'] = sys.version + + r = utils.gen_uid() + if r['return'] == 0: + info['run_uid'] = r['uid'] + + r = utils.get_current_date_time({}) + if r['return'] == 0: + info['run_iso_datetime'] = r['iso_datetime'] + + with open(repro_prefix + '-info.json', 'w', encoding='utf-8') as f: + json.dump(info, f, ensure_ascii=False, indent=2) + except BaseException: + pass + + # For experiment + cm_output = {} + + cm_output['tmp_test_value'] = 10.0 + + cm_output['info'] = info + cm_output['input'] = ii + + try: + with open('cm-output.json', 'w', encoding='utf-8') as f: + json.dump(cm_output, f, ensure_ascii=False, indent=2) + except BaseException: + pass + + return {'return': 0} + +########################################################################## + + +def dump_repro(repro_prefix, rr, run_state): + import json + import copy + + try: + with open(repro_prefix + '-output.json', 'w', encoding='utf-8') as f: + json.dump(rr, f, ensure_ascii=False, indent=2) + except BaseException: + pass + + try: + with open(repro_prefix + '-run-state.json', 'w', encoding='utf-8') as f: + json.dump(run_state, f, ensure_ascii=False, indent=2) + 
except BaseException:
+        pass
+
+    # For experiment
+    cm_output = {}
+
+    # Attempt to read
+    try:
+        r = utils.load_json('cm-output.json')
+        if r['return'] == 0:
+            cm_output = r['meta']
+    except BaseException:
+        pass
+
+    cm_output['output'] = rr
+    cm_output['state'] = copy.deepcopy(run_state)
+
+    # Try to load version_info.json
+    version_info = {}
+
+    version_info_orig = []
+
+    if 'version_info' in cm_output['state']:
+        version_info_orig = cm_output['state']['version_info']
+        del (cm_output['state']['version_info'])
+
+    try:
+        r = utils.load_json('version_info.json')
+        if r['return'] == 0:
+            version_info_orig += r['meta']
+
+            for v in version_info_orig:
+                for key in v:
+                    dep = v[key]
+                    version_info[key] = dep
+
+    except BaseException:
+        pass
+
+    if len(version_info) > 0:
+        cm_output['version_info'] = version_info
+
+    if rr['return'] == 0:
+        # See https://cTuning.org/ae
+        cm_output['acm_ctuning_repro_badge_available'] = True
+        cm_output['acm_ctuning_repro_badge_functional'] = True
+
+    try:
+        with open('cm-output.json', 'w', encoding='utf-8') as f:
+            json.dump(
+                cm_output,
+                f,
+                ensure_ascii=False,
+                indent=2,
+                sort_keys=True)
+    except BaseException:
+        pass
+
+    return {'return': 0}
+
+
+##############################################################################
+# Demo to show how to use CM components independently if needed
+if __name__ == "__main__":
+    import cmind
+    auto = CAutomation(cmind, __file__)
+
+    r = auto.test({'x': 'y'})
+
+    logging.info(r)
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/module_help.py b/cmx4mlops/cmx4mlops/repo/automation/script/module_help.py
new file mode 100644
index 000000000..e1eb4424a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/module_help.py
@@ -0,0 +1,119 @@
+# Author: Grigori Fursin
+# Contributors: Arjun Suresh, Anandhu Sooraj
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+from cmind import utils
+
+# Print help about a script
+
+
+def print_help(i):
+
+    meta = i.get('meta', '')
+    path = i.get('path', '')
+
+    if len(meta) == 0 and path == '':
+        return {'return': 0}
+
+    print('')
+    print(
+        'Help for this CM script ({},{}):'.format(
+            meta.get(
+                'alias', ''), meta.get(
+                'uid', '')))
+
+    print('')
+    print('Path to this automation recipe: {}'.format(path))
+
+    variations = meta.get('variations', {})
+    if len(variations) > 0:
+        print('')
+        print('Available variations:')
+        print('')
+        for v in sorted(variations):
+            print(' _' + v)
+
+    input_mapping = meta.get('input_mapping', {})
+    if len(input_mapping) > 0:
+        print('')
+        print('Available flags mapped to environment variables:')
+        print('')
+        for k in sorted(input_mapping):
+            v = input_mapping[k]
+
+            print(' --{} -> --env.{}'.format(k, v))
+
+    input_description = meta.get('input_description', {})
+    if len(input_description) > 0:
+        # Check if it has important keys to show first (sort)
+        sorted_keys = []
+        all_keys = sorted(list(input_description.keys()))
+
+        for k in sorted(
+                all_keys, key=lambda x: input_description[x].get('sort', 0)):
+            v = input_description[k]
+            if v.get('sort', 0) > 0:
+                sorted_keys.append(k)
+
+        print('')
+        print('Available flags (Python API dict keys):')
+        print('')
+        for k in all_keys:
+            v = input_description[k]
+            n = v.get('desc', '')
+
+            x = ' --' + k
+            if n != '':
+                x += ' ({})'.format(n)
+
+            print(x)
+
+        if len(sorted_keys) > 0:
+            print('')
+            print('Main flags:')
+            print('')
+            for k in sorted_keys:
+                v = input_description[k]
+                n = v.get('desc', '')
+
+                x = ' --' + k
+
+                d = None
+                if 'default' in v:
+                    d = v.get('default', '')
+
+                if d is not None:
+                    x += '=' + d
+
+                c = v.get('choices', [])
+                if len(c) > 0:
+                    x += ' {' + ','.join(c) + '}'
+
+                if n != '':
+                    x += ' ({})'.format(n)
+
+                print(x)
+
+    print('')
+    x = input('Would you like to see a Python API with a list of common keys/flags for all scripts including this one (y/N)? ')
+
+    x = x.strip().lower()
+
+    skip_delayed_help = False if x in ['y', 'yes'] else True
+
+    r = {'return': 0}
+
+    if skip_delayed_help:
+        r['skip_delayed_help'] = True
+
+    return r
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/module_misc.py b/cmx4mlops/cmx4mlops/repo/automation/script/module_misc.py
new file mode 100644
index 000000000..22b4cf2fd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/automation/script/module_misc.py
@@ -0,0 +1,2518 @@
+# Author: Grigori Fursin
+# Contributors: Arjun Suresh, Anandhu Sooraj
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+import os
+from cmind import utils
+
+# Meta deps
+
+
+def process_deps(self_module, meta, meta_url, md_script_readme,
+                 key, extra_space='', skip_from_meta=False, skip_if_empty=False):
+
+    x = ''
+    y = []
+    if len(meta.get(key, {})) > 0:
+        x = '***'
+
+        for d in meta[key]:
+            d_tags = d.get('tags', '')
+
+            z = extra_space + ' * ' + d_tags
+            y.append(z)
+
+            names = d.get('names', [])
+
+            for kk in [
+                ('enable_if_env', 'Enable this dependency only if all ENV vars are set'),
+                ('enable_if_any_env',
+                 'Enable this dependency only if any of ENV vars are set'),
+                ('skip_if_env',
+                 'Skip this dependency only if all ENV vars are set'),
+                ('skip_if_any_env',
+                 'Skip this dependency only if any of ENV vars are set')
+            ]:
+
+                k1 = kk[0]
+                k2 = kk[1]
+
+                conditions = d.get(k1, {})
+                if len(conditions) > 0:
+                    y.append(extra_space +
+                             ' * {}:
\n`{}`'.format(k2, str(conditions))) + + if len(names) > 0: + y.append( + extra_space + + ' * CM names: `--adr.' + + str(names) + + '...`') + + # Attempt to find related CM scripts + r = self_module.cmind.access({'action': 'find', + 'automation': 'script', + 'tags': d_tags}) + if r['return'] == 0: + lst = r['list'] + + if len(lst) == 0: + y.append(extra_space + + ' - *Warning: no scripts found*') + else: + for s in lst: + s_repo_meta = s.repo_meta + + s_repo_alias = s_repo_meta.get('alias', '') + s_repo_uid = s_repo_meta.get('uid', '') + + # Check URL + s_url = '' + s_url_repo = '' + if s_repo_alias == 'internal': + s_url_repo = 'https://github.com/mlcommons/ck/tree/master/cm/cmind/repo' + s_url = s_url_repo + '/script/' + elif '@' in s_repo_alias: + s_url_repo = 'https://github.com/' + \ + s_repo_alias.replace('@', '/') + '/tree/master' + if s_repo_meta.get('prefix', '') != '': + s_url_repo += '/' + s_repo_meta['prefix'] + s_url = s_url_repo + '/script/' + + s_alias = s.meta['alias'] + y.append( + extra_space + ' - CM script: [{}]({})'.format(s_alias, s_url + s_alias)) + + z = '' + if not skip_from_meta: + z = ' from [meta]({})'.format(meta_url) + + if not skip_if_empty or len(y) > 0: + md_script_readme.append( + (extra_space + + ' 1. ' + + x + + 'Read "{}" on other CM scripts' + + z + + x).format(key)) + md_script_readme += y + +############################################################ + + +def doc(i): + """ + Add CM automation. + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + parsed_artifact (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + (repos) (str): list of repositories to search for automations + + (output_dir) (str): output directory (../docs by default) + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + self_module = i['self_module'] + + cur_dir = os.getcwd() + + template_file = 'template_list_of_scripts.md' + list_file = 'list_of_scripts.md' + + public_taskforce = '[Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)' + + console = i.get('out') == 'con' + + repos = i.get('repos', '') + if repos == '': + repos = 'internal,a4705959af8e447a' + + parsed_artifact = i.get('parsed_artifact', []) + + if len(parsed_artifact) < 1: + parsed_artifact = [('', ''), ('', '')] + elif len(parsed_artifact) < 2: + parsed_artifact.append(('', '')) + else: + repos = parsed_artifact[1][0] + + list_of_repos = repos.split(',') if ',' in repos else [repos] + + ii = utils.sub_input(i, self_module.cmind.cfg['artifact_keys'] + ['tags']) + + ii['out'] = None + + # Search for automations in repos + lst = [] + + for repo in list_of_repos: + parsed_artifact[1] = ( + '', repo) if utils.is_cm_uid(repo) else ( + repo, '') + ii['parsed_artifact'] = parsed_artifact + r = self_module.search(ii) + if r['return'] > 0: + return r + lst += r['list'] + + md = [] + + toc = [] + + toc_category = {} + toc_category_sort = {} + script_meta = {} + urls = {} + + for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')): + + toc_readme = [] + + # Common index for all scripts + md_script = [] + + path = artifact.path + meta = artifact.meta + original_meta = artifact.original_meta + + print('Documenting {}'.format(path)) + + alias = meta.get('alias', '') + uid = meta.get('uid', '') + + 
script_meta[alias] = meta + + name = meta.get('name', '') + developers = meta.get('developers', '') + + # Check if has tags help otherwise all tags + tags = meta.get('tags_help', '').strip() + if tags == '': + tags = meta.get('tags', []) + else: + tags = tags.split(' ') + + variations = meta.get('variations', {}) + + variation_keys = sorted(list(variations.keys())) + version_keys = sorted(list(meta.get('versions', {}).keys())) + + default_variation = meta.get('default_variation', '') + default_version = meta.get('default_version', '') + + input_mapping = meta.get('input_mapping', {}) + input_description = meta.get('input_description', {}) + + category = meta.get('category', '').strip() + category_sort = meta.get('category_sort', 0) + if category != '': + if category not in toc_category: + toc_category[category] = [] + + if category not in toc_category_sort or category_sort > 0: + toc_category_sort[category] = category_sort + + if alias not in toc_category[category]: + toc_category[category].append(alias) + + repo_path = artifact.repo_path + repo_meta = artifact.repo_meta + + repo_alias = repo_meta.get('alias', '') + repo_uid = repo_meta.get('uid', '') + + # Check URL + url = '' + url_repo = '' + if repo_alias == 'internal': + url_repo = 'https://github.com/mlcommons/ck/tree/dev/cm/cmind/repo' + url = url_repo + '/script/' + elif '@' in repo_alias: + url_repo = 'https://github.com/' + \ + repo_alias.replace('@', '/') + '/tree/dev' + if repo_meta.get('prefix', '') != '': + url_repo += '/' + repo_meta['prefix'] + url = url_repo + '/script/' + + if url != '': + url += alias + + urls[alias] = url + + # Check if there is about doc + path_readme = os.path.join(path, 'README.md') + path_readme_extra = os.path.join(path, 'README-extra.md') + path_readme_about = os.path.join(path, 'README-about.md') + + readme_about = '' + if os.path.isfile(path_readme_about): + r = utils.load_txt(path_readme_about, split=True) + if r['return'] > 0: + return + + s = r['string'] + readme_about = r['list'] + + ####################################################################### + # Start automatically generated README + md_script_readme = [ + # '
', + # 'Click here to see the table of contents.', + # '{{CM_README_TOC}}', + # '
', + # '', + 'Automatically generated README for this automation recipe: **{}**'.format( + meta['alias']), + ] + + md_script.append('## ' + alias) + md_script.append('') + +# x = 'About' +# md_script_readme.append('___') +# md_script_readme.append('### '+x) +# md_script_readme.append('') +# toc_readme.append(x) + +# x = 'About' +# md_script_readme.append('#### '+x) +# md_script_readme.append('') +# toc_readme.append(' '+x) + + if name != '': + name += '.' + md_script.append('*' + name + '*') + md_script.append('') + +# md_script_readme.append('*'+name+'*') +# md_script_readme.append('') + + if os.path.isfile(path_readme): + r = utils.load_txt(path_readme, split=True) + if r['return'] > 0: + return + + s = r['string'] + readme = r['list'] + + if not 'automatically generated' in s.lower(): + found_path_readme_extra = True + + # Attempt to rename to README-extra.md + if os.path.isfile(path_readme_extra): + return { + 'return': 1, 'error': 'README.md is not auto-generated and README-extra.md already exists - can\'t rename'} + + os.rename(path_readme, path_readme_extra) + + # Add to Git (if in git) + os.chdir(path) + os.system('git add README-extra.md') + os.chdir(cur_dir) + + if category != '': + md_script_readme.append('') + md_script_readme.append('Category: **{}**'.format(category)) + + md_script_readme.append('') + md_script_readme.append('License: **Apache 2.0**') + + md_script_readme.append('') + + if developers == '': + md_script_readme.append('Maintainers: ' + public_taskforce) + else: + md_script_readme.append('Developers: ' + developers) + + x = '* [{}]({})'.format(alias, url) + if name != '': + x += ' *(' + name + ')*' + toc.append(x) + + cm_readme_extra = '[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name={},{}) ] '.format( + alias, uid) + + if os.path.isfile(path_readme_extra): + readme_extra_url = url + '/README-extra.md' + + x = '* Notes from the authors, contributors and users: [*GitHub*]({})'.format( + readme_extra_url) + md_script.append(x) + + cm_readme_extra += '[ [Notes from the authors, contributors and users](README-extra.md) ] ' + + md_script_readme.append('') + md_script_readme.append('---') + md_script_readme.append('*' + cm_readme_extra.strip() + '*') + + if readme_about != '': + md_script_readme += ['', '---', ''] + readme_about + + x = 'Summary' + md_script_readme.append('') + md_script_readme.append('---') + md_script_readme += [ + # '
<details>',
+            # '<summary>Click to see the summary</summary>',
+            '#### Summary',
+            ''
+        ]
+        toc_readme.append(x)
+
+
+# if category != '':
+#    x = 'Category'
+#    md_script_readme.append('___')
+#    md_script_readme.append('#### '+x)
+#    md_script_readme.append(' ')
+#    md_script_readme.append(category+'.')
+#    toc_readme.append(x)
+
+#        x = '* Category: *{}*'.format(category + '.')
+#        md_script_readme.append(x)
+
+
+# x = 'Origin'
+# md_script_readme.append('___')
+# md_script_readme.append('#### '+x)
+# md_script_readme.append('')
+# toc_readme.append(x)
+
+        x = '* CM GitHub repository: *[{}]({})*'.format(repo_alias, url_repo)
+        md_script.append(x)
+        md_script_readme.append(x)
+
+        x = '* GitHub directory for this script: *[GitHub]({})*'.format(url)
+        md_script.append(x)
+        md_script_readme.append(x)
+
+        # Check meta
+        meta_file = self_module.cmind.cfg['file_cmeta']
+        meta_path = os.path.join(path, meta_file)
+
+        meta_file += '.yaml' if os.path.isfile(
+            meta_path + '.yaml') else '.json'
+
+        meta_url = url + '/' + meta_file
+
+        x = '* CM meta description of this script: *[GitHub]({})*'.format(
+            meta_url)
+        md_script.append(x)
+
+# x = '* CM automation "script": *[Docs]({})*'.format('https://github.com/octoml/ck/blob/master/docs/list_of_automations.md#script')
+# md_script.append(x)
+# md_script_readme.append(x)
+
+        if len(variation_keys) > 0:
+            variation_pointer = "[,variations]"
+            variation_pointer2 = "[variations]"
+        else:
+            variation_pointer = ''
+            variation_pointer2 = ''
+
+        if len(input_mapping) > 0:
+            input_mapping_pointer = "[--input_flags]"
+        else:
+            input_mapping_pointer = ''
+
+        cli_all_tags = '`cm run script --tags={}`'.format(','.join(tags))
+        cli_all_tags3 = '`cm run script --tags={}{} {}`'.format(
+            ','.join(tags), variation_pointer, input_mapping_pointer)
+        x = '* CM CLI with all tags: *{}*'.format(cli_all_tags)
+        md_script.append(x)
+
+        cli_help_tags_alternative = '`cmr "{}" --help`'.format(' '.join(tags))
+
+        cli_all_tags_alternative = '`cmr "{}"`'.format(' '.join(tags))
+        cli_all_tags_alternative3 = '`cmr "{} {}" {}`'.format(
+            ' '.join(tags), variation_pointer2, input_mapping_pointer)
+        cli_all_tags_alternative_j = '`cmr "{} {}" {} -j`'.format(
+            ' '.join(tags), variation_pointer, input_mapping_pointer)
+        x = '* CM CLI alternative: *{}*'.format(cli_all_tags_alternative)
+        md_script.append(x)
+
+        cli_all_tags_alternative_docker = '`cm docker script "{}{}" {}`'.format(
+            ' '.join(tags), variation_pointer2, input_mapping_pointer)
+
+
+# cli_uid = '`cm run script {} {}`'.format(meta['uid'], input_mapping_pointer)
+# x = '* CM CLI with alias and UID: {}*'.format(cli_uid)
+# md_script.append(x)
+
+        if len(variation_keys) > 0:
+            x = ''
+            for variation in variation_keys:
+                if x != '':
+                    x += ';  '
+                x += '_' + variation
+            md_script.append('* Variations: *{}*'.format(x))
+
+        if default_variation != '':
+            md_script.append(
+                '* Default variation: *{}*'.format(default_variation))
+
+        if len(version_keys) > 0:
+            md_script.append(
+                '* Versions: *{}*'.format(';  '.join(version_keys)))
+
+        if default_version != '':
+            md_script.append('* Default version: *{}*'.format(default_version))
+
+        md_script.append('')
+#        md_script_readme.append('')
+
+        # Add extra to README
+        x = 'Meta description'
+#        md_script_readme.append('___')
+#        md_script_readme.append('### '+x)
+        md_script_readme.append(
+            '* CM meta description for this script: *[{}]({})*'.format(meta_file, meta_file))
+#        md_script_readme.append('')
+#        toc_readme.append(x)
+
+        x = 'Tags'
+#        md_script_readme.append('___')
+#        md_script_readme.append('### '+x)
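+        # Illustrative note, added for clarity (an assumption, not part of the
+        # original code): for a script with tags ['get','python'], at least one
+        # variation and one input flag, the strings built above expand to:
+        #   cli_all_tags:                    `cm run script --tags=get,python`
+        #   cli_all_tags3:                   `cm run script --tags=get,python[,variations] [--input_flags]`
+        #   cli_all_tags_alternative:        `cmr "get python"`
+        #   cli_all_tags_alternative_docker: `cm docker script "get python[variations]" [--input_flags]`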
md_script_readme.append(
+            '* All CM tags to find and reuse this script (see in the above meta description): *{}*'.format(','.join(tags)))
+#        md_script_readme.append('')
+#        toc_readme.append(x)
+
+        cache = meta.get('cache', False)
+        md_script_readme.append('* Output cached? *{}*'.format(str(cache)))
+
+        md_script_readme.append(
+            '* See [pipeline of dependencies]({}) on other CM scripts'.format('#dependencies-on-other-cm-scripts'))
+
+        md_script_readme += ['',
+                             # '</details>
'
+                             ]
+
+        # Add usage
+        x1 = 'Reuse this script in your project'
+        x1a = 'Install MLCommons CM automation meta-framework'
+        x1aa = 'Pull CM repository with this automation recipe (CM script)'
+        x1b = 'Print CM help from the command line'
+        x2 = 'Customize and run this script from the command line with different variations and flags'
+        x3 = 'Run this script from Python'
+        x3a = 'Run this script via GUI'
+        x4 = 'Run this script via Docker (beta)'
+        md_script_readme += [
+            '',
+            '---',
+            '### ' + x1,
+            '',
+            '#### ' + x1a,
+            '',
+            '* [Install CM](https://access.cknowledge.org/playground/?action=install)',
+            '* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)',
+            '',
+            '#### ' + x1aa,
+            '',
+            '```cm pull repo {}```'.format(repo_alias),
+            '',
+            '#### ' + x1b,
+            '',
+            '```{}```'.format(cli_help_tags_alternative),
+            '',
+            '#### ' + x2,
+            '',
+            '{}'.format(cli_all_tags),
+            '',
+            '{}'.format(cli_all_tags3),
+            '',
+            '*or*',
+            '',
+            '{}'.format(cli_all_tags_alternative),
+            '',
+            '{}'.format(cli_all_tags_alternative3),
+            '',
+            # '3. {}'.format(cli_uid),
+            '']
+
+        x = ' and check the [Getting Started Guide](https://github.com/mlcommons/ck/blob/dev/docs/getting-started.md) for more details.'
+        if len(variation_keys) > 0:
+            md_script_readme += ['* *See the list of `variations` [here](#variations)' + x + '*',
+                                 ''
+                                 ]
+
+        if input_description and len(input_description) > 0:
+            x = 'Input Flags'
+            md_script_readme.append('')
+            md_script_readme.append('#### ' + x)
+            toc_readme.append(' ' + x)
+
+            md_script_readme.append('')
+            key0 = ''
+            for key in input_description:
+                if key0 == '':
+                    key0 = key
+
+                value = input_description[key]
+                desc = value
+
+                if isinstance(value, dict):
+                    desc = value['desc']
+
+                    choices = value.get('choices', [])
+                    if len(choices) > 0:
+                        desc += ' {' + ','.join(choices) + '}'
+
+                    default = value.get('default', '')
+                    if default != '':
+                        desc += ' (*' + str(default) + '*)'
+
+                md_script_readme.append('* --**{}**={}'.format(key, desc))
+
+            md_script_readme.append('')
+            md_script_readme.append(
+                '**Above CLI flags can be used in the Python CM API as follows:**')
+            md_script_readme.append('')
+
+            x = '```python\nr=cm.access({... , "' + key0 + '":...})\n```'
+            md_script_readme.append(x)
+
+        md_script_readme += ['#### ' + x3,
+                             '',
+                             '
<details>',
+                             '<summary>Click here to expand this section.</summary>',
+                             '',
+                             '```python',
+                             '',
+                             'import cmind',
+                             '',
+                             "r = cmind.access({'action':'run',",
+                             "                  'automation':'script',",
+                             "                  'tags':'{}',".format(
+                                 ','.join(tags)),
+                             "                  'out':'con',",
+                             "                  ...",
+                             "                  (other input keys for this script)",
+                             "                  ...",
+                             "                 })",
+                             "",
+                             "if r['return']>0:",
+                             "    print (r['error'])",
+                             '',
+                             '```',
+                             '',
+                             '</details>
', + '', + + '', + '#### ' + x3a, + '', + '```cmr "cm gui" --script="' + + ','.join(tags) + '"```', + '', + # 'Use this [online GUI](https://cKnowledge.org/cm-gui/?tags={}) to generate CM CMD.'.format(','.join(tags)), + # '', + '#### ' + x4, + '', + '{}'.format(cli_all_tags_alternative_docker), + '' + ] + toc_readme.append(x1) + toc_readme.append(' ' + x1a) + toc_readme.append(' ' + x1b) + toc_readme.append(' ' + x2) + toc_readme.append(' ' + x3) + toc_readme.append(' ' + x3a) + toc_readme.append(' ' + x4) + + x = 'Customization' + md_script_readme.append('___') + md_script_readme.append('### ' + x) + md_script_readme.append('') + toc_readme.append(x) + + if len(variation_keys) > 0: + # x = 'Variation groups' + # md_script_readme.append('___') + # md_script_readme.append('### '+x) + # toc_readme.append(x) + + variation_groups = {} + default_variations = [] + variation_md = {} + variation_alias = {} + + # Normally should not use anymore. Should use default:true inside + # individual variations. + default_variation = meta.get('default_variation', '') + + for variation_key in sorted(variation_keys): + variation = variations[variation_key] + + alias = variation.get('alias', '').strip() + + if alias != '': + aliases = variation_alias.get(alias, []) + if variation_key not in aliases: + aliases.append(variation_key) + variation_alias[alias] = aliases + + # Do not continue this loop if alias + continue + + default = variation.get('default', False) + + if not default: + # Check outdated + if default_variation == variation_key: + default = True + + extra1 = '' + extra2 = '' + if default: + extra1 = '**' + extra2 = '** (default)' + + default_variations.append(variation_key) + + md_var = [] + + md_var.append( + '* {}`_{}`{}'.format(extra1, variation_key, extra2)) + + variation_md[variation_key] = md_var + +# md_script_readme+=md_var + + group = variation.get('group', '') + + if variation_key.endswith('_'): + group = '*Internal group (variations should not be selected manually)*' + elif group == '': + group = '*No group (any variation can be selected)*' + + if group not in variation_groups: + variation_groups[group] = [] + + variation_groups[group].append(variation_key) + + x = 'Variations' + md_script_readme.append('') + md_script_readme.append('#### ' + x) + toc_readme.append(' ' + x) + + variation_groups_order = meta.get('variation_groups_order', []) + for variation in sorted(variation_groups): + if variation not in variation_groups_order: + variation_groups_order.append(variation) + + for group_key in variation_groups_order: + md_script_readme.append('') + + if not group_key.startswith('*'): + md_script_readme.append( + ' * Group "**{}**"'.format(group_key)) + else: + md_script_readme.append(' * {}'.format(group_key)) + + md_script_readme += [ + '
<details>',
+                '<summary> Click here to expand this section.</summary>',
+                ''
+            ]
+
+            for variation_key in sorted(variation_groups[group_key]):
+                variation = variations[variation_key]
+
+                xmd = variation_md[variation_key]
+
+                aliases = variation_alias.get(variation_key, [])
+                aliases2 = ['_' + v for v in aliases]
+
+                if len(aliases) > 0:
+                    xmd.append(
+                        ' - Aliases: `{}`'.format(','.join(aliases2)))
+
+                if len(variation.get('env', {})) > 0:
+                    xmd.append(' - Environment variables:')
+                    for key in variation['env']:
+                        xmd.append(
+                            '  - *{}*: `{}`'.format(key, variation['env'][key]))
+
+                xmd.append(' - Workflow:')
+
+                for dep in ['deps', 'prehook_deps',
+                            'posthook_deps', 'post_deps']:
+                    process_deps(
+                        self_module,
+                        variation,
+                        meta_url,
+                        xmd,
+                        dep,
+                        '  ',
+                        True,
+                        True)
+
+                for x in xmd:
+                    md_script_readme.append(' ' + x)
+
+            md_script_readme.append('')
+            md_script_readme.append('</details>
') + md_script_readme.append('') + + # Check if has invalid_variation_combinations + vvc = meta.get('invalid_variation_combinations', []) + if len(vvc) > 0: + x = 'Unsupported or invalid variation combinations' + md_script_readme.append('') + md_script_readme.append('#### ' + x) + md_script_readme.append('') + md_script_readme.append('') + md_script_readme.append('') + toc_readme.append(' ' + x) + + for v in vvc: + vv = ['_' + x for x in v] + md_script_readme.append('* `' + ','.join(vv) + '`') + + if len(default_variations) > 0: + md_script_readme.append('') + md_script_readme.append('#### Default variations') + md_script_readme.append('') + + dv = ['_' + x for x in sorted(default_variations)] + + md_script_readme.append('`{}`'.format(','.join(dv))) + + # Check if has valid_variation_combinations + vvc = meta.get('valid_variation_combinations', []) + if len(vvc) > 0: + x = 'Valid variation combinations checked by the community' + md_script_readme.append('') + md_script_readme.append('#### ' + x) + md_script_readme.append('') + md_script_readme.append('') + md_script_readme.append('') + toc_readme.append(' ' + x) + + for v in vvc: + vv = ['_' + x for x in v] + md_script_readme.append('* `' + ','.join(vv) + '`') + + # Check input flags + if input_mapping and len(input_mapping) > 0: + x = 'Script flags mapped to environment' + md_script_readme.append('') + md_script_readme.append('#### ' + x) + toc_readme.append(' ' + x) + + md_script_readme.append('
<details>')
+            md_script_readme.append(
+                '<summary>Click here to expand this section.</summary>')
+
+            md_script_readme.append('')
+            key0 = ''
+            for key in sorted(input_mapping):
+                if key0 == '':
+                    key0 = key
+                value = input_mapping[key]
+                md_script_readme.append(
+                    '* `--{}=value` → `{}=value`'.format(key, value))
+
+            md_script_readme.append('')
+            md_script_readme.append(
+                '**Above CLI flags can be used in the Python CM API as follows:**')
+            md_script_readme.append('')
+
+            x = '```python\nr=cm.access({... , "' + key0 + '":...})\n```'
+            md_script_readme.append(x)
+
+            md_script_readme.append('')
+            md_script_readme.append('</details>
') + md_script_readme.append('') + + # Default environment + default_env = meta.get('default_env', {}) + + x = 'Default environment' +# md_script_readme.append('___') + md_script_readme.append('#### ' + x) + toc_readme.append(' ' + x) + + md_script_readme.append('') + md_script_readme.append('
<details>')
+        md_script_readme.append(
+            '<summary>Click here to expand this section.</summary>')
+        md_script_readme.append('')
+        md_script_readme.append(
+            'These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.')
+        md_script_readme.append('')
+
+        for key in default_env:
+            value = default_env[key]
+            md_script_readme.append('* {}: `{}`'.format(key, value))
+
+        md_script_readme.append('')
+        md_script_readme.append('</details>
') + md_script_readme.append('') + + if len(version_keys) > 0 or default_version != '': + x = 'Versions' +# md_script_readme.append('___') + md_script_readme.append('#### ' + x) + toc_readme.append(x) + + if default_version != '': + md_script_readme.append( + 'Default version: `{}`'.format(default_version)) + md_script_readme.append('') + + if len(version_keys) > 0: + for version in version_keys: + md_script_readme.append('* `{}`'.format(version)) + + # Add workflow + x = 'Dependencies on other CM scripts' + md_script_readme += ['___', + '### ' + x, + ''] + toc_readme.append(x) + +# md_script_readme.append('
<details>')
+#    md_script_readme.append('<summary>Click here to expand this section.</summary>')
+
+        md_script_readme.append('')
+
+        # Check customize.py file
+        path_customize = os.path.join(path, 'customize.py')
+        found_customize = False
+        found_customize_preprocess = False
+        found_customize_postprocess = False
+        found_output_env = []
+
+        if os.path.isfile(path_customize):
+            found_customize = True
+
+            r = utils.load_txt(path_customize, split=True)
+            if r['return'] > 0:
+                return r
+
+            customize = r['string']
+            customize_l = r['list']
+
+            if 'def preprocess(' in customize:
+                found_customize_preprocess = True
+
+            if 'def postprocess(' in customize:
+                found_customize_postprocess = True
+
+            # Ugly attempt to get output env
+            found_postprocess = False
+            for l in customize_l:
+                # if not found_postprocess:
+                #    if 'def postprocess' in l:
+                #        found_postprocess = True
+                # else:
+                j = l.find(' env[')
+                if j >= 0:
+                    j1 = l.find(']', j + 4)
+                    if j1 >= 0:
+                        j2 = l.find('=', j1 + 1)
+                        if j2 >= 0:
+                            key2 = l[j + 5:j1].strip()
+                            key = key2[1:-1]
+
+                            if key.startswith(
+                                    'CM_') and 'TMP' not in key and key not in found_output_env:
+                                found_output_env.append(key)
+
+        process_deps(self_module, meta, meta_url, md_script_readme, 'deps')
+
+        x = ''
+        y = 'customize.py'
+        if found_customize_preprocess:
+            x = '***'
+            y = '[' + y + '](' + url + '/' + y + ')'
+        md_script_readme.append(
+            (' 1. ' + x + 'Run "preprocess" function from {}' + x).format(y))
+
+        process_deps(
+            self_module,
+            meta,
+            meta_url,
+            md_script_readme,
+            'prehook_deps')
+
+        # Check scripts
+        files = os.listdir(path)
+        x = ''
+        y = []
+        for f in sorted(files):
+            x = '***'
+            if f.startswith('run') and (
+                    f.endswith('.sh') or f.endswith('.bat')):
+                f_url = url + '/' + f
+                y.append(' * [{}]({})'.format(f, f_url))
+
+        md_script_readme.append(
+            (' 1. ' + x + 'Run native script if exists' + x).format(y))
+        md_script_readme += y
+
+        process_deps(
+            self_module,
+            meta,
+            meta_url,
+            md_script_readme,
+            'posthook_deps')
+
+        x = ''
+        y = 'customize.py'
+        if found_customize_postprocess:
+            x = '***'
+            y = '[' + y + '](' + url + '/' + y + ')'
+        md_script_readme.append(
+            (' 1. ' + x + 'Run "postprocess" function from {}' + x).format(y))
+
+        process_deps(
+            self_module,
+            meta,
+            meta_url,
+            md_script_readme,
+            'post_deps')
+# md_script_readme.append('</details>
') + md_script_readme.append('') + + # New environment + new_env_keys = meta.get('new_env_keys', []) + + x = 'Script output' + md_script_readme.append('___') + md_script_readme.append('### ' + x) + toc_readme.append(x) + + md_script_readme.append(cli_all_tags_alternative_j) + + x = 'New environment keys (filter)' + md_script_readme.append('#### ' + x) + toc_readme.append(x) + + md_script_readme.append('') + for key in sorted(new_env_keys): + md_script_readme.append('* `{}`'.format(key)) + + # Pass found_output_env through above filter + found_output_env_filtered = [] + + import fnmatch + + for key in found_output_env: + add = False + + for f in new_env_keys: + if fnmatch.fnmatch(key, f): + add = True + break + + if add: + found_output_env_filtered.append(key) + + x = 'New environment keys auto-detected from customize' + md_script_readme.append('#### ' + x) + toc_readme.append(x) + + md_script_readme.append('') + for key in sorted(found_output_env_filtered): + md_script_readme.append('* `{}`'.format(key)) + + # Add maintainers +# x = 'Maintainers' +# md_script_readme.append('___') +# md_script_readme.append('### '+x) +# md_script_readme.append('') +# md_script_readme.append('* ' + public_taskforce) +# toc_readme.append(x) + + # Process TOC + toc_readme_string = '\n' + for x in toc_readme: + x2 = x + prefix = '' + + if x.startswith(' '): + prefix = ' ' + x2 = x[1:] + + x2 = x2.lower().replace(' ', '-').replace(',', '') + toc_readme_string += prefix + '* [{}](#{})\n'.format(x, x2) + + # Add to the total list + md += md_script + + s = '\n'.join(md_script_readme) + + s = s.replace('{{CM_README_EXTRA}}', cm_readme_extra) +# s = s.replace('{{CM_SEE_README_EXTRA}}', cm_see_readme_extra) + s = s.replace('{{CM_README_TOC}}', toc_readme_string) + + r = utils.save_txt(path_readme, s) + if r['return'] > 0: + return r + + # Add to Git (if in git) + os.chdir(path) + os.system('git add README.md') + os.chdir(cur_dir) + + # Recreate TOC with categories + toc2 = [] + + # , key = lambda x: -toc_category_sort[x]): + for category in sorted(toc_category): + toc2.append('### ' + category) + toc2.append('') + + for script in sorted(toc_category[category]): + + meta = script_meta[script] + + name = meta.get('name', '') + + url = urls[script] + + x = '* [{}]({})'.format(script, url) + if name != '': + x += ' *(' + name + ')*' + + toc2.append(x) + + toc2.append('') + + toc_category_string = '' + for category in sorted(toc_category): + category_link = category.lower().replace(' ', '-').replace('/', '') + toc_category_string += '* [{}](#{})\n'.format(category, category_link) + + # Load template + r = utils.load_txt(os.path.join(self_module.path, template_file)) + if r['return'] > 0: + return r + + s = r['string'] + + s = s.replace('{{CM_TOC2}}', '\n'.join(toc2)) + s = s.replace('{{CM_TOC}}', '\n'.join(toc)) +# s = s.replace('{{CM_MAIN}}', '\n'.join(md)) + s = s.replace('{{CM_MAIN}}', '') + s = s.replace('{{CM_TOC_CATEGORIES}}', toc_category_string) + + # Output + output_dir = i.get('output_dir', '') + + if output_dir == '': + output_dir = '..' 
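+    # Note, added for clarity (an assumption, not part of the original code):
+    # with the default output_dir of '..', the regenerated list is saved one
+    # level above the current directory, and a copy of each script page is
+    # also written under "../docs/scripts/<category>/<alias>/index.md" below.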
+ + output_file = os.path.join(output_dir, list_file) + + r = utils.save_txt(output_file, s) + if r['return'] > 0: + return r + + out_docs_file = os.path.join( + "..", + "docs", + "scripts", + category, + alias, + "index.md") + r = utils.save_txt(out_docs_file, s) + if r['return'] > 0: + return r + + return {'return': 0} + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# This function takes in a host path and returns the absolute path on host and the container +# If mounts is passed, the function appends the host path and the +# container path to mounts in the form "host_path:container_path" +def update_path_for_docker(path, mounts=None, force_path_target=''): + + path_orig = '' + path_target = '' + + if path != '': # and (os.path.isfile(path) or os.path.isdir(path)): + path = os.path.abspath(path) + + path_target = path + path_orig = path + + if os.name == 'nt': + from pathlib import PureWindowsPath, PurePosixPath + + x = PureWindowsPath(path_orig) + path_target = str(PurePosixPath('/', *x.parts[1:])) + + if not path_target.startswith('/'): + path_target = '/' + path_target + + path_target = '/cm-mount' + \ + path_target if force_path_target == '' else force_path_target + + # If file, mount directory + if os.path.isfile(path) or not os.path.isdir(path): + x = os.path.dirname(path_orig) + ':' + os.path.dirname(path_target) + else: + x = path_orig + ':' + path_target + + # CHeck if no duplicates + if mounts is not None: + to_add = True + for y in mounts: + if y.lower() == x.lower(): + to_add = False + break + if to_add: + mounts.append(x) + + return (path_orig, path_target) + +############################################################ + + +def process_inputs(i): + + import copy + + i_run_cmd_arc = i['run_cmd_arc'] + docker_settings = i['docker_settings'] + mounts = i['mounts'] + + # Check if need to update/map/mount inputs and env + i_run_cmd = copy.deepcopy(i_run_cmd_arc) + + def get_value_using_key_with_dots(d, k): + v = None + j = k.find('.') + if j >= 0: + k1 = k[:j] + k2 = k[j + 1:] + + if k1 in d: + v = d[k1] + + if '.' 
in k2: + v, d, k = get_value_using_key_with_dots(v, k2) + else: + d = v + k = k2 + if isinstance(v, dict): + v = v.get(k2) + else: + v = None + else: + if k == '': + v = d + else: + v = d.get(k) + + return v, d, k + + docker_input_paths = docker_settings.get('input_paths', []) + if len(i_run_cmd) > 0: + for k in docker_input_paths: + v2, i_run_cmd2, k2 = get_value_using_key_with_dots(i_run_cmd, k) + + if v2 is not None: + v = i_run_cmd2[k2] + + path_orig, path_target = update_path_for_docker(v, mounts) + + if path_target != '': + i_run_cmd2[k2] = path_target + + return {'return': 0, 'run_cmd': i_run_cmd} + + +############################################################ +def regenerate_script_cmd(i): + + script_uid = i['script_uid'] + script_alias = i['script_alias'] + tags = i['tags'] + docker_settings = i['docker_settings'] + fake_run = i.get('fake_run', False) + + i_run_cmd = i['run_cmd'] + + # Cleanup from env everything that has a host path value + if i_run_cmd.get('env'): + for key in list(i_run_cmd.get('env')): + if isinstance(i_run_cmd['env'][key], str) and ((os.path.join("local", "cache", "") in i_run_cmd['env'][key]) or ( + os.path.join("CM", "repos", "") in i_run_cmd['env'][key])): + del (i_run_cmd['env'][key]) + elif isinstance(i_run_cmd['env'][key], list): + values_to_remove = [] + for val in i_run_cmd['env'][key]: + if isinstance(val, str) and ((os.path.join("local", "cache", "") in val) or ( + os.path.join("CM", "repos", "") in val)): + values_to_remove.append(val) + if values_to_remove == i_run_cmd['env'][key]: + del (i_run_cmd['env'][key]) + else: + for val in values_to_remove: + i_run_cmd['env'][key].remove(val) + + docker_run_cmd_prefix = i['docker_run_cmd_prefix'] + + # Regenerate command from dictionary input + run_cmd = 'cm run script' + + x = '' + + # Check if there are some tags without variation + requested_tags = i_run_cmd.get('tags', []) + + tags_without_variation = False + for t in requested_tags: + if not t.startswith('_'): + tags_without_variation = True + break + + if not tags_without_variation: + # If no tags without variation, add script alias and UID explicitly + if script_uid != '': + x = script_uid + if script_alias != '': + if x != '': + x = ',' + x + x = script_alias + x + + if x != '': + run_cmd += ' ' + x + ' ' + + skip_input_for_fake_run = docker_settings.get( + 'skip_input_for_fake_run', []) + add_quotes_to_keys = docker_settings.get('add_quotes_to_keys', []) + + def rebuild_flags(i_run_cmd, fake_run, + skip_input_for_fake_run, add_quotes_to_keys, key_prefix): + + run_cmd = '' + + keys = list(i_run_cmd.keys()) + + if 'tags' in keys: + # Move tags first + tags_position = keys.index('tags') + del (keys[tags_position]) + keys = ['tags'] + keys + + for k in keys: + # Assemble long key if dictionary + long_key = key_prefix + if long_key != '': + long_key += '.' 
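+            # Illustrative example, added for clarity (an assumption, not part
+            # of the original code): for
+            #   i_run_cmd = {'tags': 'run,mlperf', 'env': {'CM_HOST_OS': 'linux', 'CM_LIST': ['a', 'b']}}
+            # the flags rebuilt by this loop become:
+            #   --tags=run,mlperf --env.CM_HOST_OS=linux --env.CM_LIST,=a,b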
+ long_key += k + + if fake_run and long_key in skip_input_for_fake_run: + continue + + v = i_run_cmd[k] + + q = '\\"' if long_key in add_quotes_to_keys else '' + + if isinstance(v, dict): + run_cmd += rebuild_flags(v, + fake_run, + skip_input_for_fake_run, + add_quotes_to_keys, + long_key) + elif isinstance(v, list): + x = '' + for vv in v: + if x != '': + x += ',' + x += q + str(vv) + q + run_cmd += ' --' + long_key + ',=' + x + else: + run_cmd += ' --' + long_key + '=' + q + str(v) + q + + return run_cmd + + run_cmd += rebuild_flags(i_run_cmd, + fake_run, + skip_input_for_fake_run, + add_quotes_to_keys, + '') + + run_cmd = docker_run_cmd_prefix + ' && ' + \ + run_cmd if docker_run_cmd_prefix != '' else run_cmd + + return {'return': 0, 'run_cmd_string': run_cmd} + + +############################################################ +def aux_search(i): + + self_module = i['self_module'] + + inp = i['input'] + + repos = inp.get('repos', '') +# Grigori Fursin remarked on 20240412 because this line prevents +# from searching for scripts in other public or private repositories. +# Not sure why we enforce just 2 repositories +# +# if repos == '': repos='internal,a4705959af8e447a' + + parsed_artifact = inp.get('parsed_artifact', []) + + if len(parsed_artifact) < 1: + parsed_artifact = [('', ''), ('', '')] + elif len(parsed_artifact) < 2: + parsed_artifact.append(('', '')) + else: + repos = parsed_artifact[1][0] + + list_of_repos = repos.split(',') if ',' in repos else [repos] + + ii = utils.sub_input( + inp, + self_module.cmind.cfg['artifact_keys'] + + ['tags']) + + ii['out'] = None + + # Search for automations in repos + lst = [] + for repo in list_of_repos: + parsed_artifact[1] = ( + '', repo) if utils.is_cm_uid(repo) else ( + repo, '') + ii['parsed_artifact'] = parsed_artifact + r = self_module.search(ii) + if r['return'] > 0: + return r + lst += r['list'] + + return {'return': 0, 'list': lst} + + +############################################################ +def dockerfile(i): + """ + Add CM automation. + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + (repos) (str): list of repositories to search for automations + (output_dir) (str): output directory (./ by default) + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + import copy + + # Check simplified CMD: cm docker script "python app image-classification onnx" + # If artifact has spaces, treat them as tags! 
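+    # Illustrative example, added for clarity (an assumption, not part of the
+    # original code): `cm docker script "python app image-classification onnx"`
+    # is interpreted by the detect_tags_in_artifact call below as if the tags
+    # `python,app,image-classification,onnx` had been passed explicitly.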
+ self_module = i['self_module'] + self_module.cmind.access( + {'action': 'detect_tags_in_artifact', 'automation': 'utils', 'input': i}) + + # Prepare "clean" input to replicate command + r = self_module.cmind.access({'action': 'prune_input', + 'automation': 'utils', + 'input': i, + 'extra_keys_starts_with': ['docker_']}) + i_run_cmd_arc = r['new_input'] + + cur_dir = os.getcwd() + + quiet = i.get('quiet', False) + + console = i.get('out') == 'con' + + # Search for script(s) + r = aux_search({'self_module': self_module, 'input': i}) + if r['return'] > 0: + return r + + lst = r['list'] + + if len(lst) == 0: + return {'return': 1, 'error': 'no scripts were found'} + + +# if i.get('cmd'): +# run_cmd = "cm run script " + " ".join( a for a in i['cmd'] if not a.startswith('--docker_') ) +# elif i.get('artifact'): +# run_cmd = "cm run script "+i['artifact'] +# elif i.get('tags'): +# run_cmd = "cm run script \""+" "+" ".join(i['tags']) + "\"" +# else: +# run_cmd = "" +# +# run_cmd = i.get('docker_run_cmd_prefix') + ' && ' + run_cmd if i.get('docker_run_cmd_prefix') else run_cmd + + env = i.get('env', {}) + state = i.get('state', {}) + const = i.get('const', {}) + const_state = i.get('const_state', {}) + script_automation = i['self_module'] + + dockerfile_env = i.get('dockerfile_env', {}) + + tags_split = i.get('tags', '').split(",") + variation_tags = [t[1:] for t in tags_split if t.startswith("_")] + + for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')): + + meta = artifact.meta + + script_path = artifact.path + + tags = meta.get("tags", []) + tag_string = ",".join(tags) + + script_alias = meta.get('alias', '') + script_uid = meta.get('uid', '') + + verbose = i.get('v', False) + show_time = i.get('show_time', False) + + run_state = {'deps': [], 'fake_deps': [], 'parent': None} + run_state['script_id'] = script_alias + "," + script_uid + run_state['script_variation_tags'] = variation_tags + variations = meta.get('variations', {}) + docker_settings = meta.get('docker', {}) + docker_settings['dockerfile_env'] = dockerfile_env + state['docker'] = docker_settings + add_deps_recursive = i.get('add_deps_recursive', {}) + + r = script_automation.update_state_from_meta( + meta, + env, + state, + const, + const_state, + deps=[], + post_deps=[], + prehook_deps=[], + posthook_deps=[], + new_env_keys=[], + new_state_keys=[], + run_state=run_state, + i=i) + if r['return'] > 0: + return r + + r = script_automation._update_state_from_variations( + i, + meta, + variation_tags, + variations, + env, + state, + const, + const_state, + deps=[], + post_deps=[], + prehook_deps=[], + posthook_deps=[], + new_env_keys_from_meta=[], + new_state_keys_from_meta=[], + add_deps_recursive=add_deps_recursive, + run_state=run_state, + recursion_spaces='', + verbose=False) + if r['return'] > 0: + return r + + docker_settings = state['docker'] + dockerfile_env = docker_settings['dockerfile_env'] + dockerfile_env['CM_RUN_STATE_DOCKER'] = True + + if not docker_settings.get('run', True) and not i.get( + 'docker_run_override', False): + print("docker.run set to False in _cm.json") + continue + '''run_config_path = os.path.join(script_path,'run_config.yml') + if not os.path.exists(run_config_path): + print("No run_config.yml file present in {}".format(script_path)) + continue + import yaml + with open(run_config_path, 'r') as run_config_file: + run_config = yaml.safe_load(run_config_file) + docker_settings = run_config.get('docker') + if not docker_settings or not docker_settings.get('build') or not 
run_config.get('run_with_default_inputs'): + print("Run config is not configured for docker run in {}".format(run_config_path)) + continue + ''' + + deps = docker_settings.get('build_deps', []) + if deps: + r = script_automation._run_deps( + deps, + [], + env, + {}, + {}, + {}, + {}, + '', + [], + '', + False, + '', + verbose, + show_time, + ' ', + run_state) + if r['return'] > 0: + return r + # For updating meta from update_meta_if_env + r = script_automation.update_state_from_meta( + meta, + env, + state, + const, + const_state, + deps=[], + post_deps=[], + prehook_deps=[], + posthook_deps=[], + new_env_keys=[], + new_state_keys=[], + run_state=run_state, + i=i) + if r['return'] > 0: + return r + docker_settings = state['docker'] + + d_env = i_run_cmd_arc.get('env', {}) + for key in list(d_env.keys()): + if key.startswith("CM_TMP_"): + del (d_env[key]) + + # Check if need to update/map/mount inputs and env + r = process_inputs({'run_cmd_arc': i_run_cmd_arc, + 'docker_settings': docker_settings, + 'mounts': []}) + if r['return'] > 0: + return r + + i_run_cmd = r['run_cmd'] + + docker_run_cmd_prefix = i.get( + 'docker_run_cmd_prefix', docker_settings.get( + 'run_cmd_prefix', '')) + + r = regenerate_script_cmd({'script_uid': script_uid, + 'script_alias': script_alias, + 'run_cmd': i_run_cmd, + 'tags': tags, + 'fake_run': True, + 'docker_settings': docker_settings, + 'docker_run_cmd_prefix': docker_run_cmd_prefix}) + if r['return'] > 0: + return r + + run_cmd = r['run_cmd_string'] + + cm_repo = i.get( + 'docker_cm_repo', + docker_settings.get( + 'cm_repo', + 'mlcommons@cm4mlops')) + cm_repo_branch = i.get( + 'docker_cm_repo_branch', + docker_settings.get( + 'cm_repo_branch', + 'mlperf-inference')) + + cm_repo_flags = i.get( + 'docker_cm_repo_flags', + docker_settings.get( + 'cm_repo_flags', + '')) + + docker_base_image = i.get( + 'docker_base_image', + docker_settings.get('base_image')) + docker_os = i.get( + 'docker_os', docker_settings.get( + 'docker_os', 'ubuntu')) + docker_os_version = i.get( + 'docker_os_version', docker_settings.get( + 'docker_os_version', '22.04')) + + docker_cm_repos = i.get( + 'docker_cm_repos', + docker_settings.get( + 'cm_repos', + '')) + + docker_skip_cm_sys_upgrade = i.get( + 'docker_skip_cm_sys_upgrade', docker_settings.get( + 'skip_cm_sys_upgrade', '')) + + docker_extra_sys_deps = i.get('docker_extra_sys_deps', '') + + if not docker_base_image: + dockerfilename_suffix = docker_os + '_' + docker_os_version + else: + if os.name == 'nt': + dockerfilename_suffix = docker_base_image.replace( + '/', '-').replace(':', '-') + else: + dockerfilename_suffix = docker_base_image.split("/") + dockerfilename_suffix = dockerfilename_suffix[len( + dockerfilename_suffix) - 1] + + fake_run_deps = i.get( + 'fake_run_deps', docker_settings.get( + 'fake_run_deps', False)) + docker_run_final_cmds = docker_settings.get( + 'docker_run_final_cmds', []) + + r = check_gh_token(i, docker_settings, quiet) + if r['return'] > 0: + return r + gh_token = r['gh_token'] + i['docker_gh_token'] = gh_token # To pass to docker function if needed + + if i.get('docker_real_run', docker_settings.get( + 'docker_real_run', False)): + fake_run_option = " " + fake_run_deps = False + else: + fake_run_option = " --fake_run" + + docker_copy_files = i.get( + 'docker_copy_files', + docker_settings.get( + 'copy_files', + [])) + + env['CM_DOCKER_PRE_RUN_COMMANDS'] = docker_run_final_cmds + + docker_path = i.get('docker_path', '').strip() + if docker_path == '': + docker_path = script_path + + dockerfile_path = 
os.path.join(
+            docker_path,
+            'dockerfiles',
+            dockerfilename_suffix +
+            '.Dockerfile')
+
+        if i.get('print_deps'):
+            cm_input = {'action': 'run',
+                        'automation': 'script',
+                        'tags': f"""{i.get('tags')}""",
+                        'print_deps': True,
+                        'quiet': True,
+                        'silent': True,
+                        'fake_run': True,
+                        'fake_deps': True
+                        }
+            r = self_module.cmind.access(cm_input)
+            if r['return'] > 0:
+                return r
+            print_deps = r['new_state']['print_deps']
+            comments = ["#RUN " + dep for dep in print_deps]
+            comments.append("")
+            comments.append("# Run CM workflow")
+        else:
+            comments = []
+
+        if i.get('docker_push_image', '') in ['True', True, 'yes']:
+            env['CM_DOCKER_PUSH_IMAGE'] = 'yes'
+
+        cm_docker_input = {'action': 'run',
+                           'automation': 'script',
+                           'tags': 'build,dockerfile',
+                           'cm_repo': cm_repo,
+                           'cm_repo_branch': cm_repo_branch,
+                           'cm_repo_flags': cm_repo_flags,
+                           'docker_base_image': docker_base_image,
+                           'docker_os': docker_os,
+                           'docker_os_version': docker_os_version,
+                           'skip_cm_sys_upgrade': docker_skip_cm_sys_upgrade,
+                           'file_path': dockerfile_path,
+                           'fake_run_option': fake_run_option,
+                           'comments': comments,
+                           'run_cmd': f'{run_cmd} --quiet',
+                           'script_tags': f"""{i.get('tags')}""",
+                           'copy_files': docker_copy_files,
+                           'quiet': True,
+                           'env': env,
+                           'dockerfile_env': dockerfile_env,
+                           'v': i.get('v', False),
+                           'fake_docker_deps': fake_run_deps,
+                           'print_deps': True,
+                           'real_run': True
+                           }
+
+        if docker_cm_repos != '':
+            cm_docker_input['cm_repos'] = docker_cm_repos
+
+        if gh_token != '':
+            cm_docker_input['gh_token'] = gh_token
+
+        if docker_extra_sys_deps != '':
+            cm_docker_input['extra_sys_deps'] = docker_extra_sys_deps
+
+        r = self_module.cmind.access(cm_docker_input)
+        if r['return'] > 0:
+            return r
+
+        print('')
+        print("Dockerfile generated at " + dockerfile_path)
+
+    return {'return': 0}
+
+# we mount the main folder of the CM cache entry in case any file/folder
+# in that cache entry is needed inside the container
+
+
+def get_host_path(value):
+    path_split = value.split(os.sep)
+    if len(path_split) == 1:
+        return value
+
+    new_value = ''
+    if "cache" in path_split and "local" in path_split:
+        repo_entry_index = path_split.index("local")
+        if len(path_split) >= repo_entry_index + 3:
+            return os.sep.join(path_split[0:repo_entry_index + 3])
+
+    return value
+
+
+def get_container_path_script(i):
+    tmp_dep_cached_path = i['tmp_dep_cached_path']
+    value_mnt, value_env = get_container_path(tmp_dep_cached_path)
+    return {'return': 0, 'value_mnt': value_mnt, 'value_env': value_env}
+
+
+def get_container_path(value):
+    path_split = value.split(os.sep)
+    if len(path_split) == 1:
+        return value
+
+    new_value = ''
+    if "cache" in path_split and "local" in path_split:
+        new_path_split = ["", "home", "cmuser", "CM", "repos"]
+        repo_entry_index = path_split.index("local")
+        if len(path_split) >= repo_entry_index + 3:
+            new_path_split1 = new_path_split + \
+                path_split[repo_entry_index:repo_entry_index + 3]
+            new_path_split2 = new_path_split + path_split[repo_entry_index:]
+            return "/".join(new_path_split1), "/".join(new_path_split2)
+    else:
+        orig_path, target_path = update_path_for_docker(path=value)
+        return target_path, target_path
+
+    # return value, value
+
+
+############################################################
+def docker(i):
+    """
+    CM automation to run CM scripts via Docker
+
+    Args:
+      (CM input dict):
+
+      (out) (str): if 'con', output to console
+
+
+    Returns:
+      (CM return dict):
+
+      * return (int): return code == 0 if no error and >0 if error
+      * (error) (str): error string if return>0
+
+    """
+
+    import
copy + import re + + from cmind import __version__ as current_cm_version + + self_module = i['self_module'] + + if isinstance(i.get('docker', None), dict): + # Grigori started cleaning and refactoring this code on 20240929 + # + # 1. use --docker dictionary instead of --docker_{keys} + + if utils.compare_versions(current_cm_version, '2.3.8.1') >= 0: + docker_params = utils.convert_dictionary(i['docker'], 'docker') + i.update(docker_params) + del (i['docker']) + + quiet = i.get('quiet', False) + + detached = i.get('docker_detached', '') + if detached == '': + detached = i.get('docker_dt', '') + if detached == '': + detached = 'no' + + interactive = i.get('docker_interactive', '') + if interactive == '': + interactive = i.get('docker_it', '') + + verbose = i.get('v', False) + show_time = i.get('show_time', False) + + # Check simplified CMD: cm docker script "python app image-classification onnx" + # If artifact has spaces, treat them as tags! + self_module.cmind.access( + {'action': 'detect_tags_in_artifact', 'automation': 'utils', 'input': i}) + + # CAREFUL -> artifacts and parsed_artifacts are not supported in input + # (and should not be?) + if 'artifacts' in i: + del (i['artifacts']) + if 'parsed_artifacts' in i: + del (i['parsed_artifacts']) + + # Prepare "clean" input to replicate command + r = self_module.cmind.access({'action': 'prune_input', + 'automation': 'utils', + 'input': i, + 'extra_keys_starts_with': ['docker_']}) + i_run_cmd_arc = r['new_input'] + + env = i.get('env', {}) + + noregenerate_docker_file = i.get('docker_noregenerate', False) + norecreate_docker_image = i.get('docker_norecreate', True) + + if i.get('docker_skip_build', False): + noregenerate_docker_file = True + norecreate_docker_image = True + env['CM_DOCKER_SKIP_BUILD'] = 'yes' + + # Check available configurations + docker_cfg = i.get('docker_cfg', '') + docker_cfg_uid = i.get('docker_cfg_uid', '') + + if docker_cfg != '' or docker_cfg_uid != '': + # Check if docker_cfg is turned on but not selected + if isinstance(docker_cfg, bool) or str( + docker_cfg).lower() in ['true', 'yes']: + docker_cfg = '' + + r = self_module.cmind.access({'action': 'select_cfg', + 'automation': 'utils,dc2743f8450541e3', + 'tags': 'basic,docker,configurations', + 'title': 'docker', + 'alias': docker_cfg, + 'uid': docker_cfg_uid}) + if r['return'] > 0: + if r['return'] == 16: + return {'return': 1, 'error': 'Docker configuration {} was not found'.format( + docker_cfg)} + return r + + selection = r['selection'] + + docker_input_update = selection['meta']['input'] + + i.update(docker_input_update) + + ########################################################################## + # Run dockerfile + if not noregenerate_docker_file: + r = utils.call_internal_module( + self_module, __file__, 'module_misc', 'dockerfile', i) + if r['return'] > 0: + return r + + # Save current directory + cur_dir = os.getcwd() + + console = i.get('out') == 'con' + + # Search for script(s) + r = aux_search({'self_module': self_module, 'input': i}) + if r['return'] > 0: + return r + + lst = r['list'] + + if len(lst) == 0: + return {'return': 1, 'error': 'no scripts were found'} + + env['CM_RUN_STATE_DOCKER'] = False + script_automation = i['self_module'] + state = i.get('state', {}) + const = i.get('const', {}) + const_state = i.get('const_state', {}) + + tags_split = i.get('tags', '').split(",") + variation_tags = [t[1:] for t in tags_split if t.startswith("_")] + + docker_cache = i.get('docker_cache', "yes") + if docker_cache in ["no", False, "False"]: + if 
'CM_DOCKER_CACHE' not in env: + env['CM_DOCKER_CACHE'] = docker_cache + + image_repo = i.get('docker_image_repo', '') + if image_repo == '': + image_repo = 'local' + + # Host system needs to have docker + r = self_module.cmind.access({'action': 'run', + 'automation': 'script', + 'tags': "get,docker"}) + if r['return'] > 0: + return r + + for artifact in sorted(lst, key=lambda x: x.meta.get('alias', '')): + + meta = artifact.meta + + if i.get('help', False): + return utils.call_internal_module(self_module, __file__, 'module_help', 'print_help', { + 'meta': meta, 'path': artifact.path}) + + script_path = artifact.path + + tags = meta.get("tags", []) + tag_string = ",".join(tags) + + script_alias = meta.get('alias', '') + script_uid = meta.get('uid', '') + + mounts = copy.deepcopy(i.get('docker_mounts', [])) + + '''run_config_path = os.path.join(script_path,'run_config.yml') + if not os.path.exists(run_config_path): + print("No run_config.yml file present in {}".format(script_path)) + continue + import yaml + with open(run_config_path, 'r') as run_config_file: + run_config = yaml.safe_load(run_config_file) + ''' + + variations = meta.get('variations', {}) + docker_settings = meta.get('docker', {}) + state['docker'] = docker_settings + # Todo: Support state, const and add_deps_recursive + run_state = {'deps': [], 'fake_deps': [], 'parent': None} + run_state['script_id'] = script_alias + "," + script_uid + run_state['script_variation_tags'] = variation_tags + add_deps_recursive = i.get('add_deps_recursive', {}) + + r = script_automation.update_state_from_meta( + meta, + env, + state, + const, + const_state, + deps=[], + post_deps=[], + prehook_deps=[], + posthook_deps=[], + new_env_keys=[], + new_state_keys=[], + run_state=run_state, + i=i) + if r['return'] > 0: + return r + + r = script_automation._update_state_from_variations( + i, + meta, + variation_tags, + variations, + env, + state, + const, + const_state, + deps=[], + post_deps=[], + prehook_deps=[], + posthook_deps=[], + new_env_keys_from_meta=[], + new_state_keys_from_meta=[], + add_deps_recursive=add_deps_recursive, + run_state=run_state, + recursion_spaces='', + verbose=False) + if r['return'] > 0: + return r + + docker_settings = state['docker'] + + if not docker_settings.get('run', True) and not i.get( + 'docker_run_override', False): + print("docker.run set to False in _cm.json") + continue + ''' + if not docker_settings or not docker_settings.get('build') or not run_config.get('run_with_default_inputs'): + print("Run config is not configured for docker run in {}".format(run_config_path)) + continue + ''' + + # Check if need to update/map/mount inputs and env + r = process_inputs({'run_cmd_arc': i_run_cmd_arc, + 'docker_settings': docker_settings, + 'mounts': mounts}) + if r['return'] > 0: + return r + + i_run_cmd = r['run_cmd'] + + # Check if need to mount home directory + current_path_target = '/cm-mount/current' + if docker_settings.get('mount_current_dir', '') == 'yes': + update_path_for_docker( + '.', mounts, force_path_target=current_path_target) + + _os = i.get('docker_os', docker_settings.get('os', 'ubuntu')) + version = i.get( + 'docker_os_version', + docker_settings.get( + 'os_version', + '22.04')) + + build_deps = docker_settings.get('deps', []) + deps = docker_settings.get('deps', []) + deps = build_deps + deps + if deps: + r = script_automation._run_deps( + deps, + [], + env, + {}, + {}, + {}, + {}, + '', + [], + '', + False, + '', + verbose, + show_time, + ' ', + run_state) + if r['return'] > 0: + return r + + # For 
updating meta from update_meta_if_env + r = script_automation.update_state_from_meta( + meta, + env, + state, + const, + const_state, + deps=[], + post_deps=[], + prehook_deps=[], + posthook_deps=[], + new_env_keys=[], + new_state_keys=[], + run_state=run_state, + i=i) + if r['return'] > 0: + return r + + docker_settings = state['docker'] + + for key in docker_settings.get('mounts', []): + mounts.append(key) + + # Updating environment variables from CM input based on input_mapping + # from meta + input_mapping = meta.get('input_mapping', {}) + + for c_input in input_mapping: + if c_input in i: + env[input_mapping[c_input]] = i[c_input] + # del(i[c_input]) + + # Updating environment variables from CM input based on + # docker_input_mapping from meta + + docker_input_mapping = docker_settings.get('docker_input_mapping', {}) + + for c_input in docker_input_mapping: + if c_input in i: + env[docker_input_mapping[c_input]] = i[c_input] + # del(i[c_input]) + + # env keys corresponding to container mounts are explicitly passed to + # the container run cmd + container_env_string = '' + for index in range(len(mounts)): + mount = mounts[index] + # Since windows may have 2 :, we search from the right + j = mount.rfind(':') + if j > 0: + mount_parts = [mount[:j], mount[j + 1:]] + else: + return { + 'return': 1, 'error': 'Can\'t find separator : in a mount string: {}'.format(mount)} + +# mount_parts = mount.split(":") +# if len(mount_parts) != 2: +# return {'return': 1, 'error': f'Invalid mount specified in docker +# settings'} + + host_mount = mount_parts[0] + new_host_mount = host_mount + container_mount = mount_parts[1] + new_container_mount = container_mount + + tmp_values = re.findall(r'\${{ (.*?) }}', str(host_mount)) + skip = False + host_env_key = None + if tmp_values: + for tmp_value in tmp_values: + if tmp_value in env: + host_env_key = tmp_value + new_host_mount = get_host_path(env[tmp_value]) + else: # we skip those mounts + mounts[index] = None + skip = True + break + + tmp_values = re.findall(r'\${{ (.*?) 
}}', str(container_mount)) + if tmp_values: + for tmp_value in tmp_values: + container_env_key = tmp_value + if tmp_value in env: + new_container_mount, new_container_mount_env = get_container_path( + env[tmp_value]) + container_env_key = new_container_mount_env + # container_env_string += " --env.{}={} ".format(tmp_value, new_container_mount_env) + else: # we skip those mounts + mounts[index] = None + skip = True + break + else: + container_env_key = str(container_mount) + + if skip: + continue + mounts[index] = new_host_mount + ":" + new_container_mount + if host_env_key: + container_env_string += " --env.{}={} ".format( + host_env_key, container_env_key) + + for v in docker_input_mapping: + if docker_input_mapping[v] == host_env_key: + i[v] = container_env_key + i_run_cmd[v] = container_env_key + + mounts = list(filter(lambda item: item is not None, mounts)) + + mount_string = "" if len(mounts) == 0 else ",".join(mounts) + + # check for proxy settings and pass onto the docker + proxy_keys = [ + "ftp_proxy", + "FTP_PROXY", + "http_proxy", + "HTTP_PROXY", + "https_proxy", + "HTTPS_PROXY", + "no_proxy", + "NO_PROXY", + "socks_proxy", + "SOCKS_PROXY", + "GH_TOKEN"] + + if env.get('+ CM_DOCKER_BUILD_ARGS', []) == []: + env['+ CM_DOCKER_BUILD_ARGS'] = [] + + for key in proxy_keys: + if os.environ.get(key, '') != '': + value = os.environ[key] + container_env_string += " --env.{}={} ".format(key, value) + env['+ CM_DOCKER_BUILD_ARGS'].append( + "{}={}".format(key, value)) + + docker_use_host_group_id = i.get( + 'docker_use_host_group_id', + docker_settings.get('use_host_group_id')) + if str(docker_use_host_group_id).lower() not in [ + 'false', 'no', '0'] and os.name != 'nt': + env['+ CM_DOCKER_BUILD_ARGS'].append( + "{}={}".format('GID', '\\" $(id -g $USER) \\"')) + + docker_use_host_user_id = i.get( + 'docker_use_host_user_id', + docker_settings.get('use_host_user_id')) + if str(docker_use_host_user_id).lower() not in [ + 'false', 'no', '0'] and os.name != 'nt': + env['+ CM_DOCKER_BUILD_ARGS'].append( + "{}={}".format('UID', '\\" $(id -u $USER) \\"')) + + docker_base_image = i.get( + 'docker_base_image', + docker_settings.get('base_image')) + docker_os = i.get('docker_os', docker_settings.get('os', 'ubuntu')) + docker_os_version = i.get( + 'docker_os_version', docker_settings.get( + 'os_version', '22.04')) + image_tag_extra = i.get( + 'docker_image_tag_extra', + docker_settings.get( + 'image_tag_extra', + '-latest')) + + if not docker_base_image: + dockerfilename_suffix = docker_os + '_' + docker_os_version + else: + if os.name == 'nt': + dockerfilename_suffix = docker_base_image.replace( + '/', '-').replace(':', '-') + else: + dockerfilename_suffix = docker_base_image.split("/") + dockerfilename_suffix = dockerfilename_suffix[len( + dockerfilename_suffix) - 1] + + cm_repo = i.get( + 'docker_cm_repo', + docker_settings.get( + 'cm_repo', + 'mlcommons@cm4mlops')) + + docker_path = i.get('docker_path', '').strip() + if docker_path == '': + docker_path = script_path + + dockerfile_path = os.path.join( + docker_path, + 'dockerfiles', + dockerfilename_suffix + + '.Dockerfile') + + # Skips docker run cmd and gives an interactive shell to the user + docker_skip_run_cmd = i.get( + 'docker_skip_run_cmd', docker_settings.get( + 'skip_run_cmd', False)) + + docker_pre_run_cmds = i.get( + 'docker_pre_run_cmds', []) + docker_settings.get('pre_run_cmds', []) + + docker_run_cmd_prefix = i.get( + 'docker_run_cmd_prefix', docker_settings.get( + 'run_cmd_prefix', '')) + + all_gpus = i.get('docker_all_gpus', 
docker_settings.get('all_gpus')) + + num_gpus = i.get('docker_num_gpus', docker_settings.get('num_gpus')) + + device = i.get('docker_device', docker_settings.get('device')) + + image_name = i.get( + 'docker_image_name', + docker_settings.get( + 'image_name', + '')) + + r = check_gh_token(i, docker_settings, quiet) + if r['return'] > 0: + return r + gh_token = r['gh_token'] + + port_maps = i.get( + 'docker_port_maps', + docker_settings.get( + 'port_maps', + [])) + + shm_size = i.get( + 'docker_shm_size', + docker_settings.get( + 'shm_size', + '')) + + pass_user_id = i.get( + 'docker_pass_user_id', + docker_settings.get( + 'pass_user_id', + '')) + pass_user_group = i.get( + 'docker_pass_user_group', + docker_settings.get( + 'pass_user_group', + '')) + + extra_run_args = i.get( + 'docker_extra_run_args', + docker_settings.get( + 'extra_run_args', + '')) + + if detached == '': + detached = docker_settings.get('detached', '') + + if str(docker_skip_run_cmd).lower() in ['true', '1', 'yes']: + interactive = 'yes' + elif interactive == '': + interactive = docker_settings.get('interactive', '') + + +# # Regenerate run_cmd +# if i.get('cmd'): +# run_cmd = "cm run script " + " ".join( a for a in i['cmd'] if not a.startswith('--docker_') ) +# elif i.get('artifact'): +# run_cmd = "cm run script "+i['artifact'] +# elif i.get('tags'): +# run_cmd = "cm run script \""+" "+" ".join(i['tags']) + "\"" +# else: +# run_cmd = "" + + r = regenerate_script_cmd({'script_uid': script_uid, + 'script_alias': script_alias, + 'tags': tags, + 'run_cmd': i_run_cmd, + 'docker_settings': docker_settings, + 'docker_run_cmd_prefix': i.get('docker_run_cmd_prefix', '')}) + if r['return'] > 0: + return r + run_cmd = r['run_cmd_string'] + ' ' + \ + container_env_string + ' --docker_run_deps ' + + env['CM_RUN_STATE_DOCKER'] = True + + if docker_settings.get('mount_current_dir', '') == 'yes': + run_cmd = 'cd ' + current_path_target + ' && ' + run_cmd + + final_run_cmd = run_cmd if docker_skip_run_cmd not in [ + 'yes', True, 'True'] else 'cm version' + + print('') + print('CM command line regenerated to be used inside Docker:') + print('') + print(final_run_cmd) + print('') + + docker_recreate_image = 'yes' if not norecreate_docker_image else 'no' + + if i.get('docker_push_image', '') in ['True', True, 'yes']: + env['CM_DOCKER_PUSH_IMAGE'] = 'yes' + + cm_docker_input = {'action': 'run', + 'automation': 'script', + 'tags': 'run,docker,container', + 'recreate': docker_recreate_image, + 'docker_base_image': docker_base_image, + 'docker_os': docker_os, + 'docker_os_version': docker_os_version, + 'cm_repo': cm_repo, + 'env': env, + 'image_repo': image_repo, + 'interactive': interactive, + 'mounts': mounts, + 'image_name': image_name, + # 'image_tag': script_alias, + 'image_tag_extra': image_tag_extra, + 'detached': detached, + 'script_tags': f"""{i.get('tags')}""", + 'run_cmd': final_run_cmd, + 'v': i.get('v', False), + 'quiet': True, + 'pre_run_cmds': docker_pre_run_cmds, + 'real_run': True, + 'add_deps_recursive': { + 'build-docker-image': { + 'dockerfile': dockerfile_path + } + } + } + + if all_gpus: + cm_docker_input['all_gpus'] = True + + if num_gpus: + cm_docker_input['num_gpus'] = str(num_gpus) + + if device: + cm_docker_input['device'] = device + + if gh_token != '': + cm_docker_input['gh_token'] = gh_token + + if port_maps: + cm_docker_input['port_maps'] = port_maps + + if shm_size != '': + cm_docker_input['shm_size'] = shm_size + + if pass_user_id != '': + cm_docker_input['pass_user_id'] = pass_user_id + + if pass_user_group != 
'': + cm_docker_input['pass_user_group'] = pass_user_group + + if extra_run_args != '': + cm_docker_input['extra_run_args'] = extra_run_args + + if i.get('docker_save_script', ''): + cm_docker_input['save_script'] = i['docker_save_script'] + + print('') + + r = self_module.cmind.access(cm_docker_input) + if r['return'] > 0: + return r + + return {'return': 0} + +############################################################ + + +def check_gh_token(i, docker_settings, quiet): + gh_token = i.get('docker_gh_token', '') + + if docker_settings.get('gh_token_required', False) and gh_token == '': + rx = { + 'return': 1, + 'error': 'GH token is required but not provided. Use --docker_gh_token to set it'} + + if quiet: + return rx + + print('') + gh_token = input( + 'Enter GitHub token to access private CM repositories required for this CM script: ') + + if gh_token == '': + return rx + + return {'return': 0, 'gh_token': gh_token} diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/README-extra.md new file mode 100644 index 000000000..05e53dc1a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/README-extra.md @@ -0,0 +1,2 @@ +# CM script to run and reproduce experiments + diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/_cm.yaml b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/_cm.yaml new file mode 100644 index 000000000..8019b3647 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/_cm.yaml @@ -0,0 +1,38 @@ +cache: false + +deps: + # Detect host OS features + - tags: detect,os + + # Detect/install python + - tags: get,python + names: + - python + - python3 + +script_name: run + +input_mapping: + experiment: CM_EXPERIMENT + +default_env: + CM_EXPERIMENT: '1' + +variations: + install_deps: + script_name: install_deps + + run: + script_name: run + + reproduce: + script_name: reproduce + + plot: + script_name: plot + + analyze: + script_name: analyze + + validate: + script_name: validate diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/analyze.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/analyze.bat new file mode 100644 index 000000000..7e786771a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/analyze.bat @@ -0,0 +1,12 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +rem echo. +rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/analyze.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/analyze.sh new file mode 100644 index 000000000..630c3db3d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/analyze.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" + +#echo "" +#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +#test $? 
-eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/customize.py b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/customize.py new file mode 100644 index 000000000..273999d46 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/customize.py @@ -0,0 +1,24 @@ +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/install_deps.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/install_deps.bat new file mode 100644 index 000000000..47f7e7ce2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/install_deps.bat @@ -0,0 +1,18 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( + + echo. + echo Installing requirements.txt ... + echo. + + %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% +) diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/install_deps.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/install_deps.sh new file mode 100644 index 000000000..cb7c44c2b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/install_deps.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" + +if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then + echo "" + echo "Installing requirements.txt ..." + echo "" + + ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + test $? -eq 0 || exit 1 +fi diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/main.py b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/main.py new file mode 100644 index 000000000..caa499bf0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/main.py @@ -0,0 +1,10 @@ +import os + +if __name__ == "__main__": + + print('') + print('Main script:') + print('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT', ''))) + print('') + + exit(0) diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/plot.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/plot.bat new file mode 100644 index 000000000..7e786771a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/plot.bat @@ -0,0 +1,12 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +rem echo. 
+rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/plot.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/plot.sh new file mode 100644 index 000000000..630c3db3d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/plot.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" + +#echo "" +#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +#test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/reproduce.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/reproduce.bat new file mode 100644 index 000000000..7e786771a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/reproduce.bat @@ -0,0 +1,12 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +rem echo. +rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/reproduce.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/reproduce.sh new file mode 100644 index 000000000..630c3db3d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/reproduce.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" + +#echo "" +#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +#test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/run.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/run.bat new file mode 100644 index 000000000..6c1274ce6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/run.bat @@ -0,0 +1,12 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +echo. +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/run.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/run.sh new file mode 100644 index 000000000..2150b45dc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/run.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" + +echo "" +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/validate.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/validate.bat new file mode 100644 index 000000000..7e786771a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/validate.bat @@ -0,0 +1,12 @@ +@echo off + +set CUR_DIR=%cd% + +echo. 
+echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +rem echo. +rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/validate.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/validate.sh new file mode 100644 index 000000000..630c3db3d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-ae-python/validate.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}" + +#echo "" +#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +#test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-python/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/README-extra.md new file mode 100644 index 000000000..582991f6d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/README-extra.md @@ -0,0 +1 @@ +# CM script diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-python/_cm.yaml b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/_cm.yaml new file mode 100644 index 000000000..adbb8d4e7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/_cm.yaml @@ -0,0 +1,23 @@ +cache: false + +deps: + # Detect host OS features + - tags: detect,os + + # Detect/install python + - tags: get,python + names: + - python + - python3 + +input_mapping: + var1: CM_VAR1 + req: PIP_REQUIREMENTS + +default_env: + CM_VAR1: 'something' + +variations: + req: + env: + PIP_REQUIREMENTS: True diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-python/customize.py b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/customize.py new file mode 100644 index 000000000..625b643d4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/customize.py @@ -0,0 +1,32 @@ +from cmind import utils +import os + + +def preprocess(i): + + print('') + print('Preprocessing ...') + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + print(' ENV CM_VAR1: {}'.format(env.get('CM_VAR1', ''))) + + return {'return': 0} + + +def postprocess(i): + + print('') + print('Postprocessing ...') + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-python/main.py b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/main.py new file mode 100644 index 000000000..e3302f36f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/main.py @@ -0,0 +1,10 @@ +import os + +if __name__ == "__main__": + + print('') + print('Main script:') + print('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1', ''))) + print('') + + exit(0) diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-python/requirements.txt b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-python/run.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/run.bat new file mode 100644 index 000000000..f9e1264bc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/run.bat @@ -0,0 
+1,25 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV PIP_REQUIREMENTS: %PIP_REQUIREMENTS% +echo ENV CM_VAR1: %CM_VAR1% + +if "%PIP_REQUIREMENTS%" == "True" ( + if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( + + echo. + echo Installing requirements.txt ... + echo. + + %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + ) +) + +echo. +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-python/run.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/run.sh new file mode 100644 index 000000000..a1a6aec2e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-python/run.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV PIP_REQUIREMENTS: ${PIP_REQUIREMENTS}" +echo "ENV CM_VAR1: ${CM_VAR1}" + +if [ "${PIP_REQUIREMENTS}" == "True" ]; then + if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then + echo "" + echo "Installing requirements.txt ..." + echo "" + + ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + test $? -eq 0 || exit 1 + fi +fi + +echo "" +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/README-extra.md new file mode 100644 index 000000000..582991f6d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/README-extra.md @@ -0,0 +1 @@ +# CM script diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/_cm.yaml b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/_cm.yaml new file mode 100644 index 000000000..eaff95e47 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/_cm.yaml @@ -0,0 +1,42 @@ +cache: false + +deps: + # Detect host OS features + - tags: detect,os + + # Detect/install python + - tags: get,python + names: + - python + - python3 + + - tags: get,generic-python-lib,_torch + skip_if_env: + USE_CUDA: + - yes + + - tags: get,generic-python-lib,_torch_cuda + enable_if_env: + USE_CUDA: + - yes + + - tags: get,generic-python-lib,_package.numpy + + +input_mapping: + var1: CM_VAR1 + req: PIP_REQUIREMENTS + +default_env: + CM_VAR1: 'something' + +variations: + req: + env: + PIP_REQUIREMENTS: True + + cuda: + env: + USE_CUDA: yes + deps: + - tags: get,cuda diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/customize.py b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/customize.py new file mode 100644 index 000000000..625b643d4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/customize.py @@ -0,0 +1,32 @@ +from cmind import utils +import os + + +def preprocess(i): + + print('') + print('Preprocessing ...') + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + print(' ENV CM_VAR1: {}'.format(env.get('CM_VAR1', ''))) + + return {'return': 0} + + +def postprocess(i): + + print('') + print('Postprocessing ...') + + env = i['env'] + + return {'return': 0} 
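[Editor's illustration] The `_cm.yaml`, `customize.py` and run scripts above form a complete templated CM script: `deps` pull in the detected OS, Python and either `_torch` or `_torch_cuda` depending on `USE_CUDA`, `input_mapping` turns command-line flags into environment variables, and `variations` toggle extra env and dependencies. A minimal usage sketch is below; the tags `app,template,pytorch` are hypothetical placeholders, since the template itself declares no tags.

```python
# Hypothetical usage sketch (not part of this patch): running a script created
# from the template above via the cmind Python API. The tags
# 'app,template,pytorch' are placeholders - use the tags that the generated
# script declares in its _cm.yaml.
import cmind

r = cmind.access({
    'action': 'run',
    'automation': 'script',
    'tags': 'app,template,pytorch,_cuda',  # '_cuda' selects the "cuda" variation (sets USE_CUDA)
    'var1': 'hello',                       # mapped to env CM_VAR1 via input_mapping
    'out': 'con'
})
if r['return'] > 0:
    print(r['error'])
```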
diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/main.py b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/main.py new file mode 100644 index 000000000..217aed3b9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/main.py @@ -0,0 +1,15 @@ +import os + +import torch + +if __name__ == "__main__": + + print('') + print('Main script:') + print('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1', ''))) + print('ENV USE_CUDA: {}'.format(os.environ.get('USE_CUDA', ''))) + print('') + print('PyTorch version: {}'.format(torch.__version__)) + print('') + + exit(0) diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/requirements.txt b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/run.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/run.bat new file mode 100644 index 000000000..f9e1264bc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/run.bat @@ -0,0 +1,25 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV PIP_REQUIREMENTS: %PIP_REQUIREMENTS% +echo ENV CM_VAR1: %CM_VAR1% + +if "%PIP_REQUIREMENTS%" == "True" ( + if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( + + echo. + echo Installing requirements.txt ... + echo. + + %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + ) +) + +echo. +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/run.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/run.sh new file mode 100644 index 000000000..a1a6aec2e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template-pytorch/run.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" +echo "ENV PIP_REQUIREMENTS: ${PIP_REQUIREMENTS}" +echo "ENV CM_VAR1: ${CM_VAR1}" + +if [ "${PIP_REQUIREMENTS}" == "True" ]; then + if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then + echo "" + echo "Installing requirements.txt ..." + echo "" + + ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + test $? -eq 0 || exit 1 + fi +fi + +echo "" +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/main.py +test $? 
-eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template/README-extra.md b/cmx4mlops/cmx4mlops/repo/automation/script/template/README-extra.md new file mode 100644 index 000000000..582991f6d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template/README-extra.md @@ -0,0 +1 @@ +# CM script diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template/customize.py b/cmx4mlops/cmx4mlops/repo/automation/script/template/customize.py new file mode 100644 index 000000000..273999d46 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template/customize.py @@ -0,0 +1,24 @@ +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template/run.bat b/cmx4mlops/cmx4mlops/repo/automation/script/template/run.bat new file mode 100644 index 000000000..648302ca7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template/run.sh b/cmx4mlops/cmx4mlops/repo/automation/script/template/run.sh new file mode 100644 index 000000000..4c23c380e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + +echo "Running: " +echo "${CM_RUN_CMD}" +echo "" + +if [[ ${CM_FAKE_RUN} != "yes" ]]; then + eval "${CM_RUN_CMD}" + test $? -eq 0 || exit 1 +fi diff --git a/cmx4mlops/cmx4mlops/repo/automation/script/template_list_of_scripts.md b/cmx4mlops/cmx4mlops/repo/automation/script/template_list_of_scripts.md new file mode 100644 index 000000000..198a500f1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/script/template_list_of_scripts.md @@ -0,0 +1,52 @@ +[ [Back to index](README.md) ] + + + +This is an automatically generated list of portable and reusable automation recipes (CM scripts) +with a [human-friendly interface (CM)](https://github.com/mlcommons/ck) +to run a growing number of ad-hoc MLPerf, MLOps, and DevOps scripts +from [MLCommons projects](https://github.com/mlcommons/cm4mlops/tree/main/script) +and [research papers](https://www.youtube.com/watch?v=7zpeIVwICa4) +in a unified way on any operating system with any software and hardware +natively or inside containers. + +Click on any automation recipe below to learn how to run and reuse it +via the CM command line, Python API or GUI. + +CM scripts can easily be chained together into automation workflows using `deps` and `tags` keys +while automatically updating all environment variables and paths +for a given task and platform [using simple JSON or YAML](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml). + + +*Note that CM is a community project being developed and extended by [MLCommons members and individual contributors](../CONTRIBUTING.md) - + you can find the source code of CM scripts maintained by MLCommons [here](../cm-mlops/script). 
+ Please join [Discord server](https://discord.gg/JjWNWXKxwT) to participate in collaborative developments or provide your feedback.* + + +# License + +[Apache 2.0](LICENSE.md) + + +# Copyright + +2022-2024 [MLCommons](https://mlcommons.org) + + + + + +# List of CM scripts by categories + +{{CM_TOC_CATEGORIES}} + +{{CM_TOC2}} + +# List of all sorted CM scripts + +{{CM_TOC}} + + +{{CM_MAIN}} diff --git a/cmx4mlops/cmx4mlops/repo/automation/utils/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/automation/utils/COPYRIGHT.md new file mode 100644 index 000000000..2a313520b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/utils/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone and continue development as a community effort. diff --git a/cmx4mlops/cmx4mlops/repo/automation/utils/README.md b/cmx4mlops/cmx4mlops/repo/automation/utils/README.md new file mode 100644 index 000000000..9a844c656 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/utils/README.md @@ -0,0 +1,387 @@ +*This README is automatically generated - don't edit! Use `README-extra.md` for extra notes!* + +### Automation actions + +#### test + + * CM CLI: ```cm test utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L15)) + * CM CLI with UID: ```cm test utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L15)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'test' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L15) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### get_host_os_info + + * CM CLI: ```cm get_host_os_info utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L54)) + * CM CLI with UID: ```cm get_host_os_info utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L54)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'get_host_os_info' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L54) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### download_file + + * CM CLI: ```cm download_file utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L156)) + * CM CLI with UID: ```cm download_file utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L156)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'download_file' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L156) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### unzip_file + + * CM CLI: ```cm unzip_file utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L265)) + * CM CLI with UID: 
```cm unzip_file utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L265)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'unzip_file' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L265) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### compare_versions + + * CM CLI: ```cm compare_versions utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L343)) + * CM CLI with UID: ```cm compare_versions utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L343)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'compare_versions' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L343) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### json2yaml + + * CM CLI: ```cm json2yaml utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L391)) + * CM CLI with UID: ```cm json2yaml utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L391)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'json2yaml' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L391) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### yaml2json + + * CM CLI: ```cm yaml2json utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L429)) + * CM CLI with UID: ```cm yaml2json utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L429)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'yaml2json' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L429) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### sort_json + + * CM CLI: ```cm sort_json utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L467)) + * CM CLI with UID: ```cm sort_json utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L467)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'sort_json' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L467) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### dos2unix + + * CM CLI: ```cm dos2unix utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L504)) + * CM CLI with UID: ```cm dos2unix utils,dc2743f8450541e3``` ([add 
flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L504)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'dos2unix' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L504) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### replace_string_in_file + + * CM CLI: ```cm replace_string_in_file utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L541)) + * CM CLI with UID: ```cm replace_string_in_file utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L541)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'replace_string_in_file' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L541) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### create_toc_from_md + + * CM CLI: ```cm create_toc_from_md utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L591)) + * CM CLI with UID: ```cm create_toc_from_md utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L591)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'create_toc_from_md' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L591) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### copy_to_clipboard + + * CM CLI: ```cm copy_to_clipboard utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L659)) + * CM CLI with UID: ```cm copy_to_clipboard utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L659)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'copy_to_clipboard' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L659) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### list_files_recursively + + * CM CLI: ```cm list_files_recursively utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L737)) + * CM CLI with UID: ```cm list_files_recursively utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L737)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'list_files_recursively' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L737) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### generate_secret + + * CM CLI: ```cm generate_secret utils``` ([add flags (dict keys) from this 
API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L770)) + * CM CLI with UID: ```cm generate_secret utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L770)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'generate_secret' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L770) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### detect_tags_in_artifact + + * CM CLI: ```cm detect_tags_in_artifact utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L793)) + * CM CLI with UID: ```cm detect_tags_in_artifact utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L793)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'detect_tags_in_artifact' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L793) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### prune_input + + * CM CLI: ```cm prune_input utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L822)) + * CM CLI with UID: ```cm prune_input utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L822)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'prune_input' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L822) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### uid + + * CM CLI: ```cm uid utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L864)) + * CM CLI with UID: ```cm uid utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L864)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'uid' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L864) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### system + + * CM CLI: ```cm system utils``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L891)) + * CM CLI with UID: ```cm system utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L891)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'system' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L891) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +#### load_cfg + + * CM CLI: ```cm load_cfg utils``` ([add flags (dict keys) from this 
API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L969)) + * CM CLI with UID: ```cm load_cfg utils,dc2743f8450541e3``` ([add flags (dict keys) from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L969)) + * CM Python API: + ```python + import cmind + + r=cm.access({ + 'action':'load_cfg' + 'automation':'utils,dc2743f8450541e3' + 'out':'con' + ``` + [add keys from this API](https://github.com/mlcommons/ck/tree/master/cm-mlops/automation/utils/module.py#L969) + ```python + }) + if r['return']>0: + print(r['error']) + ``` + +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/automation/utils/_cm.json b/cmx4mlops/cmx4mlops/repo/automation/utils/_cm.json new file mode 100644 index 000000000..f2dc9c5b6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/utils/_cm.json @@ -0,0 +1,12 @@ +{ + "alias": "utils", + "automation_alias": "automation", + "automation_uid": "bbeb15d8f0a944a4", + "desc": "Accessing various CM utils", + "developers": "[Grigori Fursin](https://cKnowledge.org/gfursin)", + "sort": 800, + "tags": [ + "automation" + ], + "uid": "dc2743f8450541e3" +} diff --git a/cmx4mlops/cmx4mlops/repo/automation/utils/module.py b/cmx4mlops/cmx4mlops/repo/automation/utils/module.py new file mode 100644 index 000000000..90b5c5c29 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/utils/module.py @@ -0,0 +1,1121 @@ +# Author: Grigori Fursin +# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +import os + +from cmind.automation import Automation +from cmind import utils + + +class CAutomation(Automation): + """ + Automation actions + """ + + ############################################################ + def __init__(self, cmind, automation_file): + super().__init__(cmind, __file__) + + ############################################################ + def test(self, i): + """ + Test automation + + Args: + (CM input dict): + + (out) (str): if 'con', output to console + + automation (str): automation as CM string object + + parsed_automation (list): prepared in CM CLI or CM access function + [ (automation alias, automation UID) ] or + [ (automation alias, automation UID), (automation repo alias, automation repo UID) ] + + (artifact) (str): artifact as CM string object + + (parsed_artifact) (list): prepared in CM CLI or CM access function + [ (artifact alias, artifact UID) ] or + [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ] + + ... 
+ + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * Output from this automation action + + """ + + import json + print(json.dumps(i, indent=2)) + + return {'return': 0} + + ########################################################################## + def get_host_os_info(self, i): + """ + Get the host platform name (currently windows, linux or darwin) and OS bits + + Args: + (CM input dict): + + (bits) (str): force host platform bits + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * info (dict): + * platform (str): "windows", "linux" or "darwin" + * bat_ext (str): ".bat" or ".sh" + * bits (str): 32 or 64 bits + * python_bits (str): python bits + + """ + + import os + import platform + import struct + + info = {} + + pbits = str(8 * struct.calcsize("P")) + + if platform.system().lower().startswith('win'): + platform = 'windows' + info['bat_ext'] = '.bat' + info['set_env'] = 'set ${key}=${value}' + info['env_separator'] = ';' + info['env_var'] = '%env_var%' + info['bat_rem'] = 'rem ${rem}' + info['run_local_bat'] = 'call ${bat_file}' + info['run_local_bat_from_python'] = 'call ${bat_file}' + info['run_bat'] = 'call ${bat_file}' + info['start_script'] = ['@echo off', ''] + info['env'] = { + "CM_WINDOWS": "yes" + } + else: + if platform.system().lower().startswith('darwin'): + platform = 'darwin' + else: + platform = 'linux' + + info['bat_ext'] = '.sh' + info['set_env'] = 'export ${key}="${value}"' + info['env_separator'] = ':' + info['env_var'] = '${env_var}' + info['set_exec_file'] = 'chmod 755 "${file_name}"' + info['bat_rem'] = '# ${rem}' + info['run_local_bat'] = '. ./${bat_file}' + info['run_local_bat_from_python'] = 'bash -c ". ./${bat_file}"' + info['run_bat'] = '. 
${bat_file}' + info['start_script'] = ['#!/bin/bash', ''] + info['env'] = {} + + info['platform'] = platform + + obits = i.get('bits', '') + if obits == '': + obits = '32' + if platform == 'windows': + # Trying to get fast way to detect bits + if os.environ.get('ProgramW6432', '') != '' or os.environ.get( + 'ProgramFiles(x86)', '') != '': # pragma: no cover + obits = '64' + else: + # On Linux, first try getconf LONG_BIT; if that doesn't work, use + # python bits + + obits = pbits + + r = utils.gen_tmp_file({}) + if r['return'] > 0: + return r + + fn = r['file_name'] + + cmd = 'getconf LONG_BIT > ' + fn + rx = os.system(cmd) + + if rx == 0: + r = utils.load_txt(file_name=fn, remove_after_read=True) + + if r['return'] == 0: + s = r['string'].strip() + if len(s) > 0 and len(s) < 4: + obits = s + else: + if os.path.isfile(fn): + os.remove(fn) + + info['bits'] = obits + info['python_bits'] = pbits + + return {'return': 0, 'info': info} + + ########################################################################## + def download_file(self, i): + """ + Download file using requests + + Args: + (CM input dict): + + url (str): URL with file + (filename) (str): explicit file name + (path) (str): path to record file (or current if empty) + (chunk_size) (int): chunk size in bytes (65536 by default) + (text) (str): print text before downloaded status ("Downloaded: " by default) + (verify) (bool): verify SSL certificate if True (True by default) + can be switched by global env CM_UTILS_DOWNLOAD_VERIFY_SSL = no + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * filename (str): file name + * path (str): path to file + * size (int): file size + + """ + + import requests + import time + import sys + from urllib import parse + + # Get URL + url = i['url'] + + # Check file name + file_name = i.get('filename', '') + if file_name == '': + parsed_url = parse.urlparse(url) + file_name = os.path.basename(parsed_url.path) + + # Check path + path = i.get('path', '') + if path is None or path == '': + path = os.getcwd() + + # Output file + path_to_file = os.path.join(path, file_name) + + if os.path.isfile(path_to_file): + os.remove(path_to_file) + + print('Downloading to {}'.format(path_to_file)) + print('') + + # Download + size = -1 + downloaded = 0 + chunk_size = i.get('chunk_size', 65536) + + text = i.get('text', 'Downloaded: ') + + if 'CM_UTILS_DOWNLOAD_VERIFY_SSL' in os.environ: + verify = os.environ['CM_UTILS_DOWNLOAD_VERIFY_SSL'] == 'yes' + else: + verify = i.get('verify', True) + + try: + with requests.get(url, stream=True, allow_redirects=True, verify=verify) as download: + download.raise_for_status() + + size_string = download.headers.get('Content-Length') + + if size_string is None: + transfer_encoding = download.headers.get( + 'Transfer-Encoding', '') + if transfer_encoding != 'chunked': + return {'return': 1, 'error': 'did not receive file'} + else: + size_string = "0" + + size = int(size_string) + + with open(path_to_file, 'wb') as output: + for chunk in download.iter_content(chunk_size=chunk_size): + + if chunk: + output.write(chunk) + if size == 0: + continue + downloaded += 1 + percent = downloaded * chunk_size * 100 / size + + sys.stdout.write("\r{}{:3.0f}%".format(text, percent)) + sys.stdout.flush() + + sys.stdout.write("\r{}{:3.0f}%".format(text, 100)) + sys.stdout.flush() + + except Exception as e: + return {'return': 1, 'error': format(e)} + + print('') + if size == 0: + file_stats = 
os.stat(path_to_file) + size = file_stats.st_size + + return {'return': 0, 'filename': file_name, + 'path': path_to_file, 'size': size} + + ########################################################################## + def unzip_file(self, i): + """ + Unzip file + + Args: + (CM input dict): + + filename (str): explicit file name + (path) (str): path where to unzip file (current path otherwise) + (strip_folders) (int): strip first folders + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + import zipfile + + # Check file name + file_name = i['filename'] + + if not os.path.isfile(file_name): + return {'return': 1, + 'error': 'file {} not found'.format(file_name)} + + console = i.get('out') == 'con' + + # Open the zip archive and list its files + file_name_handle = open(file_name, 'rb') + file_name_zip = zipfile.ZipFile(file_name_handle) + + info_files = file_name_zip.infolist() + + path = i.get('path', '') + if path is None or path == '': + path = os.getcwd() + + strip_folders = i.get('strip_folders', 0) + + # Unpacking zip + for info in info_files: + f = info.filename + permissions = info.external_attr + + if not f.startswith('..') and not f.startswith( + '/') and not f.startswith('\\'): + f_zip = f + + if strip_folders > 0: + fsplit = f.split('/') # Zip standard on all OS + f = '/'.join(fsplit[strip_folders:]) + + file_path = os.path.join(path, f) + + if f.endswith('/'): + # create directory + if not os.path.exists(file_path): + os.makedirs(file_path) + else: + dir_name = os.path.dirname(file_path) + if not os.path.exists(dir_name): + os.makedirs(dir_name) + + # extract file + file_out = open(file_path, 'wb') + file_out.write(file_name_zip.read(f_zip)) + file_out.close() + + if permissions > 0xffff: + os.chmod(file_path, permissions >> 16) + + file_name_zip.close() + file_name_handle.close() + + return {'return': 0} + + ########################################################################## + def compare_versions(self, i): + """ + Compare versions + + Args: + + version1 (str): version 1 + version2 (str): version 2 + + Returns: + (CM return dict): + + * comparison (int): 1 - version 1 > version 2 + 0 - version 1 == version 2 + -1 - version 1 < version 2 + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + version1 = i['version1'] + version2 = i['version2'] + + l_version1 = version1.split('.') + l_version2 = version2.split('.') + + # 3.9.6 vs 3.9 + # 3.9 vs 3.9.6 + + i_version1 = [int(v) if v.isdigit() else v for v in l_version1] + i_version2 = [int(v) if v.isdigit() else v for v in l_version2] + + comparison = 0 + + for index in range(max(len(i_version1), len(i_version2))): + v1 = i_version1[index] if index < len(i_version1) else 0 + v2 = i_version2[index] if index < len(i_version2) else 0 + + if v1 > v2: + comparison = 1 + break + elif v1 < v2: + comparison = -1 + break + + return {'return': 0, 'comparison': comparison} + + ########################################################################## + def json2yaml(self, i): + """ + Convert JSON file to YAML + + Args: + + input (str): input file (.json) + (output) (str): output file (.yaml) + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + input_file = i.get('input', '') + + if input_file == '': + return {'return': 1, 'error': 'please specify --input={json file}'} + + output_file = 
i.get('output', '') + + r = utils.load_json(input_file, check_if_exists=True) + if r['return'] > 0: + return r + + meta = r['meta'] + + if output_file == '': + output_file = input_file[:- + 5] if input_file.endswith('.json') else input_file + output_file += '.yaml' + + r = utils.save_yaml(output_file, meta) + if r['return'] > 0: + return r + + return {'return': 0} + + ########################################################################## + def yaml2json(self, i): + """ + Convert YAML file to JSON + + Args: + + input (str): input file (.yaml) + (output) (str): output file (.json) + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + input_file = i.get('input', '') + + if input_file == '': + return {'return': 1, 'error': 'please specify --input={yaml file}'} + + output_file = i.get('output', '') + + r = utils.load_yaml(input_file, check_if_exists=True) + if r['return'] > 0: + return r + + meta = r['meta'] + + if output_file == '': + output_file = input_file[:- + 5] if input_file.endswith('.yaml') else input_file + output_file += '.json' + + r = utils.save_json(output_file, meta) + if r['return'] > 0: + return r + + return {'return': 0} + + ########################################################################## + def sort_json(self, i): + """ + Sort JSON file + + Args: + + input (str): input file (.json) + (output) (str): output file + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + input_file = i.get('input', '') + + if input_file == '': + return {'return': 1, 'error': 'please specify --input={json file}'} + + r = utils.load_json(input_file, check_if_exists=True) + if r['return'] > 0: + return r + + meta = r['meta'] + + output_file = i.get('output', '') + + if output_file == '': + output_file = input_file + + r = utils.save_json(output_file, meta, sort_keys=True) + if r['return'] > 0: + return r + + return {'return': 0} + + ########################################################################## + def dos2unix(self, i): + """ + Convert DOS file to UNIX (remove \r) + + Args: + + input (str): input file (.txt) + (output) (str): output file + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + input_file = i.get('input', '') + + if input_file == '': + return {'return': 1, 'error': 'please specify --input={txt file}'} + + r = utils.load_txt(input_file, check_if_exists=True) + if r['return'] > 0: + return r + + s = r['string'].replace('\r', '') + + output_file = i.get('output', '') + + if output_file == '': + output_file = input_file + + r = utils.save_txt(output_file, s) + if r['return'] > 0: + return r + + return {'return': 0} + + ########################################################################## + def replace_string_in_file(self, i): + """ + Replace a string in a file + + Args: + + input (str): input file (.txt) + (output) (str): output file + string (str): string to replace + replacement (str): replacement string + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + (update) (bool): True if file was updated + """ + + input_file = i.get('input', '') + if input_file == '': + return {'return': 1, 'error': 'please specify --input={txt file}'} + + string = i.get('string', '') + if string 
== '': + return {'return': 1, + 'error': 'please specify --string={string to replace}'} + + replacement = i.get('replacement', '') + if replacement == '': + return {'return': 1, + 'error': 'please specify --replacement={replacement string}'} + + output_file = i.get('output', '') + + if output_file == '': + output_file = input_file + + r = utils.load_txt(input_file, check_if_exists=True) + if r['return'] > 0: + return r + + s = r['string'].replace('\r', '') + + s = s.replace(string, replacement) + + r = utils.save_txt(output_file, s) + if r['return'] > 0: + return r + + return {'return': 0} + + ########################################################################## + def create_toc_from_md(self, i): + """ + Create a table of contents from a Markdown file + + Args: + + input (str): input file (.md) + (output) (str): output file (input + '.toc' by default) + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + input_file = i.get('input', '') + if input_file == '': + return {'return': 1, 'error': 'please specify --input={md file}'} + + output_file = i.get('output', '') + + if output_file == '': + output_file = input_file + '.toc' + + r = utils.load_txt(input_file, check_if_exists=True) + if r['return'] > 0: + return r + + lines = r['string'].split('\n') + + toc = [] + + toc.append('
<details>') + toc.append('<summary>Click here to see the table of contents.</summary>') + toc.append('') + + for line in lines: + line = line.strip() + + if line.startswith('#'): + j = line.find(' ') + if j >= 0: + title = line[j:].strip() + + x = title.lower().replace(' ', '-') + + for k in range(0, 2): + if x.startswith('*'): + x = x[1:] + if x.endswith('*'): + x = x[:-1] + + for z in [':', '+', '.', '(', ')', ',']: + x = x.replace(z, '') + + y = ' ' * (2 * (j - 1)) + '* [' + title + '](#' + x + ')' + + toc.append(y) + + toc.append('') + toc.append('</details>
') + + r = utils.save_txt(output_file, '\n'.join(toc) + '\n') + if r['return'] > 0: + return r + + return {'return': 0} + + ########################################################################## + def copy_to_clipboard(self, i): + """ + Copy string to a clipboard + + Args: + + string (str): string to copy to a clipboard + (add_quotes) (bool): add quotes to the string in a clipboard + (skip_fail) (bool): if True, do not fail + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + s = i.get('string', '') + + if i.get('add_quotes', False): + s = '"' + s + '"' + + failed = False + warning = '' + + # Try to load pyperclip (seems to work fine on Windows) + try: + import pyperclip + except Exception as e: + warning = format(e) + failed = True + pass + + if not failed: + pyperclip.copy(s) + else: + failed = False + + # Try to load Tkinter + try: + from Tkinter import Tk + except ImportError as e: + warning = format(e) + failed = True + pass + + if failed: + failed = False + try: + from tkinter import Tk + except ImportError as e: + warning = format(e) + failed = True + pass + + if not failed: + # Copy to clipboard + try: + r = Tk() + r.withdraw() + r.clipboard_clear() + r.clipboard_append(s) + r.update() + r.destroy() + except Exception as e: + failed = True + warning = format(e) + + rr = {'return': 0} + + if failed: + if not i.get('skip_fail', False): + return {'return': 1, 'error': warning} + + rr['warning'] = warning + + return rr + + ########################################################################## + def list_files_recursively(self, i): + """ + List files recursively and concatenate into a string separated by commas + + Args: + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + files = os.walk('.') + + s = '' + + for (dir_path, dir_names, file_names) in files: + for f in file_names: + if s != '': + s += ',' + + if dir_path == '.': + dir_path2 = '' + else: + dir_path2 = dir_path[2:].replace('\\', '/') + '/' + + s += dir_path2 + f + + print(s) + + return {'return': 0} + + ########################################################################## + def generate_secret(self, i): + """ + Generate secret for web apps + + Args: + + Returns: + (CM return dict): + + secret (str): secret + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + import secrets + s = secrets.token_urlsafe(16) + + print(s) + + return {'return': 0, 'secret': s} + + ########################################################################## + def detect_tags_in_artifact(self, i): + """ + Detect if there are tags in an artifact name (spaces) and update input + + Args: + + input (dict) : original input + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + inp = i['input'] + + artifact = inp.get('artifact', '') + if artifact == '.': + del (inp['artifact']) + elif ' ' in artifact: # or ',' in artifact: + del (inp['artifact']) + if 'parsed_artifact' in inp: + del (inp['parsed_artifact']) + # Force substitute tags + inp['tags'] = artifact.replace(' ', ',') + + return {'return': 0} + + ########################################################################## + def prune_input(self, i): + """ + Leave only input keys and remove the rest (to regenerate CM commands) + + Args: + + input 
(dict) : original input + (extra_keys_starts_with) (list): remove keys that start + with the ones from this list + + Returns: + (CM return dict): + + new_input (dict): pruned input + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + """ + + import copy + + inp = i['input'] + extra_keys = i.get('extra_keys_starts_with', []) + + i_run_cmd_arc = copy.deepcopy(inp) + for k in inp: + remove = False + if k in ['action', 'automation', 'cmd', 'out', + 'parsed_automation', 'parsed_artifact', 'self_module']: + remove = True + if not remove: + for ek in extra_keys: + if k.startswith(ek): + remove = True + break + + if remove: + del (i_run_cmd_arc[k]) + + return {'return': 0, 'new_input': i_run_cmd_arc} + + ########################################################################## + + def uid(self, i): + """ + Generate CM UID. + + Args: + (CM input dict): empty dict + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * uid (str): CM UID + """ + + console = i.get('out') == 'con' + + r = utils.gen_uid() + + if console: + print(r['uid']) + + return r + + ########################################################################## + + def system(self, i): + """ + Run system command and redirect output to string. + + Args: + (CM input dict): + + * cmd (str): command line + * (path) (str): go to this directory and return back to current + * (stdout) (str): stdout file + * (stderr) (str): stderr file + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + * ret (int): return code + * std (str): stdout + stderr + * stdout (str): stdout + * stderr (str): stderr + """ + + cmd = i['cmd'] + + if cmd == '': + return {'return': 1, 'error': 'cmd is empty'} + + path = i.get('path', '') + if path != '' and os.path.isdir(path): + cur_dir = os.getcwd() + os.chdir(path) + + if i.get('stdout', '') != '': + fn1 = i['stdout'] + fn1_delete = False + else: + r = utils.gen_tmp_file({}) + if r['return'] > 0: + return r + fn1 = r['file_name'] + fn1_delete = True + + if i.get('stderr', '') != '': + fn2 = i['stderr'] + fn2_delete = False + else: + r = utils.gen_tmp_file({}) + if r['return'] > 0: + return r + fn2 = r['file_name'] + fn2_delete = True + + cmd += ' > ' + fn1 + ' 2> ' + fn2 + rx = os.system(cmd) + + std = '' + stdout = '' + stderr = '' + + if os.path.isfile(fn1): + r = utils.load_txt(file_name=fn1, remove_after_read=fn1_delete) + if r['return'] == 0: + stdout = r['string'].strip() + + if os.path.isfile(fn2): + r = utils.load_txt(file_name=fn2, remove_after_read=fn2_delete) + if r['return'] == 0: + stderr = r['string'].strip() + + std = stdout + if stderr != '': + if std != '': + std += '\n' + std += stderr + + if path != '' and os.path.isdir(path): + os.chdir(cur_dir) + + return {'return': 0, 'ret': rx, 'stdout': stdout, + 'stderr': stderr, 'std': std} + + ############################################################ + def load_cfg(self, i): + """ + Load configuration artifacts and files + + Args: + (CM input dict): + + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + return utils.call_internal_module( + self, __file__, 'module_cfg', 'load_cfg', i) + + ############################################################ + def select_cfg(self, i): + """ + Select cfg interactively + + Args: 
+ (CM input dict): + tags (str): list of tags to find cfg + alias (str): alias of a cfg file + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + i['self_module'] = self + + return utils.call_internal_module( + self, __file__, 'module_cfg', 'select_cfg', i) + + ############################################################ + def print_yaml(self, i): + """ + Print YAML file + + Args: + (CM input dict): + file (str): input file + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + filename = i.get('file', '') + if filename == '': + return {'return': 1, 'error': 'please specify --file={YAML file}'} + + r = utils.load_yaml(filename, check_if_exists=True) + if r['return'] > 0: + return r + + meta = r['meta'] + + import json + print(json.dumps(meta, indent=2)) + + return {'return': 0} + + ############################################################ + def print_json(self, i): + """ + Print JSON file + + Args: + (CM input dict): + file (str): input file + + Returns: + (CM return dict): + + * return (int): return code == 0 if no error and >0 if error + * (error) (str): error string if return>0 + + """ + + filename = i.get('file', '') + if filename == '': + return {'return': 1, 'error': 'please specify --file={JSON file}'} + + r = utils.load_json(filename, check_if_exists=True) + if r['return'] > 0: + return r + + meta = r['meta'] + + import json + print(json.dumps(meta, indent=2)) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/automation/utils/module_cfg.py b/cmx4mlops/cmx4mlops/repo/automation/utils/module_cfg.py new file mode 100644 index 000000000..e3a445c12 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/automation/utils/module_cfg.py @@ -0,0 +1,352 @@ +# Author: Grigori Fursin +# Contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +import os +import cmind +import copy + +base_path = {} +base_path_meta = {} + +########################################################################## + + +def load_cfg(i): + + tags = i.get('tags', '') + artifact = i.get('artifact', '') + + key = i.get('key', '') + key_end = i.get('key_end', []) + + ii = {'action': 'find', + 'automation': 'cfg'} + if artifact != '': + ii['artifact'] = artifact + elif tags != '': + ii['tags'] = tags + + r = cmind.access(ii) + if r['return'] > 0: + return r + + lst = r['list'] + + prune = i.get('prune', {}) + prune_key = prune.get('key', '') + prune_key_uid = prune.get('key_uid', '') + prune_meta_key = prune.get('meta_key', '') + prune_meta_key_uid = prune.get('meta_key_uid', '') + prune_uid = prune.get('uid', '') + prune_list = prune.get('list', []) + + # Checking individual files inside CM entry + selection = [] + + if i.get('skip_files', False): + for l in lst: + meta = l.meta + full_path = l.path + + meta['full_path'] = full_path + + add = True + + if prune_key != '' and prune_key_uid != '': + if prune_key_uid not in meta.get(prune_key, []): + 
+                    add = False
+
+            if add:
+                selection.append(meta)
+    else:
+        for l in lst:
+            path = l.path
+
+            main_meta = l.meta
+
+            skip = False
+
+            if prune_meta_key != '' and prune_meta_key_uid != '':
+                if prune_meta_key_uid not in main_meta.get(prune_meta_key, []):
+                    skip = True
+
+            if skip:
+                continue
+
+            all_tags = main_meta.get('tags', [])
+
+            files = os.listdir(path)
+
+            for f in files:
+                if key != '' and not f.startswith(key):
+                    continue
+
+                if f.startswith('_') or (not f.endswith(
+                        '.json') and not f.endswith('.yaml')):
+                    continue
+
+                if len(key_end) > 0:
+                    skip = True
+                    for ke in key_end:
+                        if f.endswith(ke):
+                            skip = False
+                            break
+                    if skip:
+                        continue
+
+                full_path = os.path.join(path, f)
+
+                full_path_without_ext = full_path[:-5]
+
+                r = cmind.utils.load_yaml_and_json(full_path_without_ext)
+                if r['return'] > 0:
+                    print('Warning: problem loading file {}'.format(full_path))
+                else:
+                    meta = r['meta']
+
+                    # Check base
+                    r = process_base(meta, full_path)
+                    if r['return'] > 0:
+                        return r
+                    meta = r['meta']
+
+                    uid = meta['uid']
+
+                    # Check pruning
+                    add = True
+
+                    if len(prune) > 0:
+                        if prune_uid != '' and uid != prune_uid:
+                            add = False
+
+                        if add and len(
+                                prune_list) > 0 and uid not in prune_list:
+                            add = False
+
+                        if add and prune_key != '' and prune_key_uid != '' and prune_key_uid != meta.get(
+                                prune_key, None):
+                            add = False
+
+                    if add:
+                        meta['full_path'] = full_path
+
+                        add_all_tags = copy.deepcopy(all_tags)
+
+                        name = meta.get('name', '')
+                        if name == '':
+                            name = ' '.join(meta.get('tags', []))
+                        name = name.strip()
+                        meta['name'] = name
+
+                        file_tags = meta.get('tags', '').strip()
+                        if file_tags == '':
+                            if name != '':
+                                add_all_tags += [v.lower()
+                                                 for v in name.split(' ')]
+                        else:
+                            add_all_tags += file_tags.split(',')
+
+                        meta['all_tags'] = add_all_tags
+
+                        meta['main_meta'] = main_meta
+
+                        selection.append(meta)
+
+    return {'return': 0, 'lst': lst, 'selection': selection}
+
+##########################################################################
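# The next helper implements lightweight single-level inheritance for cfg
# files: a file whose meta contains a `_base` key (either a file name next to
# it or `artifact:file`) is merged on top of that base file. A standalone,
# runnable sketch of the intended merge semantics, using hypothetical metas:

import copy

base = {'tags': 'benchmark,', 'name': 'Base ', 'urls': ['u1'], 'env': {'A': 1}}
meta = {'tags': 'run', 'name': 'Derived', 'urls': ['u2'], 'env': {'B': 2}, 'uid': 'x'}

merged = copy.deepcopy(base)
for k, v in meta.items():
    if k not in merged:
        merged[k] = v                     # new keys are simply added
    elif isinstance(v, str):
        # only 'tags' and 'name' are concatenated; other strings overwrite
        merged[k] = merged[k] + v if k in ['tags', 'name'] else v
    elif isinstance(v, list):
        merged[k] += v                    # lists are appended
    elif isinstance(v, dict):
        merged[k].update(v)               # dictionaries are merged

print(merged['tags'])   # -> 'benchmark,run'
print(merged['urls'])   # -> ['u1', 'u2']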
+
+
+def process_base(meta, full_path):
+
+    global base_path, base_path_meta
+
+    _base = meta.get('_base', '')
+    if _base != '':
+        name = ''
+
+        filename = _base
+        full_path_base = os.path.dirname(full_path)
+
+        if not filename.endswith('.yaml') and not filename.endswith('.json'):
+            return {'return': 1, 'error': '_base file {} in {} must be .yaml or .json'.format(
+                filename, full_path)}
+
+        if ':' in _base:
+            x = _base.split(':')
+            name = x[0]
+
+            full_path_base = base_path.get(name, '')
+            if full_path_base == '':
+
+                # Find artifact
+                r = cmind.access({'action': 'find',
+                                  'automation': 'cfg',
+                                  'artifact': name})
+                if r['return'] > 0:
+                    return r
+
+                lst = r['list']
+
+                if len(lst) == 0:
+                    return {'return': 1, 'error': '_base artifact {} not found in {}'.format(
+                        name, full_path)}
+
+                full_path_base = lst[0].path
+
+                base_path[name] = full_path_base
+
+            filename = x[1]
+
+        # Load base
+        path = os.path.join(full_path_base, filename)
+
+        if not os.path.isfile(path):
+            return {'return': 1, 'error': '_base file {} not found in {}'.format(
+                filename, full_path)}
+
+        if path in base_path_meta:
+            base = copy.deepcopy(base_path_meta[path])
+        else:
+            path_without_ext = path[:-5]
+
+            r = cmind.utils.load_yaml_and_json(path_without_ext)
+            if r['return'] > 0:
+                return r
+
+            base = r['meta']
+
+            base_path_meta[path] = copy.deepcopy(base)
+
+        for k in meta:
+            v = meta[k]
+
+            if k not in base:
+                base[k] = v
+            else:
+                if isinstance(v, str):
+                    # Only merge a few special keys and overwrite the rest
+                    if k in ['tags', 'name']:
+                        base[k] += meta[k]
+                    else:
+                        base[k] = meta[k]
+                elif isinstance(v, list):
+                    for vv in v:
+                        base[k].append(vv)
+                elif isinstance(v, dict):
+                    base[k].update(v)
+
+        meta = base
+
+    return {'return': 0, 'meta': meta}
+
+##########################################################################
+
+
+def select_cfg(i):
+
+    self_module = i['self_module']
+    tags = i['tags']
+    alias = i.get('alias', '')
+    uid = i.get('uid', '')
+    title = i.get('title', '')
+
+    # Find all configuration entries (the alias, if given, selects a file inside them)
+    r = self_module.cmind.access(
+        {'action': 'find', 'automation': 'cfg', 'tags': 'basic,docker,configurations'})
+    if r['return'] > 0:
+        return r
+
+    lst = r['list']
+
+    selector = []
+
+    # Do coarse-grain search for CM artifacts
+    for l in lst:
+        p = l.path
+
+        if alias != '':
+            for ext in ['.json', '.yaml']:
+                p1 = os.path.join(p, alias + ext)
+                if os.path.isfile(p1):
+                    selector.append({'path': p1, 'alias': alias})
+                    break
+
+        else:
+            files = os.listdir(p)
+
+            for f in files:
+                if not f.startswith('_cm') and (
+                        f.endswith('.json') or f.endswith('.yaml')):
+                    selector.append({'path': os.path.join(p, f), 'alias': f[:-5]})
+
+    # Load meta for name and UID
+    selector_with_meta = []
+    for s in range(0, len(selector)):
+        ss = selector[s]
+
+        path = ss['path']
+
+        full_path_without_ext = path[:-5]
+
+        r = cmind.utils.load_yaml_and_json(full_path_without_ext)
+        if r['return'] > 0:
+            print('Warning: problem loading configuration file {}'.format(path))
+            continue
+
+        meta = r['meta']
+
+        if uid == '' or meta.get('uid', '') == uid:
+            ss['meta'] = meta
+            selector_with_meta.append(ss)
+
+    # Quit if no configurations found
+    if len(selector_with_meta) == 0:
+        return {'return': 16, 'error': 'configuration was not found'}
+
+    select = 0
+    if len(selector_with_meta) > 1:
+        xtitle = ' ' + title if title != '' else ''
+        print('')
+        print('Available{} configurations:'.format(xtitle))
+
+        print('')
+
+        selector_with_meta = sorted(
+            selector_with_meta, key=lambda x: x['meta'].get('name', ''))
+        s = 0
+        for ss in selector_with_meta:
+            alias = ss['alias']
+            uid = ss['meta'].get('uid', '')
+            name = ss['meta'].get('name', '')
+
+            x = name
+            if x != '':
+                x += ' '
+            x += '(' + uid + ')'
+
+            print(f'{s}) {x}')
+
+            s += 1
+
+        print('')
+        select = input('Enter configuration number or press Enter for 0: ')
+
+        if select.strip() == '':
+            select = '0'
+
+        select = int(select)
+
+        if select < 0 or select >= len(selector_with_meta):
+            return {'return': 1, 'error': 'selection is out of range'}
+
+    ss = selector_with_meta[select]
+
+    return {'return': 0, 'selection': ss}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/_cm.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/_cm.json
new file mode 100644
index 000000000..6877b34a7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/_cm.json
@@ -0,0 +1,10 @@
+{
+  "alias": "benchmark-hardware-compute",
+  "automation_alias": "cfg",
+  "automation_uid": "88dce9c160324c5d",
+  "tags": [
+    "benchmark",
+    "compute"
+  ],
+  "uid": "ca67f372e7294afd"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/amd-cpu-x64.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/amd-cpu-x64.json
new file mode 100644
index 000000000..53f295d72
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/amd-cpu-x64.json
@@ -0,0 +1,6 @@
+{
+  "uid": "cdfd424c32734e38",
+  "name": "AMD - x64",
+  "tags": "cpu,x64,generic,amd",
+  "mlperf_inference_device": "cpu"
+}
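Each file in this cfg entry describes one hardware target that the benchmark entries below can reference through `supported_compute` tags or UIDs. As a rough sketch (assuming cmind is installed and this repository is registered with CM), the entries can be enumerated with the `load_cfg` action shown earlier:

    import cmind

    # Sketch: list the hardware descriptions stored in this cfg entry
    r = cmind.access({'action': 'load_cfg', 'automation': 'utils',
                      'tags': 'benchmark,compute'})
    if r['return'] > 0:
        raise RuntimeError(r.get('error', ''))
    for meta in r['selection']:
        print(meta.get('uid', ''), '-', meta.get('name', ''))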
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/amd-gpu.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/amd-gpu.json
new file mode 100644
index 000000000..d70e1d155
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/amd-gpu.json
@@ -0,0 +1,6 @@
+{
+  "uid": "d8f06040f7294319",
+  "name": "AMD - GPU",
+  "tags": "gpu,amd",
+  "mlperf_inference_device": "rocm"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/generic-cpu-arm64.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/generic-cpu-arm64.json
new file mode 100644
index 000000000..7af318b27
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/generic-cpu-arm64.json
@@ -0,0 +1,6 @@
+{
+  "uid": "357a972e79614903",
+  "name": "Arm - AArch64",
+  "tags": "cpu,arm64,aarch64,generic",
+  "mlperf_inference_device": "cpu"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/google-tpu.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/google-tpu.json
new file mode 100644
index 000000000..2bb4d22cf
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/google-tpu.json
@@ -0,0 +1,6 @@
+{
+  "uid": "b3be7ac9ef954f5a",
+  "name": "Google - TPU",
+  "tags": "tpu,google",
+  "mlperf_inference_device": "tpu"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/habana-gaudi.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/habana-gaudi.json
new file mode 100644
index 000000000..b6caa9655
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/habana-gaudi.json
@@ -0,0 +1,6 @@
+{
+  "uid": "a42388a2a8cd412c",
+  "name": "Intel/Habana - Gaudi 2",
+  "tags": "gaudi,habana",
+  "mlperf_inference_device": "gaudi"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/intel-cpu-x64.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/intel-cpu-x64.json
new file mode 100644
index 000000000..2e8ab51c4
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/intel-cpu-x64.json
@@ -0,0 +1,6 @@
+{
+  "uid": "ee8c568e0ac44f2b",
+  "name": "Intel - x64",
+  "tags": "cpu,x64,generic,intel",
+  "mlperf_inference_device": "cpu"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/nvidia-gpu-jetson-orin.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/nvidia-gpu-jetson-orin.yaml
new file mode 100644
index 000000000..d8b9787c6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/nvidia-gpu-jetson-orin.yaml
@@ -0,0 +1,7 @@
+uid: fe379ecd1e054a00
+
+tags: gpu,nvidia,jetson,orin
+
+name: "Nvidia - GPU - Jetson Orin"
+
+mlperf_inference_device: cuda
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/nvidia-gpu.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/nvidia-gpu.json
new file mode 100644
index 000000000..5bc758253
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/nvidia-gpu.json
@@ -0,0 +1,6 @@
+{
+  "uid": "fe379ecd1e054a00",
+  "name": "Nvidia - GPU",
+  "tags": "gpu,nvidia",
+  "mlperf_inference_device": "cuda"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/qualcomm-ai100.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/qualcomm-ai100.json
new file mode 100644
index 000000000..aa84e5735
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/qualcomm-ai100.json
@@ -0,0 +1,6 @@
+{
+  "uid": "d2ae645066664463",
+  "name": "Qualcomm - AI 100",
+  "tags": "accelerator,acc,qualcomm,ai,100,ai-100",
+  "mlperf_inference_device": "qaic"
+}
diff
--git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/stm-32L4R5ZIT6U-NUCLEO-L4R5ZI.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/stm-32L4R5ZIT6U-NUCLEO-L4R5ZI.yaml new file mode 100644 index 000000000..c6d06e9b4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-hardware-compute/stm-32L4R5ZIT6U-NUCLEO-L4R5ZI.yaml @@ -0,0 +1,5 @@ +uid: 2cd26d4f92ca4b85 + +tags: stm,stm32,stm32l4r5zit6u,nucleo,l4r5zi + +name: "STM32L4R5ZIT6U - NUCLEO-L4R5ZI" diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/_cm.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/_cm.json new file mode 100644 index 000000000..533c86271 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/_cm.json @@ -0,0 +1,10 @@ +{ + "alias": "benchmark-list", + "automation_alias": "cfg", + "automation_uid": "88dce9c160324c5d", + "tags": [ + "benchmark", + "list" + ], + "uid": "15291dfc4f904146" +} diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/loadgen-cpp.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/loadgen-cpp.yaml new file mode 100644 index 000000000..590eb4475 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/loadgen-cpp.yaml @@ -0,0 +1,19 @@ +uid: f594dc94b2714713 + +tags: benchmark,run,loadgen,cpp + +name: "ML models with LoadGen (C++; Linux/MacOS/Windows) - dev" + +urls: +- name: "GitHub dev page" + url: "https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-cpp" + +supported_compute: +- cpu,x64 +- gpu,nvidia + +script_name: run-mlperf-inference-app,4a5d5b13fd7e4ac8 + +bench_input: + mlperf_inference_implementation: mil + \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/loadgen-python.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/loadgen-python.yaml new file mode 100644 index 000000000..74e5aa48e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/loadgen-python.yaml @@ -0,0 +1,16 @@ +uid: 0d6b54eb27d1454e + +tags: benchmark,run,loadgen,python + +name: "ML models with LoadGen (Python; Linux/MacOS/Windows) - dev" + +urls: +- name: "GitHub dev page" + url: "https://github.com/mlcommons/cm4mlops/tree/main/script/app-loadgen-generic-python" + +supported_compute: +- cpu,x64 +- cpu,arm64 +- gpu,nvidia + +script_name: app-loadgen-generic-python,d3d949cc361747a6 diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-abtf.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-abtf.yaml new file mode 100644 index 000000000..a01edcbde --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-abtf.yaml @@ -0,0 +1,18 @@ +uid: 94f0faaa0c61445d + +tags: benchmark,run,mlperf,abtf,mlperf-abtf + +name: "MLPerf ABTF - dev" + +urls: +- name: "Announcement" + url: "https://mlcommons.org/2023/05/avcc-and-mlcommons-join-forces-to-develop-an-automotive-industry-standard/" +- name: "MLCommons CM automation (under development)" + url: "https://access.cknowledge.org/playground/?action=scripts" + +supported_compute: +- cpu,x64 +- cpu,arm64 +- gpu,nvidia + +script_name: test-abtf-ssd-pytorch,91bfc4333b054c21 diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-inference.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-inference.yaml new file mode 100644 index 000000000..e57764a48 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-inference.yaml @@ -0,0 +1,28 @@ +uid: 39877bb63fb54725 + +tags: benchmark,run,mlperf,inference,mlperf-inference + +name: "MLPerf inference" + +urls: +- name: "Official page" + url: 
"https://mlcommons.org/benchmarks/inference" +- name: "GitHub dev page" + url: "https://github.com/mlcommons/inference" +- name: "ArXiv paper" + url: "https://arxiv.org/abs/1911.02549" +- name: "MLCommons CM automation for MLPerf inference" + url: "https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference" + +script_name: run-mlperf-inference-app,4a5d5b13fd7e4ac8 + +skip_extra_urls: true + +supported_compute: +- cpu,x64 +- cpu,arm64 +- gpu,nvidia +- gpu,amd +- accelerator,acc,qualcomm,ai,100,ai-100 +- tpu,google +- gaudi,habana diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-mobile.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-mobile.yaml new file mode 100644 index 000000000..85771a44d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-mobile.yaml @@ -0,0 +1,14 @@ +uid: 8b2ed0897bd74267 + +tags: benchmark,run,mlperf,mobile,mlperf-mobile + +name: "MLPerf mobile" + +urls: +- name: "Official page" + url: "https://mlcommons.org/benchmarks/inference-mobile/" +- name: "GitHub page for mobile app" + url: "https://github.com/mlcommons/mobile_app_open" + +supported_compute: +- cpu,arm64 diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-tiny.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-tiny.yaml new file mode 100644 index 000000000..d6aeccabc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-tiny.yaml @@ -0,0 +1,16 @@ +uid: 28870394c19c4c37 + +tags: benchmark,run,mlperf,tiny,mlperf-tiny + +name: "MLPerf tiny" + +urls: +- name: "Official page" + url: "https://mlcommons.org/benchmarks/inference-tiny" +- name: "GitHub dev page" + url: "https://github.com/mlcommons/tiny" +- name: "MLCommons CM automation (under development)" + url: "https://github.com/mlcommons/ck/blob/master/docs/tutorials/reproduce-mlperf-tiny.md" + +supported_compute: +- stm32 diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-training.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-training.yaml new file mode 100644 index 000000000..8b95de4f7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-list/mlperf-training.yaml @@ -0,0 +1,18 @@ +uid: 59311e6098c14b22 + +tags: benchmark,run,mlperf,training,mlperf-training + +name: "MLPerf training" + +urls: +- name: "Official page" + url: "https://mlcommons.org/benchmarks/training" +- name: "GitHub dev page" + url: "https://github.com/mlcommons/training" +- name: "MLCommons CM automation (under development)" + url: "https://github.com/mlcommons/ck/blob/master/docs/tutorials/reproduce-mlperf-training.md" + +supported_compute: +- cpu,x64 +- gpu,nvidia +- tpu,google diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/_cm.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/_cm.yaml new file mode 100644 index 000000000..334bd4d94 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/_cm.yaml @@ -0,0 +1,45 @@ +alias: benchmark-run-mlperf-inference-v3.1 +uid: 8eb42e27ec984185 + +automation_alias: cfg +automation_uid: 88dce9c160324c5d + +tags: +- benchmark +- run +- mlperf +- inference +- v3.1 + +name: "MLPerf inference - v3.1" + +supported_compute: +- ee8c568e0ac44f2b +- fe379ecd1e054a00 + +bench_uid: 39877bb63fb54725 + +view_dimensions: +- - input.device + - "MLPerf device" +- - input.implementation + - "MLPerf implementation" +- - input.backend + - "MLPerf backend" +- - input.model + - "MLPerf model" +- - input.precision + - "Model precision" +- - input.scenario + - "MLPerf scenario" +- - 
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/_cm.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/_cm.yaml
new file mode 100644
index 000000000..334bd4d94
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/_cm.yaml
@@ -0,0 +1,45 @@
+alias: benchmark-run-mlperf-inference-v3.1
+uid: 8eb42e27ec984185
+
+automation_alias: cfg
+automation_uid: 88dce9c160324c5d
+
+tags:
+- benchmark
+- run
+- mlperf
+- inference
+- v3.1
+
+name: "MLPerf inference - v3.1"
+
+supported_compute:
+- ee8c568e0ac44f2b
+- fe379ecd1e054a00
+
+bench_uid: 39877bb63fb54725
+
+view_dimensions:
+- - input.device
+  - "MLPerf device"
+- - input.implementation
+  - "MLPerf implementation"
+- - input.backend
+  - "MLPerf backend"
+- - input.model
+  - "MLPerf model"
+- - input.precision
+  - "Model precision"
+- - input.scenario
+  - "MLPerf scenario"
+- - input.host_os
+  - "Host OS"
+- - output.state.cm-mlperf-inference-results-last.performance
+  - "Got performance"
+  - "tick"
+- - output.state.cm-mlperf-inference-results-last.accuracy
+  - "Got accuracy"
+  - "tick"
+- - output.state.cm-mlperf-inference-results-last.power
+  - "Got energy"
+  - "tick"
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-input.json
new file mode 100644
index 000000000..d1f187f49
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-input.json
@@ -0,0 +1,54 @@
+{
+  "action": "run",
+  "automation": "script",
+  "tags": "run-mlperf-inference,_r4.0,_performance-only,_short",
+  "division": "open",
+  "category": "datacenter",
+  "device": "qaic",
+  "model": "bert-99.9",
+  "precision": "float16",
+  "implementation": "qualcomm",
+  "backend": "glow",
+  "scenario": "Offline",
+  "execution_mode": "test",
+  "power": "no",
+  "adr": {
+    "python": {
+      "version_min": "3.8"
+    }
+  },
+  "clean": true,
+  "compliance": "no",
+  "j": true,
+  "jf": "run-0eeb9799b12b488f",
+  "quiet": true,
+  "time": true,
+  "host_os": "linux",
+  "cmd": [
+    "--tags=run-mlperf-inference,_r4.0,_performance-only,_short",
+    "--division=open",
+    "--category=datacenter",
+    "--device=qaic",
+    "--model=bert-99.9",
+    "--precision=float16",
+    "--implementation=qualcomm",
+    "--backend=glow",
+    "--scenario=Offline",
+    "--execution_mode=test",
+    "--power=no",
+    "--adr.python.version_min=3.8",
+    "--clean",
+    "--compliance=no",
+    "--j",
+    "--quiet",
+    "--time",
+    "--host_os=linux"
+  ],
+  "out": "con",
+  "parsed_automation": [
+    [
+      "script",
+      "5b4e0237da074764"
+    ]
+  ]
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-meta.json
new file mode 100644
index 000000000..a9243fe3c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-meta.json
@@ -0,0 +1,9 @@
+{
+  "uid": "800fe1b33ca443da",
+  "compute_uid": "d2ae645066664463",
+  "bench_uid": "39877bb63fb54725",
+  "date_time": "2024-02-20T15:25:03.786139",
+  "functional": true,
+  "reproduced": true,
+  "support_docker": true
+}
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-output.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-output.json
new file mode 100644
index 000000000..a07a992e7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-005147815bf840b8-output.json
@@ -0,0 +1,11 @@
+{
+  "return": 0,
+  "env": {},
+  "new_env": {},
+  "state": {
+    "cm-mlperf-inference-results-last": {
+      "performance": "tested-will-be-added-in-v4.0",
+      "performance_valid": true
+    }
+  }
+}
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-input.json
new file mode 100644
index 000000000..1fe11d6d5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-input.json
@@ -0,0 +1,55 @@
+{
+  "action": "run",
+  "automation": "script",
+  "tags": "run-mlperf-inference,_r4.0,_performance-only,_short",
+  "division": "open",
+  "category":
"edge", + "device": "cpu", + "model": "bert-99", + "precision": "int8", + "implementation": "reference", + "backend": "deepsparse", + "scenario": "Offline", + "execution_mode": "test", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "clean": true, + "compliance": "no", + "j": true, + "jf": "run-0eeb9799b12b488f", + "quiet": true, + "time": true, + "host_os": "linux", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_performance-only,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=bert-99", + "--precision=int8", + "--implementation=reference", + "--backend=deepsparse", + "--scenario=Offline", + "--execution_mode=test", + "--power=no", + "--adr.python.version_min=3.8", + "--clean", + "--compliance=no", + "--j", + "--jf=run-0eeb9799b12b488f", + "--quiet", + "--time", + "--host_os=linux" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-meta.json new file mode 100644 index 000000000..dbd58de07 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "12242042335e4bc8", + "compute_uid": "ee8c568e0ac44f2b", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-20T15:15:53.984671", + "functional": true, + "reproduced": true, + "support_docker": true +} diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-output.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-output.json new file mode 100644 index 000000000..519ddf3a3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-0eeb9799b12b488f-output.json @@ -0,0 +1,137 @@ +{ + "return": 0, + "env": {}, + "new_env": {}, + "state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "12.4548", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "64039368", + "max latency (ns)": "802905050", + "mean latency (ns)": "372956875", + "50.00 percentile latency (ns)": "378435867", + "90.00 percentile latency (ns)": "802905050", + "95.00 percentile latency (ns)": "802905050", + "97.00 percentile latency (ns)": "802905050", + "99.00 percentile latency (ns)": "802905050", + "99.90 percentile latency (ns)": "802905050", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": 
"https://github.com/mlcommons/inference_results_v2.1/raw/master/open/NeuralMagic/code/bert/deepsparse/models/oBERT-Large_95sparse_block4_qat.onnx.tar.xz", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-reference-cpu-deepsparse-vdefault-default_config": { + "bert-99": { + "Offline": { + "performance": "12.455", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "12.455", + "performance_valid": true + } + }, + "new_state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "12.4548", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "64039368", + "max latency (ns)": "802905050", + "mean latency (ns)": "372956875", + "50.00 percentile latency (ns)": "378435867", + "90.00 percentile latency (ns)": "802905050", + "95.00 percentile latency (ns)": "802905050", + "97.00 percentile latency (ns)": "802905050", + "99.00 percentile latency (ns)": "802905050", + "99.90 percentile latency (ns)": "802905050", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "https://github.com/mlcommons/inference_results_v2.1/raw/master/open/NeuralMagic/code/bert/deepsparse/models/oBERT-Large_95sparse_block4_qat.onnx.tar.xz", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-reference-cpu-deepsparse-vdefault-default_config": { + "bert-99": { + "Offline": { + "performance": "12.455", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "12.455", + "performance_valid": true + } + }, + "deps": [ + "detect,os", + "detect,cpu", + "get,python3", + "get,mlcommons,inference,src", + "get,sut,description", + "get,mlperf,inference,results,dir", + "install,pip-package,for-cmind-python,_package.tabulate", + "get,mlperf,inference,utils" + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-input.json new file mode 100644 index 000000000..b02bb7695 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-input.json @@ -0,0 +1,55 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_performance-only,_short", + "division": "open", + "category": "edge", + "device": "cpu", + "model": "bert-99.9", + "precision": "float32", + "implementation": "reference", + "backend": "onnxruntime", + 
"scenario": "Offline", + "execution_mode": "test", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "clean": true, + "compliance": "no", + "j": true, + "jf": "run-52c1d43172664ed0", + "quiet": true, + "time": true, + "host_os": "linux", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_performance-only,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=bert-99.9", + "--precision=float32", + "--implementation=reference", + "--backend=onnxruntime", + "--scenario=Offline", + "--execution_mode=test", + "--power=no", + "--adr.python.version_min=3.8", + "--clean", + "--compliance=no", + "--j", + "--jf=run-52c1d43172664ed0", + "--quiet", + "--time", + "--host_os=linux" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-meta.json new file mode 100644 index 000000000..7b7b419f3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "52c1d43172664ed0", + "compute_uid": "ee8c568e0ac44f2b", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-20T15:04:13.424211", + "functional": true, + "reproduced": true, + "support_docker": true +} diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-output.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-output.json new file mode 100644 index 000000000..c250f0c62 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-52c1d43172664ed0-output.json @@ -0,0 +1,137 @@ +{ + "return": 0, + "env": {}, + "new_env": {}, + "state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.615377", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "4705323615", + "max latency (ns)": "16250190121", + "mean latency (ns)": "10456508889", + "50.00 percentile latency (ns)": "10133038152", + "90.00 percentile latency (ns)": "16250190121", + "95.00 percentile latency (ns)": "16250190121", + "97.00 percentile latency (ns)": "16250190121", + "99.00 percentile latency (ns)": "16250190121", + "99.90 percentile latency (ns)": "16250190121", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "https://armi.in/files/model.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + 
"ip_172_31_89_56-reference-cpu-onnxruntime-v1.17.0-default_config": { + "bert-99.9": { + "Offline": { + "performance": "0.615", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.615", + "performance_valid": true + } + }, + "new_state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.615377", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "4705323615", + "max latency (ns)": "16250190121", + "mean latency (ns)": "10456508889", + "50.00 percentile latency (ns)": "10133038152", + "90.00 percentile latency (ns)": "16250190121", + "95.00 percentile latency (ns)": "16250190121", + "97.00 percentile latency (ns)": "16250190121", + "99.00 percentile latency (ns)": "16250190121", + "99.90 percentile latency (ns)": "16250190121", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "https://armi.in/files/model.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-reference-cpu-onnxruntime-v1.17.0-default_config": { + "bert-99.9": { + "Offline": { + "performance": "0.615", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.615", + "performance_valid": true + } + }, + "deps": [ + "detect,os", + "detect,cpu", + "get,python3", + "get,mlcommons,inference,src", + "get,sut,description", + "get,mlperf,inference,results,dir", + "install,pip-package,for-cmind-python,_package.tabulate", + "get,mlperf,inference,utils" + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-input.json new file mode 100644 index 000000000..2addebee9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-input.json @@ -0,0 +1,56 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_submission,_short", + "division": "open", + "category": "edge", + "device": "cuda", + "model": "bert-99", + "host_os": "linux", + "precision": "float32", + "implementation": "nvidia-original", + "backend": "tensorrt", + "scenario": "Offline", + "execution_mode": "test", + "submitter": "CTuning", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "compliance": "no", + "j": true, + "time": true, + "clean": true, + "quiet": true, + "jf": "mlperf-inference-results", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_submission,_short", + "--division=open", + "--category=edge", + "--device=cuda", + 
"--model=bert-99", + "--precision=float32", + "--implementation=nvidia-original", + "--backend=tensorrt", + "--scenario=Offline", + "--execution_mode=test", + "--submitter=CTuning", + "--power=no", + "--adr.python.version_min=3.8", + "--compliance=no", + "--j", + "--time", + "--clean", + "--quiet", + "--jf=mlperf-inference-results" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-meta.json new file mode 100644 index 000000000..0e5dcba61 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-66cce585ff0242bc-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "66cce585ff0242bc", + "compute_uid": "fe379ecd1e054a00", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-20T16:23:59.000629", + "functional": true, + "reproduced": true, + "support_docker": true +} diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-input.json new file mode 100644 index 000000000..82a0cc826 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-input.json @@ -0,0 +1,56 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_submission,_short", + "division": "open", + "category": "edge", + "device": "cpu", + "model": "bert-99", + "host_os": "linux", + "precision": "float32", + "implementation": "reference", + "backend": "tf", + "scenario": "Offline", + "execution_mode": "test", + "submitter": "CTuning", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "compliance": "no", + "j": true, + "time": true, + "clean": true, + "quiet": true, + "jf": "mlperf-inference-results", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_submission,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=bert-99", + "--precision=float32", + "--implementation=reference", + "--backend=tf", + "--scenario=Offline", + "--execution_mode=test", + "--submitter=CTuning", + "--power=no", + "--adr.python.version_min=3.8", + "--compliance=no", + "--j", + "--time", + "--clean", + "--quiet", + "--jf=mlperf-inference-results" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-meta.json new file mode 100644 index 000000000..3bde194ab --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-6a07cf881dee462a-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "6a07cf881dee462a", + "compute_uid": "ee8c568e0ac44f2b", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-20T15:33:11.932584", + "functional": false, + "reproduced": false, + "support_docker": false +} diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-input.json new file mode 100644 index 000000000..de6e2b2c9 --- /dev/null +++ 
b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-input.json @@ -0,0 +1,55 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_performance-only,_short", + "division": "open", + "category": "edge", + "device": "cuda", + "model": "bert-99", + "precision": "float32", + "implementation": "reference", + "backend": "onnxruntime", + "scenario": "Offline", + "execution_mode": "test", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "clean": true, + "compliance": "no", + "j": true, + "jf": "run-7d80f464b2274742", + "quiet": true, + "time": true, + "host_os": "linux", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_performance-only,_short", + "--division=open", + "--category=edge", + "--device=cuda", + "--model=bert-99", + "--precision=float32", + "--implementation=reference", + "--backend=onnxruntime", + "--scenario=Offline", + "--execution_mode=test", + "--power=no", + "--adr.python.version_min=3.8", + "--clean", + "--compliance=no", + "--j", + "--jf=run-7d80f464b2274742", + "--quiet", + "--time", + "--host_os=linux" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-meta.json new file mode 100644 index 000000000..eadf7f201 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-meta.json @@ -0,0 +1,10 @@ +{ + "uid": "7d80f464b2274742", + "compute_uid": "fe379ecd1e054a00", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-20T16:04:27.903539", + "notes":"ONNX 1.15.0 worked; ONNX 1.17.0 did not work", + "functional": true, + "reproduced": true, + "support_docker": false +} diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-output.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-output.json new file mode 100644 index 000000000..5d8f74da1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7d80f464b2274742-output.json @@ -0,0 +1,137 @@ +{ + "return": 0, + "env": {}, + "new_env": {}, + "state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "13.1969", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "295840204", + "max latency (ns)": "757755274", + "mean latency (ns)": "521501098", + "50.00 percentile latency (ns)": "497153427", + "90.00 percentile latency (ns)": "757755274", + "95.00 percentile latency (ns)": "757755274", + "97.00 percentile latency (ns)": "757755274", + "99.00 percentile latency (ns)": "757755274", + "99.90 percentile latency (ns)": "757755274", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + 
"performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "https://armi.in/files/model.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-reference-gpu-onnxruntime-v1.15.0-default_config": { + "bert-99": { + "Offline": { + "performance": "13.197", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "13.197", + "performance_valid": true + } + }, + "new_state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "13.1969", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "295840204", + "max latency (ns)": "757755274", + "mean latency (ns)": "521501098", + "50.00 percentile latency (ns)": "497153427", + "90.00 percentile latency (ns)": "757755274", + "95.00 percentile latency (ns)": "757755274", + "97.00 percentile latency (ns)": "757755274", + "99.00 percentile latency (ns)": "757755274", + "99.90 percentile latency (ns)": "757755274", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "https://armi.in/files/model.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-reference-gpu-onnxruntime-v1.15.0-default_config": { + "bert-99": { + "Offline": { + "performance": "13.197", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "13.197", + "performance_valid": true + } + }, + "deps": [ + "detect,os", + "detect,cpu", + "get,python3", + "get,mlcommons,inference,src", + "get,sut,description", + "get,mlperf,inference,results,dir", + "install,pip-package,for-cmind-python,_package.tabulate", + "get,mlperf,inference,utils" + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-input.json new file mode 100644 index 000000000..c72a9f6a2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-input.json @@ -0,0 +1,56 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_submission,_short", + "division": "open", + "category": "edge", + "host_os": "linux", + "device": "cpu", + "model": "retinanet", + "precision": "float32", + "implementation": "reference", + "backend": "onnxruntime", + 
"scenario": "Offline", + "execution_mode": "test", + "submitter": "CTuning", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "compliance": "no", + "j": true, + "time": true, + "clean": true, + "quiet": true, + "jf": "mlperf-inference-results", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_submission,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=retinanet", + "--precision=float32", + "--implementation=reference", + "--backend=onnxruntime", + "--scenario=Offline", + "--execution_mode=test", + "--submitter=CTuning", + "--power=no", + "--adr.python.version_min=3.8", + "--compliance=no", + "--j", + "--time", + "--clean", + "--quiet", + "--jf=mlperf-inference-results" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-meta.json new file mode 100644 index 000000000..2b8636897 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "7f094c244ebb4985", + "compute_uid": "ee8c568e0ac44f2b", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-18", + "functional": true, + "reproduced": true, + "support_docker": true +} diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-output.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-output.json new file mode 100644 index 000000000..cae36b057 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985-output.json @@ -0,0 +1,146 @@ +{ + "return": 0, + "env": {}, + "new_env": {}, + "state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.808629", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "963485100", + "max latency (ns)": "12366604800", + "mean latency (ns)": "5961694610", + "50.00 percentile latency (ns)": "6164791100", + "90.00 percentile latency (ns)": "12366604800", + "95.00 percentile latency (ns)": "12366604800", + "97.00 percentile latency (ns)": "12366604800", + "99.00 percentile latency (ns)": "12366604800", + "99.90 percentile latency (ns)": "12366604800", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "64" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "resnext50_32x4d_fpn.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "no" + }, + "cm-mlperf-inference-results": { + 
"FGG_LENOVO_P14S-reference-cpu-onnxruntime-v1.16.0-default_config": { + "retinanet": { + "Offline": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + }, + "new_state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.808629", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "963485100", + "max latency (ns)": "12366604800", + "mean latency (ns)": "5961694610", + "50.00 percentile latency (ns)": "6164791100", + "90.00 percentile latency (ns)": "12366604800", + "95.00 percentile latency (ns)": "12366604800", + "97.00 percentile latency (ns)": "12366604800", + "99.00 percentile latency (ns)": "12366604800", + "99.90 percentile latency (ns)": "12366604800", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "64" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "resnext50_32x4d_fpn.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "no" + }, + "cm-mlperf-inference-results": { + "FGG_LENOVO_P14S-reference-cpu-onnxruntime-v1.16.0-default_config": { + "retinanet": { + "Offline": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + }, + "deps": [ + "detect,os", + "detect,cpu", + "get,python3", + "get,mlcommons,inference,src", + "get,sut,description", + "get,mlperf,inference,results,dir", + "install,pip-package,for-cmind-python,_package.tabulate", + "get,mlperf,inference,utils", + "generate,mlperf,inference,submission" + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985.md b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985.md new file mode 100644 index 000000000..6b58ae634 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-7f094c244ebb4985.md @@ -0,0 +1 @@ +TBD1 diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-input.json new file mode 100644 index 000000000..fb7e74af5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-input.json @@ -0,0 +1,53 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_performance-only,_short", + 
"division": "open", + "category": "edge", + "device": "cpu", + "model": "bert-99", + "precision": "uint8", + "implementation": "intel-original", + "backend": "pytorch", + "scenario": "Offline", + "execution_mode": "test", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "clean": true, + "compliance": "no", + "j": true, + "jf": "run-d8c0f02f52bf49ae", + "time": true, + "host_os": "linux", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_performance-only,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=bert-99", + "--precision=uint8", + "--implementation=intel-original", + "--backend=pytorch", + "--scenario=Offline", + "--execution_mode=test", + "--power=no", + "--adr.python.version_min=3.8", + "--clean", + "--compliance=no", + "--j", + "--jf=run-d8c0f02f52bf49ae", + "--time", + "--host_os=linux" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-meta.json new file mode 100644 index 000000000..adf9c9f9f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d5b6b5af6d794045-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "d5b6b5af6d794045", + "compute_uid": "ee8c568e0ac44f2b", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-20T16:18:34.632335", + "functional": true, + "reproduced": true, + "support_docker": true +} diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-input.json new file mode 100644 index 000000000..d23c11665 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-input.json @@ -0,0 +1,53 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_performance-only,_short", + "division": "open", + "category": "edge", + "device": "cpu", + "model": "retinanet", + "precision": "float32", + "implementation": "mil", + "backend": "onnxruntime", + "scenario": "Offline", + "execution_mode": "test", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "clean": true, + "compliance": "no", + "j": true, + "jf": "run-d8c0f02f52bf49ae", + "time": true, + "host_os": "linux", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_performance-only,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=retinanet", + "--precision=float32", + "--implementation=mil", + "--backend=onnxruntime", + "--scenario=Offline", + "--execution_mode=test", + "--power=no", + "--adr.python.version_min=3.8", + "--clean", + "--compliance=no", + "--j", + "--jf=run-d8c0f02f52bf49ae", + "--time", + "--host_os=linux" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-meta.json new file mode 100644 index 000000000..b0269fa05 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-meta.json @@ -0,0 +1,10 @@ +{ + "uid": "d8c0f02f52bf49ae", + "compute_uid": "ee8c568e0ac44f2b", + "bench_uid": 
"39877bb63fb54725", + "date_time": "2024-02-20T15:39:15.255021", + "notes":"Used clang 14 installed via apt; LLVM 16.0.4 couldn't find llvmgold plugin - need to check ...", + "functional": false, + "reproduced": false, + "support_docker": false +} diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-output.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-output.json new file mode 100644 index 000000000..784796ecc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-d8c0f02f52bf49ae-output.json @@ -0,0 +1,137 @@ +{ + "return": 0, + "env": {}, + "new_env": {}, + "state": { + "app_mlperf_inference_log_summary": { + "sut name": "QueueSUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.452945", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "2550773320", + "max latency (ns)": "22077722147", + "mean latency (ns)": "12323786694", + "50.00 percentile latency (ns)": "13414914364", + "90.00 percentile latency (ns)": "22077722147", + "95.00 percentile latency (ns)": "22077722147", + "97.00 percentile latency (ns)": "22077722147", + "99.00 percentile latency (ns)": "22077722147", + "99.90 percentile latency (ns)": "22077722147", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "64" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "resnext50_32x4d_fpn.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "no" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-cpp-cpu-onnxruntime-vdefault-default_config": { + "retinanet": { + "Offline": { + "performance": "0.453", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.453", + "performance_valid": true + } + }, + "new_state": { + "app_mlperf_inference_log_summary": { + "sut name": "QueueSUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.452945", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "2550773320", + "max latency (ns)": "22077722147", + "mean latency (ns)": "12323786694", + "50.00 percentile latency (ns)": "13414914364", + "90.00 percentile latency (ns)": "22077722147", + "95.00 percentile latency (ns)": "22077722147", + "97.00 percentile latency (ns)": "22077722147", + "99.00 percentile latency (ns)": "22077722147", + "99.90 percentile latency (ns)": "22077722147", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + 
"sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "64" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "resnext50_32x4d_fpn.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "no" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-cpp-cpu-onnxruntime-vdefault-default_config": { + "retinanet": { + "Offline": { + "performance": "0.453", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.453", + "performance_valid": true + } + }, + "deps": [ + "detect,os", + "detect,cpu", + "get,python3", + "get,mlcommons,inference,src", + "get,sut,description", + "get,mlperf,inference,results,dir", + "install,pip-package,for-cmind-python,_package.tabulate", + "get,mlperf,inference,utils" + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-input.json new file mode 100644 index 000000000..9eabe5cb6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-input.json @@ -0,0 +1,56 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_submission,_short", + "division": "open", + "category": "edge", + "device": "cpu", + "model": "resnet50", + "host_os": "windows", + "precision": "float32", + "implementation": "reference", + "backend": "onnxruntime", + "scenario": "Offline", + "execution_mode": "test", + "submitter": "CTuning", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "compliance": "no", + "j": true, + "time": true, + "clean": true, + "quiet": true, + "jf": "mlperf-inference-results", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_submission,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=retinanet", + "--precision=float32", + "--implementation=reference", + "--backend=onnxruntime", + "--scenario=Offline", + "--execution_mode=test", + "--submitter=CTuning", + "--power=no", + "--adr.python.version_min=3.8", + "--compliance=no", + "--j", + "--time", + "--clean", + "--quiet", + "--jf=mlperf-inference-results" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-meta.json new file mode 100644 index 000000000..45eb699b9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "df843c22cbf54aaf", + "compute_uid": "fe379ecd1e054a00", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-18", + "functional": true, + "reproduced": true, + "support_docker": true +} diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-output.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-output.json new file 
mode 100644 index 000000000..cae36b057 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf-output.json @@ -0,0 +1,146 @@ +{ + "return": 0, + "env": {}, + "new_env": {}, + "state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.808629", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "963485100", + "max latency (ns)": "12366604800", + "mean latency (ns)": "5961694610", + "50.00 percentile latency (ns)": "6164791100", + "90.00 percentile latency (ns)": "12366604800", + "95.00 percentile latency (ns)": "12366604800", + "97.00 percentile latency (ns)": "12366604800", + "99.00 percentile latency (ns)": "12366604800", + "99.90 percentile latency (ns)": "12366604800", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "64" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "resnext50_32x4d_fpn.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "no" + }, + "cm-mlperf-inference-results": { + "FGG_LENOVO_P14S-reference-cpu-onnxruntime-v1.16.0-default_config": { + "retinanet": { + "Offline": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + }, + "new_state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.808629", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "963485100", + "max latency (ns)": "12366604800", + "mean latency (ns)": "5961694610", + "50.00 percentile latency (ns)": "6164791100", + "90.00 percentile latency (ns)": "12366604800", + "95.00 percentile latency (ns)": "12366604800", + "97.00 percentile latency (ns)": "12366604800", + "99.00 percentile latency (ns)": "12366604800", + "99.90 percentile latency (ns)": "12366604800", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "64" + }, + 
"app_mlperf_inference_measurements": { + "starting_weights_filename": "resnext50_32x4d_fpn.onnx", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "no" + }, + "cm-mlperf-inference-results": { + "FGG_LENOVO_P14S-reference-cpu-onnxruntime-v1.16.0-default_config": { + "retinanet": { + "Offline": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.809", + "performance_valid": true, + "accuracy": "49.593", + "accuracy_valid": true + } + }, + "deps": [ + "detect,os", + "detect,cpu", + "get,python3", + "get,mlcommons,inference,src", + "get,sut,description", + "get,mlperf,inference,results,dir", + "install,pip-package,for-cmind-python,_package.tabulate", + "get,mlperf,inference,utils", + "generate,mlperf,inference,submission" + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf.md b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf.md new file mode 100644 index 000000000..97635650c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-df843c22cbf54aaf.md @@ -0,0 +1 @@ +TBD2 diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-input.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-input.json new file mode 100644 index 000000000..68cf51d22 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-input.json @@ -0,0 +1,55 @@ +{ + "action": "run", + "automation": "script", + "tags": "run-mlperf-inference,_r4.0,_performance-only,_short", + "division": "open", + "category": "edge", + "device": "cpu", + "model": "bert-99", + "precision": "float32", + "implementation": "reference", + "backend": "pytorch", + "scenario": "Offline", + "execution_mode": "test", + "power": "no", + "adr": { + "python": { + "version_min": "3.8" + } + }, + "clean": true, + "compliance": "no", + "j": true, + "jf": "run-f05147815bf840b8", + "quiet": true, + "time": true, + "host_os": "linux", + "cmd": [ + "--tags=run-mlperf-inference,_r4.0,_performance-only,_short", + "--division=open", + "--category=edge", + "--device=cpu", + "--model=bert-99", + "--precision=float32", + "--implementation=reference", + "--backend=pytorch", + "--scenario=Offline", + "--execution_mode=test", + "--power=no", + "--adr.python.version_min=3.8", + "--clean", + "--compliance=no", + "--j", + "--jf=run-f05147815bf840b8", + "--quiet", + "--time", + "--host_os=linux" + ], + "out": "con", + "parsed_automation": [ + [ + "script", + "5b4e0237da074764" + ] + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-meta.json b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-meta.json new file mode 100644 index 000000000..45eb699b9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-meta.json @@ -0,0 +1,9 @@ +{ + "uid": "df843c22cbf54aaf", + "compute_uid": "fe379ecd1e054a00", + "bench_uid": "39877bb63fb54725", + "date_time": "2024-02-18", + "functional": true, + "reproduced": true, + "support_docker": true +} diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-output.json 
b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-output.json new file mode 100644 index 000000000..627e18889 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v3.1/run-f05147815bf840b8-output.json @@ -0,0 +1,137 @@ +{ + "return": 0, + "env": {}, + "new_env": {}, + "state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.771384", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "1409122219", + "max latency (ns)": "12963712908", + "mean latency (ns)": "7203424157", + "50.00 percentile latency (ns)": "7862607410", + "90.00 percentile latency (ns)": "12963712908", + "95.00 percentile latency (ns)": "12963712908", + "97.00 percentile latency (ns)": "12963712908", + "99.00 percentile latency (ns)": "12963712908", + "99.90 percentile latency (ns)": "12963712908", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + "performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "https://zenodo.org/record/3733896/files/model.pytorch", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-reference-cpu-pytorch-v2.1.0-default_config": { + "bert-99": { + "Offline": { + "performance": "0.771", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.771", + "performance_valid": true + } + }, + "new_state": { + "app_mlperf_inference_log_summary": { + "sut name": "PySUT", + "scenario": "Offline", + "mode": "PerformanceOnly", + "samples per second": "0.771384", + "result is": "VALID", + "min duration satisfied": "Yes", + "min queries satisfied": "Yes", + "early stopping satisfied": "Yes", + "min latency (ns)": "1409122219", + "max latency (ns)": "12963712908", + "mean latency (ns)": "7203424157", + "50.00 percentile latency (ns)": "7862607410", + "90.00 percentile latency (ns)": "12963712908", + "95.00 percentile latency (ns)": "12963712908", + "97.00 percentile latency (ns)": "12963712908", + "99.00 percentile latency (ns)": "12963712908", + "99.90 percentile latency (ns)": "12963712908", + "samples_per_query": "10", + "target_qps": "1", + "target_latency (ns)": "0", + "max_async_queries": "1", + "min_duration (ms)": "0", + "max_duration (ms)": "0", + "min_query_count": "1", + "max_query_count": "10", + "qsl_rng_seed": "13281865557512327830", + "sample_index_rng_seed": "198141574272810017", + "schedule_rng_seed": "7575108116881280410", + "accuracy_log_rng_seed": "0", + "accuracy_log_probability": "0", + "accuracy_log_sampling_target": "0", + "print_timestamps": "0", + "performance_issue_unique": "0", + "performance_issue_same": "0", + "performance_issue_same_index": "0", + 
"performance_sample_count": "10833" + }, + "app_mlperf_inference_measurements": { + "starting_weights_filename": "https://zenodo.org/record/3733896/files/model.pytorch", + "retraining": "no", + "input_data_types": "fp32", + "weight_data_types": "fp32", + "weight_transformations": "none" + }, + "cm-mlperf-inference-results": { + "ip_172_31_89_56-reference-cpu-pytorch-v2.1.0-default_config": { + "bert-99": { + "Offline": { + "performance": "0.771", + "performance_valid": true + } + } + } + }, + "cm-mlperf-inference-results-last": { + "performance": "0.771", + "performance_valid": true + } + }, + "deps": [ + "detect,os", + "detect,cpu", + "get,python3", + "get,mlcommons,inference,src", + "get,sut,description", + "get,mlperf,inference,results,dir", + "install,pip-package,for-cmind-python,_package.tabulate", + "get,mlperf,inference,utils" + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v4.0/_cm.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v4.0/_cm.yaml new file mode 100644 index 000000000..50086d086 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v4.0/_cm.yaml @@ -0,0 +1,38 @@ +alias: benchmark-run-mlperf-inference-v4.0 +uid: b4ee9b6c820e493a + +automation_alias: cfg +automation_uid: 88dce9c160324c5d + +tags: +- benchmark +- run +- mlperf +- inference +- v4.0 + +name: "MLPerf inference - v4.0" + +supported_compute: +- ee8c568e0ac44f2b +- fe379ecd1e054a00 + +bench_uid: 39877bb63fb54725 + +view_dimensions: +- - input.device + - "MLPerf device" +- - input.implementation + - "MLPerf implementation" +- - input.backend + - "MLPerf backend" +- - input.model + - "MLPerf model" +- - input.scenario + - "MLPerf scenario" +- - input.host_os + - "Host OS" +- - output.state.cm-mlperf-inference-results-last.performance + - "Got performance" +- - output.state.cm-mlperf-inference-results-last.accuracy + - "Got accuracy" diff --git a/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v4.1/_cm.yaml b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v4.1/_cm.yaml new file mode 100644 index 000000000..716adc20b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/benchmark-run-mlperf-inference-v4.1/_cm.yaml @@ -0,0 +1,39 @@ +alias: benchmark-run-mlperf-inference-v4.1 +uid: b7e89771987d4168 + +automation_alias: cfg +automation_uid: 88dce9c160324c5d + +tags: +- benchmark +- run +- mlperf +- inference +- v4.1 + +name: "MLPerf inference - v4.1" + +supported_compute: +- ee8c568e0ac44f2b +- fe379ecd1e054a00 +- d8f06040f7294319 + +bench_uid: 39877bb63fb54725 + +view_dimensions: +- - input.device + - "MLPerf device" +- - input.implementation + - "MLPerf implementation" +- - input.backend + - "MLPerf backend" +- - input.model + - "MLPerf model" +- - input.scenario + - "MLPerf scenario" +- - input.host_os + - "Host OS" +- - output.state.cm-mlperf-inference-results-last.performance + - "Got performance" +- - output.state.cm-mlperf-inference-results-last.accuracy + - "Got accuracy" diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/_cm.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/_cm.yaml new file mode 100644 index 000000000..d5d60a385 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/_cm.yaml @@ -0,0 +1,10 @@ +alias: docker-basic-configurations +uid: d2a0c5bb17664c93 + +automation_alias: cfg +automation_uid: 88dce9c160324c5d + +tags: +- docker +- basic +- configurations diff --git 
a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-archlinux.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-archlinux.yaml new file mode 100644 index 000000000..238a1e57c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-archlinux.yaml @@ -0,0 +1,9 @@ +uid: 9960e9fb3cb24cb3 + +name: "Basic ArchLinux" + +input: + docker_base_image: 'archlinux' + docker_os: arch + docker_os_version: 'latest' + \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-rhel-9.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-rhel-9.yaml new file mode 100644 index 000000000..1fba915fa --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-rhel-9.yaml @@ -0,0 +1,9 @@ +uid: 27b4afcdd8e042e8 + +name: "Basic RHEL 9" + +input: + docker_base_image: 'registry.access.redhat.com/ubi9' + docker_os: 'rhel' + docker_os_version: '9' + \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-20.04.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-20.04.yaml new file mode 100644 index 000000000..35c0b48f9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-20.04.yaml @@ -0,0 +1,9 @@ +uid: 59311e6098c14b21 + +name: "Basic Ubuntu 20.04" + +input: + docker_base_image: 'ubuntu:20.04' + docker_os: ubuntu + docker_os_version: '20.04' + \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-22.04.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-22.04.yaml new file mode 100644 index 000000000..371ba2b6a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-22.04.yaml @@ -0,0 +1,9 @@ +uid: 614aa48d90724835 + +name: "Basic Ubuntu 22.04" + +input: + docker_base_image: 'ubuntu:22.04' + docker_os: ubuntu + docker_os_version: '22.04' + \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-23.04.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-23.04.yaml new file mode 100644 index 000000000..990d56cf5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-23.04.yaml @@ -0,0 +1,9 @@ +uid: 276bd8ab39324f5f + +name: "Basic Ubuntu 23.04" + +input: + docker_base_image: 'ubuntu:23.04' + docker_os: ubuntu + docker_os_version: '23.04' + \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-24.04.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-24.04.yaml new file mode 100644 index 000000000..d949d5519 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/basic-ubuntu-24.04.yaml @@ -0,0 +1,9 @@ +uid: 12e86eb386314866 + +name: "Basic Ubuntu 24.04" + +input: + docker_base_image: 'ubuntu:24.04' + docker_os: ubuntu + docker_os_version: '24.04' + \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-20.04-cuda-11.8-cudnn-8.6.0-pytorch-1.13.0.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-20.04-cuda-11.8-cudnn-8.6.0-pytorch-1.13.0.yaml new file mode 100644 index 000000000..16107d8d5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-20.04-cuda-11.8-cudnn-8.6.0-pytorch-1.13.0.yaml @@ -0,0 +1,11 @@ +uid: 
854e65fb31584d63 + +name: "Nvidia Ubuntu 20.04 CUDA 11.8 cuDNN 8.6.0 PyTorch 1.13.0 (pytorch:22.10)" + +ref_url: https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-22-10.html + +input: + docker_base_image: 'nvcr.io/nvidia/pytorch:22.10-py3' + docker_os: ubuntu + docker_os_version: '20.04' + \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.1-cudnn-8.9.1-pytorch-2.0.0.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.1-cudnn-8.9.1-pytorch-2.0.0.yaml new file mode 100644 index 000000000..66b9efd0d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.1-cudnn-8.9.1-pytorch-2.0.0.yaml @@ -0,0 +1,11 @@ +uid: e0e7167139a74e36 + +name: "Nvidia Ubuntu 22.04 CUDA 12.1 cuDNN 8.9.1 PyTorch 2.0.0 (pytorch:23.05)" + +ref_url: https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-23-05.html + +input: + docker_base_image: 'nvcr.io/nvidia/pytorch:23.05-py3' + docker_os: ubuntu + docker_os_version: '22.04' + \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.4-cudnn-9.0.0-pytorch-2.3.0.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.4-cudnn-9.0.0-pytorch-2.3.0.yaml new file mode 100644 index 000000000..38bcff694 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.4-cudnn-9.0.0-pytorch-2.3.0.yaml @@ -0,0 +1,11 @@ +uid: 49fc51f2999b4545 + +name: "Nvidia Ubuntu 22.04 CUDA 12.4 cuDNN 9.0.0 PyTorch 2.3.0 (pytorch:24.03)" + +ref_url: https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-03.html + +input: + docker_base_image: 'nvcr.io/nvidia/pytorch:24.03-py3' + docker_os: ubuntu + docker_os_version: '22.04' + \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.5-cudnn-9.1.0-pytorch-2.4.0.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.5-cudnn-9.1.0-pytorch-2.4.0.yaml new file mode 100644 index 000000000..b4e45d348 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.5-cudnn-9.1.0-pytorch-2.4.0.yaml @@ -0,0 +1,11 @@ +uid: 81879736ae5842f4 + +name: "Nvidia Ubuntu 22.04 CUDA 12.5 cuDNN 9.1.0 PyTorch 2.4.0 (pytorch:24.06)" + +ref_url: https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-06.html + +input: + docker_base_image: 'nvcr.io/nvidia/pytorch:24.06-py3' + docker_os: ubuntu + docker_os_version: '22.04' + \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.6-cudnn-9.3.0-pytorch-2.5.0.yaml b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.6-cudnn-9.3.0-pytorch-2.5.0.yaml new file mode 100644 index 000000000..a9e2229ea --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/cfg/docker-basic-configurations/nvidia-ubuntu-22.04-cuda-12.6-cudnn-9.3.0-pytorch-2.5.0.yaml @@ -0,0 +1,11 @@ +uid: 203a68df99d44137 + +name: "Nvidia Ubuntu 22.04 CUDA 12.6 cuDNN 9.3.0 PyTorch 2.5.0 (pytorch:24.08)" + +ref_url: https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html + +input: + docker_base_image: 'nvcr.io/nvidia/pytorch:24.08-py3' + docker_os: ubuntu + docker_os_version: '22.04' + \ No newline at end of file 
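These basic Docker configurations can also be consumed programmatically. Below is a minimal sketch in Python (an assumption for illustration: it requires PyYAML and a relative path into this repository, neither of which is prescribed by the configs themselves):

```python
import yaml  # assumes PyYAML (pip install pyyaml)

# Load one of the basic Docker configurations above and inspect its
# 'input' block, which carries the base image and OS version keys.
with open("cfg/docker-basic-configurations/basic-ubuntu-22.04.yaml") as f:
    cfg = yaml.safe_load(f)

docker_input = cfg["input"]
print(docker_input["docker_base_image"])   # 'ubuntu:22.04'
print(docker_input["docker_os"])           # 'ubuntu'
print(docker_input["docker_os_version"])   # '22.04'
```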
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/add-derived-metrics-to-mlperf-inference/README.md b/cmx4mlops/cmx4mlops/repo/challenge/add-derived-metrics-to-mlperf-inference/README.md new file mode 100644 index 000000000..8302f63d6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/add-derived-metrics-to-mlperf-inference/README.md @@ -0,0 +1,32 @@ +### Challenge + +Check past MLPerf inference results in [this MLCommons repository](https://github.com/mlcommons/cm4mlperf-results) +and add derived metrics such as result / number of cores, power efficiency, device cost, operational costs, etc. + +Add clock speed as a third dimension to graphs and improve the bar-graph visualization. + +Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md) +to run reference implementations of MLPerf inference benchmarks +using the CM automation language and use them as a base for your developments. + +Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision. + + +### Prizes + +* *All contributors will receive 1 point for submitting valid results for 1 complete benchmark on one system.* +* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).* + + +### Organizers + +* [MLCommons](https://cKnowledge.org/mlcommons-taskforce) +* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation) +* [cKnowledge.org](https://www.linkedin.com/company/cknowledge) + +### Results + +All accepted results will be publicly available in the CM format with derived metrics +in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results), +in the [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments) +and at the official [MLCommons website](https://mlcommons.org). 
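To prototype such derived metrics locally, here is a minimal sketch in Python (the `result` dictionary and its `num_cores` field are illustrative assumptions rather than a confirmed schema of cm4mlperf-results; the `Power_Efficiency` expression follows the derived-metrics example used in the Collective Knowledge explorer elsewhere in this patch):

```python
# Hedged sketch: add derived metrics to one MLPerf result record.
# 'Result', 'Result_Power' and 'num_cores' are illustrative field names.
result = {"Result": 1234.5, "Result_Power": 150.0, "num_cores": 64}

# Same expression as in the CK explorer example: 1000 / Result_Power
result["Power_Efficiency"] = 1000 / result["Result_Power"]

# Result per core, one of the metrics suggested by this challenge
result["Result_per_core"] = result["Result"] / result["num_cores"]

print(result)
```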
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/add-derived-metrics-to-mlperf-inference/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/add-derived-metrics-to-mlperf-inference/_cm.json new file mode 100644 index 000000000..cbdc21246 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/add-derived-metrics-to-mlperf-inference/_cm.json @@ -0,0 +1,22 @@ +{ + "alias": "add-derived-metrics-to-mlperf-inference", + "automation_alias": "challenge", + "automation_uid": "3d84abd768f34e08", + "date_close_extension": true, + "date_open": "20240204", + "points": 2, + "tags": [ + "modularize", + "optimize", + "reproduce", + "replicate", + "benchmark", + "automate", + "derived-metrics", + "mlperf-inference", + "mlperf-inference-derived-metrics" + ], + "title": "Add derived metrics to MLPerf inference benchmarks (power efficiency, results / No of cores, costs, etc)", + "trophies": true, + "uid": "c65b56d7770946ee" +} diff --git a/cmx4mlops/cmx4mlops/repo/challenge/automate-mlperf-inference-v3.1-and-v4.0-2024/README.md b/cmx4mlops/cmx4mlops/repo/challenge/automate-mlperf-inference-v3.1-and-v4.0-2024/README.md new file mode 100644 index 000000000..a2059c0fe --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/automate-mlperf-inference-v3.1-and-v4.0-2024/README.md @@ -0,0 +1,4 @@ +20240220: +* A prototype of a GUI to generate CM commands to run MLPerf inference benchmarks is ready: [link](https://access.cknowledge.org/playground/?action=howtorun&bench_uid=39877bb63fb54725) +* A prototype of the infrastructure to reproduce MLPerf inference benchmark results is ready: [link](https://access.cknowledge.org/playground/?action=reproduce) +* On-going efforts: https://github.com/mlcommons/ck/issues/1052 diff --git a/cmx4mlops/cmx4mlops/repo/challenge/automate-mlperf-inference-v3.1-and-v4.0-2024/_cm.yaml b/cmx4mlops/cmx4mlops/repo/challenge/automate-mlperf-inference-v3.1-and-v4.0-2024/_cm.yaml new file mode 100644 index 000000000..b8b519d27 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/automate-mlperf-inference-v3.1-and-v4.0-2024/_cm.yaml @@ -0,0 +1,21 @@ +alias: automate-mlperf-inference-v3.1-and-v4.0-2024 +uid: f89f152fc2614240 + +automation_alias: challenge +automation_uid: 3d84abd768f34e08 + +title: Add MLCommons CM workflows and unified interface to automate MLPerf inference v3.1 and v4.0 benchmarks (Intel, Nvidia, Qualcomm, Arm64, TPU ...) + +date_open: '20231215' +date_close: '20240315' + +hot: true + +tags: +- automate +- mlperf-inference-v3.1-and-v4.0 +- 2024 + +experiments: +- tags: mlperf-inference,v3.1 +- tags: mlperf-inference,v4.0 diff --git a/cmx4mlops/cmx4mlops/repo/challenge/compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024/README.md b/cmx4mlops/cmx4mlops/repo/challenge/compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024/README.md new file mode 100644 index 000000000..adfbea726 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024/README.md @@ -0,0 +1,10 @@ +This challenge is under preparation. You can read about the motivation behind this challenge in our [invited talk at MLPerf-Bench @ HPCA'24](https://doi.org/10.5281/zenodo.10786893). + +We plan to extend [MLCommons CM framework](https://github.com/mlcommons/ck) +to automatically compose high-performance and cost-efficient AI systems +based on MLPerf inference v4.0 results and [CM automation recipes](https://access.cknowledge.org/playground/?action=scripts). 
+ +* A prototype of a GUI to generate CM commands to run MLPerf inference benchmarks is ready: [link](https://access.cknowledge.org/playground/?action=howtorun&bench_uid=39877bb63fb54725) +* A prototype of the infrastructure to reproduce MLPerf inference benchmark results is ready: [link](https://access.cknowledge.org/playground/?action=reproduce) + +Contact the [MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) for more details. diff --git a/cmx4mlops/cmx4mlops/repo/challenge/compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024/_cm.yaml b/cmx4mlops/cmx4mlops/repo/challenge/compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024/_cm.yaml new file mode 100644 index 000000000..b1d4fe9f1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024/_cm.yaml @@ -0,0 +1,25 @@ +alias: compose-high-performance-and-cost-efficient-ai-systems-based-on-mlperf-4.0-2024 +uid: 7c983102d89e4869 + +automation_alias: challenge +automation_uid: 3d84abd768f34e08 + +title: "Compose high-performance and cost-efficient AI systems using MLCommons' Collective Mind and MLPerf inference" + +date_open: '20240101' + +tags: +- compose +- ai +- systems +- mlperf-inference-v4.0 +- cm +- mlcommons-cm +- mlperf +- v4.0 +- performance +- energy +- cost + +experiments: +- tags: mlperf-inference,v4.0 diff --git a/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/README.md b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/README.md new file mode 100644 index 000000000..306341271 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/README.md @@ -0,0 +1,30 @@ +### Challenge + +Connect CM workflows to run MLPerf inference benchmarks with [OpenBenchmarking.org](https://openbenchmarking.org). + +Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md) +to run reference implementations of MLPerf inference benchmarks +using the CM automation language and use them as a base for your developments. + +Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision. + + +### Prizes + +* *All contributors will receive 1 point for submitting valid results for 1 complete benchmark on one system.* +* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).* + + + +### Organizers + +* Michael Larabel +* Grigori Fursin +* [MLCommons](https://cKnowledge.org/mlcommons-taskforce) +* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation) +* [cKnowledge.org](https://www.linkedin.com/company/cknowledge) + +### Results + +Results will be available at [OpenBenchmarking.org](https://openbenchmarking.org) +and the [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments). 
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/_cm.json new file mode 100644 index 000000000..c1e65aadb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-inference-v3.1-with-openbenchmarking/_cm.json @@ -0,0 +1,22 @@ +{ + "alias": "connect-mlperf-inference-v3.1-with-openbenchmarking", + "automation_alias": "challenge", + "automation_uid": "3d84abd768f34e08", + "date_open": "20240101", + "date_close_extension": true, + "points": 2, + "tags": [ + "modularize", + "optimize", + "reproduce", + "replicate", + "benchmark", + "automate", + "openbenchmarking", + "mlperf-inference", + "mlperf-inference-openbenchmarking" + ], + "title": "Run MLPerf inference benchmarks using CM via OpenBenchmarking.org", + "trophies": true, + "uid": "534592626eb44efe" +} diff --git a/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-with-medperf/README.md b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-with-medperf/README.md new file mode 100644 index 000000000..f2f572bd4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-with-medperf/README.md @@ -0,0 +1,23 @@ +### Challenge + +Evaluate models from the [MLCommons MedPerf platform](https://www.medperf.org) in terms of latency, throughput, power consumption and other metrics +using MLPerf loadgen and the MLCommons CM automation language. + +See the [Nature 2023 article about MedPerf](https://www.nature.com/articles/s42256-023-00652-2) +and [ACM REP'23 keynote about CM](https://doi.org/10.5281/zenodo.8105339) to learn more about these projects. + +Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md) +to run reference implementations of MLPerf inference benchmarks +using the CM automation language and use them as a base for your developments. 
+ + +### Prizes + +* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).* + + +### Organizers + +* [cKnowledge.org](https://www.linkedin.com/company/cknowledge) +* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation) +* [MLCommons](https://cKnowledge.org/mlcommons-taskforce) diff --git a/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-with-medperf/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-with-medperf/_cm.json new file mode 100644 index 000000000..d48d0a9fe --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/connect-mlperf-with-medperf/_cm.json @@ -0,0 +1,26 @@ +{ + "alias": "connect-mlperf-with-medperf", + "automation_alias": "challenge", + "automation_uid": "3d84abd768f34e08", + "date_close_extension": true, + "date_open": "20240105", + "points": 2, + "tags": [ + "modularize", + "optimize", + "reproduce", + "replicate", + "benchmark", + "automate", + "medperf", + "mlperf-inference", + "mlperf-inference-medperf", + "mlperf-inference-medperf", + "mlperf-inference-medperf-v3.1", + "mlperf-inference-medperf-v3.1-2023", + "v3.1" + ], + "title": "Connect MedPerf with MLPerf and CM", + "trophies": true, + "uid": "c26d1fbf89164728" +} diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2023/README.md new file mode 100644 index 000000000..62a4826ad --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2023/README.md @@ -0,0 +1,16 @@ +### CM tutorial + +https://github.com/mlcommons/ck/blob/master/docs/tutorials/scc23-mlperf-inference-bert.md + +### Challenge + +Reproduce and optimize MLPerf inference benchmarks during the Student Cluster Competition at SuperComputing'23. + +See our [related challenge from 2022](https://access.cknowledge.org/playground/?action=challenges&name=repro-mlperf-inference-retinanet-scc2022). 
+ +### Organizers + +* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) +* [cTuning foundation](https://cTuning.org) +* [cKnowledge.org](https://cKnowledge.org) + diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2023/_cm.json new file mode 100644 index 000000000..021872b15 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2023/_cm.json @@ -0,0 +1,20 @@ +{ + "alias": "optimize-mlperf-inference-scc2023", + "automation_alias": "challenge", + "automation_uid": "3d84abd768f34e08", + "date_close": "20231115", + "date_open": "20230915", + "tags": [ + "automate", + "modularize", + "reproduce", + "replicate", + "benchmark", + "mlperf", + "mlperf-inference", + "mlperf-inference-scc", + "mlperf-inference-scc-2023" + ], + "title": "Reproduce and optimize MLPerf inference v3.1 benchmarks at the Student Cluster Competition'23 at SuperComputing'23 using CM", + "uid": "ddaf594f84b14bc2" +} diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2024/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2024/README.md new file mode 100644 index 000000000..1f9be23af --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2024/README.md @@ -0,0 +1,7 @@ +The [MLCommons](https://mlcommons.org), [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https://cKnowledge.org) +are preparing a unified interface to run the MLPerf inference benchmark at the Student Cluster Competition'24. + +See [the CM-MLPerf tutorial for SCC'23](https://github.com/mlcommons/ck/blob/master/docs/tutorials/scc23-mlperf-inference-bert.md). +Note that the MLPerf model will change in SCC'24 - please stay tuned for more details! + +See https://sc24.supercomputing.org/students/student-cluster-competition for more details about SCC. diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2024/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2024/_cm.json new file mode 100644 index 000000000..ab75aa27a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-scc2024/_cm.json @@ -0,0 +1,19 @@ +{ + "alias": "optimize-mlperf-inference-scc2024", + "automation_alias": "challenge", + "automation_uid": "3d84abd768f34e08", + "date_open": "20241001", + "tags": [ + "automate", + "modularize", + "reproduce", + "replicate", + "benchmark", + "mlperf", + "mlperf-inference", + "mlperf-inference-scc", + "mlperf-inference-scc-2024" + ], + "title": "Run and optimize the MLPerf inference benchmark using CM at the Student Cluster Competition'24 at SuperComputing'24", + "uid": "f7fcba4c43ab4412" +} diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v2.1-2022/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v2.1-2022/README.md new file mode 100644 index 000000000..d0ac7cf15 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v2.1-2022/README.md @@ -0,0 +1,18 @@ +### Challenge + +Prepare, optimize and reproduce MLPerf inference v2.1 benchmarks across diverse implementations, software and hardware +using the [MLCommons CK framework](https://github.com/mlcommons/ck). 
+ +### Organizers + +* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) +* [cTuning foundation](https://cTuning.org) +* [OctoML](https://octoml.ai) + +### Status + +This challenge has been successfully completed. + +### Results + +Results are available [here](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-inference,v2.1). diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v2.1-2022/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v2.1-2022/_cm.json new file mode 100644 index 000000000..31cb5dffd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v2.1-2022/_cm.json @@ -0,0 +1,27 @@ +{ + "alias": "optimize-mlperf-inference-v2.1-2022", + "automation_alias": "challenge", + "automation_uid": "3d84abd768f34e08", + "date_close": "20220901", + "date_open": "20220701", + "experiments": [ + { + "tags": "mlperf-inference,v2.1" + } + ], + "tags": [ + "modularize", + "optimize", + "reproduce", + "replicate", + "automate", + "benchmark", + "mlperf", + "mlperf-inference", + "mlperf-inference-v2.1", + "mlperf-inference-v2.1-2022", + "v2.1" + ], + "title": "Run and optimize MLPerf inference v2.1 benchmarks", + "uid": "2e13154b7fbb412d" +} diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/README.md new file mode 100644 index 000000000..da6decc8c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/README.md @@ -0,0 +1,74 @@ +### Challenge + +Run MLPerf inference v3.0 benchmarks out-of-the-box across diverse implementations, software and hardware +using the [MLCommons CM automation language](https://github.com/mlcommons/ck) +and submit public results to the MLPerf inference v3.0 round via the [cTuning foundation](https://cTuning.org). + +* [GUI to run MLPerf inference benchmarks](https://cknowledge.org/mlperf-inference-gui) +* [GUI to prepare MLPerf inference submissions](https://cknowledge.org/mlperf-inference-submission-gui) + +### Organizers + +* [MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) +* [cTuning foundation](https://cTuning.org) +* [cKnowledge](https://cKnowledge.org) + +### Status + +This challenge has been successfully completed. + +### Results + +Official results: +* https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/cTuning +* https://github.com/mlcommons/inference_results_v3.0/tree/main/open/cTuning + +Results in the MLCommons CK/CM format: +* https://github.com/mlcommons/cm4mlperf-results + +Visualization and comparison with derived metrics: +* [MLCommons Collective Knowledge Playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-inference,v3.0). 
+ +### The outcome + +We are very pleased to announce the successful outcome of the 1st +community challenge to run, reproduce and optimize MLPerf inference v3.0 +benchmarks: our MLCommons CK/CM workflow automation framework has helped +to prepare more than 80% of all submission results including 98% of power +results with very diverse technology and benchmark implementations from +Neural Magic, Qualcomm, cKnowledge Ltd, KRAI, cTuning foundation, Dell +Technologies, Hewlett Packard Enterprise, Lenovo, Hugging Face, NVIDIA, +Intel Corporation, AMD and Apple across diverse CPUs, GPUs and DSPs with +PyTorch, ONNX, QAIC, TF/TFLite, TVM and TensorRT using popular cloud +providers (GCP, AWS, Azure) and individual servers and edge devices +provided by our [volunteers](https://access.cknowledge.org/playground/?action=contributors). + +You can now see and compare all MLPerf inference results v3.0, v2.1 and +v2.0 online together with reproducibility reports including the +[MLPerf BERT model](https://huggingface.co/ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1) +from the [Hugging Face Zoo](https://www.linkedin.com/company/huggingface/?lipi=urn%3Ali%3Apage%3Ad_flagship3_pulse_read%3B4CDUdiVxT7WqLJNXO%2BI5bQ%3D%3D) +on the [Nvidia Jetson Orin platform](https://github.com/mlcommons/ck/blob/master/cm-mlops/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-nvidia-jetson-orin.md#reproducing-the-nvidia-jetson-agx-orin-submission). +You can even create your own derived metrics (such as performance per Watt), +provide your own constraints using this [MLCommons repository](https://github.com/mlcommons/cm_inference_results) and visualize +them as shown in [this example](https://access.cknowledge.org/playground/?action=experiments&name=e472410ee67c41f9&x=Result&y=Power_Efficiency&filter=result[%27Result_Power%27]%3C35&derived_metrics=result%5B%27Power_Efficiency%27%5D%3D1000%2Fresult%5B%27Result_Power%27%5D&c=accelerator_model_name&axis_key_s=version). + +Additional thanks to [Michael Goin](https://www.linkedin.com/in/michael-goin) +from [Neural Magic](https://www.linkedin.com/company/neural-magic/?lipi=urn%3Ali%3Apage%3Ad_flagship3_pulse_read%3B4CDUdiVxT7WqLJNXO%2BI5bQ%3D%3D), our international +students including [Himanshu Dutta](https://www.linkedin.com/in/ACoAACpPCiMB7zUNStsqBmaOCtd100a7wXBGu_M?lipi=urn%3Ali%3Apage%3Ad_flagship3_pulse_read%3B4CDUdiVxT7WqLJNXO%2BI5bQ%3D%3D), +[Aditya Kumar Shaw](https://www.linkedin.com/in/ACoAACJ3ikUBjuHqi35ibm8CG6IEYv-v_VsobIs?lipi=urn%3Ali%3Apage%3Ad_flagship3_pulse_read%3B4CDUdiVxT7WqLJNXO%2BI5bQ%3D%3D), +Sachin Mudaliyar, [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), +and all [CK/CM users and contributors](https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md) for helping us to +validate, use and improve this open-source technology to automate +benchmarking and optimization of AI/ML systems in terms of performance, +accuracy, power and costs! We are also grateful to [HiPEAC](https://www.linkedin.com/company/hipeac) +and [OctoML](https://www.linkedin.com/company/octoml) for +sponsoring the initial development, and to Peter Mattson, David Kanter, Vijay +Janapa Reddi and Alexandros Karargyris for fruitful discussions. 
+ + +### Dissemination + +* [Forbes article](https://www.forbes.com/sites/karlfreund/2023/04/05/nvidia-performance-trounces-all-competitors-who-have-the-guts-to-submit-to-mlperf-inference-30/?sh=3c38d2866676) +* [ZDNet article](https://www.zdnet.com/article/nvidia-dell-qualcomm-speed-up-ai-results-in-latest-benchmark-tests) +* [LinkedIn article from Grigori Fursin (MLCommons Task Force co-chair)](https://www.linkedin.com/pulse/announcing-my-new-project-reproducible-optimization-co-design-fursin) +* [LinkedIn article from Arjun Suresh (MLCommons Task Force co-chair)](https://www.linkedin.com/posts/arjunsuresh_nvidia-performance-trounces-all-competitors-activity-7049500972275929088-nnnx?utm_source=share&utm_medium=member_desktop) diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/_cm.json new file mode 100644 index 000000000..0baf3cfee --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/_cm.json @@ -0,0 +1,27 @@ +{ + "alias": "optimize-mlperf-inference-v3.0-2023", + "automation_alias": "challenge", + "automation_uid": "3d84abd768f34e08", + "date_close": "20230301", + "date_open": "20230201", + "experiments": [ + { + "tags": "mlperf-inference,v3.0" + } + ], + "tags": [ + "modularize", + "optimize", + "reproduce", + "replicate", + "automate", + "benchmark", + "mlperf", + "mlperf-inference", + "mlperf-inference-v3.0", + "mlperf-inference-v3.0-2023", + "v3.0" + ], + "title": "Run and optimize MLPerf inference v3.0 benchmarks", + "uid": "57cbc3384d7640f9" +} diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/crowd-benchmark-mlperf-bert-inference-cuda.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/crowd-benchmark-mlperf-bert-inference-cuda.md new file mode 100644 index 000000000..f6a17979c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/crowd-benchmark-mlperf-bert-inference-cuda.md @@ -0,0 +1,281 @@ +# Crowd-benchmarking MLPerf BERT inference + +
+Click here to see the table of contents. + +* [Crowd-benchmarking MLPerf BERT inference](#crowd-benchmarking-mlperf-bert-inference) +* [System preparation](#system-preparation) + * [Minimal system requirements](#minimal-system-requirements) + * [Install CM (CK2) automation meta-framework](#install-cm-ck2-automation-meta-framework) + * [Pull CM repository with portable automation recipes](#pull-cm-repository-with-portable-automation-recipes) + * [Detect or install CUDA](#detect-or-install-cuda) + * [Test CUDA installation](#test-cuda-installation) + * [Install Python virtual environment](#install-python-virtual-environment) + * [Detect or install cuDNN](#detect-or-install-cudnn) + * [Detect or install TensorRT](#detect-or-install-tensorrt) + * [Run MLPerf inference benchmark with BERT](#run-mlperf-inference-benchmark-with-bert) + * [Try ONNX runtime backend](#try-onnx-runtime-backend) + * [Do a test run to detect and record the system performance](#do-a-test-run-to-detect-and-record-the-system-performance) + * [Do a full accuracy run for all the scenarios](#do-a-full-accuracy-run-for-all-the-scenarios) + * [Do a full performance run for all the scenarios](#do-a-full-performance-run-for-all-the-scenarios) + * [Populate the README files](#populate-the-readme-files) + * [Generate MLPerf submission tree](#generate-mlperf-submission-tree) + * [Push the results to GitHub repo](#push-the-results-to-github-repo) + * [Try PyTorch backend](#try-pytorch-backend) + * [Test composable ML benchmark with other models, data sets, frameworks and platforms](#test-composable-ml-benchmark-with-other-models-data-sets-frameworks-and-platforms) +* [The next steps](#the-next-steps) + +
+ + +This is a pilot community project to collaboratively run the MLPerf BERT inference benchmark +across diverse platforms provided by volunteers, similar to [SETI@home](https://setiathome.berkeley.edu/). +However, instead of searching for extraterrestrial intelligence, we are +searching for the optimal software/hardware combinations to run various AI and ML workloads +in terms of performance, accuracy, power and costs ... + +This benchmark is composed of [portable and reusable automation recipes](https://github.com/mlcommons/ck/blob/master/docs/list_of_scripts.md) +developed by the [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) +to modularize complex AI and ML systems and automate their benchmarking, design space exploration, optimization and deployment +across continuously evolving software, hardware, models and data. + +*If you submit your results before 1pm PST on Friday 3rd, 2023, + they will be accepted for the official MLPerf inference v3.0 submission round + and your name acknowledged in the notes!* + + +# System preparation + +## Minimal system requirements + +* CPU: any x86-64 or Arm64 based machine +* GPU: any relatively modern Nvidia GPU with 8GB+ memory and CUDA 11.4+ +* OS: we have tested this automation on Ubuntu 20.04, Ubuntu 22.04 and Debian 10 +* Disk space: ~10GB +* Python: 3.8+ +* All other dependencies (artifacts and tools) will be installed by the CM meta-framework (aka CK2) + +## Install CM (CK2) automation meta-framework + +Follow [this guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) to install the [MLCommons CM framework](https://github.com/mlcommons/ck) +(the 2nd generation of the Collective Knowledge framework) on your system. + +## Pull CM repository with portable automation recipes + +Pull the MLCommons CM repository with [cross-platform CM scripts](https://github.com/mlcommons/ck/blob/master/docs/list_of_scripts.md) +supporting portable MLOps and DevOps: + +```bash +cm pull repo mlcommons@ck --checkout=681547519f4d9a8991d992d1300c90cfde06e9b9 +``` + +CM pulls all such repositories into the `$HOME/CM` directory to search for portable CM automation recipes and artifacts. + +We use the unified CM CLI & Python API of [portable and reusable CM scripts](https://github.com/mlcommons/ck/blob/master/docs/list_of_scripts.md) +to compose portable automation pipelines (also implemented as CM scripts) that can automatically detect or install all necessary artifacts (tools, models, datasets, libraries, etc.) +required to run a given software project such as the MLPerf inference benchmark. + +These CM scripts simply wrap existing native scripts and tools as simple micro-services +with a human-readable CLI and a simple Python API to be able to easily connect them together +and run on any platform in a unified way. + +## Detect or install CUDA + +Run the following CM script: +```bash +cm run script "get cuda" --out=json +``` + +If CUDA is automatically detected, it will be registered in the CM cache: +```bash +cm show cache --tags=get,cuda +``` + +Otherwise, this script will attempt to download and install the latest CUDA +from the Nvidia website. + +Please report any issues with CM scripts [here](https://github.com/mlcommons/ck/issues). + +### Test CUDA installation + +You can test whether the CUDA toolkit and driver were detected or installed successfully using the following command: +```bash +cm run script "get cuda-devices" +``` + +You should see output similar to the following: +```txt +Checking compiler version ... 
+ +nvcc: NVIDIA (R) Cuda compiler driver +Copyright (c) 2005-2022 NVIDIA Corporation +Built on Wed_Sep_21_10:33:58_PDT_2022 +Cuda compilation tools, release 11.8, V11.8.89 +Build cuda_11.8.r11.8/compiler.31833905_0 + +Compiling program ... + +Running program ... + + - Running postprocess ... +GPU Device ID: 0 +GPU Name: Tesla K80 +GPU compute capability: 3.7 +CUDA driver version: 11.4 +CUDA runtime version: 11.8 +Global memory: 11997020160 +Max clock rate: 823.500000 MHz +Total amount of shared memory per block: 49152 +Total number of registers available per block: 65536 +Warp size: 32 +Maximum number of threads per multiprocessor: 2048 +Maximum number of threads per block: 1024 +Max dimension size of a thread block X: 1024 +Max dimension size of a thread block Y: 1024 +Max dimension size of a thread block Z: 64 +Max dimension size of a grid size X: 2147483647 +Max dimension size of a grid size Y: 65535 +Max dimension size of a grid size Z: 65535 + + - running time of script "get,cuda-devices": 4.16 sec. + +``` + +## Install Python virtual environment + +```bash +cm run script "get sys-utils-cm" --quiet + +cm run script "install python-venv" --name=mlperf-cuda +``` + +If you want to install a specific version of Python, use the following command: +```bash +cm run script "install python-venv" --version=3.10.8 --name=mlperf-cuda +``` + +## Detect or install cuDNN + +```bash +cm run script "get cudnn" +``` + +If cuDNN is not detected on your system, you can download a TAR file +from the [Nvidia website](https://developer.nvidia.com/cudnn) and then use the same CM script +to install it as follows: +```bash +cm run script "get cudnn" --tar_file=<path to the downloaded TAR file> +``` + +We have tested this project with the following TAR file `cudnn-linux-x86_64-8.7.0.84_cuda11-archive.tar.xz`. + +## Detect or install TensorRT + +```bash +cm run script "get tensorrt" +``` +If TensorRT is not detected on your system, you can download a TAR file +from the [Nvidia website](https://developer.nvidia.com/tensorrt) and then use the same CM script +to install it as follows: +```bash +cm run script "get tensorrt" --tar_file=<path to the downloaded TAR file> +``` + +We have tested this project with the following TAR file `TensorRT-8.5.1.7.Linux.x86_64-gnu.cuda-11.8.cudnn8.6.tar.gz`. 
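These preparation steps can also be scripted through the CM Python API. Below is a minimal sketch (assuming the `cmind` package is installed, e.g. via `pip install cmind`); it follows the `return`/`error` dictionary convention visible in the run records earlier in this patch:

```python
import cmind

# Detect (or install) CUDA, cuDNN and TensorRT via the CM script automation,
# mirroring `cm run script "get cuda"` etc. from the command line.
for tags in ("get,cuda", "get,cudnn", "get,tensorrt"):
    r = cmind.access({"action": "run",
                      "automation": "script",
                      "tags": tags,
                      "out": "con"})
    if r["return"] > 0:
        # CM convention: a non-zero 'return' comes with an 'error' message
        raise RuntimeError(r.get("error", f"CM script '{tags}' failed"))
    print(f"{tags}: done")
```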
+ + +## Run MLPerf inference benchmark with BERT + +### Try ONNX runtime backend + +#### Do a test run to detect and record the system performance + +```bash +cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \ + --adr.python.name=mlperf-cuda --model=bert-99 --implementation=reference \ + --device=cuda --backend=onnxruntime --quiet +``` + +#### Do a full accuracy run for all the scenarios + +```bash +cm run script --tags=generate-run-cmds,inference,_accuracy-only,_all-scenarios \ + --adr.python.name=mlperf-cuda --model=bert-99 --device=cuda \ + --implementation=reference --backend=onnxruntime --quiet \ + --execution-mode=valid --results_dir=$HOME/inference_3.0_results +``` + +#### Do a full performance run for all the scenarios + +```bash +cm run script --tags=generate-run-cmds,inference,_performance-only,_all-scenarios \ + --adr.python.name=mlperf-cuda --model=bert-99 --device=cuda \ + --implementation=reference --backend=onnxruntime --quiet \ + --execution-mode=valid --results_dir=$HOME/inference_3.0_results +``` + +#### Populate the README files + +```bash +cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ + --adr.python.name=mlperf-cuda --model=bert-99 --device=cuda \ + --implementation=reference --backend=onnxruntime --quiet \ + --execution-mode=valid --results_dir=$HOME/inference_3.0_results +``` + +#### Generate MLPerf submission tree + +We should use the master branch of the MLCommons inference repo for the submission checker. +You can use the `--hw_notes_extra` option to add your name to the notes. + +```bash +cm run script --tags=generate,inference,submission \ + --results_dir=$HOME/inference_3.0_results/valid_results \ + --adr.python.name=mlperf-cuda \ + --device=cuda --submission_dir=$HOME/inference_submission_tree --clean \ + --run-checker --submitter=cTuning --adr.inference-src.version=master \ + --hw_notes_extra="Result taken by NAME" --quiet +``` + +#### Push the results to GitHub repo + +First, create a fork of [this GitHub repo with aggregated results](https://github.com/ctuning/mlperf_inference_submissions_v3.0). +Then run the following command after replacing `--repo_url` with your fork URL. + +```bash +cm run script --tags=push,github,mlperf,inference,submission \ + --submission_dir=$HOME/inference_submission_tree \ + --adr.python.name=mlperf-cuda \ + --repo_url=https://github.com/ctuning/mlperf_inference_submissions_v3.0 \ + --commit_message="Bert crowd-results added" +``` + +Create a PR to the [GitHub repo with aggregated results](https://github.com/ctuning/mlperf_inference_submissions_v3.0/). + + + +### Try PyTorch backend + +You can run the same commands with PyTorch by rerunning all of the above commands and replacing `--backend=onnxruntime` with `--backend=pytorch`. 
+
+For example,
+
+```bash
+cm run script --tags=generate-run-cmds,inference,_accuracy-only,_all-scenarios \
+ --adr.python.name=mlperf-cuda --model=bert-99 --device=cuda \
+ --implementation=reference --backend=pytorch --execution-mode=valid \
+ --results_dir=$HOME/inference_3.0_results --quiet
+```
+
+
+## Test composable ML benchmark with other models, data sets, frameworks and platforms
+
+* [GUI to prepare CM command line and run benchmark](https://cknowledge.org/mlperf-inference-gui)
+* [GUI to compare performance, accuracy, power and costs of ML/SW/HW combinations](https://cKnowledge.org/cm-gui-graph)
+
+
+# The next steps
+
+Please follow the [cTuning foundation](https://cTuning.org), [cKnowledge.org](https://cKnowledge.org)
+and [MLCommons](https://mlcommons.org).
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-3d-unet-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-3d-unet-submission.md
new file mode 100644
index 000000000..38f69a5d5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-3d-unet-submission.md
@@ -0,0 +1,59 @@
+## Setup
+Please follow the MLCommons CK [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) to install CM.
+Download the ck repo to get the CM script for MLPerf submission:
+
+```
+cm pull repo mlcommons@ck --checkout=681547519f4d9a8991d992d1300c90cfde06e9b9
+```
+
+## Run Commands
+
+3d-unet has two variants, `3d-unet-99` and `3d-unet-99.9`, where `99` and `99.9` specify the required accuracy constraint as a percentage of the accuracy of the reference floating-point model. Both models can be submitted under the edge as well as the datacenter category.
+
+Since 3d-unet is one of the slowest running models, we only run it using the Nvidia implementation, where the model is quantized and runs on the TensorRT backend on an Nvidia GPU.
+
+For `3d-unet-99.9` runs, simply replace `3d-unet-99` with `3d-unet-99.9`.
+
+### TensorRT backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=3d-unet-99 --implementation=nvidia-original --device=cuda --backend=tensorrt \
+--category=edge --division=open --quiet
+```
+* Use `--category=datacenter` to run datacenter scenarios
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+
+#### Do full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=3d-unet-99 --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power.
It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps` and `--singlestream_target_latency` can be used to override the determined performance numbers
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=3d-unet-99 --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (and compliance logs in the case of the closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, the [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate the accuracy logs, and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.0_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-bert-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-bert-submission.md
new file mode 100644
index 000000000..8aebb068f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-bert-submission.md
@@ -0,0 +1,80 @@
+## Setup
+Please follow the MLCommons CK [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) to install CM.
+Download the ck repo to get the CM script for MLPerf submission:
+
+```
+cm pull repo mlcommons@ck --checkout=681547519f4d9a8991d992d1300c90cfde06e9b9
+```
+
+## Run Commands
+
+Bert has two variants, `bert-99` and `bert-99.9`, where `99` and `99.9` specify the required accuracy constraint as a percentage of the accuracy of the reference floating-point model. The `bert-99.9` model is applicable only to a datacenter system.
+
+In the edge category, `bert-99` has the Offline and SingleStream scenarios; in the datacenter category, both `bert-99` and `bert-99.9` have the Offline and Server scenarios. The commands below assume an edge category system.
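+
+For example, combining these choices, a `bert-99.9` test run on a datacenter system would look roughly like this (a sketch assembled from the flags documented in the sections below; adjust the device and backend to your setup):
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=bert-99.9 --implementation=reference --device=cpu --backend=onnxruntime \
+--category=datacenter --division=open --quiet
+```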
+
+### Onnxruntime backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=bert-99 --implementation=reference --device=cpu --backend=onnxruntime \
+--category=edge --division=open --quiet
+```
+* Use `--device=cuda` to run the inference on an Nvidia GPU
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+* Use `--category=datacenter` to run datacenter scenarios
+
+#### Do full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps` and `--singlestream_target_latency` can be used to override the determined performance numbers
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (and compliance logs in the case of the closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, the [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate the accuracy logs, and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.0_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
+
+
+## Tensorflow backend
+
+The same commands as for `onnxruntime` should work after replacing `--backend=onnxruntime` with `--backend=tf`. For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_accuracy-only,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=tf --execution-mode=valid \
+--results_dir=$HOME/inference_3.0_results --quiet
+```
+
+## Pytorch backend
+
+The same commands as for `onnxruntime` should work after replacing `--backend=onnxruntime` with `--backend=pytorch`.
For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_accuracy-only,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=pytorch \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results --quiet
+```
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-resnet50-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-resnet50-submission.md
new file mode 100644
index 000000000..6d6ba275f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-resnet50-submission.md
@@ -0,0 +1,82 @@
+## Setup
+Please follow the MLCommons CK [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) to install CM.
+Download the ck repo to get the CM script for MLPerf submission:
+
+```
+cm pull repo mlcommons@ck --checkout=681547519f4d9a8991d992d1300c90cfde06e9b9
+```
+
+## Run Commands
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
+In the edge category, ResNet50 has the Offline, SingleStream and MultiStream scenarios; in the datacenter category, it has the Offline and Server scenarios. The commands below assume an edge category system.
+
+### Onnxruntime backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \
+--category=edge --division=open --quiet
+```
+* Use `--device=cuda` to run the inference on an Nvidia GPU
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+* Use `--category=datacenter` to run datacenter scenarios
+
+#### Do full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios --model=resnet50 \
+--device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps`, `--singlestream_target_latency` and `--multistream_target_latency` can be used to override the determined performance numbers
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=resnet50 --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (and compliance logs in the case of the closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1).
After the submission tree is generated, the [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate the accuracy logs, and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.0_results/valid_results \
+--submission_dir=$HOME/inference_submission_tree --clean \
+--run-checker --submitter=cTuning --adr.inference-src.version=master \
+--hw_notes_extra="Result taken by NAME" --quiet
+```
+
+
+## Tensorflow backend
+
+The same commands as for `onnxruntime` should work after replacing `--backend=onnxruntime` with `--backend=tf`. For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=resnet50 --device=cpu --implementation=reference --backend=tf \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+## TVM backend
+
+The same commands as for `onnxruntime` should work after replacing `--backend=onnxruntime` with `--backend=tvm-onnx` (only `--device=cpu` is currently supported for TVM). For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=resnet50 --device=cpu --implementation=reference --backend=tvm-onnx \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-retinanet-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-retinanet-submission.md
new file mode 100644
index 000000000..4eedba9f3
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-retinanet-submission.md
@@ -0,0 +1,67 @@
+## Setup
+Please follow the MLCommons CK [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) to install CM.
+Download the ck repo to get the CM script for MLPerf submission:
+
+```
+cm pull repo mlcommons@ck --checkout=681547519f4d9a8991d992d1300c90cfde06e9b9
+```
+
+## Run Commands
+
+
+### Onnxruntime backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=retinanet --implementation=reference --device=cpu --backend=onnxruntime \
+--category=edge --division=open --quiet
+```
+* Use `--device=cuda` to run the inference on an Nvidia GPU
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+* Use `--category=datacenter` to run datacenter scenarios
+
+#### Do full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=retinanet --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power.
It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps`, `--singlestream_target_latency` and `--multistream_target_latency` can be used to override the determined performance numbers
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=retinanet --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (and compliance logs in the case of the closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, the [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate the accuracy logs, and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.0_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
+
+
+## Pytorch backend
+
+The same commands as for `onnxruntime` should work after replacing `--backend=onnxruntime` with `--backend=pytorch`. For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=retinanet --device=cpu --implementation=reference --backend=pytorch \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-rnnt-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-rnnt-submission.md
new file mode 100644
index 000000000..d7191c808
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/generate-rnnt-submission.md
@@ -0,0 +1,53 @@
+## Setup
+Please follow the MLCommons CK [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) to install CM.
+Download the ck repo to get the CM script for MLPerf submission:
+
+```
+cm pull repo mlcommons@ck --checkout=681547519f4d9a8991d992d1300c90cfde06e9b9
+```
+
+## Run Commands
+
+### TensorRT backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=rnnt --implementation=nvidia-original --device=cuda --backend=tensorrt \
+--category=edge --division=open --quiet
+```
+* Use `--category=datacenter` to run datacenter scenarios
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+
+#### Do full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=rnnt --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps` and `--singlestream_target_latency` can be used to override the determined performance numbers
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=rnnt --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.0_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (and compliance logs in the case of the closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, the [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate the accuracy logs, and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.0_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-aws-instance.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-aws-instance.md
new file mode 100644
index 000000000..e1691c21a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-aws-instance.md
@@ -0,0 +1,48 @@
+The instructions below are for creating an AWS instance from the CLI. You can also create an instance via the web and set up CM on it.
+
+## Prerequisites
+
+1. AWS key, secret and token
+2.
`*.pem` ssh key file to be used to create the instance (the public key from it will be copied to the `$HOME/.ssh/authorized_keys` file in the created instance)
+
+## Run Commands
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
+### Update Access Details
+
+```
+cd $HOME/CM/repos/mlcommons@ck/cm-mlops/script/run-terraform/aws/
+cp credentials.example credentials.sh
+```
+Update `credentials.sh` with your AWS key, secret and token.
+
+### Create an AWS Instance
+
+
+```
+cm run script --tags=run,terraform,_m7g.xlarge,_storage_size.500,_ubuntu.2204,_us-west-2 \
+--cminit --key_file=$HOME/cmuser.pem
+```
+
+The above command will output the IP of the created instance, which will already have CM set up.
+
+The `_m7g.xlarge,_storage_size.500,_ubuntu.2204` variations can be changed to launch a different instance. Below are the variation combinations we used for MLPerf inference 3.0 submissions.
+
+* `_g4dn.xlarge`
+* `_a1.2xlarge,_storage_size.130,_ubuntu.2204`
+* `_c5.4xlarge,_storage_size.130,_ubuntu.2204`
+* `_m7g.2xlarge,_storage_size.500,_ubuntu.2204`
+* `_inf1.2xlarge,_storage_size.500,_amazon-linux-2-kernel.510`
+* `_t2.medium,_storage_size.200,_rhel.9`
+
+### Copy the needed files from the local machine
+
+Copy the imagenet dataset to the created instance. For example,
+
+```
+rsync -avz -e 'ssh -i $HOME/cmuser.pem' $HOME/imagenet-2012-val/ ubuntu@54.189.93.134:
+```
+To use the [nvidia-original implementation](https://github.com/mlcommons/ck/tree/main/cm-mlops/script/reproduce-mlperf-inference-nvidia), the cuDNN and TensorRT tar files must be downloaded locally from the Nvidia website and copied to the AWS instance in the same way as in the above command.
+
+Once all the required files are copied over, log in to the instance and follow the individual benchmark instructions from the README files given [here](./).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-gcp-instance.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-gcp-instance.md
new file mode 100644
index 000000000..6bd16556a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-gcp-instance.md
@@ -0,0 +1,35 @@
+The instructions below are for creating a Google Cloud instance from the CLI. You can also create an instance via the web and set up CM on it.
+
+## Prerequisites
+
+Please follow the authentication instructions given [here](https://github.com/ctuning/mlcommons-ck/blob/master/cm-mlops/script/run-terraform/README-about.md).
+
+
+## Run Commands
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
+
+### Create a GCP Instance
+
+
+```
+cm run script --tags=run,terraform,_gcp,_n1-highmem.4,_gcp_project.mlperf-inference-tests --cminit
+```
+
+The above command will output the IP of the created instance, which will already have CM set up.
+
+The `_n1-highmem.4` variation can be changed to launch a different instance.
Below are the variation combinations we used for MLPerf inference 3.0 submissions.
+
+* `_n1-standard.4`
+
+### Copy the needed files
+
+Copy the imagenet dataset to the created instance. For example,
+
+```
+rsync -avz -e 'ssh -i $HOME/cmuser.pem' $HOME/imagenet-2012-val/ ubuntu@54.189.93.134:
+```
+To use the [nvidia-original implementation](https://github.com/mlcommons/ck/tree/main/cm-mlops/script/reproduce-mlperf-inference-nvidia), the cuDNN and TensorRT tar files must be downloaded locally from the Nvidia website and copied to the GCP instance in the same way as in the above command.
+
+Once all the required files are copied over, log in to the instance and follow the individual benchmark instructions from the README files given [here](./).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-nvidia-jetson-orin.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-nvidia-jetson-orin.md
new file mode 100644
index 000000000..68db00ea0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-nvidia-jetson-orin.md
@@ -0,0 +1,53 @@
+## Setup
+We used an Nvidia Jetson AGX Orin developer kit with 32GB RAM and 64GB eMMC. We also connected a 500GB SSD disk via USB and used a Wi-Fi connection for internet connectivity.
+
+We used the out-of-the-box developer kit image, which was running Ubuntu 20.04 and JetPack 5.0.1 Developer Preview (L4T 34.1.1) with CUDA 11.4. We were also using the default 4k page size (Nvidia recommends 64k for MLPerf inference).
+
+[cuDNN 8.6.0](https://developer.nvidia.com/compute/cudnn/secure/8.6.0/local_installers/11.8/cudnn-local-repo-ubuntu2004-8.6.0.163_1.0-1_arm64.deb) and [TensorRT 8.5.2.2](https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/secure/8.5.3/local_repos/nv-tensorrt-local-repo-ubuntu2004-8.5.3-cuda-11.8_1.0-1_arm64.deb) were downloaded as Debian packages on a host machine, copied over to the Nvidia Jetson Orin and installed.
+
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset.
+
+### Copy the needed files from a host machine
+
+Copy the imagenet dataset to the device. For example,
+
+```
+rsync -avz $HOME/imagenet-2012-val/ user@192.168.0.27:
+```
+
+Log in to Orin and register the imagenet dataset as follows:
+```
+cm run script --tags=get,imagenet,dataset,_2012,_full --input=$HOME/imagenet-2012-val
+```
+
+Once all the required files are copied over, follow the individual benchmark instructions from the README files given [here](./). All the required dependencies should be resolved by CM.
+
+### Power Measurement Setup
+
+We measured power in the peak performance mode (MaxN), except for one SUT where the energy efficiency mode was changed to Max15. Our aim was to showcase the out-of-the-box performance of the Nvidia Jetson AGX Orin, including the power usage.
+
+## Reproducing the Nvidia Jetson AGX Orin Submission
+
+After our submission, we followed the instructions from Nvidia in the inference v3.0 repository and tried to reproduce their numbers. For MaxN mode we were able to match Nvidia's numbers using the same versions of CUDA, cuDNN and TensorRT, but outside of Docker.
For MaxQ mode, we could get the same performance as Nvidia, but our power usage was about 5W higher.
+
+### Performance results MaxN
+
+The table below compares our results under different settings with the Nvidia submission for MLPerf inference 3.0. We'll be updating our instructions for easier reproducibility of these numbers, including CM scripts for flashing the L4T image and rebuilding the kernel for a 64k page size.
+
+
+| Workload | Results | L4T | PAGESIZE | Power Mode | FAN Dynamic Speed control | Offline Accuracy | Offline Performance | SingleStream Accuracy | SingleStream Performance | MultiStream Accuracy | MultiStream Performance |
+| --------- | --------------------------------- | ----- | -------- | ---------- | ------------------------- | ---------------- | ------------------- | --------------------- | ------------------------ | -------------------- | ----------------------- |
+| ResNet50 | Nvidia Submitted (docker) | r35.3 | 64k | MaxN | active | 75.934 | 6438.1 | 76.032 | 0.633479 | 76.032 | 2.187731 |
+| ResNet50 | cTuning Submitted | r34.1.1 | 4k | MaxN | active | 75.934 | 4697 | 76.032 | 0.72 | 76.032 | 2.57 |
+| ResNet50 | MLCommons taskforce on reproducibility | r35.2.1 | 4k | MaxN | active | 75.85 | 6172 | 76.056 | 0.644 | 76.056 | 2.074 |
+| ResNet50 | MLCommons taskforce on reproducibility | r35.3 | 64k | MaxN | active | 75.85 | 6430 | 76.056 | 0.659 | 76.056 | 2.20 |
+| RetinaNet | Nvidia Submitted (docker) | r35.3 | x | MaxN | active | 37.372 | 92.4048 | 37.403 | 13.924457 | 37.519 | 104.680313 |
+| RetinaNet | MLCommons taskforce on reproducibility | r35.2.1 | 4k | MaxN | active | 37.346 | 80.0854 (no DLA) | 37.350 | 14.19 | 37.409 | 105.344828 |
+| RetinaNet | MLCommons taskforce on reproducibility | r35.3 | 64k | MaxN | active | 37.345 | 94.6886 | 37.340 | 14.073 | 37.488 | 103.8 |
+| BERT | Nvidia Submitted (docker) | r35.3 | x | MaxN | active | 90.552 | 544.243 | 90.344 | 5.635431 | NA | NA |
+| BERT | cTuning Submitted | r34.1.1 | 4k | MaxN | active | 90.552 | 449.96 | 90.344 | 7.8 | NA | NA |
+| BERT | MLCommons taskforce on reproducibility | r35.2.1 | 4k | MaxN | active | 90.562 | 527 (128 batchsize) | 90.311 | 6.636 | NA | NA |
+| BERT | MLCommons taskforce on reproducibility | r35.3 | 64k | MaxN | active | 90.552 | 539 | 90.344 | 6.31 | NA | NA |
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/README.md
new file mode 100644
index 000000000..6362f3eb6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/README.md
@@ -0,0 +1,83 @@
+### Introduction
+
+Our goal is to help the community benchmark and optimize various AI/ML applications
+across diverse software and hardware provided by volunteers, similar to SETI@home!
+
+Open-source [MLPerf inference benchmarks](https://arxiv.org/abs/1911.02549)
+were developed by a [consortium of 50+ companies and universities (MLCommons)](https://mlcommons.org)
+to enable trustworthy and reproducible comparison of AI/ML systems
+in terms of latency, throughput, power consumption, accuracy and other metrics
+across diverse software/hardware stacks from different vendors.
+
+However, running MLPerf inference benchmarks and submitting results [turned out to be a challenge](https://doi.org/10.5281/zenodo.8144274)
+even for experts and could easily take many weeks to prepare.
That's why [MLCommons](https://mlcommons.org),
+[cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+decided to develop an open-source, technology-agnostic
+and non-intrusive [Collective Mind automation language (CM)](https://github.com/mlcommons/ck)
+and [Collective Knowledge Playground (CK)](https://access.cknowledge.org/playground/?action=experiments)
+to help anyone run, reproduce, optimize and compare MLPerf inference benchmarks out-of-the-box
+across diverse software, hardware, models and data sets.
+
+You can read more about our vision, open-source technology and future plans
+in this [presentation](https://doi.org/10.5281/zenodo.8105339).
+
+
+
+### Advanced challenge
+
+We would like to ask volunteers to run various MLPerf inference benchmarks
+on diverse CPUs (Intel, AMD, Arm) and Nvidia GPUs, similar to SETI@home,
+across different frameworks (ONNX, PyTorch, TF, TFLite),
+either natively or in a cloud (AWS, Azure, GCP, Alibaba, Oracle, OVHcloud, ...),
+and submit results to MLPerf inference v3.1.
+
+However, since some benchmarks may take 1-2 days to run, we suggest starting in the following order (these links describe the CM commands to run the benchmarks and submit results):
+* [CPU: Reference implementation of Image Classification with ResNet50 (open and then closed division)](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/resnet50/README_reference.md)
+* [CPU: TFLite C++ implementation of Image classification with variations of MobileNets and EfficientNets (open division)](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/run-mlperf-inference-mobilenet-models/README-about.md)
+* [Nvidia GPU: Nvidia optimized implementation of Image Classification with ResNet50 (open and then closed division)](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/resnet50/README_nvidia.md)
+* [Nvidia GPU: Nvidia optimized implementation of Language processing with BERT large (open and then closed division)](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/bert/README_nvidia.md)
+* [Nvidia GPU: Reference implementation of Image Classification with ResNet50 (open and then closed division)](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/bert/README_nvidia.md)
+* [Nvidia GPU: Reference implementation of Language processing with BERT large (open and then closed division)](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/resnet50/README_reference.md)
+* [Nvidia GPU (24GB of memory min): Reference implementation of Language processing with GPT-J 6B (open)](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/gpt-j/README_reference.md)
+* [Nvidia GPU: Nvidia optimized implementation of all other models (open and closed division)](https://github.com/ctuning/mlcommons-ck/blob/master/docs/mlperf/inference/README.md#run-benchmarks-and-submit-results)
+
+Please read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to set up and run the above benchmarks using CM.
+
+You can register your participation for the [Collective Knowledge leaderboard](https://access.cKnowledge.org/playground/?action=contributors)
+using this [guide](https://github.com/mlcommons/ck/blob/master/platform/register.md).
+
+Please report any problems you encounter via [GitHub issues](https://github.com/mlcommons/ck/issues)
+to help the community
+improve the portability of the CM automation for MLPerf and other benchmarks and projects.
+
+Looking forward to your submissions and happy hacking!
+
+
+
+### Prizes
+
+* *All submitters will receive 1 point for submitting valid results for 1 complete benchmark on one system.*
+* *All submitters will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+
+### Organizers
+
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+
+### Status
+
+You can see the shared results in [this repository](https://github.com/ctuning/mlperf_inference_submissions_v3.1)
+with PRs from participants [here](https://github.com/ctuning/mlperf_inference_submissions_v3.1/pulls).
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in the [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at the official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/_cm.json
new file mode 100644
index 000000000..a30c26c92
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/_cm.json
@@ -0,0 +1,26 @@
+{
+  "alias": "optimize-mlperf-inference-v3.1-2023",
+  "automation_alias": "challenge",
+  "automation_uid": "3d84abd768f34e08",
+  "date_close": "20230817",
+  "date_open": "20230704",
+  "experiments": [],
+  "points": 1,
+  "sort": -10,
+  "tags": [
+    "modularize",
+    "optimize",
+    "reproduce",
+    "replicate",
+    "benchmark",
+    "automate",
+    "mlperf",
+    "mlperf-inference",
+    "mlperf-inference-v3.1",
+    "mlperf-inference-v3.1-2023",
+    "v3.1"
+  ],
+  "title": "Crowd-benchmark all MLPerf inference benchmarks similar to SETI@home (latency, throughput, power consumption, accuracy, costs)",
+  "trophies": true,
+  "uid": "3e971d8089014d1f"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-3d-unet-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-3d-unet-submission.md
new file mode 100644
index 000000000..9806c2264
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-3d-unet-submission.md
@@ -0,0 +1,67 @@
+## Setup
+
+Please follow this [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md)
+to install the MLCommons CM reproducibility and automation language in your native environment or Docker container.
+
+Then install the repository with CM automation scripts to run MLPerf benchmarks out-of-the-box
+across different software, hardware, models and data sets:
+
+
+```
+cm pull repo mlcommons@ck
+```
+
+Note that you can install a Python virtual environment via CM to avoid contaminating
+your local Python installation, as described [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/script/README-extra.md#using-python-virtual-environments).
+
+## Run Commands
+
+3d-unet has two variants, `3d-unet-99` and `3d-unet-99.9`, where `99` and `99.9` specify the required accuracy constraint as a percentage of the accuracy of the reference floating-point model. Both models can be submitted under the edge as well as the datacenter category.
+
+Since 3d-unet is one of the slowest running models, we only run it using the Nvidia implementation, where the model is quantized and runs on the TensorRT backend on an Nvidia GPU.
+
+For `3d-unet-99.9` runs, simply replace `3d-unet-99` with `3d-unet-99.9`.
+
+### TensorRT backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=3d-unet-99 --implementation=nvidia-original --device=cuda --backend=tensorrt \
+--category=edge --division=open --quiet
+```
+* Use `--category=datacenter` to run datacenter scenarios
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+
+#### Do full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=3d-unet-99 --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps` and `--singlestream_target_latency` can be used to override the determined performance numbers
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=3d-unet-99 --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (and compliance logs in the case of the closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, the [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate the accuracy logs, and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.1_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-bert-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-bert-submission.md
new file mode 100644
index 000000000..c43363c1e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-bert-submission.md
@@ -0,0 +1,113 @@
+## Setup
+
+Please follow this [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md)
+to install the MLCommons CM reproducibility and automation language in your native environment or Docker container.
+
+Then install the repository with CM automation scripts to run MLPerf benchmarks out-of-the-box
+across different software, hardware, models and data sets:
+
+
+```
+cm pull repo mlcommons@ck
+```
+
+Note that you can install a Python virtual environment via CM to avoid contaminating
+your local Python installation, as described [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/script/README-extra.md#using-python-virtual-environments).
+
+## Run Commands
+
+Bert has two variants, `bert-99` and `bert-99.9`, where `99` and `99.9` specify the required accuracy constraint as a percentage of the accuracy of the reference floating-point model. The `bert-99.9` model is applicable only to a datacenter system.
+
+In the edge category, `bert-99` has the Offline and SingleStream scenarios; in the datacenter category, both `bert-99` and `bert-99.9` have the Offline and Server scenarios. The commands below assume an edge category system.
+
+### Onnxruntime backend (Reference implementation)
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=bert-99 --implementation=reference --device=cpu --backend=onnxruntime \
+--category=edge --division=open --quiet
+```
+* Use `--device=cuda` to run the inference on an Nvidia GPU
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+* Use `--category=datacenter` to run datacenter scenarios
+
+#### Do full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs.
This requires a power analyzer, as described [here](https://github.com/ctuning/mlcommons-ck/blob/master/docs/tutorials/mlperf-inference-power-measurement.md)
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps` and `--singlestream_target_latency` can be used to override the determined performance numbers
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (and compliance logs in the case of the closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, the [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate the accuracy logs, and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.1_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
+
+
+## Tensorflow backend (Reference implementation)
+
+The same commands as for `onnxruntime` should work after replacing `--backend=onnxruntime` with `--backend=tf`. For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_accuracy-only,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=tf --execution-mode=valid \
+--results_dir=$HOME/inference_3.1_results --quiet
+```
+
+## Pytorch backend (Reference implementation)
+
+The same commands as for `onnxruntime` should work after replacing `--backend=onnxruntime` with `--backend=pytorch`.
For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_accuracy-only,_all-scenarios \
+--model=bert-99 --device=cpu --implementation=reference --backend=pytorch \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results --quiet
+```
+
+## TensorRT backend (Nvidia implementation)
+
+For the TensorRT backend we use the [Nvidia implementation](https://github.com/ctuning/mlcommons-ck/tree/master/cm-mlops/script/reproduce-mlperf-inference-nvidia) and not the [MLPerf inference reference implementation](https://github.com/ctuning/mlcommons-ck/tree/master/cm-mlops/script/app-mlperf-inference-reference), for the following reasons:
+* The TensorRT backend is not supported by default in the reference implementation
+* The reference implementation is mostly for fp32 models, and quantization is not supported by default
+* Nvidia has done some fantastic work in optimizing performance for the TensorRT backend
+
+To get set up, please follow the instructions [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/reproduce-mlperf-inference-nvidia/README-about.md) to download and install TensorRT and cuDNN, unless you already have them installed. This readme also details how to handle the configuration files which are automatically generated by the Nvidia implementation scripts. Once this is done, the following command will run all the modes and scenarios.
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=bert-99 --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs. This requires a power analyzer, as described [here](https://github.com/ctuning/mlcommons-ck/blob/master/docs/tutorials/mlperf-inference-power-measurement.md)
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps` and `--singlestream_target_latency` can be used to override the default performance numbers
+* Use `--category=datacenter` to run datacenter scenarios
+
+
+The TensorRT backend has an engine generation stage, which can be time consuming. For repeated runs, the `--adr.nvidia-harness.make_cmd=run_harness` option avoids this engine regeneration by reusing the previously generated engine.
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-resnet50-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-resnet50-submission.md
new file mode 100644
index 000000000..470930e37
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-resnet50-submission.md
@@ -0,0 +1,90 @@
+## Setup
+
+Please follow this [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md)
+to install the MLCommons CM reproducibility and automation language in your native environment or Docker container.
+
+Then install the repository with CM automation scripts to run MLPerf benchmarks out-of-the-box
+across different software, hardware, models and data sets:
+
+
+```
+cm pull repo mlcommons@ck
+```
+
+Note that you can install a Python virtual environment via CM to avoid contaminating
+your local Python installation, as described [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/script/README-extra.md#using-python-virtual-environments).
+
+## Run Commands
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
+In the edge category, ResNet50 has the Offline, SingleStream and MultiStream scenarios; in the datacenter category, it has the Offline and Server scenarios. The commands below assume an edge category system.
+
+### Onnxruntime backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \
+--category=edge --division=open --quiet
+```
+* Use `--device=cuda` to run the inference on an Nvidia GPU
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+* Use `--category=datacenter` to run datacenter scenarios
+
+#### Do full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios --model=resnet50 \
+--device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps`, `--singlestream_target_latency` and `--multistream_target_latency` can be used to override the determined performance numbers
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=resnet50 --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we are copying the performance and accuracy log files (and compliance logs in the case of the closed division) from the results directory to the submission tree following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, the [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate the accuracy logs, and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.1_results/valid_results \
+--submission_dir=$HOME/inference_submission_tree --clean \
+--run-checker --submitter=cTuning --adr.inference-src.version=master \
+--hw_notes_extra="Result taken by NAME" --quiet
+```
+
+
+## Tensorflow backend
+
+The same commands as for `onnxruntime` should work after replacing `--backend=onnxruntime` with `--backend=tf`. For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=resnet50 --device=cpu --implementation=reference --backend=tf \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+## TVM backend
+
+The same commands as for `onnxruntime` should work after replacing `--backend=onnxruntime` with `--backend=tvm-onnx` (only `--device=cpu` is currently supported for TVM). For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=resnet50 --device=cpu --implementation=reference --backend=tvm-onnx \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-retinanet-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-retinanet-submission.md
new file mode 100644
index 000000000..4420462cd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-retinanet-submission.md
@@ -0,0 +1,75 @@
+## Setup
+
+Please follow this [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md)
+to install the MLCommons CM reproducibility and automation language in your native environment or Docker container.
+
+Then install the repository with CM automation scripts to run MLPerf benchmarks out-of-the-box
+across different software, hardware, models and data sets:
+
+
+```
+cm pull repo mlcommons@ck
+```
+
+Note that you can install a Python virtual environment via CM to avoid contaminating
+your local Python installation, as described [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/script/README-extra.md#using-python-virtual-environments).
+
+## Run Commands
+
+
+### Onnxruntime backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=retinanet --implementation=reference --device=cpu --backend=onnxruntime \
+--category=edge --division=open --quiet
+```
+* Use `--device=cuda` to run the inference on an Nvidia GPU
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+* Use `--category=datacenter` to run datacenter scenarios
+
+#### Do full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=retinanet --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power.
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps`, `--singlestream_target_latency` and `--multistream_target_latency` can be used to override the determined performance numbers
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=retinanet --device=cpu --implementation=reference --backend=onnxruntime \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we copy the performance and accuracy log files (and compliance logs in the case of the closed division) from the results directory to the submission tree, following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, the [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate the accuracy logs, and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.1_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
+
+
+## PyTorch backend
+
+The same commands as for `onnxruntime` should work after replacing `--backend=onnxruntime` with `--backend=pytorch`. For example,
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=retinanet --device=cpu --implementation=reference --backend=pytorch \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-rnnt-submission.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-rnnt-submission.md
new file mode 100644
index 000000000..a6ca06921
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/generate-rnnt-submission.md
@@ -0,0 +1,61 @@
+## Setup
+
+Please follow this [installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md)
+to install the MLCommons CM reproducibility and automation language in your native environment or Docker container.
+
+Then install the repository with CM automation scripts to run MLPerf benchmarks out-of-the-box
+across different software, hardware, models and data sets:
+
+
+```
+cm pull repo mlcommons@ck
+```
+
+Note that you can install a Python virtual environment via CM to avoid contaminating
+your local Python installation, as described [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/script/README-extra.md#using-python-virtual-environments).
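+
+For instance, a minimal sketch (assuming the `install python-venv` CM script from the linked guide; the name `mlperf` is an arbitrary example):
+
+```
+cm run script "install python-venv" --name=mlperf
+```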
+
+## Run Commands
+
+### TensorRT backend
+
+#### Do a test run to detect and record the system performance
+
+```
+cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenarios \
+--model=rnnt --implementation=nvidia-original --device=cuda --backend=tensorrt \
+--category=edge --division=open --quiet
+```
+* Use `--category=datacenter` to run datacenter scenarios
+* Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode)
+
+#### Do full accuracy and performance runs for all the scenarios
+
+```
+cm run script --tags=generate-run-cmds,inference,_all-modes,_all-scenarios \
+--model=rnnt --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+* Use `--power=yes` for measuring power. It is ignored for accuracy and compliance runs
+* Use `--division=closed` to run all scenarios for the closed division including the compliance tests
+* `--offline_target_qps`, `--server_target_qps` and `--singlestream_target_latency` can be used to override the determined performance numbers
+
+#### Populate the README files
+```
+cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \
+--model=rnnt --device=cuda --implementation=nvidia-original --backend=tensorrt \
+--execution-mode=valid --results_dir=$HOME/inference_3.1_results \
+--category=edge --division=open --quiet
+```
+
+#### Generate actual submission tree
+
+Here, we copy the performance and accuracy log files (and compliance logs in the case of the closed division) from the results directory to the submission tree, following the [directory structure required by MLCommons Inference](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#inference-1). After the submission tree is generated, the [accuracy truncate script](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/truncate-mlperf-inference-accuracy-log) is called to truncate the accuracy logs, and then the [submission checker](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/run-mlperf-inference-submission-checker) is called to validate the generated submission tree.
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cm run script --tags=generate,inference,submission --results_dir=$HOME/inference_3.1_results/valid_results \
+--device=cpu --submission_dir=$HOME/inference_submission_tree --clean --run-checker --submitter=cTuning \
+--adr.inference-src.version=master --hw_notes_extra="Result taken by NAME" --quiet
+```
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-aws-instance.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-aws-instance.md
new file mode 100644
index 000000000..152c612aa
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-aws-instance.md
@@ -0,0 +1,50 @@
+## Setup AWS instance for MLPerf
+
+The instructions below are for creating an AWS instance from the CLI. You can also create an instance via the web console and set up CM on it.
+
+## Prerequisites
+
+1. AWS key, secret and token
+2. A `*.pem` ssh key file to be used to create the instance (its public key will be copied to the `$HOME/.ssh/authorized_keys` file in the created instance)
+
+## Run Commands
+
+We need the full imagenet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
+### Update Access Details
+
+```
+cd $HOME/CM/repos/mlcommons@ck/cm-mlops/script/run-terraform/aws/
+cp credentials.example credentials.sh
+```
+Update `credentials.sh` with your AWS key, secret and token.
+
+### Create an AWS Instance
+
+
+```
+cm run script --tags=run,terraform,_m7g.xlarge,_storage_size.500,_ubuntu.2204,_us-west-2 \
+--cminit --key_file=$HOME/cmuser.pem
+```
+
+The above command will output the IP of the created instance, which will already have CM set up.
+
+The `_m7g.xlarge,_storage_size.500,_ubuntu.2204` variations can be changed to launch a different instance. Below are the variation combinations we used for MLPerf inference 3.0 submissions.
+
+* `_g4dn.xlarge`
+* `_a1.2xlarge,_storage_size.130,_ubuntu.2204`
+* `_c5.4xlarge,_storage_size.130,_ubuntu.2204`
+* `_m7g.2xlarge,_storage_size.500,_ubuntu.2204`
+* `_inf1.2xlarge,_storage_size.500,_amazon-linux-2-kernel.510`
+* `_t2.medium,_storage_size.200,_rhel.9`
+
+### Copy the needed files from the local machine
+
+Copy the imagenet dataset to the created instance. For example,
+
+```
+rsync -avz -e 'ssh -i $HOME/cmuser.pem' $HOME/imagenet-2012-val/ ubuntu@54.189.93.134:
+```
+To use the [nvidia-original implementation](https://github.com/mlcommons/ck/tree/main/cm-mlops/script/reproduce-mlperf-inference-nvidia), the cuDNN and TensorRT tar files need to be downloaded locally from the Nvidia website and copied to the AWS instance in the same way.
+
+Once all the required files are copied over, log in to the instance and follow the individual benchmark instructions from the README files given [here](./).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-gcp-instance.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-gcp-instance.md
new file mode 100644
index 000000000..a3a0e457a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-gcp-instance.md
@@ -0,0 +1,37 @@
+## Setup GCP instance for MLPerf
+
+The instructions below are for creating a Google Cloud instance from the CLI. You can also create an instance via the web console and set up CM on it.
+
+## Prerequisites
+
+Please follow the authentication instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/run-terraform/README-about.md).
+
+
+## Run Commands
+
+We need the full imagenet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
+
+### Create a GCP Instance
+
+
+```
+cm run script --tags=run,terraform,_gcp,_n1-highmem.4,_gcp_project.mlperf-inference-tests --cminit
+```
+
+The above command will output the IP of the created instance, which will already have CM set up.
+
+The `_n1-highmem.4` variation can be changed to launch a different instance. Below are the variation combinations we used for MLPerf inference 3.0 submissions.
+
+* `_n1-standard.4`
+
+### Copy the needed files
+
+Copy the imagenet dataset to the created instance. For example,
+
+```
+rsync -avz -e 'ssh -i $HOME/cmuser.pem' $HOME/imagenet-2012-val/ ubuntu@54.189.93.134:
+```
+To use the [nvidia-original implementation](https://github.com/mlcommons/ck/tree/main/cm-mlops/script/reproduce-mlperf-inference-nvidia), the cuDNN and TensorRT tar files need to be downloaded locally from the Nvidia website and copied to the instance in the same way.
+
+Once all the required files are copied over, log in to the instance and follow the individual benchmark instructions from the README files given [here](./).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-nvidia-jetson-orin.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-nvidia-jetson-orin.md
new file mode 100644
index 000000000..08c0a8eeb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-2023/docs/setup-nvidia-jetson-orin.md
@@ -0,0 +1,54 @@
+## Setup
+
+We used an Nvidia Jetson AGX Orin developer kit with 32GB RAM and 64GB eMMC. We also connected a 500GB SSD disk via USB and used a WiFi connection for internet connectivity.
+
+We used the out-of-the-box developer kit image, which was running Ubuntu 20.04 and JetPack 5.0.1 Developer Preview (L4T 34.1.1) with CUDA 11.4. We were also using the default 4k page size (Nvidia recommends 64k for MLPerf inference).
+
+[cuDNN 8.6.0](https://developer.nvidia.com/compute/cudnn/secure/8.6.0/local_installers/11.8/cudnn-local-repo-ubuntu2004-8.6.0.163_1.0-1_arm64.deb) and [TensorRT 8.5.2.2](https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/secure/8.5.3/local_repos/nv-tensorrt-local-repo-ubuntu2004-8.5.3-cuda-11.8_1.0-1_arm64.deb) were downloaded as Debian packages on a host machine, copied over to the Nvidia Jetson Orin and installed.
+
+
+We need the full imagenet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset.
+
+### Copy the needed files from a host machine
+
+Copy the imagenet dataset to the device. For example,
+
+```
+rsync -avz $HOME/imagenet-2012-val/ user@192.168.0.27:
+```
+
+Log in to Orin and register the imagenet dataset as follows:
+```
+cm run script --tags=get,imagenet,dataset,_2012,_full --input=$HOME/imagenet-2012-val
+```
+
+Once all the required files are copied over, follow the individual benchmark instructions from the README files given [here](./). All the required dependencies should be resolved by CM.
+
+### Power Measurement Setup
+
+We measured power in the peak performance mode (MaxN), except for one SUT where the energy-efficiency mode was changed to Max15. Our aim was to showcase the out-of-the-box performance of the Nvidia Jetson AGX Orin, including its power usage.
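+
+For reference, power modes on Jetson devices are typically switched with the stock `nvpmodel` utility (an assumption about the JetPack tooling; the mode-to-number mapping depends on the device and JetPack release):
+
+```
+sudo nvpmodel -q     # query the current power mode
+sudo nvpmodel -m 0   # mode 0 typically corresponds to MaxN on the AGX Orin developer kit
+```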
+
+## Reproducing the Nvidia Jetson AGX Orin Submission
+
+After our submission, we followed the instructions from Nvidia in the inference v3.0 repository and tried to reproduce their numbers. For MaxN mode we were able to match Nvidia's numbers using the same versions of CUDA, cuDNN and TensorRT, but outside of Docker. For MaxQ mode, we could get the same performance as Nvidia, but our power usage was about 5W higher.
+
+### Performance results MaxN
+
+The table below compares our results under different settings with the Nvidia submission for MLPerf inference 3.0. We will be updating our instructions to make these numbers easier to reproduce, including CM scripts for flashing the L4T image and rebuilding the kernel for a 64k page size.
+
+
+| Workload | Results | L4T | PAGESIZE | Power Mode | FAN Dynamic Speed control | Offline Accuracy | Offline Performance | SingleStream Accuracy | SingleStream Performance | MultiStream Accuracy | MultiStream Performance |
+| --------- | --------------------------------- | ----- | -------- | ---------- | ------------------------- | ---------------- | ------------------- | --------------------- | ------------------------ | -------------------- | ----------------------- |
+| ResNet50 | Nvidia Submitted (docker) | r35.3 | 64k | MaxN | active | 75.934 | 6438.1 | 76.032 | 0.633479 | 76.032 | 2.187731 |
+| ResNet50 | cTuning Submitted | r34.1.1 | 4k | MaxN | active | 75.934 | 4697 | 76.032 | 0.72 | 76.032 | 2.57 |
+| ResNet50 | MLCommons taskforce on reproducibility | r35.2.1 | 4k | MaxN | active | 75.85 | 6172 | 76.056 | 0.644 | 76.056 | 2.074 |
+| ResNet50 | MLCommons taskforce on reproducibility | r35.3 | 64k | MaxN | active | 75.85 | 6430 | 76.056 | 0.659 | 76.056 | 2.20 |
+| RetinaNet | Nvidia Submitted (docker) | r35.3 | x | MaxN | active | 37.372 | 92.4048 | 37.403 | 13.924457 | 37.519 | 104.680313 |
+| RetinaNet | MLCommons taskforce on reproducibility | r35.2.1 | 4k | MaxN | active | 37.346 | 80.0854 (no DLA) | 37.350 | 14.19 | 37.409 | 105.344828 |
+| RetinaNet | MLCommons taskforce on reproducibility | r35.3 | 64k | MaxN | active | 37.345 | 94.6886 | 37.340 | 14.073 | 37.488 | 103.8 |
+| BERT | Nvidia Submitted (docker) | r35.3 | x | MaxN | active | 90.552 | 544.243 | 90.344 | 5.635431 | NA | NA |
+| BERT | cTuning Submitted | r34.1.1 | 4k | MaxN | active | 90.552 | 449.96 | 90.344 | 7.8 | NA | NA |
+| BERT | MLCommons taskforce on reproducibility | r35.2.1 | 4k | MaxN | active | 90.562 | 527 (128 batchsize) | 90.311 | 6.636 | NA | NA |
+| BERT | MLCommons taskforce on reproducibility | r35.3 | 64k | MaxN | active | 90.552 | 539 | 90.344 | 6.31 | NA | NA |
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/README.md
new file mode 100644
index 000000000..b72349ad5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/README.md
@@ -0,0 +1,31 @@
+### Challenge
+
+Develop a reference implementation of any MLPerf inference benchmark to run on Amazon Inferentia.
+Submit preliminary (unoptimized) benchmarking results to MLPerf inference v3.1 and beyond.
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+ + +### Prizes + +* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).* +* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*. + + + +### Organizers + +* [MLCommons](https://cKnowledge.org/mlcommons-taskforce) +* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation) +* [cKnowledge.org](https://www.linkedin.com/company/cknowledge) + +### Results + +All accepted results will be publicly available in the CM format with derived metrics +in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results), +in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments) +and at official [MLCommons website](https://mlcommons.org). diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/_cm.json new file mode 100644 index 000000000..66431963a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/_cm.json @@ -0,0 +1,27 @@ +{ + "alias": "optimize-mlperf-inference-v3.1-amazon-inferentia-2023", + "automation_alias": "challenge", + "automation_uid": "3d84abd768f34e08", + "date_close": "20230817", + "date_close_extension": true, + "date_open": "20230704", + "points":3, + "trophies":true, + "tags": [ + "modularize", + "optimize", + "reproduce", + "replicate", + "benchmark", + "automate", + "inferentia", + "mlperf-inference", + "mlperf-inference-inferentia", + "mlperf-inference-inferentia", + "mlperf-inference-inferentia-v3.1", + "mlperf-inference-inferentia-v3.1-2023", + "v3.1" + ], + "title": "Develop a reference implementation of any MLPerf inference benchmark to run on Amazon Inferentia and submit to MLPerf inference v3.1+", + "uid": "c8f2573320424e2a" +} diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/README.md new file mode 100644 index 000000000..c08847da6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/README.md @@ -0,0 +1,20 @@ +### Challenge + +Create any end-to-end AI application with web cam, speech recognition, chat bot, LLM +that uses any MLPerf model and CM automation. + +Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision. + +Looking forward to your submissions and happy hacking! 
+ +### Prizes + +* *All submitters will receive 1 point for submitting valid results for 1 complete benchmark on one system.* +* *All submitters will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).* + + +### Organizers + +* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) +* [cTuning foundation](https://cTuning.org) + diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/_cm.json new file mode 100644 index 000000000..23fb64d83 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-create-end-to-end-app/_cm.json @@ -0,0 +1,26 @@ +{ + "alias": "optimize-mlperf-inference-v3.1-create-end-to-end-app", + "automation_alias": "challenge", + "automation_uid": "3d84abd768f34e08", + "date_open": "20230704", + "date_close_extension": true, + "points":3, + "trophies":true, + "tags": [ + "modularize", + "optimize", + "reproduce", + "replicate", + "automate", + "benchmark", + "end-to-end-app", + "mlperf-inference", + "mlperf-inference-end-to-end-app", + "mlperf-inference-end-to-end-app", + "mlperf-inference-end-to-end-app-v3.1", + "mlperf-inference-end-to-end-app-v3.1-2023", + "v3.1" + ], + "title": "Generate end-to-end optimized AI apps (LLM, speech, etc) based on MLPerf inference results (with and without container)", + "uid": "96ca61a5aa914063" +} diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-deepsparse/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-deepsparse/README.md new file mode 100644 index 000000000..f0f8908d2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-deepsparse/README.md @@ -0,0 +1,31 @@ +### Challenge + +Prepare, optimize and submit benchmarking results to MLPerf inference v3.1 using +CM automation language with the DeepSparse library, any model and any platform. + +Check [this related challenge](https://access.cknowledge.org/playground/?action=challenges&name=3e971d8089014d1f) for more details. + +Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md) +to run reference implementations of MLPerf inference benchmarks +using the CM automation language and use them as a base for your developments. + +Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision. + +### Prizes + +* *All contributors will receive 1 point for submitting valid results for 1 complete benchmark on one system.* +* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).* +* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*. 
+
+### Organizers
+
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+* [cKnowledge Ltd](https://cKnowledge.org)
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-deepsparse/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-deepsparse/_cm.json
new file mode 100644
index 000000000..e1cc4f888
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-deepsparse/_cm.json
@@ -0,0 +1,28 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-deepsparse",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_close_extension": true,
+ "date_open": "20230704",
+ "experiments": [],
+ "points": 1,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "deepsparse",
+ "mlperf-inference",
+ "mlperf-inference-deepsparse",
+ "mlperf-inference-deepsparse",
+ "mlperf-inference-deepsparse-v3.1",
+ "mlperf-inference-deepsparse-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Run and optimize MLPerf inference v3.1 benchmarks with Neural Magic's DeepSparse library",
+ "trophies": true,
+ "uid": "c495863b08e74abc"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/README.md
new file mode 100644
index 000000000..94fad05b5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/README.md
@@ -0,0 +1,32 @@
+### Challenge
+
+Develop a reference implementation of any MLPerf inference benchmark to run on the latest publicly available Google TPU.
+Submit preliminary (unoptimized) benchmarking results to MLPerf inference v3.1 and beyond.
+
+Note that you can use either a GCP TPU or a Coral TPU USB Accelerator card.
+In the latter case, you can reuse and extend our CM-MLPerf script for MobileNets!
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+### Prizes
+
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+### Organizers
+
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/_cm.json
new file mode 100644
index 000000000..3d5aecc95
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/_cm.json
@@ -0,0 +1,27 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-google-tpu-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_open": "20230704",
+ "points":3,
+ "trophies":true,
+ "date_close_extension": true,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "tpu",
+ "mlperf-inference",
+ "mlperf-inference-tpu",
+ "mlperf-inference-tpu",
+ "mlperf-inference-tpu-v3.1",
+ "mlperf-inference-tpu-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Develop a reference implementation of any MLPerf inference benchmark to run on the latest publicly available Google TPU (GCP or Coral USB accelerator) and submit to MLPerf inference v3.1+",
+ "uid": "5975fd0e18cd4073"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-hugging-face-models-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-hugging-face-models-2023/README.md
new file mode 100644
index 000000000..014f83f7d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-hugging-face-models-2023/README.md
@@ -0,0 +1,52 @@
+### Introduction
+
+Open-source [MLPerf inference benchmarks](https://arxiv.org/abs/1911.02549)
+were developed by a [consortium of 50+ companies and universities (MLCommons)](https://mlcommons.org)
+to enable trustworthy and reproducible comparison of AI/ML systems
+in terms of latency, throughput, power consumption, accuracy and other metrics
+across diverse software/hardware stacks from different vendors.
+
+However, it is difficult to customize and run MLPerf benchmarks with non-reference models.
+
+That's why the MLCommons Task Force on automation and reproducibility has developed
+a [Collective Mind automation language](https://doi.org/10.5281/zenodo.8144274)
+to modularize these benchmarks and make them easier to run with different models and data sets.
+
+
+### Challenge
+
+Implement a CM workflow to connect any Hugging Face model
+to MLPerf loadgen and run it with random inputs to obtain preliminary latency and throughput numbers
+without accuracy measurement.
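+
+As a starting point, a minimal sketch based on the `app-loadgen-generic-python` script listed in the resources below (the exact variation tags and flags should be checked against its README; the model path is a hypothetical placeholder):
+
+```
+cm run script "python app loadgen-generic _onnxruntime" \
+     --modelpath=<path to an ONNX model exported from Hugging Face>
+```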
+
+Resources:
+* [CM script to get ML model from Hugging Face zoo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-huggingface-zoo)
+* [CM script to convert Hugging Face model to ONNX](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/convert-ml-model-huggingface-to-onnx)
+* [CM script to build MLPerf loadgen](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-loadgen)
+* [CM script to run Python Loadgen with any ONNX model](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/app-loadgen-generic-python/README-extra.md)
+* [MLPerf BERT FP32 model available at Hugging Face](https://huggingface.co/ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1)
+
+Some results showcasing the CK workflow to benchmark Hugging Face models with MLPerf inference v3.0 (BERT):
+* https://access.cknowledge.org/playground/?action=experiments&name=2f1f70d8b2594149
+* https://access.cknowledge.org/playground/?action=experiments&name=mlperf-inference--v3.0--edge--open-power--language-processing--offline&result_uid=9d2594448bbb4b45
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+### Prizes
+
+* *All contributors will receive 1 point for submitting valid results for 1 complete benchmark on one system.*
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+
+### Organizers
+
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-hugging-face-models-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-hugging-face-models-2023/_cm.json
new file mode 100644
index 000000000..146505b55
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-hugging-face-models-2023/_cm.json
@@ -0,0 +1,27 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-hugging-face-models-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_close_extension": true,
+ "date_open": "20230704",
+ "points":3,
+ "trophies":true,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "huggingface",
+ "mlperf-inference",
+ "mlperf-inference-huggingface",
+ "mlperf-inference-huggingface",
+ "mlperf-inference-huggingface-v3.1",
+ "mlperf-inference-huggingface-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Implement CM automation to benchmark Hugging Face models using MLPerf loadgen",
+ "uid": "72b95d08a9e04698"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-intel-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-intel-2023/README.md
new file mode 100644
index 000000000..aec051473
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-intel-2023/README.md
@@ -0,0 +1,31 @@
+### Challenge
+
+Add the CM interface to run MLPerf inference benchmarks on Intel-based platforms.
+
+You can start by reproducing any past MLPerf inference submission from Intel and their partners
+and then add CM automation.
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+### Prizes
+
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+
+### Organizers
+
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-intel-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-intel-2023/_cm.json
new file mode 100644
index 000000000..c3d9adbe4
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-intel-2023/_cm.json
@@ -0,0 +1,26 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-intel-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20240104",
+ "date_open": "20230704",
+ "points": 2,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "intel",
+ "mlperf-inference",
+ "mlperf-inference-intel",
+ "mlperf-inference-intel",
+ "mlperf-inference-intel-v3.1",
+ "mlperf-inference-intel-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Add the CM interface to run MLPerf inference benchmarks on Intel-based platforms",
+ "trophies": true,
+ "uid": "1c1d5da6766f4afb"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/README.md
new file mode 100644
index 000000000..6aaf4e394
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/README.md
@@ -0,0 +1,34 @@
+### Challenge
+
+Add support to run a reference implementation of any MLPerf inference benchmark using
+the [Mojo language](https://github.com/modularml/mojo)
+from [Modular.ai](https://modular.ai).
+
+Prepare, optimize and submit benchmarking results to MLPerf inference v3.1 with Mojo.
+
+Check [this related challenge](https://access.cknowledge.org/playground/?action=challenges&name=3e971d8089014d1f) for more details.
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+### Prizes
+
+* *The first implementation will receive a cash prize from the organizers.*
+* *All contributors will receive 1 point for submitting valid results for 1 complete benchmark on one system.*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+### Organizers
+
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+* [cKnowledge Ltd](https://cKnowledge.org)
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/_cm.json
new file mode 100644
index 000000000..e805879de
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/_cm.json
@@ -0,0 +1,28 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-modular-mojo-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_close_extension": true,
+ "date_open": "20230704",
+ "experiments": [],
+ "points": 1,
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "mojo",
+ "mlperf-inference",
+ "mlperf-inference-mojo",
+ "mlperf-inference-mojo",
+ "mlperf-inference-mojo-v3.1",
+ "mlperf-inference-mojo-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Run reference implementations of MLPerf inference benchmarks using the Mojo language from Modular.ai",
+ "trophies": true,
+ "uid": "0a8a7bb5572447db"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/README.md
new file mode 100644
index 000000000..c16a9335a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/README.md
@@ -0,0 +1,33 @@
+### Challenge
+
+Add the CM interface to run MLPerf inference benchmarks on Qualcomm AI100-based platforms.
+
+You can start by reproducing any past submission from Dell, Lenovo or HPE
+and then add CM automation.
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+### Prizes
+
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+
+
+### Organizers
+
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/_cm.json new file mode 100644 index 000000000..07c626e25 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/_cm.json @@ -0,0 +1,26 @@ +{ + "alias": "optimize-mlperf-inference-v3.1-qualcomm-ai100-2023", + "automation_alias": "challenge", + "automation_uid": "3d84abd768f34e08", + "date_close": "20240104", + "date_open": "20230704", + "points":3, + "trophies":true, + "tags": [ + "modularize", + "optimize", + "reproduce", + "replicate", + "automate", + "benchmark", + "ai100", + "mlperf-inference", + "mlperf-inference-ai100", + "mlperf-inference-ai100", + "mlperf-inference-ai100-v3.1", + "mlperf-inference-ai100-v3.1-2023", + "v3.1" + ], + "title": "Add the CM interface to run MLPerf inference benchmarks on Qualcomm AI100-based platforms", + "uid": "09bd5f9e05ff46b1" +} diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-tvm-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-tvm-2023/README.md new file mode 100644 index 000000000..f8d9fbd71 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-tvm-2023/README.md @@ -0,0 +1,41 @@ +### Challenge + +Prepare, optimize and submit benchmarking results to MLPerf inference v3.1 using +CM automation language with Apache TVM, any model and any platform. + +Check [this related challenge](https://access.cknowledge.org/playground/?action=challenges&name=3e971d8089014d1f) for more details. + +Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md) +to run reference implementations of MLPerf inference benchmarks +using the CM automation language and use them as a base for your developments. + +Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision. + +### Prizes + +* *All contributors will receive 1 point for submitting valid results for 1 complete benchmark on one system.* +* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).* +* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*. + +### Organizers + +* [Deelvin](https://deelvin.com) +* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce) +* [cTuning foundation](https://cTuning.org) +* [cKnowledge Ltd](https://cKnowledge.org) + +### Status + +This challenge is under preparation. + +* https://github.com/mlcommons/ck/pull/693 +* https://github.com/mlcommons/ck/pull/700 +* https://github.com/mlcommons/ck/pull/701 + + +### Results + +All accepted results will be publicly available in the CM format with derived metrics +in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results), +in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments) +and at official [MLCommons website](https://mlcommons.org). 
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-tvm-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-tvm-2023/_cm.json
new file mode 100644
index 000000000..839fb6b86
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-tvm-2023/_cm.json
@@ -0,0 +1,28 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-tvm-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_close_extension": true,
+ "date_open": "20230704",
+ "points":1,
+ "trophies":true,
+ "experiments": [],
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "tvm",
+ "mlperf-inference",
+ "mlperf-inference-tvm",
+ "mlperf-inference-tvm",
+ "mlperf-inference-tvm-v3.1",
+ "mlperf-inference-tvm-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Run and optimize MLPerf inference v3.1 benchmarks with Apache TVM",
+ "uid": "29c416e245884746"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/README.md
new file mode 100644
index 000000000..0a5fe9aa2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/README.md
@@ -0,0 +1,31 @@
+### Challenge
+
+Add more models and hardware backends to the [universal C++ implementation of MLPerf inference benchmarks](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-cpp)
+being developed by the [MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md).
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+
+### Prizes
+
+* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).*
+* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*.
+
+
+
+### Organizers
+
+* [MLCommons](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+### Results
+
+All accepted results will be publicly available in the CM format with derived metrics
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
+in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
+and at official [MLCommons website](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/_cm.json new file mode 100644 index 000000000..e4e5cae10 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/_cm.json @@ -0,0 +1,27 @@ +{ + "alias": "optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023", + "automation_alias": "challenge", + "automation_uid": "3d84abd768f34e08", + "date_close": "20230817", + "date_close_extension": true, + "date_open": "20230704", + "points": 2, + "tags": [ + "modularize", + "optimize", + "reproduce", + "replicate", + "automate", + "benchmark", + "cpp", + "mlperf-inference", + "mlperf-inference-cpp", + "mlperf-inference-cpp", + "mlperf-inference-cpp-v3.1", + "mlperf-inference-cpp-v3.1-2023", + "v3.1" + ], + "title": "Add more models and hardware backends to the universal C++ implementation of MLPerf inference benchmarks from MLCommons", + "trophies": true, + "uid": "518420b0e6dd4fed" +} diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-windows-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-windows-2023/README.md new file mode 100644 index 000000000..d587f62f8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-windows-2023/README.md @@ -0,0 +1,36 @@ +### Challenge + +Prepare, optimize and submit any benchmarking results to MLPerf inference v3.1 using +CM automation language on Windows. + +Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md) +to run reference implementations of MLPerf inference benchmarks +using the CM automation language and use them as a base for your developments. + +Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision. + + +### Prizes + +* *All contributors will receive an official MLCommons Collective Knowledge contributor award (see [this example](https://ctuning.org/awards/ck-award-202307-zhu.pdf)).* +* *The top contributors will receive cash prizes from [MLCommons organizations](https://mlcommons.org) and [cKnowledge.org](https://www.linkedin.com/company/cknowledge)*. + + +### Organizers + +* [MLCommons](https://cKnowledge.org/mlcommons-taskforce) +* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation) +* [cKnowledge.org](https://www.linkedin.com/company/cknowledge) + + +### Status + +Open ticket: [GitHub](https://github.com/mlcommons/ck/issues/696) + + +### Results + +All accepted results will be publicly available in the CM format with derived metrics +in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results), +in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments) +and at official [MLCommons website](https://mlcommons.org). 
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-windows-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-windows-2023/_cm.json
new file mode 100644
index 000000000..1a55dcbe0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/optimize-mlperf-inference-v3.1-windows-2023/_cm.json
@@ -0,0 +1,28 @@
+{
+ "alias": "optimize-mlperf-inference-v3.1-windows-2023",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230817",
+ "date_close_extension": true,
+ "date_open": "20230704",
+ "points":2,
+ "trophies":true,
+ "experiments": [],
+ "tags": [
+ "modularize",
+ "optimize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "windows",
+ "mlperf-inference",
+ "mlperf-inference-windows",
+ "mlperf-inference-windows",
+ "mlperf-inference-windows-v3.1",
+ "mlperf-inference-windows-v3.1-2023",
+ "v3.1"
+ ],
+ "title": "Run and optimize MLPerf inference v3.1 benchmarks on Windows",
+ "uid": "53e56d714c7649c7"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inf-v3.0-orin/README.md b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inf-v3.0-orin/README.md
new file mode 100644
index 000000000..54dd4feeb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inf-v3.0-orin/README.md
@@ -0,0 +1,16 @@
+### Challenge
+
+Reproduce MLPerf inference v3.0 benchmark results for Nvidia Jetson Orin
+(performance, accuracy, power) and automate it using the
+[MLCommons CK framework](https://github.com/mlcommons/ck).
+
+### Organizers
+
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+* [cKnowledge](https://cKnowledge.org)
+
+### Status
+
+Finished. Preliminary results are available [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/challenge/optimize-mlperf-inference-v3.0-2023/docs/setup-nvidia-jetson-orin.md).
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inf-v3.0-orin/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inf-v3.0-orin/_cm.json
new file mode 100644
index 000000000..aff0fdba0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inf-v3.0-orin/_cm.json
@@ -0,0 +1,23 @@
+{
+ "alias": "repro-mlperf-inf-v3.0-orin",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20230406",
+ "date_open": "20230301",
+ "experiments": [
+ {
+ "tags": "mlperf-inference,v3.0"
+ }
+ ],
+ "_password_hash": "$2b$12$ionIRWe5Ft7jkn4y/7C6/eYoo6uBBMkGy/9SxwtKhaDRqZ1w2s3dO",
+ "tags": [
+ "reproduce",
+ "replicate",
+ "automate",
+ "orin",
+ "nvidia",
+ "mlperf-inference-v3.0-orin"
+ ],
+ "title": "Reproduce MLPerf inference v3.0 results for Nvidia Jetson Orin",
+ "uid": "6d377c1a1b224636"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-retinanet-scc2022/README.md b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-retinanet-scc2022/README.md
new file mode 100644
index 000000000..9917547c1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-retinanet-scc2022/README.md
@@ -0,0 +1,39 @@
+### Challenge
+
+Reproduce the MLPerf inference RetinaNet benchmark during the Student Cluster Competition at SuperComputing'22
+using the following [CM tutorial](https://github.com/mlcommons/ck/blob/master/docs/tutorials/sc22-scc-mlperf.md).
+
+### Organizers
+
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+* [OctoML](https://octoml.ai)
+
+### Status
+
+This challenge has been successfully completed.
+
+### Results
+
+Results from 10 international student teams are available at:
+* [W&B dashboard 1 (during SCC'22)](https://wandb.ai/cmind/cm-mlperf-sc22-scc-retinanet-offline/table?workspace=user-gfursin)
+* [W&B dashboard 2 (after SCC'22)](https://wandb.ai/cmind/cm-mlperf-dse-testing/table?workspace=user-gfursin)
+
+
+### Acknowledgments
+
+We thank
+[Hai Ah Nam](https://www.nersc.gov/about/nersc-staff/advanced-technologies-group/hai-ah-nam),
+[Steve Leak](https://www.linkedin.com/in/steve-leak),
+[Vijay Janapa Reddi](https://scholar.harvard.edu/vijay-janapa-reddi/home),
+[Tom Jablin](https://scholar.google.com/citations?user=L_1FmIMAAAAJ&hl=en),
+[Ramesh N Chukka](https://www.linkedin.com/in/ramesh-chukka-74b5b21),
+[Peter Mattson](https://www.linkedin.com/in/peter-mattson-33b8863/),
+[David Kanter](https://www.linkedin.com/in/kanterd),
+[Pablo Gonzalez Mesa](https://www.linkedin.com/in/pablo-gonzalez-mesa-952ab2207),
+[Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189),
+[Thomas Schmid](https://www.linkedin.com/in/tschmid)
+and [Gaurav Verma](https://www.linkedin.com/in/grverma)
+for their suggestions and contributions.
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-retinanet-scc2022/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-retinanet-scc2022/_cm.json
new file mode 100644
index 000000000..68352f9c3
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-retinanet-scc2022/_cm.json
@@ -0,0 +1,20 @@
+{
+ "alias": "repro-mlperf-inference-retinanet-scc2022",
+ "automation_alias": "challenge",
+ "automation_uid": "3d84abd768f34e08",
+ "date_close": "20221201",
+ "date_open": "20221101",
+ "tags": [
+ "modularize",
+ "reproduce",
+ "replicate",
+ "automate",
+ "benchmark",
+ "mlperf",
+ "mlperf-inference",
+ "mlperf-inference-scc",
+ "mlperf-inference-scc-2022"
+ ],
+ "title": "Automate MLPerf RetinaNet benchmark at the Student Cluster Competition at SuperComputing'22 using CM",
+ "uid": "e71fa8b396874e68"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.0-2024/README.md b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.0-2024/README.md
new file mode 100644
index 000000000..af23eb120
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.0-2024/README.md
@@ -0,0 +1,3 @@
+[MLCommons](https://mlcommons.org), the [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https://cKnowledge.org)
+are preparing a unified interface to reproduce results from the MLPerf inference benchmark submission v4.0.
+Please feel free to join the testing phase using [GitHub issues](https://github.com/mlcommons/ck/issues)!
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.0-2024/_cm.yaml b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.0-2024/_cm.yaml new file mode 100644 index 000000000..01bcfd52a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.0-2024/_cm.yaml @@ -0,0 +1,25 @@ +alias: repro-mlperf-inference-v4.0-2024 +uid: e6b8738383eb46d0 + +automation_alias: challenge +automation_uid: 3d84abd768f34e08 + +title: Reproduce and automate MLPerf inference benchmark results v4.0 from different vendors (Intel, Nvidia, Qualcomm, Google, NeuralMagic, ...) using CM + +date_open: '20240201' + +tags: +- modularize +- optimize +- reproduce +- replicate +- automate +- benchmark +- mlperf +- mlperf-inference +- mlperf-inference-v4.0 +- mlperf-inference-v4.0-2024 +- v4.0 + +experiments: +- tags: mlperf-inference,v4.0 diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.1-2024/README.md b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.1-2024/README.md new file mode 100644 index 000000000..1aacc2d59 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.1-2024/README.md @@ -0,0 +1,4 @@ +The [cTuning foundation](https://cTuning.org), [cKnowledge.org](https://cKnowledge.org) and [MLCommons](https://mlcommons.org) +are preparing an open reproducibility challenge to reproduce various results from the MLPerf inference benchmark v4.1 +using the MLCommons CM automation framework. Please stay tuned for more details! + diff --git a/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.1-2024/_cm.yaml b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.1-2024/_cm.yaml new file mode 100644 index 000000000..840d58318 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/repro-mlperf-inference-v4.1-2024/_cm.yaml @@ -0,0 +1,22 @@ +alias: repro-mlperf-inference-v4.1-2024 +uid: 2093f4d750144df4 + +automation_alias: challenge +automation_uid: 3d84abd768f34e08 + +title: 'Reproduce the upcoming MLPerf inference benchmark v4.1 results' + +date_open: '20240901' + +tags: +- modularize +- optimize +- reproduce +- replicate +- automate +- benchmark +- mlperf +- mlperf-inference +- mlperf-inference-v4.1 +- mlperf-inference-v4.1-2024 +- v4.1 diff --git a/cmx4mlops/cmx4mlops/repo/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/README.md new file mode 100644 index 000000000..0f59f59f0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/README.md @@ -0,0 +1,36 @@ +### Challenge + +Reproduce and automate [TinyMLPerf benchmarks](https://github.com/mlcommons/tiny). 
+
+### Organizers
+
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+* [cKnowledge Ltd](https://cKnowledge.org)
+
+### Status
+
+We have successfully reproduced the [TinyMLPerf v1.0 submission with microTVM on the STMicroelectronics NUCLEO-L4R5ZI board](https://github.com/mlcommons/tiny_results_v1.0/tree/main/closed/OctoML),
+automated it with the latest version of the [MLCommons CM automation language](https://github.com/mlcommons/ck/blob/master/docs/README.md),
+submitted the reproduced results to the TinyMLPerf v1.1 round,
+and added all past TinyMLPerf results to the [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-tiny)
+for further collaborative analysis and improvement.
+
+Please check our tutorial and reproducibility report:
+* [Automate TinyMLPerf benchmark](https://github.com/ctuning/mlcommons-ck/blob/master/docs/tutorials/automate-mlperf-tiny.md) - useful for all SW/HW stacks and submission rounds.
+* [Reproduce TinyMLPerf v1.0 submission](https://github.com/ctuning/mlcommons-ck/blob/master/docs/tutorials/reproduce-mlperf-tiny.md).
+
+TinyMLPerf v1.1 results will be published at the [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-tiny)
+in mid-June 2023.
+
+### Related discussions for the future
+
+* https://github.com/mlcommons/ck/pull/693
+* https://github.com/mlcommons/ck/pull/700
+* https://github.com/mlcommons/ck/pull/701
+* https://github.com/mlcommons/ck/issues/606
+
+### Results
+
+All results will be available in [this GitHub repo](https://github.com/ctuning/cm4mlperf-results)
+and can be visualized and compared using the [MLCommons Collective Knowledge Playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-tiny).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/_cm.json
new file mode 100644
index 000000000..4e9e24850
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/_cm.json
@@ -0,0 +1,23 @@
+{
+  "alias": "reproduce-and-automate-tinymlperf-v1.1-2023",
+  "automation_alias": "challenge",
+  "automation_uid": "3d84abd768f34e08",
+  "date_close": "20230519",
+  "date_open": "20230501",
+  "experiments": [],
+  "tags": [
+    "modularize",
+    "automate",
+    "reproduce",
+    "replicate",
+    "optimize",
+    "benchmark",
+    "tinymlperf",
+    "tinymlperf-inference",
+    "tinymlperf-inference-v3.0",
+    "tinymlperf-inference-v3.0-2023",
+    "v1.0"
+  ],
+  "title": "Reproduce and optimize TinyMLPerf inference v1.1 benchmarks",
+  "uid": "d98cd66e0e5641f7"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/reproduce-mlperf-training-v3.0-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-mlperf-training-v3.0-2023/README.md
new file mode 100644
index 000000000..a1f1ea22a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-mlperf-training-v3.0-2023/README.md
@@ -0,0 +1,17 @@
+### Challenge
+
+Prepare, optimize and reproduce MLPerf training v3.0 benchmarks
+using the [MLCommons CM (CK2) automation framework](https://github.com/mlcommons/ck).
+
+### Status
+
+We could not do a successful submission, mainly because the training scripts were not converging on a single GPU. We tried ResNet and BERT training.
The following CM scripts were added to run MLPerf training for BERT using the reference and NVIDIA implementations:
+
+1. [BERT Training using Nvidia code](https://github.com/ctuning/mlcommons-ck/tree/master/cm-mlops/script/app-mlperf-training-nvidia)
+2. [BERT Training using MLPerf Reference code](https://github.com/ctuning/mlcommons-ck/tree/master/cm-mlops/script/app-mlperf-training-reference)
+
+### Organizers
+
+* [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning foundation](https://cTuning.org)
+* [cKnowledge](https://cKnowledge.org)
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/reproduce-mlperf-training-v3.0-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-mlperf-training-v3.0-2023/_cm.json
new file mode 100644
index 000000000..d1e5eddea
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/reproduce-mlperf-training-v3.0-2023/_cm.json
@@ -0,0 +1,23 @@
+{
+  "alias": "reproduce-mlperf-training-v3.0-2023",
+  "automation_alias": "challenge",
+  "automation_uid": "3d84abd768f34e08",
+  "date_close": "20230519",
+  "date_open": "20230501",
+  "experiments": [],
+  "tags": [
+    "modularize",
+    "optimize",
+    "reproduce",
+    "replicate",
+    "automate",
+    "benchmark",
+    "mlperf",
+    "mlperf-training",
+    "mlperf-training-v3.0",
+    "mlperf-training-v3.0-2023",
+    "v3.0"
+  ],
+  "title": "Reproduce MLPerf training v3.0 benchmarks",
+  "uid": "1d26149c1cce4da3"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/README.md b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/README.md
new file mode 100644
index 000000000..bd734f789
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/README.md
@@ -0,0 +1,67 @@
+### Introduction
+
+The goal of this MLPerf@home challenge is to help the community find
+the most efficient CPU (Intel/AMD/Arm) for the BERT-99 model with the DeepSparse engine
+and different variations of MobileNets/EfficientNets with TFLite
+in terms of latency, throughput, accuracy, number of cores, frequency, memory size, cost, and other metrics.
+
+We would like to ask you to run a few [MLPerf inference benchmarks](https://arxiv.org/abs/1911.02549)
+with BERT and MobileNets/EfficientNets on one or more systems with different CPUs
+that you have access to: laptops, servers, cloud instances...
+
+You will be able to run benchmarks, collect all metrics and submit results in an automated way
+in a native environment or Docker container using the portable and technology-agnostic
+[MLCommons Collective Mind automation language (CM)](https://doi.org/10.5281/zenodo.8105339).
+
+Your name and benchmark submissions will be published in the official MLCommons inference v3.1 results
+on September 1, 2023 (submission deadline: August 4, 2023),
+will appear on the [official leaderboard](https://access.cknowledge.org/playground/?action=contributors),
+will be included in the prize draw, and will be presented in our upcoming ACM/HiPEAC events.
+
+Please report any problems you encounter via [GitHub issues](https://github.com/mlcommons/ck)
+to help the community improve CM automation workflows to run MLPerf benchmarks on any system with any software/hardware stack.
+
+Thank you in advance for helping the community find Pareto-efficient AI/ML Systems!
+
+### Minimal requirements
+
+* CPU: Any x86-64 or Arm64
+* OS:
+  * native: any Linux (tested on Ubuntu 22.04)
+  * Docker: any OS
+* Disk space:
+  * BERT-99: ~ 20GB
+  * Different variations of MobileNets/EfficientNets: ~ 140GB
+* Time to run:
+  * BERT-99: ~ 2 hours
+  * Different variations of MobileNets/EfficientNets: ~ 2 days
+
+### Instructions to run benchmarks and submit results
+
+You can run any or all of these benchmarks, depending on the available time:
+
+* [Automated Design Space Exploration of MobileNets/EfficientNets; TFLite MLPerf implementation; native environment or Docker](https://github.com/mlcommons/ck/blob/master/cm-mlops/challenge/run-mlperf%40home-v3.1-cpu/run-cpu-dse-mobilenets-efficientnets-tflite.md)
+* [BERT-99 model; DeepSparse MLPerf implementation; native environment](https://github.com/mlcommons/ck/blob/master/cm-mlops/challenge/run-mlperf%40home-v3.1-cpu/run-cpu-bert-99-deepsparse.md)
+
+### Results
+
+All accepted results with submitter names will be publicly available
+at the official [MLCommons website](https://mlcommons.org)
+and in the [Collective Knowledge explorer (MLCommons CK)](https://access.cknowledge.org/playground/?action=experiments)
+along with the reproducibility and automation report to help the community
+build efficient AI/ML systems.
+
+
+### Organizers
+
+* [MLCommons Task Force on Automation and Reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+### Advanced challenges
+
+If you feel that running these benchmarks was relatively easy,
+please try [more advanced challenges](https://access.cknowledge.org/playground/?action=challenges),
+read about our [plans and long-term vision](https://doi.org/10.5281/zenodo.8105339),
+check [CM documentation](https://github.com/mlcommons/ck/blob/master/docs/README.md)
+and run other [MLPerf benchmarks](https://github.com/mlcommons/ck/tree/master/docs/mlperf).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/_cm.json
new file mode 100644
index 000000000..88f4716cd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/_cm.json
@@ -0,0 +1,21 @@
+{
+  "alias": "run-mlperf@home-v3.1-cpu",
+  "automation_alias": "challenge",
+  "automation_uid": "3d84abd768f34e08",
+  "date_close": "20230817",
+  "date_open": "20230725",
+  "experiments": [],
+  "points": 2,
+  "sort": -20,
+  "tags": [
+    "run",
+    "mlperf",
+    "inference",
+    "v3.1",
+    "mlperf-inference-v3.1-simple-cpu"
+  ],
+  "title": "Work with the community to find the most efficient CPUs (Intel/AMD/Arm) for BERT and MobileNets/EfficientNets (latency, throughput, accuracy, number of cores, frequency, memory size, cost and other metrics)",
+  "skip": true,
+  "trophies": true,
+  "uid": "498f33f3dac647c1"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/run-cpu-bert-99-deepsparse.md b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/run-cpu-bert-99-deepsparse.md
new file mode 100644
index 000000000..b4266ffa9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/run-cpu-bert-99-deepsparse.md
@@ -0,0 +1,100 @@
+# Introduction
+
+This guide will help you automatically run the MLPerf inference benchmark v3.1 with the BERT-99 model and DeepSparse engine
+on any Linux-based system with an Intel, AMD or Arm CPU.
+
+This benchmark is automated by the MLCommons CM language and you should be able to submit official MLPerf v3.1 inference results
+for the offline scenario in the open division and edge category.
+
+It will require ~20GB of disk space and can take ~2 hours to run on 1 system.
+
+
+
+## Install CM automation language
+
+Install the [MLCommons CM automation language](https://doi.org/10.5281/zenodo.8105339) as described in this [guide](../../../docs/installation.md).
+It is a small Python library with `cm` and `cmr` command line front-ends and minimal dependencies including Python 3+, Git and wget.
+
+If you encounter problems, please report them at [GitHub](https://github.com/mlcommons/ck/issues).
+
+
+## Install repository with CM automations
+
+Install the MLCommons repository with [reusable and portable automation recipes (CM scripts)](https://github.com/mlcommons/ck/tree/master/cm-mlops/script) via CM.
+These scripts are being developed and shared by the community and MLCommons under the Apache 2.0 license
+to enable portable, modular, and technology-agnostic benchmarks and applications
+that can automatically run with any software, hardware, models and data sets.
+
+```bash
+cm pull repo mlcommons@ck
+```
+
+You can run it again at any time to pick up the latest updates.
+
+Note that CM will store all such repositories and downloaded/installed data sets, models and tools
+in your `$HOME/CM` directory.
+
+Since MLPerf benchmarks require lots of space (sometimes hundreds of gigabytes),
+you can change the above location to some large scratch disk using the `CM_REPOS`
+environment variable as follows:
+
+```bash
+export CM_REPOS={new path to CM repositories and data}
+echo "CM_REPOS=${CM_REPOS}" >> $HOME/.bashrc
+cm pull repo mlcommons@ck
+```
+
+
+
+## Setup virtual environment
+
+We suggest you set up a Python virtual environment via CM to avoid contaminating your existing Python installation:
+
+```bash
+cm run script "install python-venv" --name=mlperf --version_min=3.8
+export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf"
+```
+
+CM will install a new Python virtual environment in the CM cache and will install all Python dependencies there:
+```bash
+cm show cache --tags=python-venv
+```
+
+Note that CM downloads and/or installs models, data sets, packages, libraries and tools in this cache.
+
+You can clean it at any time and start from scratch using the following command:
+```bash
+cm rm cache -f
+```
+
+Alternatively, you can remove specific entries using tags:
+```bash
+cm show cache
+cm rm cache --tags=tag1,tag2,...
+```
+
+
+
+
+### Do a test run to detect and record the system performance
+
+```bash
+cm run script --tags=generate-run-cmds,inference,_find-performance \
+--model=bert-99 --implementation=reference --device=cpu --backend=deepsparse \
+--category=edge --division=open --quiet --scenario=Offline
+```

+### Do full accuracy and performance run
+
+```bash
+cm run script --tags=generate-run-cmds,inference,_submission --model=bert-99 \
+--device=cpu --implementation=reference --backend=deepsparse \
+--execution-mode=valid --results_dir=$HOME/results_dir \
+--category=edge --division=open --quiet --scenario=Offline
+```
+
+### Generate and upload MLPerf submission
+
+Follow [this guide](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/Submission.md) to generate the submission tree and upload your results.
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/run-cpu-dse-mobilenets-efficientnets-tflite.md b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/run-cpu-dse-mobilenets-efficientnets-tflite.md
new file mode 100644
index 000000000..f41b1b463
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-cpu/run-cpu-dse-mobilenets-efficientnets-tflite.md
@@ -0,0 +1,77 @@
+# Introduction
+
+This guide will help you automatically run the MLPerf inference benchmark v3.1 with multiple variations of MobileNets and EfficientNets
+and TFLite on any Linux-based system with an Intel, AMD or Arm CPU.
+
+This benchmark is automated by the MLCommons CM language and you should be able to submit official MLPerf v3.1 inference results
+for the singlestream scenario in the open division and edge category.
+
+It will require ~140GB of disk space and can take ~2 days to run on 1 system, producing 243 MLPerf results
+during automatic design space exploration to trade off accuracy vs. performance.
+
+
+
+## Install CM automation language
+
+Install the [MLCommons CM automation language](https://doi.org/10.5281/zenodo.8105339) as described in this [guide](../../../docs/installation.md).
+It is a small Python library with `cm` and `cmr` command line front-ends and minimal dependencies including Python 3+, Git and wget.
+
+If you encounter problems, please report them at [GitHub](https://github.com/mlcommons/ck/issues).
+
+
+## Install repository with CM automations
+
+Install the MLCommons repository with [reusable and portable automation recipes (CM scripts)](https://github.com/mlcommons/ck/tree/master/cm-mlops/script) via CM.
+These scripts are being developed and shared by the community and MLCommons under the Apache 2.0 license
+to enable portable, modular, and technology-agnostic benchmarks and applications
+that can automatically run with any software, hardware, models and data sets.
+
+```bash
+cm pull repo mlcommons@ck
+```
+
+You can run it again at any time to pick up the latest updates.
+
+Note that CM will store all such repositories and downloaded/installed data sets, models and tools
+in your `$HOME/CM` directory.
+
+Since MLPerf benchmarks require lots of space (sometimes hundreds of gigabytes),
+you can change the above location to some large scratch disk using the `CM_REPOS`
+environment variable as follows:
+
+```bash
+export CM_REPOS={new path to CM repositories and data}
+echo "CM_REPOS=${CM_REPOS}" >> $HOME/.bashrc
+cm pull repo mlcommons@ck
+```
+
+
+
+## Setup virtual environment
+
+We suggest you set up a Python virtual environment via CM to avoid contaminating your existing Python installation:
+
+```bash
+cm run script "install python-venv" --name=mlperf --version_min=3.8
+export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf"
+```
+
+CM will install a new Python virtual environment in the CM cache and will install all Python dependencies there:
+```bash
+cm show cache --tags=python-venv
+```
+
+Note that CM downloads and/or installs models, data sets, packages, libraries and tools in this cache.
+
+You can clean it at any time and start from scratch using the following command:
+```bash
+cm rm cache -f
+```
+
+Alternatively, you can remove specific entries using tags:
+```bash
+cm show cache
+cm rm cache --tags=tag1,tag2,...
+```
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/README.md b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/README.md
new file mode 100644
index 000000000..b6482d383
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/README.md
@@ -0,0 +1,65 @@
+### Introduction
+
+The goal of this MLPerf@home challenge is to help the community find
+the most efficient Nvidia GPUs for the GPT-J 6B and BERT-99 models in terms of
+latency, throughput, accuracy, number of cores, frequency, memory size, cost, and other metrics.
+
+We would like to ask you to run a few [MLPerf inference benchmarks](https://arxiv.org/abs/1911.02549)
+with GPT-J and BERT-99 models on one or more systems with different Nvidia GPUs
+that you have access to: laptops, servers, cloud instances...
+
+You will be able to run benchmarks, collect all metrics and submit results in an automated way
+in a native environment or Docker container using the portable and technology-agnostic
+[MLCommons Collective Mind automation language (CM)](https://doi.org/10.5281/zenodo.8105339).
+
+Your name and benchmark submissions will be published in the official MLCommons inference v3.1 results
+on September 1, 2023 (**submission deadline: August 17, 2023**),
+will appear on the [official leaderboard](https://access.cknowledge.org/playground/?action=contributors),
+will be included in the prize draw, and will be presented in our upcoming ACM/HiPEAC events.
+
+Please report any problems you encounter via [GitHub issues](https://github.com/mlcommons/ck)
+to help the community improve CM automation workflows to run MLPerf benchmarks on any system with any software/hardware stack.
+
+Thank you in advance for helping the community find Pareto-efficient AI/ML Systems!
+
+### Minimal requirements
+
+* GPU: Nvidia
+* GPU memory:
+  * GPT-J 6B: min 24GB
+  * BERT-99: min 8..16GB
+* OS:
+  * native: any Linux (tested on Ubuntu 22.04)
+  * Docker: any OS + any Linux (tested on Ubuntu 22.04)
+* Disk space: ~30GB per model/data set
+* Time to run:
+  * GPT-J 6B: ~ 1 day
+  * BERT-99: ~ 2 hours
+
+### Instructions to run benchmarks and submit results
+
+* [GPT-J 6B model (24GB min GPU memory); PyTorch+CUDA; native environment](https://github.com/mlcommons/ck/blob/master/cm-mlops/challenge/run-mlperf%40home-v3.1-gpu/run-nvidia-gpu-gpt-j-6b-ref-pytorch.md)
+* [BERT-99 model (8GB min GPU memory); TensorRT; Docker](https://github.com/mlcommons/ck/blob/master/cm-mlops/challenge/run-mlperf%40home-v3.1-gpu/run-nvidia-gpu-bert-99-nvidia-docker-tensorrt.md)
+
+### Results
+
+All accepted results with submitter names will be publicly available
+at the official [MLCommons website](https://mlcommons.org)
+and in the [Collective Knowledge explorer (MLCommons CK)](https://access.cknowledge.org/playground/?action=experiments)
+along with the reproducibility and automation report to help the community
+build efficient AI/ML systems.
+
+### Organizers
+
+* [MLCommons Task Force on Automation and Reproducibility](https://cKnowledge.org/mlcommons-taskforce)
+* [cTuning.org](https://www.linkedin.com/company/ctuning-foundation)
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
+
+### Advanced challenges
+
+If you feel that running these benchmarks was relatively easy,
+please try [more advanced challenges](https://access.cknowledge.org/playground/?action=challenges),
+read about our [plans and long-term vision](https://doi.org/10.5281/zenodo.8105339),
+check [CM documentation](https://github.com/mlcommons/ck/blob/master/docs/README.md)
+and run other [MLPerf benchmarks](https://github.com/mlcommons/ck/tree/master/docs/mlperf).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/_cm.json
new file mode 100644
index 000000000..af7deeada
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/_cm.json
@@ -0,0 +1,20 @@
+{
+  "alias": "run-mlperf@home-v3.1-gpu",
+  "automation_alias": "challenge",
+  "automation_uid": "3d84abd768f34e08",
+  "date_close": "20230817",
+  "date_open": "20230725",
+  "experiments": [],
+  "points": 2,
+  "sort": -30,
+  "tags": [
+    "run",
+    "mlperf",
+    "inference",
+    "v3.1",
+    "mlperf-inference-v3.1-simple-cpu"
+  ],
+  "title": "Work with the community to find the most efficient Nvidia GPUs for GPT-J 6B model and BERT (latency, throughput, accuracy, number of cores, frequency, memory size, cost, and other metrics)",
+  "trophies": true,
+  "uid": "54230c3b66564cef"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/run-nvidia-gpu-bert-99-nvidia-docker-tensorrt.md b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/run-nvidia-gpu-bert-99-nvidia-docker-tensorrt.md
new file mode 100644
index 000000000..f543c2362
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/run-nvidia-gpu-bert-99-nvidia-docker-tensorrt.md
@@ -0,0 +1,193 @@
+# Introduction
+
+This guide will help you run the Nvidia implementation of the MLPerf inference benchmark v3.1
+with the BERT-99 model and TensorRT on any Linux-based system with an Nvidia GPU (8..16GB min memory required)
+and Docker.
+
+This benchmark is semi-automated by the [MLCommons CM language](https://doi.org/10.5281/zenodo.8105339)
+and you should be able to submit official MLPerf v3.1 inference results
+for all scenarios in the closed division and edge category
+(**deadline to send us results for v3.1 submission: August 3, 2023**).
+
+
+It will require ~30GB of disk space and can take ~2 hours to run on 1 system.
+
+
+## Install CM automation language
+
+Install the [MLCommons CM automation language](https://doi.org/10.5281/zenodo.8105339) as described in this [guide](../../../docs/installation.md).
+It is a small Python library with `cm` and `cmr` command line front-ends and minimal dependencies including Python 3+, Git and wget.
+
+If you encounter problems, please report them at [GitHub](https://github.com/mlcommons/ck/issues).
+
+
+## Install repository with CM automations
+
+Install the MLCommons repository with [reusable and portable automation recipes (CM scripts)](https://github.com/mlcommons/ck/tree/master/cm-mlops/script) via CM.
+These scripts are being developed and shared by the community and MLCommons under the Apache 2.0 license
+to enable portable, modular, and technology-agnostic benchmarks and applications
+that can automatically run with any software, hardware, models and data sets.
+
+```bash
+cm pull repo mlcommons@ck
+```
+
+You can run it again at any time to pick up the latest updates.
+
+Note that CM will store all such repositories and downloaded/installed data sets, models and tools
+in your `$HOME/CM` directory.
+
+Since MLPerf benchmarks require lots of space (sometimes hundreds of gigabytes),
+you can change the above location to some large scratch disk using the `CM_REPOS`
+environment variable as follows:
+
+```bash
+export CM_REPOS={new path to CM repositories and data}
+echo "CM_REPOS=${CM_REPOS}" >> $HOME/.bashrc
+cm pull repo mlcommons@ck
+```
+
+
+
+## Setup CUDA and Docker container
+
+### Download CUDA 11.8
+
+Nvidia recommends the following version of CUDA to be used with their MLPerf inference implementation:
+
+```
+wget https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
+```
+
+However, you are very welcome to try another version!
+
+### Download cuDNN, TensorRT
+
+For x86 machines, please download the following TAR files:
+1. [cuDNN](https://developer.nvidia.com/cudnn) - note that Nvidia recommends `cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz`
+   but you are welcome to try another version
+2. [TensorRT](https://developer.nvidia.com/tensorrt) - note that Nvidia recommends `TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz`
+   but you can try another version
+
+
+### Set up Nvidia Docker container with MLPerf benchmarks
+
+1. [Install Docker](https://docs.docker.com/engine/install/) and [Nvidia container toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
+
+2. Give Docker permission to the current user:
+   ```
+   sudo usermod -aG docker $USER
+   ```
+   Log out and log back in.
+   Restart Docker if required and confirm that the Nvidia container toolkit is working by running:
+   ```
+   nvidia-ctk --version
+   ```
+
+3. Check if the Nvidia driver is working properly on the host:
+   ```
+   nvidia-smi
+   ```
+   If the above command produces any error, you'll need to install the Nvidia drivers on the host. You can do this via CM if you have sudo access:
+   ```
+   cmr "install cuda prebuilt _driver" --version=11.8.0
+   ```
+
+
+4. Build the Docker container and mount the paths from the host machine.
+
+   *You may need to change --cuda_run_file_path, --tensorrt_tar_file_path and --cudnn_tar_file_path if you downloaded other versions than recommended by Nvidia.*
+
+   *You may want to change the `scratch_path` location as it can take 100s of GBs.*
+
+   ```bash
+   cm docker script --tags=build,nvidia,inference,server \
+   --cuda_run_file_path=$HOME/cuda_11.8.0_520.61.05_linux.run \
+   --tensorrt_tar_file_path=$HOME/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
+   --cudnn_tar_file_path=$HOME/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
+   --scratch_path=$HOME/mlperf_scratch \
+   --docker_cm_repo=mlcommons@ck \
+   --results_dir=$HOME/results_dir \
+   --submission_dir=$HOME/submission_dir \
+   --adr.compiler.tags=gcc
+   ```
+
+5. At the end of the build you'll get a prompt - please enter your system name such as "aws_nvidia_t4"
+   (note that space, `-` and other special characters are not allowed),
+   and say `yes` to generating the configuration files.
+
+   ```
+   ============================================
+   => A system ID is a string containing only letters, numbers, and underscores
+   => that is used as the human-readable name of the system. It is also used as
+   => the system name when creating the measurements/ and results/ entries.
+
+   => This string should also start with a letter to be a valid Python enum member name.
+   => Specify the system ID to use for the current system: phoenix
+   => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix
+   => This script will generate Benchmark Configuration stubs for the detected system.
+   Continue? [y/n]: y
+   ```
+   Now you'll be inside the CM Nvidia docker container and can access Nvidia implementations of MLPerf inference benchmarks.
+
+6. Once the build is complete, you can run Nvidia implementations of MLPerf inference benchmarks
+   using the unified CM interface.
+
+   You can also save the container at this stage using [Docker commit](https://docs.docker.com/engine/reference/commandline/commit/)
+   so that it can be launched later without having to go through the previous steps.
+
+
+### Do a test run to detect and record the system performance
+
+```
+cmr "generate-run-cmds inference _find-performance _all-scenarios" \
+    --model=bert-99 \
+    --implementation=nvidia-original \
+    --device=cuda \
+    --backend=tensorrt \
+    --category=edge \
+    --division=closed \
+    --test_query_count=1000 \
+    --quiet
+```
+
+### Do full accuracy and performance runs
+
+```
+cmr "generate-run-cmds inference _submission _all-scenarios" \
+    --model=bert-99 \
+    --device=cuda \
+    --implementation=nvidia-original \
+    --backend=tensorrt \
+    --execution-mode=valid \
+    --results_dir=$HOME/results_dir \
+    --category=edge \
+    --division=closed \
+    --quiet
+```
+
+* `--offline_target_qps` and `--singlestream_target_latency` can be used to override the determined performance numbers
+
+### Populate the README files describing your submission
+
+```
+cmr "generate-run-cmds inference _populate-readme _all-scenarios" \
+    --model=bert-99 \
+    --device=cuda \
+    --implementation=nvidia-original \
+    --backend=tensorrt \
+    --execution-mode=valid \
+    --results_dir=$HOME/results_dir \
+    --category=edge \
+    --division=closed \
+    --quiet
+```
+
+### Generate and upload MLPerf submission
+
+Follow [this guide](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/Submission.md) to generate the submission tree and upload your results.
+
+
+## Questions? Suggestions?
+
+Please follow the [cTuning foundation](https://cTuning.org), [cKnowledge.org](https://cKnowledge.org)
+and [MLCommons](https://mlcommons.org).
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/run-nvidia-gpu-gpt-j-6b-ref-pytorch.md b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/run-nvidia-gpu-gpt-j-6b-ref-pytorch.md
new file mode 100644
index 000000000..39b1cc0de
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/run-mlperf@home-v3.1-gpu/run-nvidia-gpu-gpt-j-6b-ref-pytorch.md
@@ -0,0 +1,314 @@
+# Introduction
+
+This guide will help you run the reference implementation of the MLPerf inference benchmark v3.1
+with the GPT-J 6B model and PyTorch on any Linux-based system with an Nvidia GPU (24GB min memory required)
+using the [MLCommons CM automation language](https://doi.org/10.5281/zenodo.8105339).
+
+CM will help you obtain performance and accuracy numbers for the GPT-J 6B model on your system
+for the SingleStream scenario and submit them to the official MLPerf v3.1 inference benchmarking round
+in the open division and edge category
+(**deadline to send us results for v3.1 submission: August 3, 2023**).
+
+You can read more about scenarios, divisions and categories of MLPerf inference benchmarks
+in this [MLPerf inference benchmark paper](https://arxiv.org/abs/1911.02549) -
+our goal is to help the community compare performance, accuracy and other metrics of popular models across diverse systems
+in an automated, unified and reproducible way!
+
+This benchmark will require ~30GB of disk space and can take ~1 day to run on one system
+to have a valid MLPerf result.
+
+
+
+## Install CM automation language
+
+Install the [MLCommons CM automation language](https://github.com/mlcommons/ck) as described in this [guide](../../../docs/installation.md).
+It is a small Python library with `cm` and `cmr` command line front-ends and minimal dependencies including Python 3+, Git and wget.
+
+If you encounter problems, please report them at [GitHub](https://github.com/mlcommons/ck/issues).
+
+
+## Install repository with CM automations
+
+Install the MLCommons repository with [reusable and portable automation recipes (CM scripts)](https://github.com/mlcommons/ck/tree/master/cm-mlops/script) via CM.
+These scripts are being developed and shared by the community and MLCommons under the Apache 2.0 license
+to enable portable, modular, and technology-agnostic benchmarks and applications
+that can automatically run with any software, hardware, models and data sets.
+
+```bash
+cm pull repo mlcommons@ck
+```
+
+You can run it again at any time to pick up the latest updates.
+
+Note that CM will store all such repositories and downloaded/installed data sets, models, and tools
+in your `$HOME/CM` directory.
+
+Since MLPerf benchmarks require lots of space (sometimes hundreds of gigabytes),
+you can change the above location to some large scratch disk using the `CM_REPOS`
+environment variable as follows:
+
+```bash
+export CM_REPOS={new path to CM repositories and data}
+echo "CM_REPOS=${CM_REPOS}" >> $HOME/.bashrc
+cm pull repo mlcommons@ck
+```
+
+
+
+## Setup virtual environment
+
+We suggest you set up a Python virtual environment via CM to avoid contaminating your existing Python installation:
+
+```bash
+cm run script "install python-venv" --name=mlperf --version_min=3.8
+export CM_SCRIPT_EXTRA_CMD="--adr.python.name=mlperf"
+```
+
+CM will install a new Python virtual environment in the CM cache and will install all Python dependencies there:
+```bash
+cm show cache --tags=python-venv
+```
+
+Note that CM downloads and/or installs models, data sets, packages, libraries and tools in this cache.
+
+You can clean it at any time and start from scratch using the following command:
+```bash
+cm rm cache -f
+```
+
+Alternatively, you can remove specific entries using tags:
+```bash
+cm show cache
+cm rm cache --tags=tag1,tag2,...
+```
+
+
+## Do the performance run
+
+Now you can run the MLPerf inference benchmark to measure the performance of GPT-J via CM as follows
+(note that `cmr` is equivalent to `cm run script`):
+
+```bash
+cm run script --tags=generate-run-cmds,inference,_performance-only \
+    --division=open \
+    --category=edge \
+    --model=gptj-99 \
+    --precision=bfloat16 \
+    --device=cuda \
+    --implementation=reference \
+    --backend=pytorch \
+    --scenario=SingleStream \
+    --env.GPTJ_BEAM_SIZE=1 \
+    --execution-mode=valid \
+    --results_dir=$HOME/results_dir \
+    --quiet
+```
+
+Note that this command will need to automatically download the model (24GB)
+and the [CNN Daily Mail dataset (relatively small)](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-dataset-cnndm)!
+
+The benchmark run is expected to finish within 10-100 minutes depending on the performance of your GPU.
+
+At the end of the valid run, you should see [output](https://github.com/ctuning/mlperf_inference_submissions_v3.1/blob/main/open/cTuning/results/amd_zen4_workstation-reference-gpu-pytorch-v2.0.1-default_config/gptj-99/singlestream/performance/run_1/mlperf_log_summary.txt) similar to:
+
+```txt
+================================================
+MLPerf Results Summary
+================================================
+SUT name : PySUT
+Scenario : SingleStream
+Mode     : PerformanceOnly
+90th percentile latency (ns) : 4751920830
+Result is : VALID
+  Min duration satisfied : Yes
+  Min queries satisfied : Yes
+  Early stopping satisfied: Yes
+Early Stopping Result:
+ * Processed at least 64 queries (201).
+ * Would discard 9 highest latency queries.
+ * Early stopping 90th percentile estimate: 5387449249
+ * Not enough queries processed for 99th percentile
+ early stopping estimate (would need to process at
+ least 662 total queries).
+
+================================================
+Additional Stats
+================================================
+QPS w/ loadgen overhead         : 0.33
+QPS w/o loadgen overhead        : 0.33
+
+Min latency (ns)                : 881803157
+Max latency (ns)                : 5939081711
+Mean latency (ns)               : 3008773902
+50.00 percentile latency (ns)   : 2788885477
+90.00 percentile latency (ns)   : 4751920830
+95.00 percentile latency (ns)   : 5307244203
+97.00 percentile latency (ns)   : 5677375096
+99.00 percentile latency (ns)   : 5927209480
+99.90 percentile latency (ns)   : 5939081711
+
+================================================
+Test Parameters Used
+================================================
+samples_per_query : 1
+target_qps : 2000
+target_latency (ns): 0
+max_async_queries : 1
+min_duration (ms): 600000
+max_duration (ms): 620000
+min_query_count : 100
+max_query_count : 0
+qsl_rng_seed : 148687905518835231
+sample_index_rng_seed : 520418551913322573
+schedule_rng_seed : 811580660758947900
+accuracy_log_rng_seed : 0
+accuracy_log_probability : 0
+accuracy_log_sampling_target : 0
+print_timestamps : 0
+performance_issue_unique : 0
+performance_issue_same : 0
+performance_issue_same_index : 0
+performance_sample_count : 13368
+
+No warnings encountered during test.
+
+No errors encountered during test.
+```
+
+
+## Do the accuracy run
+
+```bash
+cm run script --tags=generate-run-cmds,inference,_accuracy-only \
+    --division=open \
+    --category=edge \
+    --model=gptj-99 \
+    --precision=bfloat16 \
+    --device=cuda \
+    --implementation=reference \
+    --backend=pytorch \
+    --scenario=SingleStream \
+    --env.GPTJ_BEAM_SIZE=1 \
+    --execution-mode=valid \
+    --results_dir=$HOME/results_dir \
+    --quiet
+```
+
+This accuracy run can take many hours (typically 12..46 hours). You can estimate it using the QPS (queries per second)
+from the previous performance run as follows:
+
+accuracy run time (in seconds) = dataset size / QPS = 13368 / QPS.
+
+For example, if your reported QPS is 0.1 (equivalent to 10000 ms latency), it will take 13368/0.1 ~ 37 hours.
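+
+For convenience, here is a minimal Python sketch of the same estimate (our own illustration, not part of the CM workflow); the dataset size of 13368 matches the `performance_sample_count` in the log above:
+
+```python
+# Estimate the accuracy-run time from the QPS reported by the performance run.
+
+DATASET_SIZE = 13368  # samples in the CNN Daily Mail dataset used by this benchmark
+
+def accuracy_run_hours(qps: float) -> float:
+    """Estimated accuracy-run time in hours for a measured QPS."""
+    seconds = DATASET_SIZE / qps
+    return seconds / 3600.0
+
+# Example: QPS of 0.1 (10000 ms latency) -> ~37 hours
+print(f"Estimated accuracy run: {accuracy_run_hours(0.1):.1f} hours")
+```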
+
+
+
+## Populate the MLPerf README files describing your submission
+
+Now you can use CM to automatically populate the README files mandated by MLPerf to describe your submission
+(we also show you a simpler syntax of `cmr` instead of `cm run script --tags=`):
+
+```bash
+cmr "generate-run-cmds inference _populate-readme" \
+    --division=open \
+    --category=edge \
+    --model=gptj-99 \
+    --precision=bfloat16 \
+    --device=cuda \
+    --implementation=reference \
+    --backend=pytorch \
+    --scenario=SingleStream \
+    --env.GPTJ_BEAM_SIZE=1 \
+    --execution-mode=valid \
+    --results_dir=$HOME/results_dir \
+    --quiet
+```
+
+
+## Generate MLPerf submission
+
+If your organization is not an official member of MLCommons, you can still participate in the official MLPerf inference community submission
+via the cTuning foundation (a founding member of MLCommons).
+
+You should update the following flags in the CM command below:
+* Use the `--hw_notes_extra` option to add your name to the submission such as `--hw_notes_extra="Result taken by NAME" `.
+* Use `--hw_name="My system name"` to give a meaningful system name describing your GPU.
+  Examples can be seen [here](https://github.com/mlcommons/inference_results_v3.0/tree/main/open/cTuning/systems).
+* Use `--submitter=` if your organization is an official MLCommons member and you would like to submit under your organization.
+
+You should use the master branch of the MLCommons inference repo for the submission checker:
+
+```bash
+cmr "generate inference submission" \
+    --clean \
+    --submitter=cTuning \
+    --results_dir=$HOME/results_dir/valid_results \
+    --submission_dir=$HOME/inference_submission_tree \
+    --preprocess_submission=yes \
+    --adr.compiler.tags=gcc \
+    --adr.inference-src.version=master \
+    --run-checker
+```
+
+## Push the results to GitHub repo
+
+1. Create a fork of [this cTuning repo with the community results](https://github.com/ctuning/mlperf_inference_submissions_v3.1).
+
+2. Run the following command after replacing `--repo_url` with your fork URL.
+
+   ```
+   cmr "push github mlperf inference submission" \
+      --submission_dir=$HOME/inference_submission_tree \
+      --repo_url=https://github.com/ctuning/mlperf_inference_submissions_v3.1/ \
+      --commit_message="GPTJ results on added by "
+   ```
+
+3. Create a PR to the [cTuning repo with the community results](https://github.com/ctuning/mlperf_inference_submissions_v3.1).
+
+
+
+
+
+## Additional performance optimization challenge for interested enthusiasts
+
+The MLPerf GPT-J inference benchmark is implemented in this [backend.py](https://github.com/mlcommons/inference/blob/master/language/gpt-j/backend.py).
+
+It is automatically installed and cached by CM. You can find it on your system using this command:
+```bash
+cd `cm find cache --tags=inference,src,_branch.master`/language/gpt-j
+ls backend.py
+```
+
+The original model is available at the [Hugging Face Zoo](https://huggingface.co/EleutherAI/gpt-j-6b). It was fine-tuned by Intel for this benchmark
+and is available at the MLCommons cloud. It is automatically downloaded by CM using [this script](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-ml-model-gptj/_cm.json).
+
+You can try to improve the performance (QPS) on this code or fine-tune the model and substitute the default one
+in [this line](https://github.com/mlcommons/inference/blob/master/language/gpt-j/backend.py#L27).
+
+Some examples of fine-tuning can be seen [here](https://betterprogramming.pub/fine-tuning-gpt-j-6b-on-google-colab-or-equivalent-desktop-or-server-gpu-b6dc849cb205).
+
+Any better performance or accuracy result will be very valuable to the community.
+
+After any modification, you can redo a quick performance run to see the performance difference.
+```bash
+cm run script --tags=generate-run-cmds,inference,_performance-only \
+    --division=open \
+    --category=edge \
+    --model=gptj-99 \
+    --precision=bfloat16 \
+    --device=cuda \
+    --implementation=reference \
+    --backend=pytorch \
+    --scenario=SingleStream \
+    --env.GPTJ_BEAM_SIZE=1 \
+    --execution-mode=valid \
+    --results_dir=$HOME/results_dir \
+    --quiet
+```
+
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/train-llm-for-cm-mlperf-2023/README.md b/cmx4mlops/cmx4mlops/repo/challenge/train-llm-for-cm-mlperf-2023/README.md
new file mode 100644
index 000000000..4e9f6cf17
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/train-llm-for-cm-mlperf-2023/README.md
@@ -0,0 +1,20 @@
+### Challenge
+
+Improve the prototype of our LLM-based assistant that suggests to users how to run MLPerf inference benchmarks
+using the MLCommons CM automation language: https://access.cknowledge.org/assistant .
+
+Read [this documentation](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/README.md)
+to run reference implementations of MLPerf inference benchmarks
+using the CM automation language and use them as a base for your developments.
+
+Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn more about our open-source project and long-term vision.
+
+
+### Prizes
+
+* *Get in touch with organizers for more info!*
+
+
+### Organizers
+
+* [cKnowledge.org](https://www.linkedin.com/company/cknowledge)
diff --git a/cmx4mlops/cmx4mlops/repo/challenge/train-llm-for-cm-mlperf-2023/_cm.json b/cmx4mlops/cmx4mlops/repo/challenge/train-llm-for-cm-mlperf-2023/_cm.json
new file mode 100644
index 000000000..ce6009db3
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/challenge/train-llm-for-cm-mlperf-2023/_cm.json
@@ -0,0 +1,21 @@
+{
+  "alias": "train-llm-for-cm-mlperf-2023",
+  "automation_alias": "challenge",
+  "automation_uid": "3d84abd768f34e08",
+  "date_close_extension": true,
+  "date_open": "20230704",
+  "experiments": [],
+  "points": 3,
+  "tags": [
+    "train",
+    "improve",
+    "llm",
+    "assistant",
+    "mlperf-llm",
+    "mlperf-llm-assistant",
+    "mlperf-assistant"
+  ],
+  "title": "Train and improve LLM to suggest users how to run MLPerf inference benchmarks using CM automation language",
+  "trophies": true,
+  "uid": "d37bf37a24c44ec3"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/docs/cm-yaml-guide.md b/cmx4mlops/cmx4mlops/repo/docs/cm-yaml-guide.md
new file mode 100644
index 000000000..2b0b1242b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/cm-yaml-guide.md
@@ -0,0 +1,46 @@
+This README provides a walkthrough of the `_cm.yaml` file.
+
+## Keys and datatypes
+
+1. **alias**: `string`
+2. **uid**: `string`
+3. **automation_alias**: `string`
+4. **automation_uid**: `string`
+5. **category**: `string`
+6. **developers**: `list of strings`
+7. **tags**: `list of strings`
+8. **default_env**: `dictionary` - Contains key-value pairs where values are `strings`
+9. **env**: `dictionary` - Contains key-value pairs where values are `strings`
+10. **input_mapping**: `dictionary` - Contains key-value pairs where values are `strings`
+11. **env_key_mapping**: `dictionary` - Contains key-value pairs where values are `strings`
+12. 
**new_env_keys**: `list of strings` +13. **new_state_keys**: `list of strings` +14. **deps**: `list of dictionaries` - Each dictionary can contain `tags` or other nested keys +15. **names**: `list of strings` +16. **enable_if_env**: `dictionary` - Contains key-value pairs where values are lists of `strings` +17. **skip_if_env**: `dictionary` - Contains key-value pairs where values are lists of `strings` +18. **prehook_deps**: `list of dictionaries` - Each dictionary may contain `names` and `tags` as lists +19. **posthook_deps**: `list of dictionaries` - Each dictionary may contain `tags` and other keys +20. **variation_groups_order**: `list of strings` +21. **variations**: `dictionary` - Each variation is a dictionary containing keys like `alias`, `default_variations`, `group`, etc. +22. **group**: `string` +23. **add_deps_recursive**: `dictionary` - Contains nested `tags` and other keys +24. **default_variations**: `dictionary` - Contains key-value pairs where values are `strings` +25. **docker**: `dictionary` - Contains keys specific to Docker configurations: + - **base_image**: `string` + - **image_name**: `string` + - **os**: `string` + - **os_version**: `string` + - **deps**: `list of dictionaries` - Each dictionary can include `tags` or other keys. + - **env**: `dictionary` - Contains key-value pairs where values are `strings` + - **interactive**: `boolean` + - **extra_run_args**: `string` + - **mounts**: `list of strings` - Specifies mount paths in the format `"source:destination"` + - **pre_run_cmds**: `list of strings` - Commands to run before the container starts + - **docker_input_mapping**: `dictionary` - Contains key-value pairs where values are strings, mapping input parameters to Docker environment variables + - **use_host_user_id**: `boolean` + - **use_host_group_id**: `boolean` + - **skip_run_cmd**: `string` + - **shm_size**: `string` + - **real_run**: `boolean` + - **all_gpus**: `string` diff --git a/cmx4mlops/cmx4mlops/repo/docs/getting-started.md b/cmx4mlops/cmx4mlops/repo/docs/getting-started.md new file mode 100644 index 000000000..baed31eea --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/getting-started.md @@ -0,0 +1,135 @@ + +# Getting Started with CM Script Automation + +## Running CM Scripts + +To execute a simple script in CM that captures OS details, use the following command: + +```bash +cm run script --tags=detect,os -j +``` + +This command gathers details about the system on which it's run, such as: + +```json +{ + "CM_HOST_OS_TYPE": "linux", + "CM_HOST_OS_BITS": "64", + "CM_HOST_OS_FLAVOR": "ubuntu", + "CM_HOST_OS_FLAVOR_LIKE": "debian", + "CM_HOST_OS_VERSION": "24.04", + "CM_HOST_OS_KERNEL_VERSION": "6.8.0-45-generic", + "CM_HOST_OS_GLIBC_VERSION": "2.39", + "CM_HOST_OS_MACHINE": "x86_64", + "CM_HOST_OS_PACKAGE_MANAGER": "apt", + "CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD": "DEBIAN_FRONTEND=noninteractive apt-get install -y", + "CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD": "apt-get update -y", + "+CM_HOST_OS_DEFAULT_LIBRARY_PATH": [ + "/usr/local/lib/x86_64-linux-gnu", + "/lib/x86_64-linux-gnu", + "/usr/lib/x86_64-linux-gnu", + "/usr/lib/x86_64-linux-gnu64", + "/usr/local/lib64", + "/lib64", + "/usr/lib64", + "/usr/local/lib", + "/lib", + "/usr/lib", + "/usr/x86_64-linux-gnu/lib64", + "/usr/x86_64-linux-gnu/lib" + ], + "CM_HOST_PLATFORM_FLAVOR": "x86_64", + "CM_HOST_PYTHON_BITS": "64", + "CM_HOST_SYSTEM_NAME": "intel-spr-i9" +} +``` + +For more details on CM scripts, see the [CM documentation](index.md). 
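+
+The same detection can be scripted with the `cmind.access` Python API used later in this guide. A minimal sketch; the assumption here (based on the `-j` output above) is that the newly detected variables are returned under `new_env`:
+
+```python
+import cmind
+
+# Run the "detect,os" CM script (equivalent to `cm run script --tags=detect,os`).
+output = cmind.access({'action': 'run',
+                       'automation': 'script',
+                       'tags': 'detect,os'})
+if output['return'] > 0:
+    # A non-zero return code signals an error in the CM API.
+    raise RuntimeError(output.get('error', 'CM script failed'))
+
+# Print one of the detected variables shown in the JSON output above.
+print(output.get('new_env', {}).get('CM_HOST_OS_TYPE'))
+```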
+
+### Adding New CM Scripts
+
+CM aims to provide lightweight connectors between existing automation scripts and tools without substituting them. You can add your own scripts to CM with the following command, which creates a script named `hello-world`:
+
+```bash
+cm add script hello-world --tags=hello-world,display,test
+```
+
+This command initializes a CM script in the local repository with the following structure:
+
+```
+└── CM
+    ├── index.json
+    ├── repos
+    │   ├── local
+    │   │   ├── cfg
+    │   │   ├── cache
+    │   │   ├── cmr.yaml
+    │   │   └── script
+    │   │       └── hello-world
+    │   │           ├── _cm.yaml
+    │   │           ├── customize.py
+    │   │           ├── README-extra.md
+    │   │           ├── run.bat
+    │   │           └── run.sh
+    │   └── mlcommons@cm4mlops
+    └── repos.json
+```
+
+You can also execute the script from Python as follows:
+
+```python
+import cmind
+output = cmind.access({'action':'run', 'automation':'script', 'tags':'hello-world,display,test'})
+if output['return'] == 0:
+    print(output)
+```
+
+If you discover that your new script is similar to an existing script in any CM repository, you can clone an existing script using the following command:
+
+```bash
+cm copy script <source_script> .:<target_script>
+```
+
+Here, `<source_script>` is the name of the existing script, and `<target_script>` is the name of the new script you're creating. Existing script names in the `cm4mlops` repository can be found [here](https://github.com/mlcommons/cm4mlops/tree/mlperf-inference/script).
+
+## Caching and Reusing CM Script Outputs
+
+By default, CM scripts run in the current directory and record all new files there. For example, a universal download script might download an image to the current directory:
+
+```bash
+cm run script --tags=download,file,_wget --url=https://cKnowledge.org/ai/data/computer_mouse.jpg --verify=no --env.CM_DOWNLOAD_CHECKSUM=45ae5c940233892c2f860efdf0b66e7e
+```
+
+To cache and reuse the output of scripts, CM offers a `cache` automation feature similar to `script`. When `"cache":true` is specified in a script's metadata, CM will create a `cache` directory in `$HOME/CM/repos/local` with a unique ID and the same tags as `script`, and execute the script there.
+
+Subsequent executions of the same script will reuse files from the cache, avoiding redundancy. This is especially useful for large files or data sets.
+
+You can manage cache entries and find specific ones using commands like:
+
+```bash
+cm show cache
+cm show cache --tags=get,ml-model,resnet50,_onnx
+cm find cache --tags=download,file,ml-model,resnet50,_onnx
+cm info cache --tags=download,file,ml-model,resnet50,_onnx
+```
+
+To clean cache entries:
+
+```bash
+cm rm cache --tags=ml-model,resnet50
+cm rm cache -f # Clean all entries
+```
+
+You can completely reset the CM framework by removing the `$HOME/CM` directory, which deletes all downloaded repositories and cached entries.
+
+## Integration with Containers
+
+CM scripts are designed to run natively or inside containers with the same commands. You can substitute `cm run script` with `cm docker script` to execute a script inside an automatically-generated container:
+
+```bash
+cm docker script --tags=python,app,image-classification,onnx,_cpu
+```
+
+CM automatically handles the generation of Dockerfiles, building of containers, and execution within containers, providing a seamless experience whether running scripts natively or in containers.
+
+This approach simplifies the development process by eliminating the need for separate Dockerfile maintenance and allows for the use of native scripts and workflows directly within containers.
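+
+As a closing note, the cache queries shown earlier can also be scripted. A small sketch, assuming the CLI's `cm find cache` maps to `{'action': 'find', 'automation': 'cache'}` in the Python API and that returned artifacts expose a `path` attribute:
+
+```python
+import cmind
+
+# Find cache entries (equivalent to `cm find cache --tags=get,ml-model,resnet50,_onnx`).
+r = cmind.access({'action': 'find',
+                  'automation': 'cache',
+                  'tags': 'get,ml-model,resnet50,_onnx'})
+if r['return'] > 0:
+    raise RuntimeError(r.get('error', 'CM cache query failed'))
+
+# Each returned artifact is expected to expose its location on disk.
+for artifact in r.get('list', []):
+    print(artifact.path)
+```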
diff --git a/cmx4mlops/cmx4mlops/repo/docs/img/logo_v2.svg b/cmx4mlops/cmx4mlops/repo/docs/img/logo_v2.svg new file mode 100644 index 000000000..fb655c627 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/img/logo_v2.svg @@ -0,0 +1,6 @@ + + + MLCommons + + + diff --git a/cmx4mlops/cmx4mlops/repo/docs/img/pages (80).png b/cmx4mlops/cmx4mlops/repo/docs/img/pages (80).png new file mode 100644 index 0000000000000000000000000000000000000000..0ca65735a705ff69d234b02ad9e116e6caa61d0f GIT binary patch literal 242952 zcmeFZ1yCGYxGp*|h@c_3Lju8ry97;w>)`HAu;64va7jXNhXjWSF2g_)G`RaffZ!Gw zTwl)&d!KV|-8!dU)xA}(>g}##XHU;`_v*j@^~?8ttHV{4AwijA2W2=pR6KHf?tPKUT-2O+ZdPU4|tsEX7F<|l!l z@N}OJ6&FaXrmM)}qf!RDG`FJV%;- zL@C~QmF&~+bm_W0>)L$5`^SRv$xUpA1tiak|^?z~AY0s|) zcx}q}HM5?6T-@k2@F%bNB~6q>xS=hya~`T4aVb}H#%?D8)H5N$rw`$+%1w|vj!<<5vF0J{}QRY zSr#mUx^a8XW!nr``1@I(wyg9?;n1~S(A{(Bd-XdpyBT%zyFtHw)N9{ zU4gH&(^c>_?rT1GS0UJc;D*z16CR9_9XcSubL#@zFBm`R_OQtv?OS7^lg}_c-ucZR zIP;|4U-@?O#}K$&sPYfWKZM78B3KXR6aP@$#H0Hm@d@7`INBK>_c2MhmRf}ldnl_(@L5Xodo{KvVO|&ir6Cjwd_guae>B&mIi0hefOKAZ`WnuQqj561^w1s zx>Th>H4=z{x2x7}L=7N=7)R;aTu654VBD%F30BBk6O*22eMvM56qek1B(5aGA@xzD ztqR7=U$slHv@q@79efKh87{vw@oMws=95i4kB4Vea;;4BQO+@S#`+K0altWm?FOyb z^Lfq>-ncXh&EDL38~e@aN8&H?Kk#GW^c`}ozKOrw3 z7vc4W8@6gMc$nVVeYA_=l6L%7`iV_k-Df;C3>%I$fi}l?)@A+`1BxR5&vr5{hYFBHAPQ0IBxu21vG_S&^^7`r0C+<(epLARr>p}#;A_~66 zZ>o<4cU5-<@1+E12Zu`MNS8}jwkn3(Feel}ch8blK@@frfC~wDg7aQ24joseS7A)? zPCcfSvKsW-7x2cvusQ`9JboF^ldzxIlTeb-NW)}KgN2O|`{CPDlQheyXJW6IUbuXw z`*QfA`W5!eUit2q)34-S5x=T^i7OxQ$)ro5v$cCOaqW{AUF65ngoBU0A4TbW6g{+J z*sYj45?MY8vmL#lN*GT#N|<8}P-rspS9-4c{UxnTw%UBrj}khmK`ly&o-iwFOIv#*!FxhGn@m zD-97%M6!{kgb;V_jWWIxhaAdWgHk^^%rVZ<@KM1M?z;Q1E?>=|qEpS>hR1+EWNIir$7WKs@we z4({XYd_QUiJtA`3!WYwiH&$?eC77t5?sa9W)2mC|!uMf3lRUeDSrn8h72l#RN7!NA zK_vGfE_~QBzq`-c3cPaBmn(9IDs#M*pgyVI-et}GN&RV^R=t?(um?e%JG5n+eH(M8 zW2^|Gs5`xOL*xw=rbxz{3K4bDfP)`zyu3AiM34CP`wq4d*(dhZ{#?&fgG*~0JGY{Vgzue6nlcPA zSMa=M?qV+Du@#zPK9$&4h*DJW>al0#uVFrPPjl6GnDK3tJKsJ25*!#@Bdu?0y-@$y zR?Szhn6@)$@0Z)4YA*^nC~Hxv5G;cfHG+pIb`P z%9}KsVRO5B8P5%EW~b9{2tyHsd$lXdo3E!v)o8S6o|3>F>Y<02Be><_SerVq{MY>} zKf-?K$zaGV$waoDFX)MrU25+x4$T(8Zav9+B6nzV_+*QG(y8)Fx=`9iX|46f(Je}g zo{Aq8wV3==jXI9{hCX4=PZ!^R{ZX1ztXBHkXvtJlVbrKl@qy;}TG-iUz zrc|?lt?1$n&*5s2P<}&1d?XO(H#g29cet8Ear;{lj zx;`eDBjtrsBc#JjoA=krL{?sxB$@$)7Sm+Rh{86JN9U8YtC^ z_&ho7lJ0NM=m6h>%Sl$F8;p1nIBxMie`0_NOqOBolijU%rkCtL_66~!*O*E2FMW?#Hr<5QC# zE#t9V)NCTNVj)y4zI2zL&kN6oPv@%rod{Cyb3{$Y9*HBo4&VGGzPlG&B~HYM$c)QO zRAy4-RU}u;%ydbsKQo0#{!C=zAhvvP`RTZFIexjgGGOv@l;i#*#U`Rg!&B&SbWiG{ zQ}oX=^?g&E ztD*)_IMJBBP*Vhfycs~Cz+e#Q6xbEG3Ie%51A*3!K_HH&p^VG2LD&%3!YE!N%vg0S|O+1%?F2K$oyr9ZDwG@52rCJHN2VdWD9F|VvPAaMt61icb&UNbr@Fk z-Ntbx=02O>QzIWw<5_Y)B-&?v@;jUO7BLH%_lGuP;kO~dwUF7iortvQ(Us#*8t#4S znwkBQVMU8YE9X9ag8ogXqB~otjq(OCKJ@nD+nGE6-ycArU&J}l|9p~=lqUb@mw~}y ze8>_Lz1_oixc&G4gC5dN-TmjoW9cgVe?Ghjv!?&&!+X3z*?&IJ(EfWNz!?9ZGu``K zcm6D@fQ$T0@$(Jwi-S!6(?5_a4Ntv)p4_Srfto(jw{5H&()&% z`3f%i)x7vPysijVi`u}R+(n0oQ1Q#<72JT+%Z{zgCJ2=6pHuBakFGWbuZ}LRP9axI zS7$p{r$!6$xki`k9CFR)zxp5nN8i&M^uQ-$1<-#S5d>;p9kyPay_zDQI5Tpu{ST*% zp%8riyK@uWc=!E{OSU7RlBYAIY&A7 zKRJEMzfKR>>fdRMbeMl5AZptD>kR)o1IiJQ|M}M${&j}`YtA63>v;=#^1y9w@c_i~ z`T5w`;o+Fp?#JR{<%|*_ts5f0%}TwlMcE=%7N@4#+oyEl4yXdbJNjPXKb6A2B`eZc zpnr>Sq!s>k2Ba1Kbq16b{&j|bo#Fpvnm~5VD2uL&UH!pjR61`FAAut&I#3pWI2tx# zJL?EI-|G%I?`Y6NU3#B^Kn<1aPzU}Cs36Vo-$6t%V{qh80)w>mrYC^E$&+npdU|$t ztT+`}N)icmW>Nvv0CX_Gf&>Hx++k!!5R#VO-+4$O>@ro%*^QN&CJ|u$+FbkF1&6o-}Ya3w%4!L&wnn z($`DytP5R-!MARL0{uO!wlRPk1byNa65`?#YJgpIh&wuUXt;2$l0a&OL|G(ihgoiZ zn}>NJ4!2VIQ4r0s9@=o=h|ZW_I1+z9C50E*$TOj~{{xK$Md>w8BtckHQ&U@8!^dA+ 
z1tA4=YYqFsAYS@P^OpWR?VVd-)6Cgw7*r^b_bRyKavRsZ99KtiQZjSfR~;U61!e~h z+&2gFy|K(df_bwq!`PT#onud0gPwlj^1;65R|Dj`s`BzB#Dw+a$x7kh6tV6;xI6$-W@<0%?tRp%NcJ)pJkvo!li)L@}+ z+2r_+OV$L|b!2swDd5r9MO7|h-!@~j{rZFmLo7C-yB+~WjCi9LaZkLImd2;R7fg~^Ix$Q-oN2kE;V(#vOx2c!u$Yy_qa*5UR7 zxAki;m7KR$P@;Pjd*WTONW_C%s(&a0Pve}tkE>BpN0W!4cbm>qy?$t^hLK$<{)*OnLCoOpsLN6j0s%k!4l+OPw*Z#fACX)XRn`S>J zKj74?f9Z)4!Pu?%VCD;gcbxaWviDv(irpEQ{1(E3e&60x)lqpVaCL~(TX=*k!)zaS z`I+yolI8835y94ZP=fm)Azr}8)wV7g;;B;SK4x*Z zHemVJ4>I{n_{a{5xhNu1DaGp8e#@uNi>)lp>H>YdJSz^mseVM_^KD01uRYIe7?P84 z@%?QgqRK8;EjGh`Jcd~@((G=L=J3`{X***Dd$Gog^1m;ytPr+4R=krrvvCV+475_X z!d_6L)4i088pgY)SPGV+?ABXdze=seLmTDjkabyGg4uDz5`PqX0{e2h!TINdW?7Q; zJ9g`_Lm}hw?mBsvMGZ=4C}M1FtgPH|ygXd;f&DU?;!%yZUZwc_{K&}sd@lgGccr-m zUIhctTe;dn+OeNY>9W}V6qjB*%dU;(jPxGe!9zg^49{?+ zjx*D04q!J44EzQ95Nz#(v>8SJ<^QYX=r1uqm!oWD9XjmQv@!0~1wrv|Eosgzpkxta zgcJV?(iq-j-vSgMO!Jx3@gfi(n>Vu!4)7{jn71kMoaSH0IDg zR|#O;*>jPCOUIyy|Hh4?&>~@OXnRv()Cj=#I(U+yNR#fWf2^i3;%e|6aBRF`TnhT- z-zB~O8%7_xg@5Wzp;7pErOf{?*PGhWGhJ_$fm1h=>qm;EVfkZFiYROrEfGq~0=-qH z6~pL716@7kkQGPzbfo8dz}+@BKZSNi`tk4HAdc>bfG2(brdPVU3JO*Ri4JqtsP#rtFbPg@se@wu_ig`k*Sk4bZ29F&y|1>mww9K5 z<*pBnnYteimtMf@2!KGUh4rwS_jm{r$zJy~iOnglNUiK4{9petb ziTd?}hyic`G){qC12U-fq#_R`FY*E9z)71vw;5?VE((48PeCnx6#lVD|4at@OaMRs znGF9-hX1F@@VD>P4c{e(1}%3`H~4@6J)hzhCe)RoxQt>H^bfCrY~>=Jx@9dxmxp_+ zocx?-vuP*)Kda)5;Xy)A{_i&K|I`anRyhMDC14*D#muWj#qc$y72Z>wv$JXPs?2*eU}?JNr66GPF14NNTq@yqIc-Jl$y#02h+7)`0#3DFMcIPx0OqvS z2_my}g@9h~D|HJ<2h_$7ldh0W96RXi8=#fB!1fQ~$^i`CrJg4-9#+&9IX$+vaO1{E zHHJ<^0seM==-W#J0~QtogES1cfbppX(U5hU!D6wCR`pe()amKnoq(=LDWG_;ximlD z+q*iasTtr2cmv$Hx#ayHxE3x;T>^~Fmt_3te20cyrx525+0k9=qqQ?LUQ@%tY4f=( zZDY6Ovy7l>vGy*L%J6=DFvG4kw?r=VL6rusazgG=7@HiQl;g@Mtq)Q#g#+} zinD}~U%gAoYG34F%uj4E5eSwOWnK<9z(b?Rjul$t~z7T_Ipe=V-tVAhOZ zGP{HA@5LR;ine`L#P7@@hVy^cp>g(xpHc`P&yRHxxmUBJA(_?aB+q38&)AbcCfQBfpxBSqpqO zU64=An7%q5qu+RW*TmBW>2@oK4R;GE&)(Cbb3GXwl~_?6?yvATZWS#8O_Q(!dM-$> z)HO_;bhQMfBthPq^J#z^&bJ-e1n>sh4XL|NMBx6QdlBYxYyW8rv$~LAr~CrH(TEeg zaw`=N94blXIYu}Qe$^D6eYF0E3yfZ8bd-pI`gZYRWEkkC_MZM4-+%(7$BkQVm1pP5 zAAe5|qFkAxrTFkTqFs?gx&FlHV7zu8k=DI3ol1||d4%|miWa{cA^^S|9y|&)W9)sS z`Brq2jIgLNm#`7NL1QbD@#R`C2cOl>G?1SS!L}@F!L*TpNq|@3;rorNZEZ5{qT+tL zR)ANb#;zX?EStBZYvGI!^yXl$V~n(+iU)kfF?gpZ>vER@-1q+EJg%C+%Na;n$T8P$ z)Ja$Ho&Wi5qj|aNc`eA#)dH1H2t}H+D_ESEm>ZPgf5Gg`{WKC~?&IaWYCKZSbkvRD ze&PXig#vo^L`5Uh%&M>G!X1UHOZ1RU3g5DOau0@X=9bf0_F(7$m9MpOm7;-uI6cU6 zpT*mX1A2Z>^6>w~-g`$i)wTPgvAk9k5fG%RAP9&ENUthIrAhBW=~6{{jfyB873m$M zm%Q{Eu+V#l00|1x14ze&z?m!I{m#92kF(Er@3`aqvv&r=p@yuiHP>9veCG4~N_o}@ zi7i5uB`cnUe**dxTYrN6-)=^pVvyz?^VnLn%m`aUOz-5-<=9+-zJ2rEKVc|UW_hC8 ztff22%w)->D=(;(% zFU6wibU#~hT7#OJ*=izm3rdQ|?oHC_a_0p84qK==eipW9RG>tT!? z9fc8!V0DfduQ+$ke%x*SSFxE|{&5n2XY(4F!ovO7vgT*(d+~s=FlXnpXPuqjQAR|t zN;9cfTaF8gUOQ@NG%YBdG`wu?`3Juh_K$|erpV!^FFY+5cvYI231%S&jYB&QH`7sC z%D=XT^0oyS38C@iyP6!QK(k?2YAPF@o6}JTwPazvwTCCK9QQ>Z>+&5#*04Z%D=KWS z*yl<4=PDm}b68m%0)Uh49U?(dac7mUh|KocJQNqlb8zOXYl%I*{~0;17$6GH;wY7P$PdO3AbDcD4`Ny>_Z5D8YKkq>$7uapl!?BRpvIqX? zO-(zKJ!eIMa7E1>2?c-yt%8Sp zcdRW^F(Ufx&1eJ%bl~2l!vm`e|A^I1;p1Q~s(5Xt6##s^fo8Oym|sM6XOBs>003?X zL_2!QiGlX3o8d**3riu975U1%%>rA@q{!~Ko$6AaO{rqWM+g|zkJe6bDS);?ug^bsE(rf3t;f+A! 
zX9ejHzcZ|E31gxisO`!60oJl|j2Z1u;8*y*xdD7frK5|>Wc~1*+~0hD70uVPvp6`i zvc%*3#5q1n`*`?(qR~o0obAXZI;62}m}}O+NAX|0#&YT-!_t4u?F%K8&OsNpK>y2n z`Y=d;xxzroOKSwzSczd5Ldp*>l;Mb~kF1Xk`Q2YVa1 zkK39`!kiK_F2aj>8f>JY;?QQ)CPCF7JZKzCe><5Z0a{7b)Dhp@`T~nKa%Ir5y6Yy>U~Doq&+#qCI6QR8R<^`$NxK$ z7x^>)2L+`6zigJs3mf@V|0}2n$fkEEC&h_*Z&%MHBlfH3$c~!v4kWH9td|i8_Zy>} zIg11uY2PVEK<)%KHO6G@itIg8y`SVJnw1rwaeNY{)XLnvC%L%z{=4SpcrqOk9Kwyf zFRW_6pvwgJwD8%B(p{NE{Qj{|%rZO)Q-ku%_is)KHUIssD}jec#IZT`J7JaY7I6Bm zum?N83L%UF>3*b&G)4u0qa10;nciLR034#d7kgOHb50Eeu~znbNm?`gqrjhU-6x5} z)k$AnQaca^SQx`E&6c!-vc^xrCRZe&-RJtRhXY;n6(HP9y)%&_%vhNe`2Eoz0DQm` zy4c@e6ol zUyF-#_@Hxv(Yx>uyA!q-7njG;EzGs|l1FV76o3YmIlG_ZIcZ^VE&J8~%YCGOzR;k@ z&u@P==x)(Vw>ksN^U0mn9+1V0i;Q^jJzM%~SCX(35xz3X2L_EIoScyuR_f?DV1hez z*E}YUYt%ndWXi3Pq=3o#u7?`~Z+gR8cRqzv9d+Smy4F$W1e(hRi(^pRV{w8=)TM|7 zr86&Gpw#@%lNalqacyn5>%Rigr^jhSes++PxCF?cg-;(|?_}m#y;pbztN=7EMUN{_ z?I%l!NcpZzkhkm2L&1RIlO*o9H&))%WIGb?zy>36eMnuwFHeH>;1ixq5skjz`2%}h zmPj}B<-UlJ5I?^VttQyr1NhqCW&?%CUh*133rr2iikBTTP2j^Em9@tT%Iw5l(5sDL zZRr7U#=WPmLMzw8UeViLgr#aT@mWMTXvhidg<)fV|)I3~Xv#VHD7M|MHgSa=D-t+?$v1Kv~BAb`iDuhQRPsim#0qtOk~k z!X6ka%U()TpbI(zd!WBPD)c%c+&GP!znLZASV;u%qF-zN^$TLkMX*3~^aQlz%PyyD zm}%LEDo&k$5czoI>)#x(FSATyFMj{{z@`F8*eBom77qS`SCnI9o28GC%i%u?{(b)C zSBU2NI3rN7dGj2)B?F$38@a^x1K4R4;pA_^p4~zny!e4)&aTFm=@>FE%U( zcwakKIs*K5y)Sgku7I;Z6te2QFMC$zb{I@n6BBi+=15xO9 zUoK}nVhu^K$?{I!Y998!+|qoWg^5v!u;JYOIyDhRMJV`Pa zPFlcn54Ss6hDT(%v$9lE_ATQ8uZ*0rjDs0NPXons+JuB!FIR`6t+hzb2o@>Nk_D2) z;1?MyZW~xhbMtv-21Z-n;R{X7Pyq6W7~ZJ@B927`6Ln~>2tu+H8=Lobnd&5IFt{F< zQ@tzzlBZwz2_AzEG|!KxN-j zBoDU$v~CWx8(baLl+7WHF>`CDXbijL(q$C7^h#F`LUUBu*W@NkOXkY_d zgy5{bAL?>*%q}iUc`GQ?FT+U@*7}i_jh6k{mjb~^_gsnXE7GJ|d53dXUP)oB&` z;RDJ$fgIO;c<+TpMp|_%s|o}02Cs)$?T9-4LRhLZsj};TePtrJe{-#L_YLO?EF>~h z%G+%fTgn-XZ6QAWzm*b$1V9}TS%_(1jYyOPP9|-U&>p7P+3JsOxX=Y8t0vea~=cQp=cNHRafD}9=&eTnT4kqSjoJkaOc&Mymgj1F53sC5_Vb_$L#nAQwY6YO?5 z+!_z72{O_L&l!=6Tq=Ab2_rSxl`QII-aZ1ciX6+^>BWP^CrlfZO6iuP;MXh4$Pshe zYQQ%9>qYNI@@{2jnOVa9JSE_lJK+W$&4N(g>OAEjc{Q-}G);FBwB21s+!Xzz3(^jb~TAL&Td6GVui=Kg5F@n za7`tum_;>F)C~u4#Pt#4eLSc~bxLK=ou?%WC!G?gC)tQ}I&5m?L^xJm(>>f+;=}p} z^NN7_lO0P@`wh}fBXaSWX&8R|KX!!iB_g0aiq}jmXwKDLdgz@vDE}mL7yeyT3@9rH z1h`{^M(mjBQO|!CRsuI@cG*ju%dT{t5d#>UabGcu>FsN`zFDZbo1x@qN^uX@n=|p; zBeGief_3v9&$&z`r1Z5vqD6|yupS8Wow%~T&*1*a&0ED@$IdJ=(y@+TEKa11X56OY zk{R~BsoPQ))?Y%bx<*{=@^`)R`PI9i_y~I_cUQ|P;u)UKEU(`XWy-0fWhrsGZ=F6b zWLJIrn%7OGJt4TWGu>dvhOzAGVif6qoV^#vEtEh_cN9?K;0wB2+!keie2sIohDz|T zMANl4OP&;MZ!>y~mPrK7_2(2%ZSh`AtXO;&fjT*J)4lN;($cBBdIpqEJ^i@!> zQi{R)Ngz$ae1*faOmQ~|Bg4UO(HU8Gzw;L?eDPin4ju^9cKcFBfABk)WNw=)zLDZPTTxFlQut3<+WE=~;q+oPY^--xmomxJJ zfOrl65AS~!!tegXF^rbXv`2*r4R5}n495M6E$pXUZrYAS{p!Uh(!agIdVhkxV%KZQ zp4lbjYi?EP262auZW7VZusXKd0Oyyu{HQ{^m}aJO4~Lpv6NC%a3RzaB(A$NHOXV8Z z@XXe}#BZ8VmWpi)q;lhj{QAA@^tF$36WVkbucphCVa0d5oKs55v!*7*4ApoDBebFikRX)N4Td&jFcUp9Z(xp;WaPV2G{zK#hk zs2NBo=JV}2>$uDJNtyK zR2XvF_#Q=vrCM4D=Ob(0ij<^_l^cXpW{;mtm*8dSkbn9VS$%%@DQX8Snx1P+TANYJ zGdnjZ(x<$Zs*ta@aj2JDzH@d~#WS&c^Ma?TlHW|!+L|uB&G20@Hjnv+o$utRgRU+h zQVb2*e7E6JetM$w{e4{ziNW`8 zdzQq=n|1INT{MOtY~?R+bEAW@mqtTs(fMU8YQw&JDelJ;_N%XYj$tCEG`n?jf?M)~ zr9TBplrhHVKEm}mWleg~=ql!?%RECdU)S=!#Dp)``P~8)LR>-rH5z15W1>79I%fvUGY+?#~}c#E5LOq^ysPai$a$f-LZ*pI|Gs2BH$Q z+e1~V zRFDbWTPDa@)L9vpAk@w6uTvK^YJ>T5tlo9!d$pL>pM9WmuyuP|K+%XI%Z|55peNM+ z=B&=g14`?Q-y9DVNiTkyCjm<5^{bE3KHkMXJ2`VdBJvP z+>=~5wToq|_?eg29fQs=>*%RSbHGpvy|qL1<4308C}S^LYg&SCXG?$0YD=Z(M6=P5 zll->uh+Sb7jRFl}6&kp~13DfS!CbcjdKh><=O4u7+AYyH-aKzS#2;$tC+<%#9TX8c18oE*&DS%4~LuEvXk?_Sx0GaoSbQ*{JTVCW}{K-U-jNX58k0HpW+ZMP-cdJ5)ELJa~b&1^%nRoKP|yogjZeR*$zBa 
zqiu-bp}5z;uyi{~@RJ>;Yq&*XRoftAt{8_o4R6s(aBVn?In1$}dfK+xUA4|pSX>i8 zKQml1qzDMiS0=y-bw&G#cm_f&YC@Z=Nv6Mwodxp)vl zdlGIY0VS&>F$v+^-Tel7jRL~?iho4Zk(h|O#*-aRRXj(S3t)(e{QgN;(GCl z%A0s*g&tKabk~l(_N?l#1GnbntzpA*?U*Uw4&5@FZPlgjEqA1zbhjbXYdhVzrzQAX zBlg`_zzpyuV!oHzULZXauXD0L7Oq|->b+rDDmo!94Og_Khb~;7IX%4~ACgoCv1aKi+ZJtn*O3i=E75o6?z8rZx6;k;s<2}J2vRcc{qEYb3NeH>32%jo3 z!X7sSE2>;UXW7}}*;>t4e1t!4&hc-tXMRTICU`*YC~e2$Wq9XM zC1lr5(JLpPi66T#{*+?iQ%a3fZ{tfPr}?Yjs9h;OHS)xyyv_1>aLWQ*RP^M%JWO=E zoy@ZnyC;6uzFeVy!(!=t;whhVqx?aTsM+?Esg@y^rgJ8@{{%8Rh>4faqyg@Td_lzb zYZzS&pKe|;I_2nAW1piIml06K`*|bxY6y=)i_^W*tD#d^iQ)U&A81f#{8N=vt2~i65ZrE z);wkbhbYvqVdCkC;27J(hh*TR>1ti+-@m&hntF029`CNyHXIOZS1%#No%m~gP6;sk z5Adi0aHC|Q!id6O<%{_)n^P71;LZ)&Klh65x=g-vS1WqRX^Nzwt1CGNcWC~!m2Z*$ z1}*E%+>wyPQam=@JXQL4)v2T;7)l!Lc*B>&mB+d4La={OQ@Yv>tiML=9p}03pFj5m zTZZ$Bn>ImTMgwS-saF%r`R5fsKFgc-etbX)%LRau)N>Xq8{@U^J1Z;aEiO?S4ZA<& zK%8?iyDbv%`oQAW{b8q%lj3O#JDif-f}ulQ+ht?)B?x>{qw722sVy0D{pH4#6aW0> zhL$pal%c2XbFZe|!E0~E#6Mjc-y@gw{FukVApBfEF$aw1*ICtDEbFjbjfl*w?xU12{j(ST9!|dBHljGO@g)o<0LWrAWR7mlSosD5X^6fh9(|qIpnHffrMdVRKj`J*r-&$LH{DcM84>ws_ zleunFb)(%U=f;CAm(iu(I|KNkbrIPL?t!zJf0xHpZ(ZNsT<3?TFjvK*K>1J2oYAo^ zSB<^IyP_YJHJPC(qxxCQ;H>N_AbX{LTIc-IEN7oae1-aE$ngkbGuYb*Y?C zu;<3|ioE=IFRTszSy$OguW;<=x9)Re?nhjEztb?5L6HP?y&^GZFc!tcLR*_~Mrcvj zkk+IitfwdsR9Ev=RFb5ngtS{;G)3#|(+h>O_mHZ}!nYn!QXc%`{Qmv0{}(YL^-#p6 z$fqm^Q<^=x-m!s3VKpYAVqTkGd4>>7(;Tok`#`|g)d#E)5+BB_d0{9A2=tsP z@=1nv!B5%d`N}yJO=T+`$wuleF>%Uk=4$&(vEK?(K3#U9w!yuD7jqYLOvACGwGcju zFbkVJ>>6S;=2@gD3YnZf)SoJ98nm0-9?938n{6LFTj1|kt}PdxQKXD@PS@qqEY8vs z9;Elx@?8?ED2n}?FYA=u4(+wT%S3xyWI|X%3r!*>#emr$DuT9xQK-t7u!0${On}!f zmM6Q;_T}G>Z4c!+4<=nW`+#oCPU?O`s9x~hQ?^G*CJB$Y1c`EigdVJ)xApvTWrBkc z<^o;KlylFx>Xe3#vg|}#Kqu6GUrPB_AoJ<+#dBMtlYU6Q9&_b0 zHlp|2oZtHc5L#0e3Dkz`6BGU?ot+t-uxV*+ZS$Gqg)7H~xT*Q9E1R7gbc}6w?6i>I zQWr*i5zb5xw}Zv&!IRW9g@4Tthb;p9i7eMbN+Qi9C-*n)O3|5H^@*3W=ba3&D@!ex ztW6sQ+9@mhPNPhdN7dL`0dfS2&uhY$W7FLvlYCi<*tc5O|}X>OoqF)tNZ&qtMv)re$`Phti4_z3#&K|o|i9Grumx$ZL4|o z^}^|m{)0L6#10n>l8<<)(VmhgpA`Ew?)~e>mg1#V>=o5#m(k*^P68?GT)w4H%C_9H zkgk!T+ZinTo!psjIu^)JL;tf$kJDmB#;rlZUxD&Ii=xrzEBG8;_+?haOjOm{mHMty zss0W=R?*+<9iAxt^cUbk#G7}v0u*K0X=Q%^)5_Xw%3Kv9NR$lB86yY&dy=}3_RK2 z^{9~ghJ;^{<>mCDA@$#`(8<<r>OWB2{7?eW6(A&l$`zj3+57o=&GWRPWonNhwDw-JL^c%&Cb@v&l8vmn)2&EN|DDocanxSpt)}>z53@3#EA>oN;YMmB(^?Ayw*mf3Q}>gX|Be z$zab&7fa(yw&kD-}jRsk+T&G+IlHz&mx1Iqze zVQVRhgz3PLtVWA!fuF$aswo8Y9qRPT$vHYA!5C7TKIU8;SHT^CLD#TyK>2%&J^h zz6_GA=74uonSoMTIaGN9qEZw|&l}~Dfl4mwQS7T?PDTbM6L%X76yjK}Fd70u4&i;h ztn9o;W!&k4EfFc=u6rKt!+J*VZxwl7j*c!enLPF3etCNerjo#GB7L;qkd4A(ip`7m zgnpba;<-bd8+%+mDZ#Ly(RCKdgON}5x~hD8;``5t7J646&A5zC7t6`SZ@2|oxteEW z?ArQ%{I}aWe+WUE*7kf1Z{Ymf1F;+>yK#pN7eGJ3w}5Qk#oj}mdw1U6O}vy|An-^Dz3l4j_2TZ#+o9Ti{UQWAPJFFyViL;d&of}ybK~>vIHk)@ z4X%3t%l+F0Y_=Xp-Ibn7r|TjELzq4Qt2qe0B7Q4+Nw*`=FLhV2y|ytSOfuRtsV0h2 zO#6CXj6_!_`loiX%?>x-R_5q3YHulw}*Cstji zM8hoRhgk(H(7rlMr$nDT^#Y zTZ{pO7Ci7eQ%b{EOh@@T`)^`PJfEPRiQjy-w3E{vj@|h}R5{p13q&TkWY@=}Jw2g` z*txBh*$`1RFGF>snq16n^;WgZC-*~i2rKv@fhj@m`gz9Xwi8TdVe$#nfW|4GlufV_ zXS7AyF&)p-;CRvZ@JCgrr^%mS{Vgu);xVI~>mUPSa`IEpa-04UIto%aeGHVs=`>AN zlMyXYe|er2+A>}}A=GZP3!p-|hJ%bD17kS$qb{1&3!U}#uwkzcHv^PRJvHhb1rkM_ zaHBy$)iKETGx~02sg;#wcXC({s&aeSZ<)5kk(P}2HgxaMq^EhivNXc-R(x{@Y#KRl z^)=B9ft6yPJ^W(6mkH6v(*pm-9jmYBF_*L}HLS-}Do>HFOrP$6Ns+ysqj6uv!uB?u zjI+(+=E5GlQZLSi*!R}5<$gPEE2-e!{*t!a3KNUp*zsD5k{3_w@5a%Od@zr%*^nAV z5D^#+Q94*^3TQ(2RtWercn)A)6%)&>)U0c%>(jL3ekS9}EYOo?y>(S*)Jxr#8CbC8 z`CDr)Cqk!6oZ=WFRnFCvpO@L1!iu+|Ofko_{RNAueqH_rVn3b`7~)6U*F0tKNQT_m zM4a5rxA+vqX*mwWfpRp19w`F7^5nq*jdymfqH0W>F7W0ZA&Ue?qIMh1=L^*wa~ru5VGv1Hu 
zpmgGM+!>E%_FKfDw2^u#S+_#(if`Wj)HkVUw3#2)mfMrklaeT2hhIecZ7t?&8w-pe zqjhJee3hA5VuU-CEiu06QHxS7#U)2fgv6(XT%Zj>#-4Rg@;kh{k<3(@D^=v7oH4f5 ztA=_Mclth*ALl!SD*ei&qh&1=a_w-bbF2{Xbr=2EDR^qqpE!8nehXQ3AEEf{h;=Y% zGbYe*zG*t9F{H|>qhOR3Mm;9#v}~S!Bu``Zqo>iukdPINTRqvS%CwPL<5d+DZ?$nwD)@`N>Ob1i(t;~)pZ3?P{ZHs=i6FR)CYoHlXk$Iu^c>ak z^`ta-@UQ;5eT@$z78aqI+twz6jZF+y%!_}{sj|>G+kQi^@9DOAmF}%#I9lJP;ucPM%Of^}Ep9o!!Q<0M8O*qXPxf|H`Fgu?(LKspW%t{@q;`T=qV~xg?{YFt|Yz_QaV~gr%_hbh)>{ z(JN3?yIWP`Qns45%ALn>ak_?$tYg+Xf6f|nnOy7$wwFdcMz(W#=LyIlX4?0B;0cC1 zS>j_Cj@k3i>sT_xba7!>579D9dNqEFv>isx;kkNw7z0ylfnkk;9&67v0JVQx<8Qx9|p4pm)IR)Td%$^L|i0o4tOf!zewZBU5|p1&E~Wo3va;=De5q18cGi zJM(ovb!R{HH6=u_tViiKx0rZlhPwyZSBdsg&pEj|y7nPvwZ8{rapfc3i2&Ro-g2os zF)p!Fg1W#P8l)GH9Vd_oAqrJ2b}_%2Brh&|MKA1;9nJ9@&9qC>Ho-(G#veS+8#ccD zwu&G1{Xv*gBOc6xe6L{2_ByQhG^ihNzn2Q@sg0#N7CwX>xO9w~ufgZXmkz#`)xM9X z9JOmhXAI#xQW+Ko8kXY8D?HSZj}AWSfx%+st49=bpkt*ILqP(xE&W-{O9R zYd(U7BLY?6O!R7$$4prl?h@`azVRQb_uk~5xgUv!7j5(^BX;tWLR>YB5SfwF`3`x) z3<*F!BXQN7mCspD1rb;pwHt?F$vd;%`Ox3n?q`Q?UgZOz*Bu1QXcwnrgNS!4S`g3^nAg7S1NJ@ zwth}^7S}WW0?nqo1QuKbn0D*kV}rKGQ>FiNl?K@cbga#GmS=#xmA-X`BRl5pZ27j={&n%MeR*S_W^i)9e?%nxTn36o`eUI zEQANOG0N%ZiX+NaK$qt1cnIi95zKb0JTR84!45mj88hhZN{6d`A3uBmE_s*R0F9{d zj-la=ir;!0{9oclT9;~NTXX5QbSTZB8c?aJw{_u77DK;@XwCh8vo+xcGEkw(SGA1O z#;>tyR`r%ov$us|Nj0&k5v33Kst72iB_n{IgWmfM{@QK{v6i;rW%i0V zoG_EGoydL5!EBGEGiY9J@9n{d5mbg+OVt`Wb|`hJVsxGd<3JijO^Tztn&)Phwz75G z8Y}>UD+2bd*b;gq^%8r)BQq@?=}CH5u5boNhlbXrW2J^#aq3+KbsH^YIudSgFUKT; z{p7Ou^IrS`fLf5sI)#%o&6i<|75`CkLZwW-j4D;ZN#N3b3eT8#)4zmOZKsdwEjdr! z5kllT^^g7Dz`1<+yEYL(w-gk*+i=2I8~**p{4B2wdzJ}Je^5Mk2SRO zD=sF3NV2yyoPd4_HXgHMrWEo7J$u5drXd1woFKZkHp&3G8C=*-!SU8k>TTVFyJbEZ z!QUaMaIx@I+-g06FxfzGfaA)y1Rln1$6WrUl6_J&`}$_|rgyAM#j=ZmMv9-*R62FB zl#A7)9RXi?%<_UygHnUZa{b{(xmLLU83NuABgJNmdXgyR#!KB6=HBKPk1oyjD96n# zTaKL8=3-rFT{&C$Dq*xHz5N;duNmAht$Nzjc#Sxv#^6BFTBQ^f%* z1@FG%fY_~>z*=2j8ZJ5!^LE-T0Oyuq!1NAJ!zvVo<}*zQlDbxr3AKM>qNmnI`_m)3 znn%XMglB`c8~e=Mhab*Iv9*C`jHwe;o9qgEha0j?rjDeJ1PKQVAAMXaN)>iWX>Opm zkqd~PL&UVCkp?blu^w!f{=U#*z}|>(Dq_}1AZtUc}Z7? zN2WJUl`>IPY2NtR_U#3`ad4jAVs8JkxPw?9C4 zZQX7~J=&ga?n>H{m}N58Uf#0U4)~N2=2{ThOC%4@ul)u+oNb|IK3z}bFx#K-N zu-UJPRK3QmUlFIZKdOp&EUJ?!hTTW8@3ry=gnjG4af}wD%szc(Wf6~U_&9BQi|6(qKRQPdRj>!v%axz&r zNH?Ui{uV;c^&b*i`Kg{t_IuTnJ}OEVm;!*5uf2`VJ%d_|G9={T45o0G%*Pxcp-|i7 zNqE9rei4r|xaNS~^9A#sze?Z;U(>0r5eVx>h7g*1mNRDbKU|Uu2j7+yA{f4VIci$M zvpj5U0~B}$lSLpYd5YJfbg>DBdb^fo{EI@(Jpge`YJ%yYK$*RR=b>3T8RaxZ}C>pfTp z=XX=v!R=@&p#q|GzVy4)*GvXEmeA3Vi}!Bio)fr=-{mstXKc-(Y9Mbl?Bf&b752?Q2{yX>m~mnrM{T3878YUXxo`=VMiib zC4jHL133+Wm0=7)Mmn3Hv|__~kl{9Dbn~Q((h*=daO&Y5%$Zb~g{P?o?kfdF*lNvX z+ve+?Ra87IPNz$yz2sl&;1ynGH5RHAYN{*#d0W-r2d266w#fCX%h&I^0)Ayx;$>QW9TEmcdr1F^<&zsl>~!Ak|KQN? 
ztf&AtioBoR&!NdVMP+pymJ7HlJNqSDT~@dDe<=1CfkF$0W9$ZaK3uecBlVA16zuYS znVF?r@SSo=O?J!8IhPS+rk7k<@h{cPcCC51WMc;u`ItT6kz>7z2EQp00EdUuVPHP;e}OX8lPyXHF(JE(AP*O z(QBck1Q?3E%rbt9gKN`k4<>pTjwtTj1^DL~`GBY@QD!S@KwUhuRyarJB)xt}kQVZI z!*E&If?HjCX>A&)#LGbXN>4g2P5RrRF9M_olcxF_s*_g(1{&OojFqH$dl&ej0373J zhT>e0j2BJJ)#?4#QxeTmKx-IK80OD89TAucQV-xK2s&g0H(2r+zRhX6hs5PZz~)fiMAs5uflqL>rWEYjG|hTnSoJN4%N%SoUt4mHdyc9b^%cy>~ATjrc41o z9caNkoVX(-;=Z*wVPuro3AYRH=;v2|qHJIv59GH%1?T#;qDPHvMT)S`j_>oq`U_TI z*hA}cqGY_hk@D4%fAH_{>w~9yYfp%c?UD-gfS}K)*=pe*N=AAnoRc8Y+F+mPTqmZ3 z?@L}AJVllafAB!dCiF;v{rKCr74{+CH%c{IAxaetMpH{><^#uQQ0D?3*ANH4WaJ8j zg_gvS2uELJ=-ic8NOyD}-YNS}rsO#DY!(n$Qb<0~`_|Mq0CJ7hh6v78`LZDz@Bch>{V z!Q4Te`zr9F{uimoSZV4v_WX?2*d^#u;=aQr9#3sa;sIW*US z`QG5LUKwf2W&77@@El%%A=Q3JPy#rOs1s=%Bne6ifq%ValNcW#8yjy^!#hMzCfC|% zKs*)jO8v+AbMD$$6`AX;QfaYqEz1*0oTA zSLES+nP(xNJH05CeP~c}fl3Ho9zC$+`)=!oQh56hcbik>elaC}jb^c>e=@|If3=>6@A#6SvWakAqc3?h~ zk>?6*@>+VBs~jbp_#r#&J=y93)~c-t<svOzj&&>g6>i_XxCZHohN<-wQ z%w!)Sf3u&RB%An;j;AbRu&u~`2jUOkepAC;~OK4I-eV9=ztSA6~=wwXs;;kTpKUADM$WJ_J|MD#?@cpG&tk ze8aHuEriM^6b}f4G^CG8kw5x{qse@Lf6Ybm;)j;Hg*;L&*iMcIF!7It82W}e6)n=J zJZ}EqQ4IpUBX?#VlB(?4D4~(|B~jaWk!aQA5zvJ!7qxUgUyuvl!G}LVs@6{5!jb2V`gM2jD z4T$mG%elXhLjD;Z2G%yi!~YV?gxMvZ{`YJ?$vRuTb#(CEhJ7R*e6)NC+;#V55QU)c z`tk$FZLduOvokDlbD$RRVnKyP1(IQZ((&{A!OA44?@AGuOw`mw$dT&F^TGn3y|wms zvVjj2DPD5?ACNxVL*OuK*2Z&5BEOaSDeO%Q&kIUiZVS*C{p<9}N6KdyroJ-z6UW-^ z8ufyfqWq(j6%uYt)G41*elw%8`1#^mJ5`h{Pe<-?bZkv%>eaN{ET>IZ8G@|)x&})o zFO8{Sn^tMXmbzp}xL!V|T4vRkF1D&ya3HfgcN#d=gEwn=no(;+^Ut8@SpTP;XDg(kg%}KUti86rQ<`Z9liaB{fxgHQ*!-j7KoQCO)m~HiU3van1;Qn5Ra_2^#LjyF<*RUjA}}F zPz|tgHQ;JCa0>+vtTuvz7L{k)sZto$JZVzs_{G~D=yE%f#i;O3v#_C`45yYar8O59 z2L%-u;~$#^&M`WIB1|tSk@aeH7+q@&G955TDhQ&t522sQPQlg_7qa;Oh;=tf^adcGSq%{c6JV6fgL~@w>JUG zx8}KgeNR%(j>swBZDh(2*ZuHV7h;H;Bb%Fxj4m@StseA6;0e6_Co!haf46>6ZCY8l z-gHoQEOn>q#68>Pt;q=WnPQv{I8_uWt9nGOw00?1;Np>%g#}9K*yLNaLtr96uVs?K z_4WKAxObrrPl?6NvzbR(jQ-TAF8A*?_}^^2LQCLZ1Y%f($e&Bn&5 zN)sZiV9pzz15V{LkO|u-Zf%u-r57f zkU-e52d$8rgp7}#58>-qHpT6W0iSi{Gn)efKp?r)PBLo+N4rBZk-Ati@>doCf1U-m z#8W%;$?>14MA}dRcjLF9cLH-M{`u1?`7+C`TG~%s+(Jd5T3uRW;6~b;flY@HG^MVb z_AVTuS5N>RujZd|6`J9vga8W!FrcHz3qH`S$UJ5N*{gai=4K}YZ6**GQM=pI(?EP) z8VektA7@|s9A5KsX$f%VkuB3>V{<9U!WnxR(a}j6QPDe_or3r8Q+<^LE#TZzD+%=M zHDQs;N)ZttK12ZX$!z+oe^?k!dX{KXO!xhJOZYj;2vS@Xkd_w5+bgm&%7$hL3u@+f zb>fiaRIidqVKA(vgurL)-xmlmV$_`zKO@K2;;a_1dSuG3?Z*LFlV6D2o z$RZBD$QosMA@6O#TkI-W-@XlJ1_r>kox_!aV=2UAQD^DkJ^;8qmBO_azlGGCJcc1W ziyA^HB6jGd{OY_zg-H=lu=#~#=|~1^p4<7V6AXWC<=sA0av+SB!c(&RT6M&kg$C^I zSo+v5{|XGOuvpc?NpEzuhFxlIaPC~MS2si~%*AP-<3;cR7b-B6pK<6wJi%mZm{C8Ga zRtfiQd)^V{1I77>8LUXw<<5En+$4LgkFxKWv-8G=v$J*;N7acF8CUYpWLzQmx*`r< zcleofv4n`K-7=zQMmsnVz#0O`qkJGw*~~S(x3Y677(tj&MrYA1gOl9xLxwR&2srEHO6| z6p*vpqSiOJuNyveLJa7JjiW<}0fLJbwq>6)>XD{XoZ*(o3s0PkU_vLI2@Xoc$q@kk zXgnbS{lzFTZaJKaZn6cMVrd(}S|#X|s_1AGVkV?MDTz3r0o3#Sx>dClFR+6JKmI;C zU(>Co2IuShLPl8yMOYcsB3hZ^2vIzAeDLc2Dpifp_8egvq&};Y!+tQP-ieC9LVOIi z1bOP{AM!r4%_;Tf-h6~-v=L5QAPD=G>nP264H+vUDk}{Uo27Zb!>(7Iuq1rF(VF+| zMxYx|ec9Y(bo?bRT#9%=9kHb-vn@V;Z0l8>2Qn_M5*r_fHt9rR6ziC4Y|UpDq6t6& z^8Hb0cYi~FLYePHtymUnoIi|ib^o8`mcObhij|fStQOZ8jm~YskMB6;OL1HrlVYaT zK9gQqN*MBS;evCr7&1O&S5uZ~`t&qW9{sSOS^IuTl>F30t;EUL;D%ISxwim{z zi^Nv-{R!&l4t+$F<;!ijHRMRGn_Q(KByCoT`0`k$kHyMPOZSm8|JUY}xBM%nzb<)} zURCOoJtl=k#4x34sCt$ujOY% z;i6YxZ_K0AFW3jhBhU!P;Z>zIdQ;(Dda1`Um(tZ#Q~VY(2eMQyqQ!}FFwW80;*#^avK?*`SPTb2p2*KGW%zsFuY zOkGyw-Nvpsa_3yGjl$M@&ct2|Cfe8|Ds0Bs;8xxQkp?u@VNdTDD!nJ9&S1L5C3>Ej zF(Hc=O&(p%Ddrk#C|R(5sYR`l^I&#!j3 z(X(0Xd=)YMChf>A<&_$@@!9El+CFtdRb5XzJ(oApJVMxb&jYoku;^GjcjGf}9Ai;`q&3W39W_h?OUfpN5!t9rDxDgbE 
z9P<5rp(IOkfpEWqb;>!9*)01(ESo|9N~^AGxZm1!hv{)YIlcdRq1sPrMmoE_c& zEal~G>%%G8OE(o^A)&BmgSKT<{8}dRbZ5Zp?c9>+=yeBfP_QS)%EY0X;;%qrAtp&ezTVaD4Ti z7scz;96FvLkDBQjEKbBuj8@Fxw_Wxj_OU*DX69;0sOq!13d%>KqRu13ldOZCOm6n-2fCY~yt*#&PA80(NG)W}GP-)up zscj`<`3HQk=1i>}?cY_g=-`0UFKl@9j$PSdewAMh3*QV)F~k;b@Bf6I+n4sW05E8f zOHR0@6PdKp7Z4e;Di*MhL{Y>}|M|v9N|3yB>rs=DIhP>=UJfv~hPJTpU^?bsKR@m4 z++#I?8MI}+nIj8(-5lxDJZ#_u z><15J7u&e(aCxWsqTrfLS2pRAf@gOu6X_p<2|?ty<+sV{>Tf&O%u}DTmriS-WcUi3 zp2;n?J*;?y>;14EwJt2_6R>*GHyBxr;iTRtgQRGSXIJ+ zGtjDA$_@~_LWb)H8^0pu@JR1gy~k<3?5?$R6Hu zc%H-Js9g;AG%L#}!uI}sXXpFbb7dul8mdzHTJfL9l(%i8(Y9^#!G;fXG&Lp4iioJp zpf3iLzP=XZG-Q6;%IbSva8#6c{X$-H)T60JBvxHRwG72xZXa=r2Bvi06IJ6rjCU2Q zLgpD%BlA!~SlR4`qQ||{jr`<_AcV)FZh@;mKs;f z;1kiblpq54Zz={TyVbL6KfmM}DsDb-bhNN=6eY><^;5=9HFzw~G`Jx^5dKXjC^=wT zuHcW3nlMlnAw0w3CMJaD)z4m-XYXwmM+7XjHpjg!xcf>-SYA1?ZP%gF;$tXdTYhJT zw_1jNllca)x(8eXRe!`=s;8plqHJ}EULxnMrBo@~aVMF><2SBG9ajOveB>lA_B0w? zir8O5(p9*mVsET+1cfdjO&#l0|5E`e!@?h}N$J0TbGI5>il4RZM>piNv!hGmeC6>qW>|=JNel~-vEc3JHhTl2c7M!j)ZRp*qtZJp-ptf1DtIk`50>M9Ye!xlo&B)( zxn=j&nRG(#Vph+AMD}@~t@WkJWVAv++q2`kyTXv6Yhh=LzIyLDswz7u6ka~VWX^&T@QiL$1z4o%UW`<5b-Fbfl{E<3aVI zUb<8)J>rdo{;rggTT#;#La6Kab0^${btbY(b95yZZqh!M+gXwgk;}P_4B?!W!QfR? z*V%)j4ys30Aq%zRcZP*JWt?YTb(Z(-TZDgU+!@4p*;b22o|I(tv5i3&^)lc{N=v(p z4vmD}(bh0g_C9Mb8FVnp8ptB#>!hu(kEf@NO2n~F3>#j%RU#H9TAW#>R*;lS)qUS~ zC&;}BI7e5L1FeICu7_=4oG4Wn^U@Hv^B)?kf;d!d?6ZYD!Pk#w!XO247tK)Pf#Sve zjcex>w#Guo8TCLi9%~P1I(d1A3b=krGvc6t>7;Pl&+DO|x`rvz!YC~v>ZyFG?rufu z;IfCtJ^WI*z-?FakooZ(BV=-VE^jjXX#NdMnpt>my}pp|A=J>&_{5Z9xWirfv=sZW zM{QX{%q>WPGJL)U(tXCkrt|X-K68 zGc;wVrFSnI@*YCdr^cG1rL~V6Vh6eoh4=u_RJpTMqs)x89#kG>$f4B4Gd6o3?y3>0 z=jOJYi@)?SN&C(v)vLC8?o6)0`De~C5@>#Dl9#wG_9!~fqRBXr7Q!7PyK9^;vCPge zdLaDt&XGC&jTpV0z609CGu-y9V9m{@AYpj5L9Tgq?CKtgr<*J%!5&Ldz3C_enP-WtnZwy?j)ba{WwURRE}r2bYv9vI9f zy)U!)xeRkQ(kCpgmAI=a^{ejlgCA7ry(xoKU@iWlvDV` z%a+sl4~EESu)GR_MN`H#RV{Y56p;+0ML`q(LnURD^+^YZ++0SjA4j&|!RVcZU-dLJ zU-dqJg@Z^v-ST?Tk>^nOnVj*NS6<<7nBfUrHp=oL)+*G9$}NzRZgzIjR&nljadvibaaKJ- zTAYw3V*Dyc^Fjy_(@*fBpPt3U0JL`lnL(HLDNR72zZ%XlnJ?|PFU2^Gz2Pb~ZE|)o z2}#K9-F@-*eez2!9e1L{LX~Ijo^v*lVi=Zs0Nqubp9IpPwE>U>>}?;c_JSw{p`S zeqQcsH*3cadmfJR6OpODx=~e?D?GFu*L@#skLcYFd&G~f?OJazsma>QxtGtYpS_^% z-^m&?jV~9I!h(m%6Pf`>jFYCDVnUA9Wp<=lFMdpgd!jLSLwjU!dU+s$EkZ#uL z9xjzsP3!QHgAX6it?`~vK+fHanXEF@dtr~coy&+W^j7TXliZUdxF!0Y%=f)+9bxb@ zU*RhuTn)2FAlHz@i0e@y2lq_fMU8QlAdX`$qb6o^e#5Cht`b}XZW!j)Y}n`ZZKA}) z#bI%Ukt-(pPKAu9oX~IkOic^Z8d9Gexd$|sn+ny|222&U$sorTU>1M8Ex_m`e!7)} z_t!T_IYscgch6`2W?@-hNc$uHQpIMI6n7*gEz-tze9|WNE+Mz=`O%PNcZUM7;!Bk~ zY`A{w)%$+H?Km3cnC$qe1{LY;scJwPWw_(aSpA+#$aWMX{Xq2`2s&<_cwY-+IWBWO zd>CU$h9{bhNEKLEjGr`gm7sN3px+RQb3NM9ee@d)dE2Ca+T?hRggmVn04=)COy%Oc z=h^M1tbV_US26yhn<*FYQLAk*hZ8FaW6>h2c0zGsB){*eHx87K`~I!sLV%7#JLJ(v zrYQpOr%}Bq$QAR49Oi3d9GhKTU|f<8VHqhD#}|Ik5}oaq~y zaNveTlmTO93$MB~4?C1LhfFM>Gw6Aw?ugsClf|C0hSc&Jv4{YI2TEw$K3D$*D{uaG zrV%0SX4!)L5iPwz50}o!Bzg-Zaq;o-idi{%+Jbs8Q-WeT!-8>QFEW7b; z57EeM+bq{_#`>JUeZz<68oG=$$ga*r-jE$Sn08XZHAHj*qiD_BEgbfNP|Z+b^Ni{n zrO2SSLpYaeFG2q3qM+=F+ehoZ6Itd+uHdg}$uOpKu;fgvx6;tFv-3Jo0!uk8&w(<* zdZ`E^R)sT+lJsQ1r$th9dIeQ=m$ed3X(y(ZioxL7siXQnAtmn{_;En-<>agxn4s z4sVKgOV0COooXC=*H{sN&&vK`YZvuEOIE?!VvpRf?ww2Q2nRpf0TeBx#BgxeHMoWp z>PN|p|7^i-&#+a0Uo$E}RB-vg&Gf&_17fTo;-M3z5 zf3xt|VD=juinvHdTxx(SB_Zs7Kl1tSW4y^AG6)bW+PT?POb+Ru9K1hHGe0wC1QRDz zt;A{PdDF7q1GrxI>QD?Xx=x z&rvi{2frN=ATL9HU*TRNWLlpduhr#U;Hd7qa<~}2JiNRNK%(SFFCVYQ7GH!bDiqHs zR*p6C5aUABZZ77hyQ*Hw-)@F!CEqu~vRRbvQ;A*q5Lc69yIOVQX{OD^Eo79uIGHo~ z{oswdEs8XWr{UIQ)o8dHQ99NpzZdvXPEvbvB==m4TKZT@GzonYw)B0)U5<~2>HlO= 
za%$tsaI#8m97kukK<=B>GbZT`tNC(5T41$u>*RWMbw0Ei1wdJs;2)N6Q6hH|HWsMp z)dNI->|B}08>q>sL%{da&;XI1#<#K>HyYw1D%RRMVQ4+IPr~|8cl3%xTt!U)sN>SL z1sERt=T$X9k2)gKbjBxID-U#O!AOGMBXyD78!P41;?mur8lS8j^y+f0GtpzcMbv?K z0Zu17OqTPUp)wycvw$=)rOts=KQlDsD3(CG9dJ?dXrZEyz{SMaIV~3u);wr?*Op^N zWU-O3K+*dKXq;fk?%c4qS!(N5aPdX%E9T66Pf}!x63?KcpHo3$l3ZufWn|)X^uaXG z@?Xiphl$9xxo3e`F6;F_kxp{t1mt*6WGIcy?l4dGO>d!Q19N&VH>WEyX18aWu2_RilBw4mAuKF zqY6bi4-l!4`DUyiA$8(xHINN=^o+?tZFP6L*vtFJB_1gkD%p?lM4HZ1O-0WM3RoU5 zTI$6VMeiiHIKTf|IjO2z6k~JTP{m$UcDJ@c!%0(peoCssp{T_3ZKygMKSqL2= zD;vp2(p+4Dt1m9k0E_5U{vk%XaNhrB1=8X-pHlt@;SEb>=fF9*mQ%of726N#f9lpY; zr^_VvR281_8AHS+`z7pzmMea8#rB*@Xp8X6^3Lc`X^6^|docUL1m z%s`0*1MO1nURAafwPpKW)h_nZgQ+=0JpS=cZf?kdt&C4R+a$hnw7hLR-azbXX@6gm z0)*;pw=Y!sIiH-V0@~S29mFMFV1FFVyocwT5!EvT;RxTfb}tt(6qxV*O*zd zdj+9yI(guy3_eXu(rnw(M%~%0h&lFl@zn(Ru|Y-HAZ`7v_6xTc z^1RuMXx5u|AOxgj5c6m6h&%(to^2>opW&h(XTZWY5mz?@lV^aCOGYXsTer{EmWncxU#)p{N|q|eDr;^)$x zaz95geiz6HU4bohUtQhZU0ofNhcLdb;k}quPK+<0ZcmzFA+e7CwrMV&7~0M(dYfQYg@JMiQ2GiTO1;@k9)rI9OFcDJgzy$MA@fef}^U4|hbqe!#|8?mrYwJO$?!E%;FuU+bMYzev+Bl=CM@c_H zR$2-)2aHxbICzny2#cL*^YdzvP0ldJ-*nuD>Tpk-&HY|e>|7=)@9sXg4f0zMc1jfhz5Hoy-iXOoItJ<4k z^kyt(b8#;$EDZDW8Y5T(4&QL5@%_=3zS$0XbLdNoaZs@{0D3lN4&Y^+_OG{1|6`>C zYMuU*?tw^SINaPE4xs)0X@nYY3^Oc-nUCb-JE0J;IcWR*d9pucjr{K2c)glhdlE0N zykA8D>F2k$k}E^&R3d3^b8D5{5{CfkgY^mUkCXqpwVeBO*VDam;=J-Qm%W$m$c7RA zk^Shk2ey;nzHFZ0Jnl;wtf^ICyf&rW&DMSd7$MS+h#L*lq%N(Vpwp7EtC>W3IK7+1 zDzVN3$cP}HgXIONp}+OH9R8&?GkIYawK4522wUWQhgXlo*MrHD2??)XCpZfJ;4J)X zcW->vX>L3+E>>Y{?R#TXRJ71iS+GBvR8v{GN-%}Pjg8@Lp^`s>?Z&63;w974T1lu1 zANPCTezc-iL6&ON!RhWriZn$cf+S1ou?H~y{r!z6(xx8T3~}YXNyfTb2Gr`K_As8n zlgE43h3@?|k5o(dqW>ISZFW|#U#)>svusMm{3q8oW_5K7uy8)xu)v}3-(y6gqr-F0 ztDam;i{#)?*fv8w%RHvf~eyR{)Z92^xVna?eC=he}iT)yGFUf_>J+kQ3mzSTe<4ds<`BPw?Mno&S~yGrPtz*g_^RwEl%8Q z-D0207IA8R4O=unhyb}C@Tg79Z9a8@-M%ICsH&WEK~$|A*`D32+2Q8WUJcu0Va_}v zAC8@Bw)?UmGN;JTmCA1kfA8WIwNP2fQ5I&J4MU4i3{`9@r}?8ytG z=eo+84!^v-XHX^A6m)a8DodJ*nCK9>$$s1UhL06M9nDY@7(BpUD@#A0;mu*}Z4K~I zIzF0afq?hGVr`s9%8)@fthdimOlAv%_Frl1-#z z)Bx0(&8#kAV|!_ETGYsjJZ5m8r;OTL(AO8RlG|4DevkgP&?g3I&#b9KAQoG9HWU^M z3WWcxbz}=vR&n}_UpbyuhLDZ5Z0q)p|Iwy*@OK#I{DsBOdnXoLs#W}o$>i{*)75u9 z97bzvii)@s!|qrLwKG~N*s=tfaiT-|?|&UDZr{I89>5iwdM;0W`qY``gLUGmbIh|% z<~`v|uLZ@q2x7OEmrdm2k!&rlU+Vj6U1>a8 z8$ngv6?P#fOuzQM!qRs-teN$qtm{t_YF1WIt09hq>p9_x#yj5lPPQjsao_SAcVdkV zT3-#hV+qDy%lw#{8Z1srvtET1s~+xQ5!3av1_Au_%m z4}MN{c6LhIs;U8D-xTUa%Fi3EsqXJLghrLJ{k6KD#9?$B)*hPv1(1!khyj0vrJZ zV1FQrLO?4D`8Usid+e~Bgm2b)zU#6SK~VUTOWQg2MA>R=|Hp6#KYVAlq84C7QZAgsIAtL%BMmxUXPc| zH+fT*$wiHg!NHA<7$<%+&d5ZUZ(3*>S!#DOzMcJ}EYrqu+sMsHZRg&}BM@+W@@5%) ze0|BbEkO}Mfe<&?h|J#kl)Yg50%J?Cy94H!FVD;*22@Aqg?O7OPw<;aU-6l^6|g-T zDRM~1kG7JX-H(WTZ5gG+Bqo%l0c~O7tHk7Z9{|?-CWs{^;{z?XkN>HLlR!HWNzC5N zK&*v!uBgNVIsM=r^y+U!Gat!ZAV+G8rJvxR$Th1rwnP+($*9Af&z>LfLY`3YTc0q5 z`>ao_T^aMxHY}2)z0e%_S&khY+L(6%-Lr^lkLk}(Egs^=OrgkS;%O*ssT2p$$(Lr9 zaH(JN^1gEFpQ4lbw34^#(P7DHXO@_yqa(UfZ2@1Y}K5N#}KgZ07$@QjUsJS@ddKl?-o5*Er{Y<{OG>b}1AK0AVY^ z+2)y{rLg6Fe|kJ}c3UEVt@K}{NTg-~-RW0Y+8ZCsVA@YxdRKaHQeJ==~%#ws?t3v|TNXGwBMOd)Ymh zm3s@+#DiWT1$}OP$gG=kWp@ZY9sow)I*wEyYqv`CUI{(NCogmU!SWPS;^X2H1TPJm zB)R%cPP&Y|PWIAZNU9r%zfwnx3xeZCg0LDl_D_Kd{iv3O%>jM+C)UOw!+!|8@<8cT z@Z;5W5}hyr7#qM-_^kazZ)K8WnK^G`B#J@sKxbCWcWw|%r-G3)R4lQkx(vDMJTAs3 zKST&v&SIv%*hcFfK@cJk)&Il&{)s39s{G3!X-sVc(ea<097KkHNXH<a;4U@AebqdXumwx0UTTA90r54CZk#i&@Cjy&hpJA_UiO}rgyTRwFqr?dkryV&WFueB7gpg8`BO^ zO2?(P7qWR&iUns`H#J*9@y0}!(J1H1Xyomv> zE{lt6MWA^BdY>tNtl!g*b>f4rAQ%19;Fg0Z2=D1VRN@3mZ|D{hQ<^*W321<#qMu-} znKenC@sKa}FnR7PjAdenDwmVYo6LWOf)h6&)E+jrpJMrl
    f8vg>m(o<=gR)bVV zAeZeu3n*`mV?H;BbLAvJoEt;L5tReEMT{H$M+^P`#9~&;2iQvo&}(1# znw$YvONW11S?z|sLoQTj!&-F13XOyYB&P=q=0Ue$OELKuO-nbX+t=}yiE`pQn}??L zmlK2;5>06qz)(<0Sn2|^AQ2LsfpCb#N~Sytep4Oaz6c1k%S&I-VaTk&zEIL8RDKpn zuwpbvv5tQd+7KcBhjN%KTvKQX|9#eV11o&ndqJM|$Ik9<>v9Xasw$(7c6wIU`(uHN z_gcK|v**pukgj0saDezVCC$cma?;ke*;z$o82zIKR8Y4mw*ZhX4-Y>~0jc9%ZEX<% zWn>&4B71ch;`}xfz3fts_SVnm^kpruTPB)aYJJuhnYcnRqcYI>DO3hP85+hKW;6NBKX@* zlE)Jh8NH{bro$s61j0zzWlIA6C^o#_?q*5OIt98Ln?5ttd^1QTK@EaRI(-5JbUkP0 z2D>v6zSGOoyN*Z&Jl(~vsbPk>EYX=`fVC#)X{KLZy=?K%=R?x_x$6`y4PPL=6V_E4VIS7hC+1D zFA&rlxJn5GsKeHAK~l1Gs6_+GH7m#d;LrX^(|yo=So+GYklEYAZ`#(@*52Nh^GS^z zP;S`NlvSu2sL8abOdPGgGfKP13L7Z8x};hMSNkL@R>`dabj6O{kUCQ`4Fi1z+M+|8 zQKd$yRdWW>sVgf0>9fC?At1q#$W#I)Dj-gO59%As2Pg3}qxfuk%#?ix$CodleO%7W zkD4`_rNFn6z(nWLXa4bNE2nt?mC^@pk;|g;X*ZvRSyObuG65Z({glA>s zhf+TfT?#Q4R0|0fICT*+1X995(3630J~n({|A{L_ac5802n$~lhgzcVFs;D9&!tp- z0(qT39{c-)#K+=oE%eVnmOWVVK81(1rf1KSeQdld<_zv+Wc5^3`A)XY*t6`iKhH6p zD}`Hfa``M&Mnx43YhK~+xqRu8JQ#b;y5ZSw*4AAHT|*22zfc!NX7e%wi_H^xbtY>+ zYlq0?Gttoq6y^qDHsl|<+EQ$*l_-NMTM}z8h5+3vnHYcE+y_oLI$EuYljtE7R#o%` zFwCr>VQp<{s+>2J}O;lShF*+CAA|1W_WtJ&$20KCZ;G2q+ z1tEOMQp+*L;EwP)S>)rFiD^3)tfG`tVK>r(d9uY`*gW3;EJ=U%+V3Aeo^-927PN;Q z6u2$o?K*sU+n!vc?8!p6$K1i;o>!gEb(+ym-x0VnX!eWzmyy}lpP+q~@{BlEnfGe4 zB1FbNN3$lgZBn4qK2afI$sjRjRj+0xZ}K-b`>1g781{$nt4w3|b(sS>@T$K87)$Nr&_Drx~{j0SrWoc8s2Pps8dhifBqS3M6=)m=*@KC4c)!$d(U0#mAt zvKRzuHJcwbt1}T%rKN_`*76@*ux0P`5xxoDnqoIrrO;rQ)|LU z6{G6r<`g%-%sD=pNk!zLs5~EBo+Dw5{oNtP+&mnK-yX}cpUaZ1IDG0E=Bsv{HNT85 zk4G~;QrsL=3#-Q!Mb@;%cUs0Q)<0^|PtW~;~88qfe zt;Kl`^$GROH%6oBs{rb}HlU1RDy49AK9bXw(j81JPQL5-O;S>kzTToyI&9@Xdz#Yi zMTk%IpGW7B^!%=VI0G}+i#+x`^e}GRR089(D zL^g-@&;9-xZ)>7ffxc7A`|RF1K!w1SbtD)k4~`*03m$9!U)fiK1F!Ll1xw8hE)^%w z4<05zb(|ASJ&mP~_U2-#<{Hw*8NP@oCaJAtwCph7sbE8d{DZH(hRten)kkB}iylTA zr4s2|O)*5^c>CTZ%k&q}DdqhI^|QPDdW6be4BQgfe6??(PKqO0#W7v`{)GU%06C#K z4vd`iyW+wfQB$j%p9_#3M2IgC;;7~fYwfTm9cgUF>q(8oE<%D8*zt7LF zbSnLX+}SPTA2k+EaXzJE$)kOHT2VyIWs}0S?Zt&39Ulnw8LiJt#-8EsoA%R0z75OE zifb?NPCQcn-bne4MSD1#F-CoI=TZ1Uw&OS zmJuY$V$4|uJ2iUOy}FCpIEl1f?p7i$`ok}Sl)vL5Q%}Ho$!Q-P6LOadYSg0Ik`YFi z(F)WX$`Vz13Ka_Vk$gZadXJJWOyTR%i_w$*9D`0Luie%EEwiXexhVCFzW#$2gphT4 zyIrG+e!mydA1;^gcjUhRyYzcg`c0wsQ9d-k__Oxq7L^vhQ<(b|<5y;LM~%!xBx187 z+RMF%6?_&+^jhjCSJ_(Lp+W`(Ib(<#@X`YFRq9TU36dpY28YLH+S{lNhbC^2sj1_d%?@$B1%#kX)ExauQ| zug0&{Y|g4FpR_;OE8l2Y=e%ekPo>aWiF3pK^pDr-3>N#0l%w(y^m&&81el1#jxYUKczpr(})Oha}zU+$2sn2;!D_~*`H%L z9IkZ6pDe>3Wk&Qp@eH0GX0#0_Q9U)En}_2jnby{aPR0dS<>%$5HU$+P>oGrfzAP1U z1K>FEq5Yj6pR1!@?0AgbN2Pp#KHU}ak{krxLATB_TvM9q}O*drnIi=sAuIOf2Ovr_>9|1 z(SCtvy~)CXzvqv=L7yt?-Rd#jHy8w`Ur$b47Cu!LJ(hQdYJO+_0$!xPeT>uMET5{V zB-!`FH~YA=TCQI&pCFyFAB+HsPW0a>I=kRqm*BO2TbmQjx69M{_~&%Y(u)>SY`N0X z3NROB-mLcT@)Q&E-9I5=&MMCdBLMtlmGC;DW!S<}MC|aF@H4{uZ{cT`i)}^n z*()%ck@Pjp(8Ir3v3G-|evBg3<3JH(SNhoGMN{KEKW+xY_z{X;?H);xyCREX43_|c ztI*PVCLvF&hZ9r~mSbxIXrKnbhJY&AD!mcYr1w7bu9>1~%(WW)y;=Q5<)Dumo?oi^ zRWSj}Qrgcie11hnDX%y09BtB&!1z-m14{f+{^~v1m%9uzg#L}xiG`%j`4>%3^mMYy z^)m3ofCIKZ7U>9PxlVP|)h%6O(y2KnP*FW>L;p+p6aAMO=Q|jUEVI8I#s(vh`wn=c}DQlsOB;OpsZbbma zxkM;ziT(TK-8ssfcd||JvyAn%-_JcsZ`vuqpU>Xgu$XihUE}TQ3Z^-X?S6XwL;74* z2u1m#aG%y@Sdb3gQhph+PUA02un`Jgh`hI;FEd)XRIdoe#{GzY+-3PHc*nD`<00o9 z{-dxxe|KSI9R!JR&O4fCW*+a%%>@I~J5)8IxT4*VRB?8W;1n&}5g`k{@o~lV#x-v} zW$tH2!yl)mh6a)ia0dN7GF(64>D`Nqid*ALj{+YbWf#GoiOYUxly~9ML6S#)Yz948D0S zEh%=l%9kdxB^e8~f6dI|;SJ_T~s4P4du#Z0X^txCTRVPi}G#%E)j zc1WLBot>Pr3#}}l7nHPYxvwiBXse|9Wjx&4Nq&U(O0Pu7Tg_v+@v-CdJ}4MZMr)E=hU5Pcz@M5#pv&g z#X_?s0)Of5Yr5ImryhE_A^7m~1Id?NX&WwR6m4Uod9oc*YnEX_#DxMiK(_)C3Jec^_SsNP@yb=}(O1>f^rANC0I&!UIc3)Q2$7IA6d{~T8+nLKZ 
z8;SIjEwN;nz;_3jj`p?;zmb_O8hUGzy871H#TfTXD0P)jt^%!?>Rlf}Ta2rZ=p9Wm zYG?nYQF9~S_WpFYR$S}`mM~@DsBWE93FZf46?ragJ}WkCEM`g<>QdkJ!MwA-S;wAz z6}*{|@iz}LRo17#;E$T|!Vc$pj)qRdn&sU6HUYTI0YmG!GlJc7lR9>>Om`|mKF zoVzFIhgYT1#V!c4yH1vffz6)gU3#9f>n`iwy81RY0b5J)@fh!pyZXi4HWTFu7f{A; zr}RG)YS2s4sMT1XA1NT21?#^nrk(T`)dJxxs%2~3NcZ!s+IY3Q=>truj3MF&hy!Xs zP}L{h(2W-p%884~wipofjXBtvf1$8p>v_A3%2Vv?&lQ)weSdx>Zq5Jk7+1hCTc+V% z;>_CNEL)vB>)-A!mQ?0P>hjbEU%pYS-`Aw^0c1uPbKUyMO`b-?Bjpp}5w(j=Q+D(? z2hKVvU7JWFqSO^28jNw#OV5I&q`0>yB+^z2Y;7$Qk7yk`5!HI3nPr0A7Sq0r4!HMi zbKU66sOICpN!YO$@)Pd{CSG>^4j2;GeqZ>6w&1TvPrb!}mxc+D>CSW>T%?h_ut#Q5 zi3Fh?t@-8OvTt*Geho?PZk8I{|79fCN@c!kECrO9zuogIQq{8_AF%eNuyBm-`SUB# zC-%}(E9=jKB$3r-#(%5E8fH9ehf$q#NM5gyl4kzm5p+|H@@bSHD1j$Y!p7c zEb6T9U!*;{sGC*46~OT2*PHci(An*k?tB5>3ufibz1e?p>s<&Hhb{2oDgh-aNverE zwiVQ*HQ6R6`r)a{3{LNT>jbWP+1+;p6B3Nm{_9FL-(MYWvv9yYzn5$rC;E+eaiNv1_;%4c8BqEpXkuEDkPUj#kT4%(Ha}?YW{A7Y44OJ5OZy5I44*ewySOJe4dUfZTo|kVNF1m?$ z(z}={%zj{qrRZ{IEDFmad8reyNZ#hXaOE{Y;U~T2bL8oM_N^e#qs;@AX1M68e61&_ zB*QZcUk+5qvq@7jj1&u14@j=iDK8OD;=P14;MEW-c7? zV|{7w-pTe=_o2bXXO)JHO$J3R!_wo2Ke#w4*NK_vn7J2X96w1ETtLj-?(N$@H{zCh zhW%9DjCs*K-}0Zv>>UC=14?s#IRoQlcYU$zfiexZ@-$)WC3>?(Rcleag->p+9F6*U zmzrT5HUSJohLtFvA>R5-x%k`LUE+1^&H@(-la~060jdotZ($~|b1UeJ>YAnJ${&w= zdsoB^4YmiT>X;zk&S~XU1g&8$)R1`6kXc zg4nFFHGHm5%W<1e%<4h4aqA&?)D48B5oj_oR<+eJ~;FQG6$DtPU5Sr;v`JH z#NcAp9<=qj)P1<_MZBBJfJ1XE35^z8fD}Xp#*aJ(<_ClpYFe208qSK7qMJfW8RGAC?+ktYI z_JvIAK2^UycCUr@<3Un&ZWI3EI)?K|1$gr`1CobL~=GA=kxvDPreV%^wfs_LQICzu1TRdP#wJ zSO|=qb>)A`>7bwNvOLU+WRQ{_nWoYgXoN}%s8ENDFTZV_s?V*W021EEtld(1;SuFL6UsOpu@hwX;jGF5)tC$-F=KvAaChnz0z-fkNw zzGc{R3XFWd?ORNFP~x&owniSqeWBLQHeEF8x-?)latBxNIH3aa)RD(BA+w4Q+bEFX`F}%ckTTuF%R22zDH-@@izDV zCDZQYxkxj{3$czsMP`Wy^Xq>@qBO7xL{Pprj@P%N+PX8_QH*g$Y@g1v3Qd~KFl*%c zV=xXymbJJ*MF0G{Mavl8%Q){vYWQuaDTi&x^*RniDA?PoaN-mWSbRU9%X&3!=SV9W zPm%`^u1WHimLMYuR65Y<7p3~vTUZhG=?RF6Q>-3|vlBz5T8dy~rq4Q<$vH7W_z(Cm zzY8%-N3x9c?(uN>095X%Hmt$X2&kCDjx|m4?8M>(7!ZhXu41OL=2z#LGMBMvP*`FM zu^scvKV`cR9sYk+Gya0fwt>hhFA|s&NT?YcM9_VaWz%JV7QLaCTNwahg-uBo$`9WO zMQ>!%X;}Jna$BOQ(PgiE=rnNV`5XV@5N6i>z@G$`4Ptz(td@Sn!F@p+E_`Nl7K|`l z>5T&;3>iLd7c@Ky{Erfjl@eVWT<<6OG3F?e{4%E*C4Vo(nGqFm7C8QkDR5SnP-5hy z`CIA8Fh>FgwYs*pFq(+=-&h7t%XD`fd?OU=J)I85J1Svm&DhOk`Vx!|V!~U{fAGx~ z>}Ipm@YW)XF?IU>V3ntb$7~;yHdVDNbC3?dG#VB_oxx=i!3PZMU9~uIj5P#xNBb5n z8VuYdRup3V##PCZ$#Nj207i~8g~wyt@oM6FH{J`e_O)8k``hD;;Vu`!_mDJzp6tIt z`a$>kpX#aqpQ%R)Vm2Z{PyRnOcJLqZ3&{Tudoy_OpOQFeC;mUc3{p}|JM!+&%~2w# zS0vI#v0JlgYV?f)mO!@SX4+0E4)hhQGjR(+cj(T8Za>uciuXM8|=T z_r8pcl{$omg67;0BelGWE|3>1Kw6hYhK0`$fVSI#fwn|hq$9D`vc9ga*8lL~w^HF@ zjzc}|v1$&CbT0@7dVeR>fP}$lwU>+M3c&jKKGpenXXJ^|D$nNFJkw^Z+lPnVG_}YR ziUG@K(emI@?zZIxP+{_m3>SG6g`t;kOQ#WQeaX|S;G-Fql<&8$(hUK%aKbY+TicRk zj=sYOZ9^&urJ|U|0Z-(2l@kIz-pf<(uB%0AY9(KmkRL_0NLW1WY#Z}}Ay;N5nhgsU z2=WTGC@+G$#lDu-L`6mE@`{R*l8V>IY+DX@;)8>0P(p&fh;2(t$6#&k#9IH!w)x@S z9jcZ$EvLtpmo5W~$@& zxS$=Hw-e>AT@#Y{-G>fMeSuvANID;Slj;Vbr;K>d3i^xE}2G2^bNPZZElu!!TUBSzV-S+O26(m?8Pi-o0dquy(ADWdAO%ugxLCk0)|hRm@-kcn2E?5!Dv*&u^CX`}qJi zi_H)4zGxZ!eN!d=g@ol(;mHw2>fFW(m2UgZu2OWnc!!P9xon?$2_GQj09X5w-L+B0D`@1*Uz^aHm?;6%UEYnT?s zq;7Q4M5ni>YjAg3sO8f{etRx4W7N|XwbR%2tXlR`@^1I~Z*5AesP~u80bCBg2cX~l z!M%I${FXnOk}J`RjT6nuelHU-vLXcev6#ZPr7{l2i%&6T8U#J56$Tc{O}^PZuZ8#> ze9+;6mZc=4fYiw#N!696H3R?`EjvRtwi3!)FjwYA?Y1@$G3ly2F-G2{(8q-3V9Q}4 zXyA%|sl`lUXq4c?E?D#WYBgLf<^@8d$!w%-L;2$c3*=~@uV|XazRx&ZdO8l5LvR(l zRpCNh`kdHX>k@BZ%@=&oNrqhmTHR}=#dK68Ka7)HT_yCZ!h^fS+HE@*w`3I2(_PZN zFF&-$i1g>1S04JKX~P0(J;P3@F0b+>!AnW9^8TA^OBw0;_A+x9iekpowdgfKY* z2`m!FzkVX~tDjH%l?Qi8gw2k7`XQM%0Fy%;Ufow9^GJfrqmJus_#guZ!;Ok8E#M;m 
zF;-#!VJU0Mc=dvf;^2is2$kaGB_<8^GBk`nwtUCwpW8n?imAl-E!oif+Mey3&*wD~ zEIHmQGV5^N@gaMfsG_kH zELJy9-sQ}Jt}RTo5(>-jixE**cj#=~b&2ARf;vh5B>QhodP*ae?o>*mEOjs^&s0sV z+_{zN;CirlzpPh8crSK`l*&8HZm>;%I8l>lh)8g6Q zXztuUH;VeIM0N+5$H@7n7y?lQKt{xcRlPoZm`b^=aj^ypj|8p^Gs{odI32;t12(6$ zoe@&tdfA^H9j%&t_wLq8$0ZFSC<^knh3sv} zL}b`b=V=*Mrijy+Hh0f;_L$U*N;7N4D{7bQ2qBFOo<0qeRmd&rB-mDMsxBJGktdXc z7n!HR^xr%Xc{$8o*!mfMMzp&tRYF80VLRoF=v77BEr!I@P%p2&oweCpJ`2ImY{)*3 zzwJkoESZ1W@~;}7{q9uDG*9w`P_0U^Vvq!xy}rzrovP|R0~dEJ6Y~TLwlu_T zIB=dAZSr>~^Mle^DY-bQc(p2=Kek-GQbE6wMn!Y&--TZc?G-5%d6MM%`%8mQ&v_PT&C8Y1IL_24#a#ZWD6(_lN?eqC5GP0+CyHA~ zFRX@D?c~Sz;_pTy$jdEuLRzfG&rfb9$)V?O@ELpOh@Kr+@v0b3z4-W{TT!wRq>(*R z1<}6o&DQcC>##8JB2%3cv2``XBF6N4W3dIy)EofvN|rJ5Q?&RHE4|)1izC%Qcc)%Fc;m?umN# zq)OmsOI}_KYfkp9S^c}0Nj*ufGXa5A?EHQ!Xf3sK19^99Weg06(>CjGy06usj>ULl zQmEoBrr{x_h~)fWbfJKjvUScyT3gciPm<|#GTdMmv%-xU4t}tD{`+Rm&hbK1;KkZ(9-#X&PhW~Z&lT@?m-u9S^G6m? zQ~38sJvi@O#-hqM=GyPPS1Kf7P#CvwR6G}#QFU=K08qs{dCG|)e6}48uee=a3X5?^ zsBpImY%&tN$X|GOh?Hi54Y>;LtsrROzE}h;DB!-(o?vp1RHWM9l+umL+-$z3#cud2 ztpxmEHIH><)^|>we!^$+Ef)X#-K8W)<;D=< zP!fmr-i?e1O-nxB_q3*UXGB-Yi_VM;pL#KsXp|!PpyK@M{p#h0-^{9Sp>YAg2Xk_kibOvaHtOU3B)x%dQBx55j)Ru+5Ua$DtMFp}6C#Kc zLc#j4l*gk-&7&EC6DwmNQ_;W}W%2M;YCoG3MqIqsZHesFW7Nb{#Y-#hPj48LT%fPk zM4;c77uRR=LC%Bx5)rQnxo!P1GvZ%sOZp4M?}(jB{`~(&i9kR6?-rz&$xus8N(C$@ zptR^YGm|h!G6znz@}x2S@~qkiW;wI9Qr&)8kAX?j>tLrZGkfeL#FFR|-o713Lte%G z99MtU)YLFaw6?l0fBZO7?c*U%GGBe6|BRNXy3d#6EAVWEVzehZEGC)ZBi^%?eZfBmonz_IX zw4XMY;67I$Qm9rr7q>k1*#U&$pi9%y`g4=>eGh{onH*}t8pR$eK804C`fh7xh8+(H z$>kk)1-LOfcm5%drJ?rrLPK;LW1Vc;h+S(d)W^-9%NI)_MU0)}Gyg{eGm(HIZl@@a z-5S$iQ{0^*X}5Eu2L;>4PSlAIm#G8qIn?J=+TRXJASfTjtlWljSr;F$AY*)26 zO;nrejTwY%b98=369h8AiXGV`uaf2g#hpu{?(fvlH_TO(8DVHcr_}qqLxyYBHJ>4m zg22UOj%R19rKKdbLuDgBrWPBQ&^4qaOL#asl5z+XA2{`jjO$)z9TeUG4jOe^kw(=S ze43P`pvxYPyppvHhBgI7F-myv9658Ki$Ihi@B-OnG~F@tbe?m*e`WhZ+YQP+`vt>5-7 zDmYm6;N*0seNNG=52)>_Q?J-z44Y`{VWi&{n!Ha(D;t1j7EGBB)7`YMjp$|4biXN4 z#B}odRa&I6p^-h9cN7v}QTCVeTq=xVo$%t^f21Pqz*j`8xgjuQO7XS%vUUH;KafYp zDl%6;DbR)-gFG_W8WDOE6>0p5gTIyO-SN^WpnkB?Jur}|7{x5@H2}t*4oWna6=9Df zG%NU-LL_de${L?;N`gR^Tz-EUN1eWSkEk>Is^N@;Fy@?1RIso@C-|C%`3VN%V>c;- z)OWf9rFHlMeQA^;NYLLY3{0Dxa>T12V9f22`uH@9{Gu1riWX)XTfyE!f zw#AjjATp%=k8PhPt$FjcK|7+WaYL073V?|q)toQfN}-KzBij={-2B?KVpNhN3#5ri z`A9c&_L>m8>`jj0S1r z7BK6Lli*nI19G&<0SqQM{xTKEvKipkVPV4vNxtaFW3CaBGG6!EI`RS&d1ue+79Ra1wxS?JHp4(rHB(+ zlCn zGBd|2k3s<|KQNIMnn7PzXu=ZMDicSZhUTy+`Gu5v)AH$`DhVn{IL1!j>>H=H)>XQN zdNJ2MDyFukY3FUb_PjN*yDu~7BBSK!?sKc-57$!2{$b_9jMbs|Hm;i`)GZowM zXtm;phSM7qj|nD)BiALEdwi4fe6)`9&Hv|#%Jf6>O3k5$nYKV~h&d>IA%sx6SwZzCDI}A=VeE z4|PutN&8%;-f;>#9esv-=E8>jzInAfakIOyfTu9o<4I?HvTC|)8Cm_vWI4{6A#OWG zVEC77?YiNJ97aH8hpy`R@bUCWTkQq zgFSdYUB~PF7sFit_d&nI!hR_cBQwgbraL`=*!V1|ws@=fi8s5ynK zKCi817#0*ERrcpn8(rf0$edG$uFEi&YjS6=IVN zM|D!t1_uz}Jj_63cljWYQwd~>P+-l=_LM!yGW63~?Ul$09-IIp<-1b!< zT=RMEH}r51_1OA6V0OOO%Bh<0J=j^Pg84B%x#}~#kA+KYS-v$<Rj0qwR5D-Or)6Yy;K)yO^?V7!S2S*9g4sAuXP()<1$wT@}) zdaKHx>M8R8sIWXT*|fdd)%mL~VYFAFoNvm^`AHIE{VS+Y^x9g*v5Ef(dS5(eJ^Ur` z0xeHLVM%rZOy7dFZ8_H0hPM*Fd?%=Tu9;76mu_mbxCLG{-uU+?yJX->-0PPL`Rr7}%-3yDtt`lPyxU6#(Ner-0{slE?A zx|u3i#MpNz);Unq)-oFDrnBwUi;&6bb}8weE6};-jjv}wi)zmWn=Lc)GbMj3uTN!A z3f%I!v`CO>dF4Ad_eAFg6Qoq-k%O$Qf4ghiZ_Ab!53yQr-O^fz*LZFQT9)PXwou)1 zy*k#T7BaT)6Tr__>~3%<9XfolT(&eUy8VO;RKTCm|j$*th^-rXd2rh$2CJN%c| zDA*;2+%4a)_3TmoiA=ZD?SCK}rNlrbMb9O^A;XH}?^3b!V=Evh;MZ9C#6h3?-Pydc zVNUlg*^(SRiC0hWzo$h#4lvuNdVw>RNg5q!8tGtgj*j$IThwEC^CJeub=}a{Ic|P^ zbQtMAhOl5wD>CO@wMPE*yl=3*<~L4hzc&~7XTe=MR9(#Q69*HLV^8DmYOjM$Hb;qK zTcYorrV(RdDE{+?d#uKa!3!HZ*tvZ#R$4mh1DvwbXBQHwP!f 
z)TQ63dPctiYT%5`Nl5IJ(cCoqaEu+tK(#PUhXhtd)pA0_P^@&pO)G0_GZbC-cRG5c z!;G2h;!QWi&KoZO5X6yV1%8_7+6Pq#ZwdOJw2s9x8e3jpN|Z#JQ$kWUIK!9K{_& zkFt%maadgUyeAjT@69m(pcMuyOE~f))?7*@l^Au7KTHf(aUUJp8&WzUwJsZhUdRCn zib^ZPITOS=(M6pbb`BYnPExudP8(5DfSV_^mm4XVwA?3qFupuc)%4=_wZILS24N!1 zuu50g5n~ISmD+gI4rP^e?S{a9^xvr~)kSj7<(Rr$6M^BC6R}R%w8MGPJ={ z5R0@EVnj4b9C-EtYnxJtW)8efK57tjIR+L z9k}M2o1dX>JyGp2Ha}nKaA*)(-CN|uU^S>a*RGYFrnTJXSw*|G8X+?7y%8a@o}FGe zJ;J4)c-;yW0LO7$Hj{4ep6z3A=vr#{bW38;P0P-FB=UBLUH^7EzER)#w@_tM_ShL2 z7$CJ2tF>$sTHc#sJc=1bi=8H3ANTZI@%wWp+c>zta*yX*T;4Nh?D(RsWKQG^Pspaz zt$}4eFL&_{-v*}~0guuxt;9X|lJWe)T53RJ&Cn_Hc)DGLL7DA}Iys*yHM9);u~=tR zCDruTScXth7ql?MNAxe~PHC#&F`T%wS8_&Z$oPWm$ll-?7256i!~5CoTO56GTtnqF z?B=`tL1H3k6^1^D(?rTP^h)+X5(6#)}Libs~+5KIDBQ zVr+N-A)~vj=n0eai%d=OX)7?5_`sE^I`MXoFn%rOL^_d<oxEG?v_hZ;Ma4Lae2ug+x_*ej#CK++WBZ#YQU_i zms6NVlUwj|$|zoxV&WvOpAW}}xgJPdx>v4N{j%Ua+XjM;q*VBjkBVr@p>1Ml@!`Uc z>0ANBlR5bnKHIB{ii zCcfQX`uVAyz{xs)JY67@l~154n5y#oaw+^UXa4-c*u0u2ai*(!9deOHAoY@8r3dma zM&ZieoPUkL&rf!#RbAL*cPYPI^$sT`WZ~R0FRErE9H99N*5;MV@gYOclrHw{y@VQe zglZu0>r@X9m?-uL8V3>ZSsQN1Td)oH=ZO_N8$KECt82n36|$&)C%FKxqVQ|^ne!Uv z@2An5(`smC$3ytv+*dV~4)lze%0g*w_N}Aj{+i3;MQUGMpOf!KpJEGK&8U-y`Hf23 zUtB0LJbWLiF#Pmn#UjF$VycoKjNE|3E8qlNBahtWF;LH>Es{|xrLeZP_{HS6Z?e+n z?R$d)n&0fxYa(a5)S~oPoXyt^^o;_#)%>CGWsz8^nA$k8r)OHeHkN^gw3Y$Ng~6 z8ZLjI=al61_DA;@FF2hps3Ym%PXjb0f_|DM6aK_>BuMXmj$DyWt$yLzv|%jc6POcZ z0G~v61nvfhJ)l+meB>6CaJOj6F$m%avemvqu?UBa1Ga}%Q&V2rFY+?a*(dTn7`PCT z?%*?FK*D$>idpo_MRZDRZCq@btnn-AoLLd)39&R03@p^5lTlaQ{hEZQsBHno z?MZ1G%}=bp%(%05frWbEueuf&=itbq*5aoJ4W;XK6NLr$Dp;k+5%@#D2{ZE36#^Uc zmt`CHAr0vJt8d%6o$v4Y%wol)Z>a$4$N0+*GHdxJ~VsbEFyRVMqCr ze5|&FGt_>rX~RLjjJU}^1m5p1{SAq@g?CRzMmv7(U4eP+(uG3bO;Q`WC64}O*S24o z6KD7B=3`LG&Wpr8IP~NF$xwLm*$WV5(0IH{^?OR<5?gPcc(jkzc25Pitsj z;+Q7xsM{~3f7UR6d3k}R|C%nsy}H<=Y0FrK5y#gO=s;W?o`rC;f*J$7L5eM_*|HIP zgK90bcGG1Dj^87@w*CD;|70?h0J^wQ^QyRZEKTpQgwt=<# z=LHleGxy@wGJEBL0D<$J;f;3e{!Avf7fqnnZo{p2&Yx<-=0Co5X7_j>|zAoI?6_%dO3CP3|agFPT4jJ zhCrAw{|2?Vi5qVK7e34k`V_;$^ON>##mA(TWV{zKl$z-E+N~Gq9{T;|K->?Q8jiz~ zV~DyV@OoxczLR9z5DnE<3-xPEgX{_Z%*K^vw=(CAk8jEdT8M`^>m}bDGf!1HVrpEL zbo)E>P~BzS_lb{@2x8kcGo8^Edy7P8CxKF#pTblXl0@8#uv=!s&o~p%|25;zJSD5= zz3MrpWYivdHW*g(-gBn?=aWONj};FSkG=ahf1dUBG1xd$j31#RC~hcd_Z8Fr>1*o! 
zK}pLV#$6_Qn|5L!2yfL33RN6g3&UKKl~~lrBwCZ;w&|V11TTB^*;STYG!HY`l(jQYCkKdL|mBr5mr#FTqLfCVz}g!}yZcoYi%+VoGr@DI8g3tM zI^Py!BRv&u*wXpdSLBFO)@Q$$PrTu|XY?>rv2{Bytx_`Do_kgSNtE;u$__T^Ukbdq zP){@Ih#S*WQ$K%BnXL(5OZZV%7$Ff?))Vr{^!O6RB}ocTqF=w`8-<;>S8bR&A6nwJ zGU~QARb-4B{)bDwF=_65G>Eq|us1Z*e{C+aZfosMTiM#2jw{o7w!d40)H#bOf8L)R zJUHZBM2);A>0&?0?1M|5@WyGLi`fvcZ$96?Bs_Wr$3by?IyChsRxkCFbwYV$k1_#n z?K6=>7AAVk?w5bn%`DJb_MB|uLg&m~2zAKvXbxUO0^JIkzye)%i~re?p1b1 zZb*{f**Y1JkAcgP`_N@^Af7u%ZRYfog!rqp!+MlkO_z9~2Y;J(Q1DN!-In1!<$~dc zh5A7yhj2;QY$v)*m0O593|i93BpEF7v87MhhkF>&>)1*oA@;!@zCQ8RV2`l=6De}` z%QLdl(C7^wrPc+XahIt8Zy%5KJf;(QA*h#RlM{B9BzDJG!80;zM=pCaoLV0V=1An2$#om6d8~6(( z4^Si2i#yht2q`#yky?7LJjJ@kyMw$Wv{B&SX@v6+g}H5%b8;<`dRmoct{aTr2g z<)ji1-}^OVB4~J0xH1T@JSI$+7GK$RMplBDgUu}0>$%~<$;)%n8M~4 z-)Uyfm4p%i(VItxS=i1*P zLf1~w4+;m6g8kf#+D${80L$`fF@~-)*FJ6?Bz278@w=yHQj*9lPxd~gDn;X4I&?dW} zj%Afj=BO9q;kgZC3U%a_2PsDhjjQd~s2wod!)?)j(sQR_#TT1;2}=(lxkR#W)_(wB z3}#P!aOV_EHE{PT73c|h5LBsqKKEgwkFXPmX**pSl%MB5HLv{gC)o&%xXA_6`FAWl zr{s+?!t!qe^`z;ads7#IGu4iRphwKcT}s#51SEBIGIS_Y#k_Y$NTT++GZmf6P|e^_TC-da?y{` z|0N@`FjxQAAFcZODyNB>>CP~(U$t&?-GzogA;G+DYXzesL~nlH(v}wkFn*)15X2XZC^Q>91L|H;}9A_S?>4YqP_UsFq4Ds*v z1C^|JC+ZDm6NyMRH|Yo7KbxC^ zs!r|`5DlW1wlSMl(fbU^K+5;)yUL{QY+Zp$Q{l zr@v~A;LW=dt`+b~%o0ubvn@xF^f~0Qv22@J5(G1$~fpb8ECk`yw8=4jvr%Zp_i9>S&wX`;Kma z&1JGo_%5QV%B|PCXZH2U$teHu8XxtxgYP< zw-r^D6?$BaI?KPrgezUFG2H5=8gzTeLP-iYn{BXTq)M1K3tgZx7@~th`c~8n z_&76C1RU*QH~`7gL|15x*~&O z(MvFcODjt{l>oTxGk&6uqU@z@*4_bzc8T-ceeFkbF) zNXZJs??&+fPV?GU)vOW@23y;7_jrqB2MWlp(31E4)MJ^|_D0#Q7Fur|Q=8P^R##ng zLF#YI4E|NIKj9fz6ZEOxo!Plhm(qc2e2a{&Mr^&3X?&`X_{yv@)dU_Y9+&G{&Ye1~ zB+2deJurNu*;y8A_?sB-TH6{UDoq?{-qlC%d)b&EpUl#DMIo$)NA?!8`eFrOs#{xu z*<)jHx2Z=0+=shN_9ldhy84Qn17~CyiP5a5DJijeC7M>Z{Kf141&3`5^g&QzLMGld zl7Xkp?0#H)vu&L*2K|$oo1<01A6s*9d3$}U(E&Y~^A%&b8M^s=@f+RehnF5+TalW| zC>*+*Bjs|}@8#m(R(hbh&`PgkEC6Pi-t3f*3L{g3aEFQ&#t%JWS*ERMSA}n5Y{zMr z=#lgBu2?Kx)kA9aNb~@c-u-ON6)zht5a9gQOU$w%=%=rEUXyE4v{S_hFk+JnRVJ7t zZ*NdEZ9hu}0g4$K4;zoztNz)f*o?ER{{0&|D>zfjX3J60PZ3nZ(7V^uIV^b++wPQ9 z&qHU|z0b94^FObRpcgJM_GM#o4HIH^bKdLE@1D$gFkv`0z!_gtPr1lcUBEFadVlk{ z4MKR6Zy={o(Wu-oF|yH3x%MKe_o1|tbO#JMwUY8+m~cZmQ3|N< zZjVdADo0VJJ3{-?mBqRem2BhMq=&jP@wUY-d;3n;D(x-ax4E6wX3b&yVp~@ZRkSiC zBX3JwAU_qf**9UhlG-eRTzE}#_XtFX{lj;Q_fRY%N-OVQ09}643s%*I=Gk^O!9LS1 zi&q*vCA8|elx}L8o2M|LV@7SUeyL_VEEzOwj}jNooR*jHY7s~fk@fM>w2vsEZaF@0 z`HP&6bYEDsaUon7@;HXFQ@kXof69*6Zi$k%DP84GTy?Czg9Fo{?X{WxP}l5?)q$Q@ z59aR0TJBko-t2Z+iX3g;GV-2fn$vKTW6tO~An&4YBMHkrBeKEm4TmO|92Rv#6+;~{ zwJz2x_O*uSWSWjB!S~rC$)VQ83+e@8l?Y#j+zrtW%l9ZI-a0H2hFK-rM}d(%2^j*l z4gAxZUnnFV>)Oi>3f7ugzHRBer#&cld?w3fO~Ogs-zu)LH*Wn-Nx{hM#_7%cl+qjy zvKjwVQRCv2pLv15_v%|m%WP)@_|)MVQ4xk&s=$0m&?ROG_nj9M5P9WLtP>Ota-3XxIx&nMwu(r7Z$;l+_cz~dT-+h|D&Oy* zc4Qv~aCID1bOsc9degptJ1D_G6=L&23* zJs9oM=W6_8-Zs5MZHIfpoKjy!Cw@plXM>a6gUjM}5;08q=eAnhlT#1Vwd|dMxP_R@ zmc4X>IPXwDx?>X`i7t6p?^6Q@?0?i_+~1bNgQG+o@S3MYORUVsGxzOI?`}@tdD?HA2&$$ihvBv83O$bK=v`{@0zbdLcRA; zNa(?OSPrL&Z=b6nWYo$bA5FxswufD?M7(f=eJgW6yZbW>IRV$3<@q6iO!T?0#*)dT zUR9x#AxY){{qp>F&5c+5w8Wx~Qhf(G?H}i)_zfiXpPM-k?&*_r zr(K~dpUO&K+3z|Vs_iOAwTvB*T|b&_vA(q2T}s+ySk2#S*f@tDds}s17xnhpu`gd1 zOvc4c96DNNp|7^;mXpDwseTce&CBB083tC)%9s(W(y_)vlU{hP&LSF{m+0lG$41hQ}HH;Wp{N)`At}dS-o083^hlD zX5o&v`9PMaZGFSxE##$6M@>rwp|bUEW-{NXVtXUAl1b^}*!J(@O#+UnKRyjYVr~M)hr^@;2sWaroe1p^FCA1Ks$7|By zRk2|q1#qyePS>W(rQi4;K3A$Zbr>rp1yWNRQ+8|DR8^M8MEFaH4y=T`E0iNr=sm`; zY~R=ZIVG3jgp4|2JDR)&`kb#v9}6wfO8naVzOd~*Eqic0QpdTPhy)%|xW91(0&EXM zxkK-=_u~cL)-T|`ER+X){os2krn@p#O?k0@h=s?Ca_pgBwkvPH-T)?Te7g>}KZvKN z-(OH~Yb~{7N!^;;grNmXyU(|x&4&e|f4xdr&PkMnh3a)c#~s~MHR9HzBbOU`uT_T> 
za^|PZ>S%O38dK76G!!DXZ^$!d%y?F~m6@NXWL>nNXCtLS0CEZee{RgEN>-ypLJW73sZcKTrFg7(9ivr*^%0D@_uC1M;T$x%wE*3 z<514eIRh=x-(p&udeACRdfaa&h{Zetwbq*K)Uc-DY+11msuhyEIL_+dE*@$CT^vn& zCmh?BD$&;JzPh|66t?uTkatvH2bPb){#@4EM{Znk8?aL`Wf$MysvqlY*G}Yc@=+|K zh~JbKFC~cF-B_795Z@|2gQL*x+O*y8@hu7hB8#OWB1PDO3H~`gge{V{jQ2BxLxFi} z0G^pNarr=A$;@OaLbDQ1A1jONizZywYvhlAhM|)^sDs=R@h+p=gfql2q49 z>AKYBQn{6$@i90mN1Ex!_@-1f>YTP^FO1Hs4?Oy#uYB7GG~|J80^hV&Fo%G%0ZL`% zp!x=*+ow{vm3!#ca?UjZr&Ry4fqK8PI?5}=)xu5Js(nc$BOzF^UEB9MHD(LmRdgEl zc3C`Nf|%}CtLdGFCy9W4fEj~u^cv8!AK_fBp3y=ebVlT;t&LHV&Sv7Irq1W% zkWS3Jb-=YE54gx7=LZ4O2 z91qt3D0GUqxfST$o%G?)r=Mv6cM_s~Hyg9yeLp+Hh6LDKawd~+nz5eNk^tMb|3{ua z2XG+ZX^!CYayZz@$yjFi{vaZ;d=8(OXp8)yCvtKg^^=(oZE^u?kLGp(&8b()ndh=} zS(M_*?dt140S?nfnh4-3y6uNy3}5+4i}Gtj|({S5RQ1gI)08+fE5I&#NIm5>0r2~SVMxIAwsgTyj&Ls@OXguKrvfjj)r1a+csmNBdY%8%KZA~)b^QRboNh>I!9cd1+a#`&v~OetYHx8wf3D^G z`V^s6zKaZ)4u-4p@&lR(lFXG7XO<@OPl@=&7ewk#uKa*w3Q$m*S~}KNE%ORH4rDr4 zg@`g7&ot!c|B^|as|9^@Q7R6jEKj5DLbj+yshh{g~C>r3S%GkOaMYttcIg zjOyJTocUo7)&}y>Xsw*9R{^_I-v&H|-&MCXpjh>y#%ptZ1_96wCvk6?vI#i86`u_tlb)J+=xe8^GCq?3s3pm z?l3W&Lc6?v)qL%kjID6PeyTJO%X)XIVgD_ylua%7;a#FRgWz`@SS|2KS&a#OI1DKfG4k> zONOZC!ve||uFSW=gobMChlKSz3sZh-&AmL~=}S4s0wfRDrh=FiE--yk09ljA+JnGb z`TCYsJqhXv87cm{eS=E{ z%kS0F0^cH?)h+tLI|78AjQqo2nQ5DL;={*sr;zB5_?xyR&72=)F0ae|@gVbnBnYED z8ldOk*soh5h%4x_x50v-j`aKMWWvn#crboZ&3_*lkuc(rFE>Jxv&nelDCIbGSQm&9 z-#r(ugisX(=B|mQNUm8Y0m zGa;NWS-PJS0|N~xQ)b(%m_|SBI62KG?S}D{u4o?|0OQef7QiUCNU%lWYjvfG^jd$`cm5c z=d~!}t-svRtRNF&h`;^+S0C#C>hXZ?{=ajYIO+bknsrAN7(o4%YBFgiStKFBX?&Sb zM?ezXwpR&zCV)}BIIz5&r#0BHKMev}(!Tcvh{uQR6f%=r(hJx7DN}L3kwnw@SCILO z%xI;9tLs?h;eJo*SYzWtC3Pa1G)2IIfCo_!Ll}i4_SY9LGKU*->gQ0QVWmArToUkU zK=RE&se^2J$vhJh{D^qMDSbJ5me~>{6Jr{_dIviCB=jQLE;#b79=bEBH>;)YDP$C}%IN-_@ z)-&I7b*{N_CXM)Uo=q|@C4PjOOz|S=$OaPej^HTQHDtAmOp4WnmmUe!4IZgD4%wQxHRV3M zCdB@48)%Q(Q1fpa@(bPANR(l&e?R$HEq2%0v6g#jINSd&@%kDnL=jO4Bv4jmih)X8 z1QWkn?)21w_^0$Ei42P92PwT`i>jbRNO zZqnKEi*07Bvaloz3t{3f&=1)vW%o0Y8b=`RKY&x8D<9XKsd$M|+^ZZNFL(Go_YNBO zl+>0aqQeW0gT~PaN*6GN6(d@09`@UjZfkQ?O7<_I*`gPKl5(%1wTnqbIY|I0a2n|o zetuWKasH|BMIq8zN0KIAhUI$s?uHcYqmuHeNUZbEN8PxSl>3=3Ms9eUN0VM)QBD}u zOl*P7QMpQgr^^*!rs}V7DoQ?z(EzRu1ma144u2yz6*|Vv8%t+YNq1Ty7ubXR2|b%> zzCI5GGykIzEk|d}1@oDy+{Sb)4QL)GwcLi8Jj}nvGDg8Cs0O{@+9=KAr3+c4{&>Pl z?vGd?X8;<3_meMFJZi)Yd0vBjV4Aj<#h;l3MTOERxxzNb*prbuRC>A z^t89)jL?xH(e5yj_QiTPqrv>Y`%vcRj`x1#QTIz?;`G15%@A9tJHBD{E- zY$Zlr>xh5ze^5b|a2>4sm-crILXjXczT-q#i4e(nlbH!Pbx#RfPK0d&eys7Z#pGa) za6s$Vw>wOZx`Z$GsaAoyIk~M>q*yGvEIYZ+$pp27gMlA2wCZ{d2N}edQDzhByBntm z^{oLkJYsX2$Y_OHvLqu(pDQ1@eE&W1MdW`{>G6NbF(OY6VspUyYj01~S(15rq)0qb zzA%#@;y~n}fk7?ShvMM`7f*8Q^GNSu!z{*Q!fOfR(bfhO#@gD*riG8|zv?rCF?#p89TQt$pgVgbn9pP}ZVu zp#^oC_=7k85~U~J@!fheCY~lU^XSsr=cjk5+Hh~aeHR^m#ME*S^HJ_nK$bRYPw8)X ztkpVoar=!-nU^M67T=uy`sjMpm5Gc=Sz}S%V(_Q{OGf5y0~oww$N*M|#q)r9@hG^L zv;>o;$c>w_Bx6nb#XsN-aSM8MT)9PFe@MUnPoGqp2MEXDD5eBZtF;zT#fKB_U0V9V z$J=tI#}8_NZY0n)jx_r1Z#j%rIyu#ShPd@n5zoMJeggYMnVyen_$KD;oH3r3)`&Xx`STqegf-@l4$4xhC}fe8XK5{<;SsH$ zo@eU3^$8pV^+=s)jQokun{I6cv~K*?%*L|jt#(;rG`MP+#RP=i|9p?8yW2WiHVDAiz0xcAW=xD@zYu_ zH*$yM;rFz(8u6P0+7?fiy{Esu+P3a#I9NxQ&CEz2KKB$8Ru6yCnn9rPi-2o>V2g{O zHhsax1FZ!ja6Y&9VnzaWULE5*gA3@5x3L2$_OS_2*1&gTyPCrjivq=gP}{e*y~k#N zX-J3vAev@e{YbN?E6xOO>5BUvZGvC_9^KLsMoUYJ2s*BCc`{F{EizSnu<*t={l%~+ z@~@_5IzVNYnGR;ZgGT&t8*>}W%$T~O|8#>-lC@}}#sS6gWGOvwt-X7=_35IxA-pq@uw-o_l_4|6C4Xgx_En0nQcz5t#;X8SDh z1$p#+U;ya{3YX3#mYAm8_l`9*_F1=Zt5^ZG(%#qh_4(~4Y+A@xu-^x#xKR;%{r!7= zje>@PSF@d1UuzfJP@rK&)Btx#_y^)Y(4wjfeChFeOXXd`X)%x%l5k(Q7|R0dX*!&I zzwY^Yb-$qOE*y<`WrrGvz5E5>&$kHoGyAw6?kO^J^Ag(pxOm$x2(ImT 
zA+^An=~N`!@PX=l&>)<-hQ2H;(t-KdN(|Vx`>V;_$b3OGBco;yf&7cV6s3XAD9_2f zYPY4POHLTuI z(i5ke0}y%%@kLOF|F{S%@gg@g)#1LoGyOS(@J3M0#iq*h<47{8ERW$!r?$89*uBjk|uuUR2^IuMy0(rDvCCMTH>ZC+KcJn4=6KJaR3 zD2REycF-N@E)Wm#JER+nEp-jkDJT+VOcWHycqCNA(pv?svvYbYxSW}ppi6+2IoY(#fAtV48VcHsfAQ*- zD-M9}86{s@*e@qbMcdPvdE3tQsH>(*H;%tl_`5_DK%?bt?Wycxu*|?0kPPHg?S#NU zK=!yE`U`QG@Y)R}sjzi_d33Xhd3AJ9H!7OfI#!zA-*@LPub=163*5^=6G4LbuHa=K zt4%#M1Ky$66+QEyu8&maktNIIZev+G#5Fw|Iwon#TlprP+x1o?e`~xQ9M1};W_DsQ zFMmg;)B3&U$5l$p4ro+?rjTt{Y{t#4l z!5?nMvC==PDlRTBFV-k$NbX^{){#+SLo025OyKHB#-EbTA>E-LY=bx)GoDW}b)r}7 zs#Io*{P{)nyG4DK2t5h6#gP)NoT<64WDu;~uoA8^3plCGYhAA#%jc``1JNiOYugd8 z2VJnAbhlju^`Md#8`EPPL&Pt`HN97tx|*&FaoZXXzz5#=BfeMWWz=4;U^8SbGvz_c zA1*BT?N==<=%>dkzW}X=D9Jt0uEY+BgA(`^FqO`#!h19zs7ZK%=fqvMo1$cFz-Ig5chgAFV~fgwumIRswfzuhfC1ljj@%DOQa3on`0JEaU}{9>J%(l6bm z7O5;Y*8Zku&t=can_LxgIjkKRlbV-nW||t0+(X1=WlH_Jtm)fNQCZXGj{)^Zc#xpA z?{=M_^}3r> z`@kQxzQHWvo0obGxe<@{^q4`nnleyJdQI0TNlpmJNNFQV|j5o7s-A9Mi0~63*05fm5;i?vGKiP6_p&b!!i* z1VF{F=LTJPtOWUH_9y4-ZqaLrn@TRqRNAA{ZqmQdN}J?LKFPf(!(c9~!U0=O;3eR< z6L_ok7naVJmA@%?U~^tG@cV88qt0wpmKU9w@L&FJnFj7=F?1DHuW_`>iCF5dr3K%e zg<OIW~ z9?ok&fmUZ_<*C)8MP-R4<12`VC#YL*S?+Fmm*8~TWUWs0hDX80g{0DWA2M3!+0=;3 z{g(Bps_hsK&0W#lsN3Y3!Roi|>*lQ-4%Ck6j6cbEr|?mJ&RSxeGrnnh+h()xN*Vp* zS)q-PxrD{C+QHC`uczJWm4nAc*E@7~PlaqST&pY{lXB;@`1E+maoex}zW8PIZn589 z|N6@S&Nx4oIYWc;_8s@7`;As70$Qpp@W{2KDks8z<3t1IKzPRMTY<0b$p?mPwAL(( zeww<=qKm76ld}^dk%v)!_3kU6=+d9MyHGKDeG^}V4rY3;mzf)3f|Ob;a~Mns$v-qeau@QFWuGJagK zC!YJE;2!c~fy0OW4ihG^E2epa1gG+L`x|-#0o^XnG(OXcysl(lDHT4shzl?{96tA^ zQsZ-BdaEcr`lR4~1G+Oh6-Z!+JEyUrF}3UB_9%y_8WXnZ+`@|E*1JTO=0)1ya(AbIWM?hoh=}-JKfEE=?|WL1 z_{*+94pRor4_0qbr#@ZOp7f_kbDkDW7>-FEFVx!s>v{$5s% z3tkWjNKo?}=vRuU0|x$`^*2`K%?1tBpO9q1t0H<^X_I3&G{7;0J@N1zj4v+M_q?qL z`*DEky@1(6a5l{w;^PEmx|iwVw{5*@@m4zdAgY5@NfYdA{gK}JT%ioK_c<1s?61|jS34YVknUfs+0Wy*BB(T}-y@T0BC7>x$sfM}E%SH0fv+DAB@|v{ zbQia^NSpU!<{y&RhrMHqrCuAurZB9I4Vh*keMy$AK*P~5&1|O|gxsi{6ZLt^&sw7} zNcOIesS5;a$+nlpDRuQ_r1d-J(70Kw$rknF4yKp~UZHx$W0PmA$q&bhj=d_UKmuIg zb4t|=cmIP2y+%|Elb$m6Lp@vUotD$vJ>Fh_e)f3R+D(J%0z0EQfkj*MMD1jyU&7(w zONGxF%H6Je6+WMO@VLt&V667=oS18$kmlkBY%<>fjAIHsEt-99;kwE>B-Y3~pM^t^ z1>Hk4r+F2uO`Y_D9rGWq*IL#uJ#ZK>`3&c9Ftt41umk-^gV7Sg)JyNrpUN$gi;^5} zS-NOfc$3=A3JIs_@xQ6A@#E2qzFCiU1ltdR8II;2?f$i9MM!&2?PuZh9gRb0jFFqP zD)1!{m%&x6ZCv)1h-GYIGULBak(9v;)86Q(k?`tl1&?p<$z~`Wi-5T6?w=m-{4()z zJxwjRu#Q06sL~?CI(J;{GDxOd3k64>H7RGAYh;EH~ZyH_LPXY84_&N043& z-(i$(BS&Z4uucrjd*Z$(h&rab-*v;xZGbEn&{i9>A3l(ZYHNQ&ca8>d6_s6*bjdLs zDd03S$u_0FFfib~+qktA#aU3D{IUG@JKAI_HduEHUz)}_lJJE4^6bf*k`$X&?g5e0E+Ar9kX`dGQ%C=+aP2?) 
zyR%>>g(zOYD>SWDkk39aZvn zm4D{^qIco%&60N#CKuPuEp-|Q{-`+7ET4J#4;*;C2Bu%Qt9$)ggn?x>5lhJaLVou& zzX^L)+hG6j<2$+ha~JzJR#0Gq6YY7Wx_)xWx{$AnoW~mb-Q}#~1=pGXmb+c7#37g< z=)5wTrFP;I)>ODIf)d_HNEq?HSc;0FTNISRNWg&sko}xv882&Iw~5Z&-z!bCR>0oM zFwAN^%zh@`xUc{QGc7E@jCAOz6$ldZ;=ab;Z-Vq;Hqj+ouXN(6;|S&ra_nN9NrFA^ zm~HjOfgd)}<=Jc(T|p++4YO;1b}n9EV|+S6Kn0pLW8Erg;$l_F$ZrQ)YIVgbCh!NS zzhd1sWjTa4;?%}=XC*}{$lgup;~SK1miJXhG{VuzLwIuUclr~t+xU$b$pSLBQq8j+nE zd{VX@jeg#E$I>a2k$f<`T=(kJD8c(Nz-iQNc#)l;_MPg?x@zzAiR~#rj@YShowyni zVk{qCd|=Z{m5%w{Ql_0pFNoq)Qv-PlH8tsu@*2>GzixHyySCq+@)@m zoFaBA7qlXi`Zwt~k->kY<1<@_-;krvPzLgvIm=7Ijt=iVwc@})vQIjmU*pP}PQQuN zS3E52HEjOX2&TRNYSieKiAteC5&& zf{DFvnM|Qx`5_(mxwN|NA~h|;&M=Fpz(0Y~$qN>W>(wgIUUXjN3e-eZtP&<o4vK zH``wcrXgBc9Mh7PA8BZCjb)PO8$L|npAd}A<;D}_0g+Y%*xZ1#dH!L$?KzZpGDA+k z7PMcF7)9Zep(Ibr*Ak_cwdO<%0R`jaYkEY%%*P3FX<2xKS$mU5OFy62S@F7Me3 zGY)S`n*z;4gZ}nMgz?T^-}_9m@E79kgrh}F;!SV57ko8| z1T|&iy6Y$Aq0(JBMNyf8gb%pceNFq@8@hDG!|XU@^FY<)XM z@?QLGI&Fj7Hq|Qd`qMC!(-qBGlyv>@(T~OSbUfB5BVEwDfB@c&3A9vqJ92?3$4GYW z3lCRpDdB9ik=s?9^PoWV0GC`>MuEJ#jsWP+R`U0$F_g0+*u=>Sq}Vvzee%n zj;2EE*Eu_+&BFqz=I2MEHV9*f6Z~?kJC`ZehvtUIrQOIo8WnOr-Tj5(9l(W|M~A!f zu=K*DufD-ay1#$$@M|H5u9@EB{`7RO!{w|jO#SdEWpTTzG;>BKyzheG^t;Nc-@csL zalh8KqjZ+)6Z^=2zxvBJ{TaKZ*IeAu_99l<Mnf?RW_ZgWyt_+c`V2+mC)LU#eb}KInx@L z9z@(0{lMJF91tfa_Q)CUp+fhGKILl^#r)CM;^N{!#%vgqY)ea3`CpFC7TuuBtVQ=A zoh5}uX|-~GL3yX@xo;zy`9%Om8MrXkuc`N`9)T&;FqHC?El{4 zu??^#IGH}R8lF1#1jF-fQt2TbS^D#cX)BvWCs6y0|9OrqFW(>R^7 zEX2~sBrWH3KrfHQVkx8<1#XRv?Cxf%&CUWyAg&$rCcnzPgTwh&39W5RY1Q6b`_gOA zf&#)@6p>_bf^P8p(_qrO`VIyzM`!j)^XjyaMw!D@Oqu9_gr&}*6CntE&QH+L;tT=k z{H2QyU1$HKal9J}pd@4B0P{tTt*q^b^G$9ishwoYm)udWn4t2L7PmwX1*52EgPOyB zhvq-imT@jz*-Q42)XL9g8PF(65#UwfXEafS_NeY#Hh&71|fBd7A^Jb%`#T!-`#jacz&jovh~O3I7^rAS8m)PtP5WpV6- zk$8#k&z>^2Wvz+Rh`coPE(g%6shOUH%H^O3gZvI5lw@~Vd+aNCG(=l1)wqz5=1qX`i#*4Ad&_K|JVE0`bD|%=cK2Ftw5G|nUieDo**uiP;PD3gdOI^` z&Sgo;Z9VKzf5xID+WLM|OGtU<71N_gDxGP}^mBB8uFkz1yn`xV zT%#=~hvbV4VLy@|>^x&{t25uWw@IpE!+u9F&9h0gNC$K~3e(TK)tm}c`6$qX+I)Nr zR4hcrvcU??E{_e#LT$#k!{OHBWrauXhkG+K*k$*fkdU>>@ojr>-S}-_`v~vJt!;U# z+MOP514{&lBKg=okXi5%9V|!+j^}U~yp47o;w+50@%AF_R;9M9(3_uBbpme^Em5wV zb6bS8sef}>ve}|7=OUVdldA<|$bm&uoi3)iki*esUOdv2V@CCHcBrUTd&h!m-LMt~ zy{U3m+GZN>%aF?{%}og7Iyx?1l$XDF5p|$lQ*>)AbvKtOH|d^wc{OD-O}Fh0S0dy= z6Ds>joMXtMab%dbSM3gxyhew2{*IJUVUz`MB@WZ0D6o9v=|$c)%)MnWOOvuYIAxac z;ThM>hKDGouCm%>Xl_dRO4Qb}c%*U}BB15cVnixA><2a>{IjHTjM8O=j61aC=uqWq z%ZW(3$LqwzLnNK&2TYh_1mA=6wz)@k7~zo(#mQyPoVo&0VhBF>x6O3|X6tEoRg!j` z?ImB}OXPg^6|*ropdp`3U-F{+mr$P%AJ@f`;<#x6QM`<-l$GqXFml*=gIM|8n7 z=s7f`(p#3QhGen_<08($mIPRCkF9_NOGvyhN}**U_yeE$k)8Fa>2~u9#qB6(x#BAk z-bJW5$04IlR55p*{zWM03<4!u}6*KG^_Q$f3LNl^pk^nmgPla=~5b#ZKh zMJNwmi{tOsp6g|w@*Jd4iuALN{x>BFo(P?2^;GbczwJeta$l}qI+{gw+o5!7eoNg` zE@@bq#r0U(__ld>UL5iO$s#W%M1$E}#cMCO+VI|%To0MFhwdF!SYx_GaCFIz$rP7D zR#*}y*BmuvRw{aUekng@c6h4=sVdI8|K1^se@>By8Mn7+Bo z4wkdfF#`SIN0Txyc2w^oAmVb_`cAOew#gU-}bNt8R=u9wuDFdOx+@+6(K%v9f|qI(&__-qr&P5 zT#mGlNwsdD)deCABl9@=hCr?da0H!@AaHdq>;lJ>G9QlPa07BFuDMx0nv!aqiY1rkMPq+2R#HT0>mktpa%$_M8CrUvBT&ijPlW-3oE$n zw%7h2KAf+Vz_&@<&o?sCulA~0C^*8w3539TZ~T8e@#!faL#!CSb=_+R`~)i?WdOV! z1AHeY>~Dzpyo}68KUo<-NC3#cB0&u$lCGYlaMB=$Yt{;wwiiRa=(z~IsCmET=}&ow2? 
literal 0 HcmV?d00001

diff --git a/cmx4mlops/cmx4mlops/repo/docs/index.md b/cmx4mlops/cmx4mlops/repo/docs/index.md
new file mode 100644
index 000000000..9a74cd2b3
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/index.md
@@ -0,0 +1,79 @@
+# CM "script" automation specification
+
+Please check the [CM documentation](https://docs.mlcommons.org/ck) for more details about the CM automation language.
+
+See the [automatically generated catalog](scripts/index.md) of all CM scripts from MLCommons.
+
+## Understanding CM scripts
+
+* A CM script is identified by a set of tags and by a unique ID.
+* In addition, each CM script can have multiple variations; these are identified by variation tags, which are treated in the same way as tags but carry a `_` prefix.
+
+### CM script execution flow
+
+
+* When a CM script is invoked (either by tags or by unique ID), its `_cm.json` is processed first; any `deps` scripts listed there are executed in order.
+* Once all the `deps` scripts are executed, the `customize.py` file is checked and, if it exists, the `preprocess` function inside it is executed.
+* Then any `prehook_deps` CM scripts mentioned in `_cm.json` are executed, similar to `deps`.
+* After this, the keys in the `env` dictionary are exported as `ENV` variables and the `run` file, if it exists, is executed.
+* Once the run file execution is done, any `posthook_deps` CM scripts mentioned in `_cm.json` are executed, similar to `deps`.
+* Then the `postprocess` function inside `customize.py` is executed if present.
+* After this stage, any `post_deps` CM scripts mentioned in `_cm.json` are executed.
+
+**Note:** If a script is already cached, the `preprocess`, run file and `postprocess` steps are skipped, and only the dependencies marked as `dynamic` are executed from `deps`, `prehook_deps`, `posthook_deps` and `post_deps`.
+
+### Input flags
+When we run a CM script, we can also pass inputs to it: any input listed in the `input_mapping` dictionary inside `_cm.json` is converted to the corresponding `ENV` variable.
+
+### Conditional execution of any `deps`, `post_deps`
+We can use the `skip_if_env` dictionary inside any `deps`, `prehook_deps`, `posthook_deps` or `post_deps` entry to make its execution conditional.
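+
+For illustration, the following minimal `_cm.json` sketch combines these mechanisms; the script alias, dependency tags and environment keys are hypothetical and only show the structure:
+
+```json
+{
+  "alias": "get-mytool",
+  "tags": ["get", "mytool"],
+  "cache": true,
+  "deps": [
+    {
+      "tags": "get,python3",
+      "skip_if_env": {
+        "CM_MYTOOL_SKIP_PYTHON": ["yes"]
+      }
+    }
+  ],
+  "input_mapping": {
+    "tool_path": "CM_MYTOOL_PATH"
+  }
+}
+```
+
+With such a meta, `--tool_path=...` on the command line would be exported as the `CM_MYTOOL_PATH` env variable, and the `get,python3` dependency would be skipped whenever `CM_MYTOOL_SKIP_PYTHON` is set to `yes`.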
+### Versions
+We can request a specific version of a script using `version`; `version_min` and `version_max` are also supported.
+
+* When `version_min` is given, any version above it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version` (if present and above `version_min`) is used for installation; otherwise `version_min` itself is used as `version`.
+
+* When `version_max` is given, any version below it that is present in the cache or detected on the system can be chosen. If nothing is detected, `default_version` (if present and below `version_max`) is used for installation; otherwise `version_max_usable` (an additional input required alongside `version_max`) is used as `version`.
+
+### Variations
+* Variations are used to customize a CM script, and each unique combination of variations uses its own cache entry. Each variation can turn on `env` keys as well as any other meta, including dependencies, specific to it. Variations are turned on like tags but with a `_` prefix. For example, if a script has the tags `"get,myscript"`, the variation `"test"` inside it is invoked with the tags `"get,myscript,_test"`.
+
+#### Variation groups
+`group` is a key that maps variations into a group; at any time, only one variation from a group can be used in the variation tags. For example, `cpu` and `cuda` can be two variations under the `device` group: a user can use either `cpu` or `cuda` as a variation tag at any given time, but not both.
+
+#### Dynamic variations
+Sometimes it is difficult to enumerate all the variations a script may need, for example `batch_size`, which can take many different values. To handle this case, we support dynamic variations using `#`, where `#` can be dynamically replaced by any string. For example, `"_batch_size.8"` can be used as a tag to turn on the dynamic variation `"_batch_size.#"`.
+
+### ENV flow during CM script execution
+
+
+* During a given script execution, the incoming `env` dictionary is saved (`saved_env`) and all updates happen on a copy of it.
+* Once a script execution is over (including all dependent script executions), newly created and updated keys are merged back into `saved_env`, provided they are listed in `new_env_keys`.
+* The same behaviour applies to the `state` dictionary.
+
+#### Special env keys
+* Any env key with the prefix `CM_TMP_*` or `CM_GIT_*` is not passed to dependencies by default. Such keys can be force-passed by adding them to the `force_env_keys` list of the concerned dependency.
+* Similarly, we can prevent an env key from being passed to a given dependency by adding its prefix to the `clean_env_keys` list of that dependency.
+* `--input` is automatically converted to the `CM_INPUT` env key.
+* `version` is converted to `CM_VERSION`, `version_min` to `CM_VERSION_MIN` and `version_max` to `CM_VERSION_MAX`.
+* If `env['CM_GH_TOKEN']=TOKEN_VALUE` is set, git URLs (specified by `CM_GIT_URL`) are changed to include this token.
+* If `env['CM_GIT_SSH']=yes`, git URLs are changed from HTTPS to SSH.
+
+### Script Meta
+#### Special keys in script meta
+* TBD: `reuse_version`, `inherit_variation_tags`, `update_env_tags_from_env`
+
+### How does the cache work?
+* If `cache=true` is set in a script meta, the result of the script execution is cached for further use.
+* For a cached script, `env` and `state` updates are done using the `new_env` and `new_state` dictionaries, which are stored in the `cm-cached.json` file inside the cached folder.
+* By using the `--new` input, a new cache entry can be forced even when an old one exists.
+* By default, no dependencies are run for a cached entry unless the `dynamic` key is set for them.
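+
+As an illustration of how these inputs combine on the command line (the script tags below are hypothetical), the following invocation turns on a variation, constrains the version and forces a fresh cache entry:
+
+```bash
+# Turn on the "_test" variation, require at least version 3.8,
+# and force a new cache entry even if one already exists.
+cm run script --tags=get,myscript,_test --version_min=3.8 --new
+```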
+
+Please see [here](getting-started.md) for trying CM scripts.
+
+
+© 2022-24 [MLCommons](https://mlcommons.org)
+
diff --git a/cmx4mlops/cmx4mlops/repo/docs/requirements.txt b/cmx4mlops/cmx4mlops/repo/docs/requirements.txt
new file mode 100644
index 000000000..ee5149cfc
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/requirements.txt
@@ -0,0 +1,6 @@
+mkdocs-material
+swagger-markdown
+mkdocs-macros-plugin
+ruamel.yaml
+slugify
+mkdocs-caseinsensitive-plugin
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-croissant/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-croissant/index.md
new file mode 100644
index 000000000..f707f1f85
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-croissant/index.md
@@ -0,0 +1,86 @@
+# get-croissant
+Automatically generated README for this automation recipe: **get-croissant**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-croissant/_cm.yaml)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get mlcommons croissant" --help````
+
+#### Run this script
+
+=== "CLI"
+    ##### Run this script via CLI
+
+    ```bash
+    cm run script --tags=get,mlcommons,croissant
+    ```
+=== "CLI Alt"
+    ##### Run this script via CLI (alternative)
+
+
+    ```bash
+    cmr "get mlcommons croissant "
+    ```
+
+=== "Python"
+    ##### Run this script from Python
+
+
+    ```python
+
+    import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,mlcommons,croissant',
+                      'out': 'con',
+                      ...
+                      (other input keys for this script)
+                      ...
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+    ```
+
+
+=== "Docker"
+    ##### Run this script via Docker (beta)
+
+    ```bash
+    cm docker script "get mlcommons croissant"
+    ```
+___
+
+
+#### Native script being run
+=== "Linux/macOS"
+     * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-croissant/run.sh)
+=== "Windows"
+
+     * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-croissant/run.bat)
+___
+#### Script output
+```bash
+cmr "get mlcommons croissant " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-cifar10/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-cifar10/index.md
new file mode 100644
index 000000000..f74ec73ef
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-cifar10/index.md
@@ -0,0 +1,119 @@
+# get-dataset-cifar10
+Automatically generated README for this automation recipe: **get-dataset-cifar10**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cifar10/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get dataset cifar10 image-classification validation training" --help````
+
+#### Run this script
+
+=== "CLI"
+    ##### Run this script via CLI
+
+    ```bash
+    cm run script --tags=get,dataset,cifar10,image-classification,validation,training[,variations]
+    ```
+=== "CLI Alt"
+    ##### Run this script via CLI (alternative)
+
+
+    ```bash
+    cmr "get dataset cifar10 image-classification validation training [variations]"
+    ```
+
+=== "Python"
+    ##### Run this script from Python
+
+
+    ```python
+
+    import cmind
+
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,dataset,cifar10,image-classification,validation,training',
+                      'out': 'con',
+                      ...
+                      (other input keys for this script)
+                      ...
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+    ```
+
+
+=== "Docker"
+    ##### Run this script via Docker (beta)
+
+    ```bash
+    cm docker script "get dataset cifar10 image-classification validation training[variations]"
+    ```
+___
+
+=== "Variations"
+
+
+    #### Variations
+
+    * *No group (any combination of variations can be selected)*
+
+    * `_tiny`
+      - ENV variables:
+        - CM_DATASET_CONVERT_TO_TINYMLPERF: `yes`
+
    + + + * Group "**data_format**" +
+
+    * **`_python`** (default)
+      - ENV variables:
+        - CM_DATASET: `CIFAR10`
+        - CM_DATASET_FILENAME: `cifar-10-python.tar.gz`
+        - CM_DATASET_FILENAME1: `cifar-10-python.tar`
+        - CM_DATASET_CIFAR10: `https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz`
+
    + + + ##### Default variations + + `_python` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cifar10/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cifar10/run.bat) +___ +#### Script output +```bash +cmr "get dataset cifar10 image-classification validation training [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-cnndm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-cnndm/index.md new file mode 100644 index 000000000..22ae3381a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-cnndm/index.md @@ -0,0 +1,128 @@ +# get-dataset-cnndm +Automatically generated README for this automation recipe: **get-dataset-cnndm** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cnndm/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset gpt-j cnndm cnn-dailymail original" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,gpt-j,cnndm,cnn-dailymail,original[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset gpt-j cnndm cnn-dailymail original [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,gpt-j,cnndm,cnn-dailymail,original' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset gpt-j cnndm cnn-dailymail original[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+
+    * `_intel`
+
    + + + * Group "**dataset-type**" +
+
+    * `_calibration`
+      - ENV variables:
+        - CM_DATASET_CALIBRATION: `yes`
+    * **`_validation`** (default)
+      - ENV variables:
+        - CM_DATASET_CALIBRATION: `no`
+
    + + + ##### Default variations + + `_validation` +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_DATASET_CALIBRATION: `no` + + + +#### Native script being run +=== "Linux/macOS" + * [run-intel.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cnndm/run-intel.sh) + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cnndm/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get dataset gpt-j cnndm cnn-dailymail original [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-coco/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-coco/index.md new file mode 100644 index 000000000..98c9f978e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-coco/index.md @@ -0,0 +1,140 @@ +# get-dataset-coco +Automatically generated README for this automation recipe: **get-dataset-coco** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-coco/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-coco/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset object-detection coco" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,object-detection,coco[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset object-detection coco [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,object-detection,coco' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset object-detection coco[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**size**" +
+
+    * **`_complete`** (default)
+      - ENV variables:
+        - CM_DATASET_COCO_SIZE: `complete`
+    * `_small`
+      - ENV variables:
+        - CM_DATASET_COCO_SIZE: `small`
+
    + + + * Group "**type**" +
+
+    * `_train`
+      - ENV variables:
+        - CM_DATASET_COCO_TYPE: `train`
+    * **`_val`** (default)
+      - ENV variables:
+        - CM_DATASET_COCO_TYPE: `val`
+
    + + + * Group "**version**" +
+
+    * **`_2017`** (default)
+      - ENV variables:
+        - CM_DATASET_COCO_VERSION: `2017`
+
    + + + ##### Default variations + + `_2017,_complete,_val` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--from=value` → `CM_FROM=value` + * `--home=value` → `CM_HOME_DIR=value` + * `--store=value` → `CM_STORE=value` + * `--to=value` → `CM_TO=value` + + + + +___ +#### Script output +```bash +cmr "get dataset object-detection coco [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-coco2014/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-coco2014/index.md new file mode 100644 index 000000000..23e09b06f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-coco2014/index.md @@ -0,0 +1,152 @@ +# get-dataset-coco2014 +Automatically generated README for this automation recipe: **get-dataset-coco2014** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-coco2014/_cm.yaml)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset coco2014 object-detection original" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,coco2014,object-detection,original[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset coco2014 object-detection original [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,coco2014,object-detection,original' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset coco2014 object-detection original[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**annotations**" +
+
+    * `_custom-annotations`
+      - ENV variables:
+        - CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: `yes`
+    * **`_default-annotations`** (default)
+      - ENV variables:
+        - CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: `no`
+
    + + + * Group "**dataset-type**" +
+
+    * `_calibration`
+      - ENV variables:
+        - CM_DATASET_CALIBRATION: `yes`
+    * **`_validation`** (default)
+      - ENV variables:
+        - CM_DATASET_CALIBRATION: `no`
+
    + + + * Group "**size**" +
+
+    * **`_50`** (default)
+      - ENV variables:
+        - CM_DATASET_SIZE: `50`
+    * `_500`
+      - ENV variables:
+        - CM_DATASET_SIZE: `500`
+    * `_full`
+      - ENV variables:
+        - CM_DATASET_SIZE: ``
+    * `_size.#`
+      - ENV variables:
+        - CM_DATASET_SIZE: `#`
+
    + + + ##### Default variations + + `_50,_default-annotations,_validation` +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_DATASET_CALIBRATION: `no` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-coco2014/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-coco2014/run.bat) +___ +#### Script output +```bash +cmr "get dataset coco2014 object-detection original [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-criteo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-criteo/index.md new file mode 100644 index 000000000..f28c6e10d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-criteo/index.md @@ -0,0 +1,124 @@ +# get-dataset-criteo +Automatically generated README for this automation recipe: **get-dataset-criteo** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-criteo/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-criteo/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset criteo original" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,criteo,original[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset criteo original [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,criteo,original' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset criteo original[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+
+    * `_backup`
+      - ENV variables:
+        - CM_BACKUP_ZIPS: `yes`
+    * `_fake`
+      - ENV variables:
+        - CM_CRITEO_FAKE: `yes`
+
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--criteo_path=value` → `CM_CRITEO_PATH=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_BACKUP_ZIPS: `no` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-criteo/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get dataset criteo original [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-aux/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-aux/index.md new file mode 100644 index 000000000..1abab6599 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-aux/index.md @@ -0,0 +1,119 @@ +# get-dataset-imagenet-aux +Automatically generated README for this automation recipe: **get-dataset-imagenet-aux** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-aux/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get aux dataset-aux image-classification imagenet-aux" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,aux,dataset-aux,image-classification,imagenet-aux[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get aux dataset-aux image-classification imagenet-aux [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,aux,dataset-aux,image-classification,imagenet-aux' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get aux dataset-aux image-classification imagenet-aux[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+
+    * `_2012`
+      - ENV variables:
+        - CM_DATASET_AUX_VER: `2012`
+
    + + + * Group "**download-source**" +
+
+    * `_from.berkeleyvision`
+      - ENV variables:
+        - CM_WGET_URL: `http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz`
+    * **`_from.dropbox`** (default)
+      - ENV variables:
+        - CM_WGET_URL: `https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz`
+
    + + + ##### Default variations + + `_from.dropbox` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-aux/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-aux/run.bat) +___ +#### Script output +```bash +cmr "get aux dataset-aux image-classification imagenet-aux [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-calibration/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-calibration/index.md new file mode 100644 index 000000000..7aae04d88 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-calibration/index.md @@ -0,0 +1,104 @@ +# get-dataset-imagenet-calibration +Automatically generated README for this automation recipe: **get-dataset-imagenet-calibration** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-calibration/_cm.yaml)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset imagenet calibration" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,imagenet,calibration[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset imagenet calibration [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,imagenet,calibration' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset imagenet calibration[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**calibration-option**" +
+
+    * **`_mlperf.option1`** (default)
+      - ENV variables:
+        - CM_MLPERF_IMAGENET_CALIBRATION_OPTION: `one`
+        - CM_DOWNLOAD_CHECKSUM: `f09719174af3553119e2c621157773a6`
+    * `_mlperf.option2`
+      - ENV variables:
+        - CM_MLPERF_IMAGENET_CALIBRATION_OPTION: `two`
+        - CM_DOWNLOAD_CHECKSUM: `e44582af00e3b4fc3fac30efd6bdd05f`
+
    + + + ##### Default variations + + `_mlperf.option1` + +___ +#### Script output +```bash +cmr "get dataset imagenet calibration [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-helper/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-helper/index.md new file mode 100644 index 000000000..48b39fa40 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-helper/index.md @@ -0,0 +1,80 @@ +# get-dataset-imagenet-helper +Automatically generated README for this automation recipe: **get-dataset-imagenet-helper** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-helper/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get imagenet helper imagenet-helper" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,imagenet,helper,imagenet-helper + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get imagenet helper imagenet-helper " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,imagenet,helper,imagenet-helper' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get imagenet helper imagenet-helper" + ``` +___ + + +___ +#### Script output +```bash +cmr "get imagenet helper imagenet-helper " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-train/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-train/index.md new file mode 100644 index 000000000..2b8bb952f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-train/index.md @@ -0,0 +1,96 @@ +# get-dataset-imagenet-train +Automatically generated README for this automation recipe: **get-dataset-imagenet-train** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-train/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get imagenet train dataset original" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,imagenet,train,dataset,original [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get imagenet train dataset original " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,imagenet,train,dataset,original' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get imagenet train dataset original" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--input=value` → `IMAGENET_TRAIN_PATH=value` + * `--torrent=value` → `CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-train/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get imagenet train dataset original " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-val/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-val/index.md new file mode 100644 index 000000000..d9cd7b787 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-imagenet-val/index.md @@ -0,0 +1,149 @@ +# get-dataset-imagenet-val +Automatically generated README for this automation recipe: **get-dataset-imagenet-val** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-val/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-val/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get val validation dataset imagenet ILSVRC image-classification original" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,val,validation,dataset,imagenet,ILSVRC,image-classification,original[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get val validation dataset imagenet ILSVRC image-classification original [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,val,validation,dataset,imagenet,ILSVRC,image-classification,original' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get val validation dataset imagenet ILSVRC image-classification original[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+
+    * `_2012-500`
+    * `_2012-full`
+    * `_run-during-docker-build`
+
    + + + * Group "**count**" +
+
+    * `_full`
+      - ENV variables:
+        - CM_DATASET_SIZE: `50000`
+        - CM_IMAGENET_FULL: `yes`
+        - CM_DAE_FILENAME: `ILSVRC2012_img_val.tar`
+        - CM_DAE_DOWNLOADED_CHECKSUM: `29b22e2961454d5413ddabcf34fc5622`
+    * `_size.#`
+      - ENV variables:
+        - CM_DATASET_SIZE: `#`
+    * **`_size.500`** (default)
+      - ENV variables:
+        - CM_DATASET_SIZE: `500`
+        - CM_DAE_FILENAME: `ILSVRC2012_img_val_500.tar`
+        - CM_DAE_URL: `http://cKnowledge.org/ai/data/ILSVRC2012_img_val_500.tar`
+
    + + + * Group "**dataset-version**" +
+
+    * **`_2012`** (default)
+      - ENV variables:
+        - CM_DATASET_VER: `2012`
+
    + + + ##### Default variations + + `_2012,_size.500` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--imagenet_path=value` → `IMAGENET_PATH=value` + * `--torrent=value` → `CM_DATASET_IMAGENET_VAL_TORRENT_PATH=value` + + + + +#### Native script being run +=== "Linux/macOS" + No run file exists for Linux/macOS +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-val/run.bat) +___ +#### Script output +```bash +cmr "get val validation dataset imagenet ILSVRC image-classification original [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-kits19/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-kits19/index.md new file mode 100644 index 000000000..5010afffc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-kits19/index.md @@ -0,0 +1,138 @@ +# get-dataset-kits19 +Automatically generated README for this automation recipe: **get-dataset-kits19** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-kits19/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset medical-imaging kits original kits19" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,medical-imaging,kits,original,kits19[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset medical-imaging kits original kits19 [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,medical-imaging,kits,original,kits19' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset medical-imaging kits original kits19[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+
+    * `_calibration`
+      - ENV variables:
+        - CM_DATASET_CALIBRATION: `yes`
+    * `_default`
+      - ENV variables:
+        - CM_GIT_PATCH: `no`
+    * `_full-history`
+      - ENV variables:
+        - CM_GIT_DEPTH: ``
+    * `_no-recurse-submodules`
+      - ENV variables:
+        - CM_GIT_RECURSE_SUBMODULES: ``
+    * `_patch`
+      - ENV variables:
+        - CM_GIT_PATCH: `yes`
+    * `_short-history`
+      - ENV variables:
+        - CM_GIT_DEPTH: `--depth 5`
+    * `_validation`
+      - ENV variables:
+        - CM_DATASET_VALIDATION: `yes`
+
    + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_GIT_CHECKOUT: `master` + * CM_GIT_DEPTH: `--depth 2` + * CM_GIT_PATCH: `no` + * CM_GIT_RECURSE_SUBMODULES: `` + * CM_GIT_URL: `https://github.com/neheller/kits19` + + +#### Versions +Default version: `master` + +* `custom` +* `master` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-kits19/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get dataset medical-imaging kits original kits19 [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-librispeech/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-librispeech/index.md new file mode 100644 index 000000000..05be625ad --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-librispeech/index.md @@ -0,0 +1,97 @@ +# get-dataset-librispeech +Automatically generated README for this automation recipe: **get-dataset-librispeech** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-librispeech/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-librispeech/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset speech speech-recognition librispeech validation audio training original" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,speech,speech-recognition,librispeech,validation,audio,training,original + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset speech speech-recognition librispeech validation audio training original " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,speech,speech-recognition,librispeech,validation,audio,training,original' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset speech speech-recognition librispeech validation audio training original" + ``` +___ + +#### Versions +Default version: `dev-clean` + +* `dev-clean` +* `dev-other` +* `test-clean` +* `test-other` +* `train-clean-100` +* `train-clean-360` +* `train-other-500` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-librispeech/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get dataset speech speech-recognition librispeech validation audio training original " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages-annotations/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages-annotations/index.md new file mode 100644 index 000000000..05578105c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages-annotations/index.md @@ -0,0 +1,105 @@ +# get-dataset-openimages-annotations +Automatically generated README for this automation recipe: **get-dataset-openimages-annotations** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages-annotations/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get aux dataset-aux object-detection openimages annotations" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,aux,dataset-aux,object-detection,openimages,annotations[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get aux dataset-aux object-detection openimages annotations [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,aux,dataset-aux,object-detection,openimages,annotations' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get aux dataset-aux object-detection openimages annotations[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**download-source**" +
    + Click here to expand this section. + + * **`_from.github`** (default) + - ENV variables: + - CM_WGET_URL: `https://github.com/mlcommons/inference/releases/download/v2.1/openimages-mlperf_annotations_2.1.json.zip` + +
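+
+    For example, the default GitHub source can be pinned explicitly (an illustrative, untested sketch):
+
+    ```bash
+    cmr "get aux dataset-aux object-detection openimages annotations _from.github" -j
+    ```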
    + + + ##### Default variations + + `_from.github` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages-annotations/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get aux dataset-aux object-detection openimages annotations [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages-calibration/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages-calibration/index.md new file mode 100644 index 000000000..6e634f401 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages-calibration/index.md @@ -0,0 +1,131 @@ +# get-dataset-openimages-calibration +Automatically generated README for this automation recipe: **get-dataset-openimages-calibration** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages-calibration/_cm.yaml)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset openimages calibration" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,openimages,calibration[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset openimages calibration [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,openimages,calibration' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset openimages calibration[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_filter` + - ENV variables: + - CM_CALIBRATE_FILTER: `yes` + +
    + + + * Group "**calibration-option**" +
    + Click here to expand this section. + + * **`_mlperf.option1`** (default) + - ENV variables: + - CM_MLPERF_OPENIMAGES_CALIBRATION_OPTION: `one` + - CM_DOWNLOAD_CHECKSUM1: `f09719174af3553119e2c621157773a6` + +
    + + + * Group "**filter-size**" +
    + Click here to expand this section. + + * `_filter-size.#` + - ENV variables: + - CM_CALIBRATION_FILTER_SIZE: `#` + * `_filter-size.400` + - ENV variables: + - CM_CALIBRATION_FILTER_SIZE: `400` + +
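+
+    For example, combining the default calibration option with filtering to 400 images (an illustrative, untested combination of the variations above):
+
+    ```bash
+    cmr "get dataset openimages calibration _mlperf.option1,_filter,_filter-size.400" -j
+    ```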
    + + + ##### Default variations + + `_mlperf.option1` + +#### Native script being run +=== "Linux/macOS" + * [run-filter.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages-calibration/run-filter.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get dataset openimages calibration [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages/index.md new file mode 100644 index 000000000..5c9e2fa59 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openimages/index.md @@ -0,0 +1,164 @@ +# get-dataset-openimages +Automatically generated README for this automation recipe: **get-dataset-openimages** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset openimages open-images object-detection original" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,openimages,open-images,object-detection,original[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset openimages open-images object-detection original [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,openimages,open-images,object-detection,original' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset openimages open-images object-detection original[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_filter` + * `_filter-size.#` + * `_using-fiftyone` + +
    + + + * Group "**annotations**" +
    + Click here to expand this section. + + * `_custom-annotations` + - ENV variables: + - CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: `yes` + * **`_default-annotations`** (default) + - ENV variables: + - CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: `no` + +
    + + + * Group "**dataset-type**" +
    + Click here to expand this section. + + * `_calibration` + - ENV variables: + - CM_DATASET_CALIBRATION: `yes` + * **`_validation`** (default) + - ENV variables: + - CM_DATASET_CALIBRATION: `no` + +
    + + + * Group "**size**" +
    + Click here to expand this section. + + * **`_50`** (default) + - ENV variables: + - CM_DATASET_SIZE: `50` + * `_500` + - ENV variables: + - CM_DATASET_SIZE: `500` + * `_full` + - ENV variables: + - CM_DATASET_SIZE: `` + * `_size.#` + - ENV variables: + - CM_DATASET_SIZE: `#` + +
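+
+    For example, selecting one variation from each group — a 500-image validation set with custom annotations (illustrative, untested):
+
+    ```bash
+    cmr "get dataset openimages open-images object-detection original _500,_custom-annotations,_validation" -j
+    ```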
    + + + ##### Default variations + + `_50,_default-annotations,_validation` +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_DATASET_CALIBRATION: `no` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages/run.bat) +___ +#### Script output +```bash +cmr "get dataset openimages open-images object-detection original [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openorca/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openorca/index.md new file mode 100644 index 000000000..a437ae42c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-openorca/index.md @@ -0,0 +1,132 @@ +# get-dataset-openorca +Automatically generated README for this automation recipe: **get-dataset-openorca** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openorca/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset openorca language-processing original" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,openorca,language-processing,original[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset openorca language-processing original [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,openorca,language-processing,original' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset openorca language-processing original[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**dataset-type**" +
    + Click here to expand this section. + + * `_calibration` + - ENV variables: + - CM_DATASET_CALIBRATION: `yes` + * **`_validation`** (default) + - ENV variables: + - CM_DATASET_CALIBRATION: `no` + +
    + + + * Group "**size**" +
    + Click here to expand this section. + + * `_500` + - ENV variables: + - CM_DATASET_SIZE: `500` + * **`_60`** (default) + - ENV variables: + - CM_DATASET_SIZE: `60` + * `_full` + - ENV variables: + - CM_DATASET_SIZE: `24576` + * `_size.#` + - ENV variables: + - CM_DATASET_SIZE: `#` + +
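+
+    For example, the full 24,576-sample validation split as defined by the `_full` variation above (illustrative, untested):
+
+    ```bash
+    cmr "get dataset openorca language-processing original _full,_validation" -j
+    ```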
    + + + ##### Default variations + + `_60,_validation` +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_DATASET_CALIBRATION: `no` + + + +___ +#### Script output +```bash +cmr "get dataset openorca language-processing original [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-squad-vocab/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-squad-vocab/index.md new file mode 100644 index 000000000..30e0fbeee --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-squad-vocab/index.md @@ -0,0 +1,105 @@ +# get-dataset-squad-vocab +Automatically generated README for this automation recipe: **get-dataset-squad-vocab** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-squad-vocab/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get aux dataset-aux language-processing squad-aux vocab squad-vocab" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,aux,dataset-aux,language-processing,squad-aux,vocab,squad-vocab[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get aux dataset-aux language-processing squad-aux vocab squad-vocab [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,aux,dataset-aux,language-processing,squad-aux,vocab,squad-vocab' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get aux dataset-aux language-processing squad-aux vocab squad-vocab[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**download-source**" +
    + Click here to expand this section. + + * **`_from.zenodo`** (default) + - ENV variables: + - CM_WGET_URL: `https://zenodo.org/record/3733868/files/vocab.txt` + +
+
+
+    ##### Default variations
+
+    `_from.zenodo`
+
+#### Native script being run
+=== "Linux/macOS"
+     * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-squad-vocab/run.sh)
+=== "Windows"
+
+    No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get aux dataset-aux language-processing squad-aux vocab squad-vocab [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-squad/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-squad/index.md
new file mode 100644
index 000000000..554e79a57
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-dataset-squad/index.md
@@ -0,0 +1,92 @@
+# get-dataset-squad
+Automatically generated README for this automation recipe: **get-dataset-squad**
+
+Category: **[AI/ML datasets](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-squad/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-squad/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line

+````cmr "get dataset squad language-processing validation original" --help````
+
+#### Run this script
+
+=== "CLI"
+    ##### Run this script via CLI
+
+    ```bash
+    cm run script --tags=get,dataset,squad,language-processing,validation,original
+    ```
+=== "CLI Alt"
+    ##### Run this script via CLI (alternative)
+
+
+    ```bash
+    cmr "get dataset squad language-processing validation original "
+    ```
+
+=== "Python"
+    ##### Run this script from Python
+
+
+    ```python
+
+    import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,dataset,squad,language-processing,validation,original',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset squad language-processing validation original" + ``` +___ + +#### Versions +Default version: `1.1` + +* `1.1` +* `2.0` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-squad/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get dataset squad language-processing validation original " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-criteo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-criteo/index.md new file mode 100644 index 000000000..c75f70bbf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-criteo/index.md @@ -0,0 +1,137 @@ +# get-preprocessed-dataset-criteo +Automatically generated README for this automation recipe: **get-preprocessed-dataset-criteo** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-criteo/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-criteo/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset criteo recommendation dlrm preprocessed" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,criteo,recommendation,dlrm,preprocessed[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset criteo recommendation dlrm preprocessed [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,criteo,recommendation,dlrm,preprocessed' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset criteo recommendation dlrm preprocessed[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_1` + - ENV variables: + - CM_DATASET_SIZE: `1` + * `_50` + - ENV variables: + - CM_DATASET_SIZE: `50` + * `_fake` + - ENV variables: + - CM_CRITEO_FAKE: `yes` + * `_full` + * `_validation` + +
    + + + * Group "**type**" +
    + Click here to expand this section. + + * **`_multihot`** (default) + - ENV variables: + - CM_DATASET_CRITEO_MULTIHOT: `yes` + +
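+
+    For example, a fake multihot dataset preprocessed with 8 threads (illustrative sketch; the output path is a placeholder):
+
+    ```bash
+    cmr "get dataset criteo recommendation dlrm preprocessed _fake,_multihot" --threads=8 --output_dir=/data/criteo-preprocessed -j
+    ```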
    + + + ##### Default variations + + `_multihot` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value` + * `--output_dir=value` → `CM_DATASET_PREPROCESSED_OUTPUT_PATH=value` + * `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run-multihot.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-criteo/run-multihot.sh) + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-criteo/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get dataset criteo recommendation dlrm preprocessed [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-generic/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-generic/index.md new file mode 100644 index 000000000..844e2c2e8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-generic/index.md @@ -0,0 +1,80 @@ +# get-preprocesser-script-generic +Automatically generated README for this automation recipe: **get-preprocesser-script-generic** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocesser-script-generic/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get preprocessor generic image-preprocessor script" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,preprocessor,generic,image-preprocessor,script + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get preprocessor generic image-preprocessor script " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,preprocessor,generic,image-preprocessor,script' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get preprocessor generic image-preprocessor script" + ``` +___ + + +___ +#### Script output +```bash +cmr "get preprocessor generic image-preprocessor script " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-imagenet/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-imagenet/index.md new file mode 100644 index 000000000..c4bee08bc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-imagenet/index.md @@ -0,0 +1,301 @@ +# get-preprocessed-dataset-imagenet +Automatically generated README for this automation recipe: **get-preprocessed-dataset-imagenet** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-imagenet/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-imagenet/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset imagenet ILSVRC image-classification preprocessed" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,imagenet,ILSVRC,image-classification,preprocessed[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset imagenet ILSVRC image-classification preprocessed [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,imagenet,ILSVRC,image-classification,preprocessed' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset imagenet ILSVRC image-classification preprocessed[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_default` + * `_pytorch` + - ENV variables: + - CM_PREPROCESS_PYTORCH: `yes` + - CM_MODEL: `resnet50` + * `_tflite_tpu` + - ENV variables: + - CM_MODEL: `resnet50` + - CM_PREPROCESS_TFLITE_TPU: `yes` + +
    + + + * Group "**calibration-option**" +
    + Click here to expand this section. + + * `_mlperf.option1` + - ENV variables: + - CM_DATASET_CALIBRATION_OPTION: `one` + * `_mlperf.option2` + - ENV variables: + - CM_DATASET_CALIBRATION_OPTION: `two` + +
    + + + * Group "**dataset-type**" +
    + Click here to expand this section. + + * `_calibration` + - ENV variables: + - CM_DATASET_TYPE: `calibration` + * **`_validation`** (default) + - ENV variables: + - CM_DATASET_TYPE: `validation` + +
    + + + * Group "**extension**" +
    + Click here to expand this section. + + * `_rgb32` + - ENV variables: + - CM_DATASET_PREPROCESSED_EXTENSION: `rgb32` + * `_rgb8` + - ENV variables: + - CM_DATASET_PREPROCESSED_EXTENSION: `rgb8` + +
    + + + * Group "**interpolation-method**" +
    + Click here to expand this section. + + * `_inter.area` + - ENV variables: + - CM_DATASET_INTERPOLATION_METHOD: `INTER_AREA` + * `_inter.linear` + - ENV variables: + - CM_DATASET_INTERPOLATION_METHOD: `INTER_LINEAR` + +
    + + + * Group "**layout**" +
    + Click here to expand this section. + + * **`_NCHW`** (default) + - ENV variables: + - CM_DATASET_DATA_LAYOUT: `NCHW` + * `_NHWC` + - ENV variables: + - CM_DATASET_DATA_LAYOUT: `NHWC` + +
    + + + * Group "**model**" +
    + Click here to expand this section. + + * `_for.mobilenet` + * `_for.resnet50` + - ENV variables: + - CM_DATASET_SUBTRACT_MEANS: `1` + - CM_DATASET_GIVEN_CHANNEL_MEANS: `123.68 116.78 103.94` + - CM_DATASET_NORMALIZE_DATA: `0` + - CM_DATASET_INTERPOLATION_METHOD: `INTER_AREA` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * `_float32` + - ENV variables: + - CM_DATASET_DATA_TYPE: `float32` + - CM_DATASET_QUANTIZE: `0` + - CM_DATASET_CONVERT_TO_UNSIGNED: `0` + * `_int8` + - ENV variables: + - CM_DATASET_DATA_TYPE: `int8` + - CM_DATASET_QUANTIZE: `1` + - CM_DATASET_CONVERT_TO_UNSIGNED: `0` + * `_uint8` + - ENV variables: + - CM_DATASET_DATA_TYPE: `uint8` + - CM_DATASET_DATA_TYPE_INPUT: `float32` + - CM_DATASET_QUANTIZE: `1` + - CM_DATASET_CONVERT_TO_UNSIGNED: `1` + +
    + + + * Group "**preprocessing-source**" +
    + Click here to expand this section. + + * `_generic-preprocessor` + - ENV variables: + - CM_DATASET_REFERENCE_PREPROCESSOR: `0` + * **`_mlcommons-reference-preprocessor`** (default) + - ENV variables: + - CM_DATASET_REFERENCE_PREPROCESSOR: `1` + +
    + + + * Group "**resolution**" +
    + Click here to expand this section. + + * `_resolution.#` + - ENV variables: + - CM_DATASET_INPUT_SQUARE_SIDE: `#` + * **`_resolution.224`** (default) + - ENV variables: + - CM_DATASET_INPUT_SQUARE_SIDE: `224` + +
    + + + * Group "**size**" +
    + Click here to expand this section. + + * `_1` + - ENV variables: + - CM_DATASET_SIZE: `1` + * `_500` + - ENV variables: + - CM_DATASET_SIZE: `500` + * `_full` + - ENV variables: + - CM_DATASET_SIZE: `50000` + * `_size.#` + - ENV variables: + - CM_DATASET_SIZE: `#` + +
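+
+    For example, a 500-image float32 validation set in NHWC layout with area interpolation — one variation per group (illustrative, untested):
+
+    ```bash
+    cmr "get dataset imagenet ILSVRC image-classification preprocessed _validation,_500,_NHWC,_float32,_inter.area" --threads=8 -j
+    ```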
    + + + ##### Default variations + + `_NCHW,_mlcommons-reference-preprocessor,_resolution.224,_validation` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value` + * `--imagenet_path=value` → `CM_IMAGENET_PATH=value` + * `--imagenet_preprocessed_path=value` → `CM_IMAGENET_PREPROCESSED_PATH=value` + * `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_DATASET_CROP_FACTOR: `87.5` + * CM_DATASET_DATA_TYPE: `float32` + * CM_DATASET_DATA_LAYOUT: `NCHW` + * CM_DATASET_QUANT_SCALE: `1` + * CM_DATASET_QUANTIZE: `0` + * CM_DATASET_QUANT_OFFSET: `0` + * CM_DATASET_PREPROCESSED_EXTENSION: `npy` + * CM_DATASET_CONVERT_TO_UNSIGNED: `0` + * CM_DATASET_REFERENCE_PREPROCESSOR: `1` + * CM_PREPROCESS_VGG: `yes` + * CM_MODEL: `resnet50` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-imagenet/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-imagenet/run.bat) +___ +#### Script output +```bash +cmr "get dataset imagenet ILSVRC image-classification preprocessed [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19/index.md new file mode 100644 index 000000000..d2a985eca --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19/index.md @@ -0,0 +1,175 @@ +# get-preprocessed-dataset-kits19 +Automatically generated README for this automation recipe: **get-preprocessed-dataset-kits19** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-kits19/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset medical-imaging kits19 preprocessed" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,medical-imaging,kits19,preprocessed[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset medical-imaging kits19 preprocessed [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,medical-imaging,kits19,preprocessed' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset medical-imaging kits19 preprocessed[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_nvidia` + - ENV variables: + - CM_PREPROCESSING_BY_NVIDIA: `yes` + +
    + + + * Group "**dataset-count**" +
    + Click here to expand this section. + + * `_1` + - ENV variables: + - CM_DATASET_SIZE: `1` + * `_5` + - ENV variables: + - CM_DATASET_SIZE: `5` + * `_50` + - ENV variables: + - CM_DATASET_SIZE: `50` + * `_500` + - ENV variables: + - CM_DATASET_SIZE: `500` + * `_full` + - ENV variables: + - CM_DATASET_SIZE: `` + +
    + + + * Group "**dataset-precision**" +
    + Click here to expand this section. + + * **`_fp32`** (default) + - ENV variables: + - CM_DATASET_DTYPE: `fp32` + * `_int8` + - ENV variables: + - CM_DATASET_DTYPE: `int8` + +
    + + + * Group "**dataset-type**" +
    + Click here to expand this section. + + * `_calibration` + - ENV variables: + - CM_DATASET_PATH: `<<>>` + * **`_validation`** (default) + +
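+
+    For example, 50 validation cases preprocessed in fp32 (illustrative, untested):
+
+    ```bash
+    cmr "get dataset medical-imaging kits19 preprocessed _validation,_fp32,_50" --threads=4 -j
+    ```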
    + + + ##### Default variations + + `_fp32,_validation` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value` + * `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_DATASET: `kits19` + * CM_DATASET_DTYPE: `fp32` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-kits19/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get dataset medical-imaging kits19 preprocessed [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-librispeech/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-librispeech/index.md new file mode 100644 index 000000000..f683a8f52 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-librispeech/index.md @@ -0,0 +1,164 @@ +# get-preprocessed-dataset-librispeech +Automatically generated README for this automation recipe: **get-preprocessed-dataset-librispeech** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-librispeech/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset speech-recognition librispeech preprocessed" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,speech-recognition,librispeech,preprocessed[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset speech-recognition librispeech preprocessed [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,speech-recognition,librispeech,preprocessed' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset speech-recognition librispeech preprocessed[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**dataset-count**" +
    + Click here to expand this section. + + * `_1` + - ENV variables: + - CM_DATASET_SIZE: `1` + * `_5` + - ENV variables: + - CM_DATASET_SIZE: `5` + * `_50` + - ENV variables: + - CM_DATASET_SIZE: `50` + * `_500` + - ENV variables: + - CM_DATASET_SIZE: `500` + * `_full` + - ENV variables: + - CM_DATASET_SIZE: `` + +
    + + + * Group "**dataset-precision**" +
    + Click here to expand this section. + + * **`_fp32`** (default) + - ENV variables: + - CM_DATASET_DTYPE: `fp32` + * `_int8` + - ENV variables: + - CM_DATASET_DTYPE: `int8` + +
    + + + * Group "**dataset-type**" +
    + Click here to expand this section. + + * `_calibration` + - ENV variables: + - CM_DATASET_PATH: `<<>>` + * **`_validation`** (default) + +
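+
+    For example, 500 validation samples preprocessed in fp32 (illustrative, untested):
+
+    ```bash
+    cmr "get dataset speech-recognition librispeech preprocessed _validation,_fp32,_500" -j
+    ```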
    + + + ##### Default variations + + `_fp32,_validation` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value` + * `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_DATASET: `kits19` + * CM_DATASET_DTYPE: `fp32` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-librispeech/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get dataset speech-recognition librispeech preprocessed [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openimages/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openimages/index.md new file mode 100644 index 000000000..9bbe30eec --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openimages/index.md @@ -0,0 +1,287 @@ +# get-preprocessed-dataset-openimages +Automatically generated README for this automation recipe: **get-preprocessed-dataset-openimages** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openimages/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openimages/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset openimages open-images object-detection preprocessed" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,openimages,open-images,object-detection,preprocessed[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset openimages open-images object-detection preprocessed [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,openimages,open-images,object-detection,preprocessed' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset openimages open-images object-detection preprocessed[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_filter` + * `_for.retinanet.onnx` + - ENV variables: + - CM_ML_MODEL_NAME: `retinanet` + - CM_DATASET_SUBTRACT_MEANS: `1` + - CM_DATASET_GIVEN_CHANNEL_MEANS: `0.485 0.456 0.406` + - CM_DATASET_GIVEN_CHANNEL_STDS: `0.229 0.224 0.225` + - CM_DATASET_NORMALIZE_DATA: `0` + - CM_DATASET_NORMALIZE_LOWER: `0.0` + - CM_DATASET_NORMALIZE_UPPER: `1.0` + - CM_DATASET_CONVERT_TO_BGR: `0` + - CM_DATASET_CROP_FACTOR: `100.0` + * `_nvidia` + - ENV variables: + - CM_PREPROCESSING_BY_NVIDIA: `yes` + * `_quant-offset.#` + * `_quant-scale.#` + +
    + + + * Group "**annotations**" +
    + Click here to expand this section. + + * `_custom-annotations` + * **`_default-annotations`** (default) + +
    + + + * Group "**dataset-count**" +
    + Click here to expand this section. + + * **`_50`** (default) + - ENV variables: + - CM_DATASET_SIZE: `50` + * `_500` + - ENV variables: + - CM_DATASET_SIZE: `500` + * `_full` + * `_size.#` + - ENV variables: + - CM_DATASET_SIZE: `#` + +
    + + + * Group "**dataset-layout**" +
    + Click here to expand this section. + + * **`_NCHW`** (default) + - ENV variables: + - CM_DATASET_DATA_LAYOUT: `NCHW` + * `_NHWC` + - ENV variables: + - CM_DATASET_DATA_LAYOUT: `NHWC` + +
    + + + * Group "**dataset-precision**" +
    + Click here to expand this section. + + * **`_fp32`** (default) + - ENV variables: + - CM_DATASET_DTYPE: `fp32` + - CM_DATASET_INPUT_DTYPE: `fp32` + - CM_DATASET_QUANTIZE: `0` + - CM_DATASET_CONVERT_TO_UNSIGNED: `0` + * `_int8` + - ENV variables: + - CM_DATASET_DTYPE: `int8` + - CM_DATASET_INPUT_DTYPE: `fp32` + - CM_DATASET_QUANTIZE: `1` + - CM_DATASET_CONVERT_TO_UNSIGNED: `0` + * `_uint8` + - ENV variables: + - CM_DATASET_DTYPE: `uint8` + - CM_DATASET_INPUT_DTYPE: `fp32` + - CM_DATASET_QUANTIZE: `1` + - CM_DATASET_CONVERT_TO_UNSIGNED: `1` + +
    + + + * Group "**dataset-type**" +
    + Click here to expand this section. + + * `_calibration` + - ENV variables: + - CM_DATASET_PATH: `<<>>` + - CM_DATASET_ANNOTATIONS_FILE_PATH: `<<>>` + - CM_DATASET_TYPE: `calibration` + * **`_validation`** (default) + - ENV variables: + - CM_DATASET_TYPE: `validation` + +
    + + + * Group "**extension**" +
    + Click here to expand this section. + + * `_npy` + - ENV variables: + - CM_DATASET_PREPROCESSED_EXTENSION: `npy` + * `_raw` + - ENV variables: + - CM_DATASET_PREPROCESSED_EXTENSION: `raw` + * `_rgb32` + - ENV variables: + - CM_DATASET_PREPROCESSED_EXTENSION: `rgb32` + * `_rgb8` + - ENV variables: + - CM_DATASET_PREPROCESSED_EXTENSION: `rgb8` + +
    + + + * Group "**filter-size**" +
    + Click here to expand this section. + + * `_filter-size.#` + +
    + + + * Group "**interpolation-method**" +
    + Click here to expand this section. + + * `_inter.area` + - ENV variables: + - CM_DATASET_INTERPOLATION_METHOD: `INTER_AREA` + * `_inter.linear` + - ENV variables: + - CM_DATASET_INTERPOLATION_METHOD: `INTER_LINEAR` + +
    + + + * Group "**preprocessing-source**" +
    + Click here to expand this section. + + * `_generic-preprocessor` + - ENV variables: + - CM_DATASET_REFERENCE_PREPROCESSOR: `0` + * **`_mlcommons-reference-preprocessor`** (default) + - ENV variables: + - CM_DATASET_REFERENCE_PREPROCESSOR: `1` + +
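+
+    For example, a 500-image fp32 validation set in NCHW layout preprocessed with the generic preprocessor instead of the MLCommons reference one — one variation per group (illustrative, untested):
+
+    ```bash
+    cmr "get dataset openimages open-images object-detection preprocessed _validation,_500,_fp32,_NCHW,_generic-preprocessor" -j
+    ```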
    + + + ##### Default variations + + `_50,_NCHW,_default-annotations,_fp32,_mlcommons-reference-preprocessor,_validation` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value` + * `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_DATASET: `OPENIMAGES` + * CM_DATASET_DTYPE: `fp32` + * CM_DATASET_INPUT_SQUARE_SIDE: `800` + * CM_DATASET_CROP_FACTOR: `100.0` + * CM_DATASET_QUANT_SCALE: `1` + * CM_DATASET_QUANTIZE: `0` + * CM_DATASET_QUANT_OFFSET: `0` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openimages/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openimages/run.bat) +___ +#### Script output +```bash +cmr "get dataset openimages open-images object-detection preprocessed [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openorca/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openorca/index.md new file mode 100644 index 000000000..5232eaf72 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openorca/index.md @@ -0,0 +1,129 @@ +# get-preprocessed-dataset-openorca +Automatically generated README for this automation recipe: **get-preprocessed-dataset-openorca** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openorca/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset openorca language-processing preprocessed" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,openorca,language-processing,preprocessed[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset openorca language-processing preprocessed [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,openorca,language-processing,preprocessed' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset openorca language-processing preprocessed[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**dataset-type**" +
    + Click here to expand this section. + + * `_calibration` + - ENV variables: + - CM_DATASET_CALIBRATION: `yes` + * **`_validation`** (default) + - ENV variables: + - CM_DATASET_CALIBRATION: `no` + +
    + + + * Group "**size**" +
    + Click here to expand this section. + + * **`_60`** (default) + * `_full` + * `_size.#` + +
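+
+    For example, preprocessing the full validation split rather than the default 60 samples (illustrative, untested):
+
+    ```bash
+    cmr "get dataset openorca language-processing preprocessed _full,_validation" -j
+    ```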
    + + + ##### Default variations + + `_60,_validation` +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_DATASET_CALIBRATION: `no` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openorca/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get dataset openorca language-processing preprocessed [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-squad/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-squad/index.md new file mode 100644 index 000000000..422bbd911 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-squad/index.md @@ -0,0 +1,165 @@ +# get-preprocessed-dataset-squad +Automatically generated README for this automation recipe: **get-preprocessed-dataset-squad** + +Category: **[AI/ML datasets](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-squad/_cm.yaml)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get dataset preprocessed tokenized squad" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,dataset,preprocessed,tokenized,squad[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get dataset preprocessed tokenized squad [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,dataset,preprocessed,tokenized,squad' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get dataset preprocessed tokenized squad[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**calibration-set**" +
    + Click here to expand this section. + + * `_calib1` + - ENV variables: + - CM_DATASET_SQUAD_CALIBRATION_SET: `one` + * `_calib2` + - ENV variables: + - CM_DATASET_SQUAD_CALIBRATION_SET: `two` + * **`_no-calib`** (default) + - ENV variables: + - CM_DATASET_SQUAD_CALIBRATION_SET: `` + +
    + + + * Group "**doc-stride**" +
    + Click here to expand this section. + + * `_doc-stride.#` + - ENV variables: + - CM_DATASET_DOC_STRIDE: `#` + * **`_doc-stride.128`** (default) + - ENV variables: + - CM_DATASET_DOC_STRIDE: `128` + +
    + + + * Group "**packing**" +
    + Click here to expand this section. + + * `_packed` + - ENV variables: + - CM_DATASET_SQUAD_PACKED: `yes` + +
    + + + * Group "**raw**" +
    + Click here to expand this section. + + * `_pickle` + - ENV variables: + - CM_DATASET_RAW: `no` + * **`_raw`** (default) + - ENV variables: + - CM_DATASET_RAW: `yes` + +
    + + + * Group "**seq-length**" +
    + Click here to expand this section. + + * `_seq-length.#` + - ENV variables: + - CM_DATASET_MAX_SEQ_LENGTH: `#` + * **`_seq-length.384`** (default) + - ENV variables: + - CM_DATASET_MAX_SEQ_LENGTH: `384` + +
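+
+    For example, a packed, pickled tokenization with the default sequence length and document stride (illustrative, untested):
+
+    ```bash
+    cmr "get dataset preprocessed tokenized squad _packed,_pickle,_seq-length.384,_doc-stride.128" -j
+    ```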
    + + + ##### Default variations + + `_doc-stride.128,_no-calib,_raw,_seq-length.384` + +#### Native script being run +=== "Linux/macOS" + * [run-packed.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-squad/run-packed.sh) + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-squad/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get dataset preprocessed tokenized squad [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/index.md new file mode 100644 index 000000000..8e94f6073 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-datasets/index.md @@ -0,0 +1,27 @@ +* [get-croissant](get-croissant/index.md) +* [get-dataset-cifar10](get-dataset-cifar10/index.md) +* [get-dataset-cnndm](get-dataset-cnndm/index.md) +* [get-dataset-coco](get-dataset-coco/index.md) +* [get-dataset-coco2014](get-dataset-coco2014/index.md) +* [get-dataset-criteo](get-dataset-criteo/index.md) +* [get-dataset-imagenet-aux](get-dataset-imagenet-aux/index.md) +* [get-dataset-imagenet-calibration](get-dataset-imagenet-calibration/index.md) +* [get-dataset-imagenet-helper](get-dataset-imagenet-helper/index.md) +* [get-dataset-imagenet-train](get-dataset-imagenet-train/index.md) +* [get-dataset-imagenet-val](get-dataset-imagenet-val/index.md) +* [get-dataset-kits19](get-dataset-kits19/index.md) +* [get-dataset-librispeech](get-dataset-librispeech/index.md) +* [get-dataset-openimages](get-dataset-openimages/index.md) +* [get-dataset-openimages-annotations](get-dataset-openimages-annotations/index.md) +* [get-dataset-openimages-calibration](get-dataset-openimages-calibration/index.md) +* [get-dataset-openorca](get-dataset-openorca/index.md) +* [get-dataset-squad](get-dataset-squad/index.md) +* [get-dataset-squad-vocab](get-dataset-squad-vocab/index.md) +* [get-preprocessed-dataset-criteo](get-preprocessed-dataset-criteo/index.md) +* [get-preprocessed-dataset-imagenet](get-preprocessed-dataset-imagenet/index.md) +* [get-preprocessed-dataset-kits19](get-preprocessed-dataset-kits19/index.md) +* [get-preprocessed-dataset-librispeech](get-preprocessed-dataset-librispeech/index.md) +* [get-preprocessed-dataset-openimages](get-preprocessed-dataset-openimages/index.md) +* [get-preprocessed-dataset-openorca](get-preprocessed-dataset-openorca/index.md) +* [get-preprocessed-dataset-squad](get-preprocessed-dataset-squad/index.md) +* [get-preprocessed-dataset-generic](get-preprocessed-dataset-generic/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-google-saxml/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-google-saxml/index.md new file mode 100644 index 000000000..d0a9d4436 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-google-saxml/index.md @@ -0,0 +1,89 @@ +# get-google-saxml +Automatically generated README for this automation recipe: **get-google-saxml** + +Category: **[AI/ML frameworks](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-google-saxml/_cm.yaml)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get google saxml" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,google,saxml + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get google saxml " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,google,saxml' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get google saxml" + ``` +___ + +#### Versions +Default version: `master` + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-google-saxml/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-google-saxml/run.bat) +___ +#### Script output +```bash +cmr "get google saxml " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-onnxruntime-prebuilt/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-onnxruntime-prebuilt/index.md new file mode 100644 index 000000000..04e0b0380 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-onnxruntime-prebuilt/index.md @@ -0,0 +1,111 @@ +# get-onnxruntime-prebuilt +Automatically generated README for this automation recipe: **get-onnxruntime-prebuilt** + +Category: **[AI/ML frameworks](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-onnxruntime-prebuilt/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install onnxruntime get prebuilt lib lang-c lang-cpp" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,onnxruntime,get,prebuilt,lib,lang-c,lang-cpp[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install onnxruntime get prebuilt lib lang-c lang-cpp [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,onnxruntime,get,prebuilt,lib,lang-c,lang-cpp' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install onnxruntime get prebuilt lib lang-c lang-cpp[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**device**" +
+    * **`_cpu`** (default)
+      - ENV variables:
+        - CM_ONNXRUNTIME_DEVICE: ``
+    * `_cuda`
+      - ENV variables:
+        - CM_ONNXRUNTIME_DEVICE: `gpu`
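+    A minimal sketch of selecting one of these variations from Python, assuming the same `cmind.access` call shown above; variations are appended to the tag list with their leading underscore, mirroring the CLI's `[,variations]` suffix:
+
+    ```python
+    import cmind
+
+    # Request the CUDA build of the prebuilt onnxruntime library
+    # (sets CM_ONNXRUNTIME_DEVICE=gpu per the device group above).
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'install,onnxruntime,get,prebuilt,lib,lang-c,lang-cpp,_cuda',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```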
    + + + ##### Default variations + + `_cpu` +#### Versions +Default version: `1.16.3` + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-onnxruntime-prebuilt/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-onnxruntime-prebuilt/run.bat) +___ +#### Script output +```bash +cmr "install onnxruntime get prebuilt lib lang-c lang-cpp [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-apps-sdk/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-apps-sdk/index.md new file mode 100644 index 000000000..613a95510 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-apps-sdk/index.md @@ -0,0 +1,80 @@ +# get-qaic-apps-sdk +Automatically generated README for this automation recipe: **get-qaic-apps-sdk** + +Category: **[AI/ML frameworks](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-qaic-apps-sdk/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get detect qaic apps sdk apps-sdk qaic-apps-sdk" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,detect,qaic,apps,sdk,apps-sdk,qaic-apps-sdk + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get detect qaic apps sdk apps-sdk qaic-apps-sdk " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,detect,qaic,apps,sdk,apps-sdk,qaic-apps-sdk' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get detect qaic apps sdk apps-sdk qaic-apps-sdk" + ``` +___ + + +___ +#### Script output +```bash +cmr "get detect qaic apps sdk apps-sdk qaic-apps-sdk " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-platform-sdk/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-platform-sdk/index.md new file mode 100644 index 000000000..7a5599715 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-platform-sdk/index.md @@ -0,0 +1,80 @@ +# get-qaic-platform-sdk +Automatically generated README for this automation recipe: **get-qaic-platform-sdk** + +Category: **[AI/ML frameworks](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-qaic-platform-sdk/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get detect qaic platform sdk platform-sdk qaic-platform-sdk" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,detect,qaic,platform,sdk,platform-sdk,qaic-platform-sdk + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get detect qaic platform sdk platform-sdk qaic-platform-sdk " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,detect,qaic,platform,sdk,platform-sdk,qaic-platform-sdk' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get detect qaic platform sdk platform-sdk qaic-platform-sdk" + ``` +___ + + +___ +#### Script output +```bash +cmr "get detect qaic platform sdk platform-sdk qaic-platform-sdk " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-software-kit/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-software-kit/index.md new file mode 100644 index 000000000..159dc0edd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-qaic-software-kit/index.md @@ -0,0 +1,119 @@ +# get-qaic-software-kit +Automatically generated README for this automation recipe: **get-qaic-software-kit** + +Category: **[AI/ML frameworks](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-qaic-software-kit/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get qaic software kit qaic-software-kit" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,qaic,software,kit,qaic-software-kit[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get qaic software kit qaic-software-kit [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,qaic,software,kit,qaic-software-kit' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get qaic software kit qaic-software-kit[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_branch.#`
+      - ENV variables:
+        - CM_GIT_CHECKOUT: `#`
+
+  * Group "**repo-source**"
+    * `_repo.#`
+      - ENV variables:
+        - CM_GIT_URL: `#`
+    * **`_repo.quic`** (default)
+      - ENV variables:
+        - CM_GIT_URL: `https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100`
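+    The `#` in `_repo.#` and `_branch.#` is a placeholder: whatever follows the dot is substituted into `CM_GIT_URL` / `CM_GIT_CHECKOUT`. A hedged sketch, where the fork URL and branch name are purely illustrative:
+
+    ```python
+    import cmind
+
+    # Build the software kit from a custom fork and branch instead of the
+    # default quic repository.
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,qaic,software,kit,qaic-software-kit,'
+                              '_repo.https://github.com/example/software-kit-fork,'
+                              '_branch.dev',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```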
    + + + ##### Default variations + + `_repo.quic` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-qaic-software-kit/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get qaic software kit qaic-software-kit [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-rocm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-rocm/index.md new file mode 100644 index 000000000..c31689254 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-rocm/index.md @@ -0,0 +1,86 @@ +# get-rocm +Automatically generated README for this automation recipe: **get-rocm** + +Category: **[AI/ML frameworks](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-rocm/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get rocm get-rocm" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,rocm,get-rocm + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get rocm get-rocm " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,rocm,get-rocm' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get rocm get-rocm" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-rocm/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get rocm get-rocm " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-tvm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-tvm/index.md new file mode 100644 index 000000000..da9315326 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/get-tvm/index.md @@ -0,0 +1,141 @@ +# get-tvm +Automatically generated README for this automation recipe: **get-tvm** + +Category: **[AI/ML frameworks](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get tvm get-tvm" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,tvm,get-tvm[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get tvm get-tvm [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,tvm,get-tvm' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get tvm get-tvm[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_cuda`
+      - ENV variables:
+        - CM_TVM_USE_CUDA: `yes`
+    * `_openmp`
+      - ENV variables:
+        - CM_TVM_USE_OPENMP: `yes`
+
+  * Group "**installation-type**"
+    * **`_llvm`** (default)
+      - ENV variables:
+        - CM_TVM_USE_LLVM: `yes`
+    * `_pip-install`
+      - ENV variables:
+        - CM_TVM_PIP_INSTALL: `yes`
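+    For example, to install TVM via pip rather than building with LLVM, one could combine the `_pip-install` variation with a pinned release; passing `version` as an input key is assumed to match the script's version selection and may need adjusting:
+
+    ```python
+    import cmind
+
+    # Install TVM through pip (CM_TVM_PIP_INSTALL=yes) at a specific
+    # release from the version list below.
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,tvm,get-tvm,_pip-install',
+                      'version': 'v0.10.0',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```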
    + + + ##### Default variations + + `_llvm` +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_GIT_CHECKOUT: `main` + * CM_GIT_URL: `https://github.com/apache/tvm` + * CM_TVM_PIP_INSTALL: `no` + + +#### Versions +* `main` +* `v0.10.0` +* `v0.7.0` +* `v0.8.0` +* `v0.9.0` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get tvm get-tvm [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/index.md new file mode 100644 index 000000000..dd8814fe1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/index.md @@ -0,0 +1,12 @@ +* [get-google-saxml](get-google-saxml/index.md) +* [get-onnxruntime-prebuilt](get-onnxruntime-prebuilt/index.md) +* [get-qaic-apps-sdk](get-qaic-apps-sdk/index.md) +* [get-qaic-platform-sdk](get-qaic-platform-sdk/index.md) +* [get-qaic-software-kit](get-qaic-software-kit/index.md) +* [get-rocm](get-rocm/index.md) +* [get-tvm](get-tvm/index.md) +* [install-qaic-compute-sdk-from-src](install-qaic-compute-sdk-from-src/index.md) +* [install-rocm](install-rocm/index.md) +* [install-tensorflow-for-c](install-tensorflow-for-c/index.md) +* [install-tensorflow-from-src](install-tensorflow-from-src/index.md) +* [install-tflite-from-src](install-tflite-from-src/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-qaic-compute-sdk-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-qaic-compute-sdk-from-src/index.md new file mode 100644 index 000000000..0f04dc149 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-qaic-compute-sdk-from-src/index.md @@ -0,0 +1,136 @@ +# install-qaic-compute-sdk-from-src +Automatically generated README for this automation recipe: **install-qaic-compute-sdk-from-src** + +Category: **[AI/ML frameworks](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-qaic-compute-sdk-from-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,qaic,from.src,software,compute,compute-sdk,qaic-compute-sdk,sdk[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,qaic,from.src,software,compute,compute-sdk,qaic-compute-sdk,sdk' + 'out':'con', + ... 
+ (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_branch.#`
+      - ENV variables:
+        - CM_GIT_CHECKOUT: `#`
+
+  * Group "**installation-mode**"
+    * `_debug`
+      - ENV variables:
+        - CM_QAIC_COMPUTE_SDK_INSTALL_MODE: `debug`
+    * **`_release`** (default)
+      - ENV variables:
+        - CM_QAIC_COMPUTE_SDK_INSTALL_MODE: `release`
+    * `_release-assert`
+      - ENV variables:
+        - CM_QAIC_COMPUTE_SDK_INSTALL_MODE: `release-assert`
+
+  * Group "**repo-source**"
+    * `_repo.#`
+      - ENV variables:
+        - CM_GIT_URL: `#`
+    * **`_repo.quic`** (default)
+      - ENV variables:
+        - CM_GIT_URL: `https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100-cc`
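+    A short sketch combining the groups above from Python (one variation per group; everything not shown in this README is an assumption):
+
+    ```python
+    import cmind
+
+    # Debug build from the default quic repository
+    # (CM_QAIC_COMPUTE_SDK_INSTALL_MODE=debug).
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,qaic,from.src,software,compute,compute-sdk,'
+                              'qaic-compute-sdk,sdk,_debug,_repo.quic',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```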
    + + + ##### Default variations + + `_release,_repo.quic` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-qaic-compute-sdk-from-src/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-rocm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-rocm/index.md new file mode 100644 index 000000000..74756c74d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-rocm/index.md @@ -0,0 +1,91 @@ +# install-rocm +Automatically generated README for this automation recipe: **install-rocm** + +Category: **[AI/ML frameworks](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-rocm/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install rocm install-rocm" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,rocm,install-rocm + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install rocm install-rocm " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,rocm,install-rocm' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install rocm install-rocm" + ``` +___ + +#### Versions +Default version: `5.7.1` + + +#### Native script being run +=== "Linux/macOS" + * [run-rhel.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-rocm/run-rhel.sh) + * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-rocm/run-ubuntu.sh) + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-rocm/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install rocm install-rocm " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tensorflow-for-c/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tensorflow-for-c/index.md new file mode 100644 index 000000000..0e1a158ea --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tensorflow-for-c/index.md @@ -0,0 +1,89 @@ +# install-tensorflow-for-c +Automatically generated README for this automation recipe: **install-tensorflow-for-c** + +Category: **[AI/ML frameworks](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tensorflow-for-c/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install tensorflow lib lang-c" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,tensorflow,lib,lang-c + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install tensorflow lib lang-c " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,tensorflow,lib,lang-c' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install tensorflow lib lang-c" + ``` +___ + +#### Versions +Default version: `2.8.0` + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tensorflow-for-c/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install tensorflow lib lang-c " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tensorflow-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tensorflow-from-src/index.md new file mode 100644 index 000000000..36610c140 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tensorflow-from-src/index.md @@ -0,0 +1,135 @@ +# install-tensorflow-from-src +Automatically generated README for this automation recipe: **install-tensorflow-from-src** + +Category: **[AI/ML frameworks](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tensorflow-from-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get install tensorflow lib source from-source from-src src from.src" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,install,tensorflow,lib,source,from-source,from-src,src,from.src[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get install tensorflow lib source from-source from-src src from.src [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,install,tensorflow,lib,source,from-source,from-src,src,from.src' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get install tensorflow lib source from-source from-src src from.src[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_tflite`
+      - ENV variables:
+        - CM_TFLITE: `on`
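+    The default environment keys listed below can also be overridden from Python through the `env` dictionary, mirroring the CLI's `--env.KEY=VALUE` flags. A sketch, where clearing `CM_GIT_DEPTH` to clone the full history is only an example override:
+
+    ```python
+    import cmind
+
+    # Build the TFLite flavour from source with a full (non-shallow) clone.
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,install,tensorflow,lib,source,from-source,'
+                              'from-src,src,from.src,_tflite',
+                      'env': {'CM_GIT_DEPTH': ''},
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```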
    + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_GIT_URL: `https://github.com/tensorflow/tensorflow` + * CM_GIT_DEPTH: `1` + * CM_TFLITE: `off` + + +#### Versions +Default version: `master` + +* `master` +* `v1.15.0` +* `v2.0.0` +* `v2.1.0` +* `v2.10.0` +* `v2.11.0` +* `v2.12.0` +* `v2.13.0` +* `v2.14.0` +* `v2.15.0` +* `v2.16.1` +* `v2.2.0` +* `v2.3.0` +* `v2.4.0` +* `v2.5.0` +* `v2.6.0` +* `v2.7.0` +* `v2.8.0` +* `v2.9.0` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tensorflow-from-src/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get install tensorflow lib source from-source from-src src from.src [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tflite-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tflite-from-src/index.md new file mode 100644 index 000000000..f86c93efb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-frameworks/install-tflite-from-src/index.md @@ -0,0 +1,100 @@ +# install-tflite-from-src +Automatically generated README for this automation recipe: **install-tflite-from-src** + +Category: **[AI/ML frameworks](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tflite-from-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get install tflite-cmake tensorflow-lite-cmake from-src" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,install,tflite-cmake,tensorflow-lite-cmake,from-src + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get install tflite-cmake tensorflow-lite-cmake from-src " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,install,tflite-cmake,tensorflow-lite-cmake,from-src' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get install tflite-cmake tensorflow-lite-cmake from-src" + ``` +___ + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_GIT_DEPTH: `1` + + +#### Versions +Default version: `master` + +* `master` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tflite-from-src/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get install tflite-cmake tensorflow-lite-cmake from-src " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/convert-ml-model-huggingface-to-onnx/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/convert-ml-model-huggingface-to-onnx/index.md new file mode 100644 index 000000000..0cf4982de --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/convert-ml-model-huggingface-to-onnx/index.md @@ -0,0 +1,101 @@ +# convert-ml-model-huggingface-to-onnx +Automatically generated README for this automation recipe: **convert-ml-model-huggingface-to-onnx** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/convert-ml-model-huggingface-to-onnx/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "ml-model model huggingface-to-onnx onnx huggingface convert" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=ml-model,model,huggingface-to-onnx,onnx,huggingface,convert[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "ml-model model huggingface-to-onnx onnx huggingface convert [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'ml-model,model,huggingface-to-onnx,onnx,huggingface,convert' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "ml-model model huggingface-to-onnx onnx huggingface convert[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_model-path.#`
+      - ENV variables:
+        - CM_MODEL_HUGG_PATH: `#`
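+    `_model-path.#` is another placeholder variation: the suffix after the dot lands in `CM_MODEL_HUGG_PATH`. A sketch, where `bert-base-uncased` is just an illustrative Hugging Face model id:
+
+    ```python
+    import cmind
+
+    # Convert a Hugging Face model to ONNX by pointing the wildcard
+    # variation at its model id.
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'ml-model,model,huggingface-to-onnx,onnx,'
+                              'huggingface,convert,_model-path.bert-base-uncased',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```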
    + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/convert-ml-model-huggingface-to-onnx/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "ml-model model huggingface-to-onnx onnx huggingface convert [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-bert-squad-vocab/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-bert-squad-vocab/index.md new file mode 100644 index 000000000..ab69223ae --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-bert-squad-vocab/index.md @@ -0,0 +1,80 @@ +# get-bert-squad-vocab +Automatically generated README for this automation recipe: **get-bert-squad-vocab** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-bert-squad-vocab/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get bert squad bert-large bert-squad vocab" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,bert,squad,bert-large,bert-squad,vocab + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get bert squad bert-large bert-squad vocab " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,bert,squad,bert-large,bert-squad,vocab' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get bert squad bert-large bert-squad vocab" + ``` +___ + + +___ +#### Script output +```bash +cmr "get bert squad bert-large bert-squad vocab " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-dlrm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-dlrm/index.md new file mode 100644 index 000000000..4aa9382d1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-dlrm/index.md @@ -0,0 +1,118 @@ +# get-dlrm +Automatically generated README for this automation recipe: **get-dlrm** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dlrm/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dlrm/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get src dlrm" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,src,dlrm[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get src dlrm [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,src,dlrm' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get src dlrm[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_full-history`
+      - ENV variables:
+        - CM_GIT_DEPTH: ``
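+    For instance, fetching the DLRM sources with full git history (the default is a shallow `--depth 10` clone, per the environment below):
+
+    ```python
+    import cmind
+
+    # Clear the shallow-clone depth via the _full-history variation.
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,src,dlrm,_full-history',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```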
    + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_GIT_DEPTH: `--depth 10` + * CM_GIT_PATCH: `no` + * CM_GIT_URL: `https://github.com/facebookresearch/dlrm.git` + + +#### Versions +Default version: `main` + +* `main` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dlrm/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get src dlrm [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-3d-unet-kits19/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-3d-unet-kits19/index.md new file mode 100644 index 000000000..de5fe50eb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-3d-unet-kits19/index.md @@ -0,0 +1,130 @@ +# get-ml-model-3d-unet-kits19 +Automatically generated README for this automation recipe: **get-ml-model-3d-unet-kits19** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-3d-unet-kits19/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ml-model raw 3d-unet kits19 medical-imaging" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ml-model,raw,3d-unet,kits19,medical-imaging[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ml-model raw 3d-unet kits19 medical-imaging [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,ml-model,raw,3d-unet,kits19,medical-imaging' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ml-model raw 3d-unet kits19 medical-imaging[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_weights`
+      - ENV variables:
+        - CM_MODEL_WEIGHTS_FILE: `yes`
+
+  * Group "**framework**"
+    * **`_onnx`** (default)
+      - ENV variables:
+        - CM_ML_MODEL_FRAMEWORK: `onnx`
+    * `_pytorch`
+      - ENV variables:
+        - CM_ML_MODEL_FRAMEWORK: `pytorch`
+    * `_tf`
+      - Aliases: `_tensorflow`
+      - ENV variables:
+        - CM_ML_MODEL_FRAMEWORK: `tensorflow`
+
+  * Group "**precision**"
+    * **`_fp32`** (default)
+      - ENV variables:
+        - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
+        - CM_ML_MODEL_PRECISION: `fp32`
+        - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
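+    Since exactly one variation per group can be active, a Python sketch requesting the PyTorch weights at the default fp32 precision could look like this:
+
+    ```python
+    import cmind
+
+    # Framework group: _pytorch; precision group: _fp32 (default).
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,raw,3d-unet,kits19,'
+                              'medical-imaging,_pytorch,_fp32',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```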
    + + + ##### Default variations + + `_fp32,_onnx` + +___ +#### Script output +```bash +cmr "get ml-model raw 3d-unet kits19 medical-imaging [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-bert-base-squad/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-bert-base-squad/index.md new file mode 100644 index 000000000..dc07850d6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-bert-base-squad/index.md @@ -0,0 +1,119 @@ +# get-ml-model-bert-base-squad +Automatically generated README for this automation recipe: **get-ml-model-bert-base-squad** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-bert-base-squad/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ml-model raw bert bert-base bert-squad language language-processing" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ml-model,raw,bert,bert-base,bert-squad,language,language-processing[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ml-model raw bert bert-base bert-squad language language-processing [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,ml-model,raw,bert,bert-base,bert-squad,language,language-processing' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ml-model raw bert bert-base bert-squad language language-processing[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**framework**" +
+    * `_deepsparse`
+      - ENV variables:
+        - CM_ML_MODEL_FRAMEWORK: `deepsparse`
+        - CM_ML_MODEL_INPUT_IDS_NAME: `input_ids`
+        - CM_ML_MODEL_INPUT_MASK_NAME: `input_mask`
+        - CM_ML_MODEL_INPUT_SEGMENTS_NAME: `segment_ids`
+        - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: `output_end_logits`
+        - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: `output_start_logits`
+
+  * Group "**precision**"
+    * **`_fp32`** (default)
+      - ENV variables:
+        - CM_ML_MODEL_PRECISION: `fp32`
+    * `_int8`
+      - ENV variables:
+        - CM_ML_MODEL_PRECISION: `int8`
+        - CM_ML_MODEL_QUANTIZED: `yes`
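+    Combining the two groups above, a quantized DeepSparse checkpoint could be requested as follows (a sketch, not the only valid combination):
+
+    ```python
+    import cmind
+
+    # _deepsparse (framework group) + _int8 (precision group, which also
+    # sets CM_ML_MODEL_QUANTIZED=yes).
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,raw,bert,bert-base,bert-squad,'
+                              'language,language-processing,_deepsparse,_int8',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```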
    + + + ##### Default variations + + `_fp32` + +___ +#### Script output +```bash +cmr "get ml-model raw bert bert-base bert-squad language language-processing [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-bert-large-squad/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-bert-large-squad/index.md new file mode 100644 index 000000000..9ba5778b3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-bert-large-squad/index.md @@ -0,0 +1,188 @@ +# get-ml-model-bert-large-squad +Automatically generated README for this automation recipe: **get-ml-model-bert-large-squad** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-bert-large-squad/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ml-model raw bert bert-large bert-squad language language-processing" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ml-model,raw,bert,bert-large,bert-squad,language,language-processing[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ml-model raw bert bert-large bert-squad language language-processing [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,ml-model,raw,bert,bert-large,bert-squad,language,language-processing' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ml-model raw bert bert-large bert-squad language language-processing[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_onnxruntime`
+    * `_tensorflow`
+
+  * Group "**download-source**"
+    * `_amazon-s3`
+    * `_armi`
+    * `_custom-url.#`
+      - ENV variables:
+        - CM_PACKAGE_URL: `#`
+    * `_github`
+    * `_zenodo`
+
+  * Group "**framework**"
+    * `_deepsparse`
+      - ENV variables:
+        - CM_ML_MODEL_FRAMEWORK: `deepsparse`
+        - CM_ML_MODEL_INPUT_IDS_NAME: `input_ids`
+        - CM_ML_MODEL_INPUT_MASK_NAME: `input_mask`
+        - CM_ML_MODEL_INPUT_SEGMENTS_NAME: `segment_ids`
+        - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: `output_end_logits`
+        - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: `output_start_logits`
+    * **`_onnx`** (default)
+      - ENV variables:
+        - CM_ML_MODEL_FRAMEWORK: `onnx`
+        - CM_ML_MODEL_INPUT_IDS_NAME: `input_ids`
+        - CM_ML_MODEL_INPUT_MASK_NAME: `input_mask`
+        - CM_ML_MODEL_INPUT_SEGMENTS_NAME: `segment_ids`
+        - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: `output_end_logits`
+        - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: `output_start_logits`
+    * `_pytorch`
+      - ENV variables:
+        - CM_ML_MODEL_FRAMEWORK: `pytorch`
+        - CM_ML_MODEL_INPUT_IDS_NAME: `input_ids`
+        - CM_ML_MODEL_INPUT_MASK_NAME: `input_mask`
+        - CM_ML_MODEL_INPUT_SEGMENTS_NAME: `segment_ids`
+        - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: `output_end_logits`
+        - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: `output_start_logits`
+    * `_tf`
+      - ENV variables:
+        - CM_ML_MODEL_FRAMEWORK: `tf`
+        - CM_ML_MODEL_INPUT_IDS_NAME: `input_ids`
+        - CM_ML_MODEL_INPUT_MASK_NAME: `input_mask`
+        - CM_ML_MODEL_INPUT_SEGMENTS_NAME: `segment_ids`
+        - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: `output_end_logits`
+        - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: `output_start_logits`
+
+  * Group "**packing**"
+    * `_packed`
+      - ENV variables:
+        - CM_ML_MODEL_BERT_PACKED: `yes`
+    * **`_unpacked`** (default)
+      - ENV variables:
+        - CM_ML_MODEL_BERT_PACKED: `no`
+
+  * Group "**precision**"
+    * **`_fp32`** (default)
+      - ENV variables:
+        - CM_ML_MODEL_PRECISION: `fp32`
+    * `_int8`
+      - ENV variables:
+        - CM_ML_MODEL_PRECISION: `int8`
+        - CM_ML_MODEL_QUANTIZED: `yes`
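+    A sketch that fetches the packed ONNX checkpoint and inspects the result; the `new_env` key holding the resolved environment on success is an assumption about the script automation's return value and may differ across CM versions:
+
+    ```python
+    import cmind
+
+    # Packing group: _packed; framework group: _onnx (default).
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,raw,bert,bert-large,bert-squad,'
+                              'language,language-processing,_onnx,_packed',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    else:
+        print(r.get('new_env', {}))  # e.g. resolved model path (assumed key)
+    ```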
    + + + ##### Default variations + + `_fp32,_onnx,_unpacked` + +#### Native script being run +=== "Linux/macOS" + * [run-packed.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-bert-large-squad/run-packed.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get ml-model raw bert bert-large bert-squad language language-processing [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-dlrm-terabyte/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-dlrm-terabyte/index.md new file mode 100644 index 000000000..71138c9a6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-dlrm-terabyte/index.md @@ -0,0 +1,165 @@ +# get-ml-model-dlrm-terabyte +Automatically generated README for this automation recipe: **get-ml-model-dlrm-terabyte** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-dlrm-terabyte/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ml-model,dlrm,raw,terabyte,criteo-terabyte,criteo,recommendation[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,ml-model,dlrm,raw,terabyte,criteo-terabyte,criteo,recommendation' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_debug`
+      - ENV variables:
+        - CM_ML_MODEL_DEBUG: `yes`
+
+  * Group "**download-tool**"
+    * `_rclone`
+    * `_wget`
+
+  * Group "**framework**"
+    * `_onnx`
+      - ENV variables:
+        - CM_ML_MODEL_FRAMEWORK: `onnx`
+    * **`_pytorch`** (default)
+      - ENV variables:
+        - CM_ML_MODEL_FRAMEWORK: `pytorch`
+        - CM_TMP_MODEL_ADDITIONAL_NAME: `dlrm_terabyte.pytorch`
+
+  * Group "**precision**"
+    * **`_fp32`** (default)
+      - ENV variables:
+        - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
+        - CM_ML_MODEL_PRECISION: `fp32`
+        - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
+
+  * Group "**type**"
+    * **`_weight_sharded`** (default)
+      - ENV variables:
+        - CM_DLRM_MULTIHOT_MODEL: `yes`
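+    The flag mapping below (`--dir`, `--download_path`, `--to` → `CM_DOWNLOAD_PATH`) suggests the download target can be set per run. A sketch, assuming the CLI flags can be passed as plain input keys from Python (the path itself is illustrative):
+
+    ```python
+    import cmind
+
+    # PyTorch checkpoint fetched via rclone into a custom directory.
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,dlrm,raw,terabyte,criteo-terabyte,'
+                              'criteo,recommendation,_pytorch,_rclone',
+                      'to': '/data/models/dlrm',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```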
    + + + ##### Default variations + + `_fp32,_pytorch,_weight_sharded` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--dir=value` → `CM_DOWNLOAD_PATH=value` + * `--download_path=value` → `CM_DOWNLOAD_PATH=value` + * `--to=value` → `CM_DOWNLOAD_PATH=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-dlrm-terabyte/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-efficientnet-lite/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-efficientnet-lite/index.md new file mode 100644 index 000000000..b95cc653f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-efficientnet-lite/index.md @@ -0,0 +1,191 @@ +# get-ml-model-efficientnet-lite +Automatically generated README for this automation recipe: **get-ml-model-efficientnet-lite** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-efficientnet-lite/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ml-model,efficientnet,raw,ml-model-efficientnet,ml-model-efficientnet-lite,lite,tflite,image-classification[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,ml-model,efficientnet,raw,ml-model-efficientnet,ml-model-efficientnet-lite,lite,tflite,image-classification' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_tflite`
+
+  * Group "**kind**"
+    * **`_lite0`** (default)
+      - ENV variables:
+        - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: `lite0`
+    * `_lite1`
+      - ENV variables:
+        - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: `lite1`
+    * `_lite2`
+      - ENV variables:
+        - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: `lite2`
+    * `_lite3`
+      - ENV variables:
+        - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: `lite3`
+    * `_lite4`
+      - ENV variables:
+        - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: `lite4`
+
+  * Group "**precision**"
+    * **`_fp32`** (default)
+      - ENV variables:
+        - CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION: `fp32`
+        - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
+        - CM_ML_MODEL_PRECISION: `fp32`
+        - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
+    * `_uint8`
+      - Aliases: `_int8`
+      - ENV variables:
+        - CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION: `int8`
+        - CM_ML_MODEL_INPUTS_DATA_TYPE: `uint8`
+        - CM_ML_MODEL_PRECISION: `uint8`
+        - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `uint8`
+
+  * Group "**resolution**"
+    * **`_resolution-224`** (default)
+      - ENV variables:
+        - CM_ML_MODEL_IMAGE_HEIGHT: `224`
+        - CM_ML_MODEL_IMAGE_WIDTH: `224`
+        - CM_ML_MODEL_MOBILENET_RESOLUTION: `224`
+        - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.224`
+    * `_resolution-240`
+      - ENV variables:
+        - CM_ML_MODEL_IMAGE_HEIGHT: `240`
+        - CM_ML_MODEL_IMAGE_WIDTH: `240`
+        - CM_ML_MODEL_MOBILENET_RESOLUTION: `240`
+        - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.240`
+    * `_resolution-260`
+      - ENV variables:
+        - CM_ML_MODEL_IMAGE_HEIGHT: `260`
+        - CM_ML_MODEL_IMAGE_WIDTH: `260`
+        - CM_ML_MODEL_MOBILENET_RESOLUTION: `260`
+        - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.260`
+    * `_resolution-280`
+      - ENV variables:
+        - CM_ML_MODEL_IMAGE_HEIGHT: `280`
+        - CM_ML_MODEL_IMAGE_WIDTH: `280`
+        - CM_ML_MODEL_MOBILENET_RESOLUTION: `280`
+        - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.280`
+    * `_resolution-300`
+      - ENV variables:
+        - CM_ML_MODEL_IMAGE_HEIGHT: `300`
+        - CM_ML_MODEL_IMAGE_WIDTH: `300`
+        - CM_ML_MODEL_MOBILENET_RESOLUTION: `300`
+        - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.300`
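+    One variation from each of the kind, precision and resolution groups can be combined; for example, a quantized Lite4 model at 300x300:
+
+    ```python
+    import cmind
+
+    # _lite4 (kind) + _uint8 (precision) + _resolution-300 (resolution).
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,efficientnet,raw,ml-model-efficientnet,'
+                              'ml-model-efficientnet-lite,lite,tflite,'
+                              'image-classification,_lite4,_uint8,_resolution-300',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```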
    + + + ##### Default variations + + `_fp32,_lite0,_resolution-224` +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` + * CM_ML_MODEL_PRECISION: `fp32` + * CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32` + + + +___ +#### Script output +```bash +cmr "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-gptj/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-gptj/index.md new file mode 100644 index 000000000..f8ba684b1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-gptj/index.md @@ -0,0 +1,194 @@ +# get-ml-model-gptj +Automatically generated README for this automation recipe: **get-ml-model-gptj** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get raw ml-model gptj gpt-j large-language-model" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,raw,ml-model,gptj,gpt-j,large-language-model[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get raw ml-model gptj gpt-j large-language-model [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,raw,ml-model,gptj,gpt-j,large-language-model' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get raw ml-model gptj gpt-j large-language-model[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_batch_size.#` + - ENV variables: + - CM_ML_MODEL_BATCH_SIZE: `#` + +
    + + + * Group "**download-tool**" +
    + Click here to expand this section. + + * **`_rclone`** (default) + - ENV variables: + - CM_DOWNLOAD_FILENAME: `checkpoint` + - CM_DOWNLOAD_URL: `<<>>` + * `_wget` + - ENV variables: + - CM_DOWNLOAD_URL: `<<>>` + - CM_DOWNLOAD_FILENAME: `checkpoint.zip` + +
    + + + * Group "**framework**" +
    + Click here to expand this section. + + * **`_pytorch`** (default) + - ENV variables: + - CM_ML_MODEL_DATA_LAYOUT: `NCHW` + - CM_ML_MODEL_FRAMEWORK: `pytorch` + - CM_ML_STARTING_WEIGHTS_FILENAME: `<<>>` + * `_saxml` + +
    + + + * Group "**model-provider**" +
    + Click here to expand this section. + + * `_intel` + * **`_mlcommons`** (default) + * `_nvidia` + - ENV variables: + - CM_TMP_ML_MODEL_PROVIDER: `nvidia` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * `_fp32` + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32` + - CM_ML_MODEL_PRECISION: `fp32` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32` + * `_fp8` + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `fp8` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp8` + * `_int4` + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `int4` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int4` + * `_int8` + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `int8` + - CM_ML_MODEL_PRECISION: `int8` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int8` + * `_uint8` + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `uint8` + - CM_ML_MODEL_PRECISION: `uint8` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `uint8` + +
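+
+    The groups above combine freely, one selection per group. Below is a hedged sketch that spells out the default PyTorch/fp32/rclone combination explicitly and redirects the download; the `to` input key follows the flag table below, and the target path is purely illustrative:
+
+    ```python
+    import cmind
+
+    # Fetch the fp32 PyTorch checkpoint via the default rclone download tool;
+    # 'to' maps to CM_DOWNLOAD_PATH (the path below is an illustrative assumption)
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,raw,ml-model,gptj,gpt-j,large-language-model,_pytorch,_fp32,_rclone',
+                      'to': '/data/models/gptj',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```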
    + + + ##### Default variations + + `_mlcommons,_pytorch,_rclone` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--checkpoint=value` → `GPTJ_CHECKPOINT_PATH=value` + * `--download_path=value` → `CM_DOWNLOAD_PATH=value` + * `--to=value` → `CM_DOWNLOAD_PATH=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run-int4-calibration.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/run-int4-calibration.sh) + * [run-intel.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/run-intel.sh) + * [run-nvidia.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/run-nvidia.sh) + * [run-saxml-quantized.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/run-saxml-quantized.sh) + * [run-saxml.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/run-saxml.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get raw ml-model gptj gpt-j large-language-model [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-huggingface-zoo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-huggingface-zoo/index.md new file mode 100644 index 000000000..5f5ef67fe --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-huggingface-zoo/index.md @@ -0,0 +1,136 @@ +# get-ml-model-huggingface-zoo +Automatically generated README for this automation recipe: **get-ml-model-huggingface-zoo** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-huggingface-zoo/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-huggingface-zoo/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ml-model huggingface zoo" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ml-model,huggingface,zoo[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ml-model huggingface zoo [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,ml-model,huggingface,zoo' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ml-model huggingface zoo[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_model-stub.#` + - ENV variables: + - CM_MODEL_ZOO_STUB: `#` + * `_onnx-subfolder` + - ENV variables: + - CM_HF_SUBFOLDER: `onnx` + * `_pierreguillou_bert_base_cased_squad_v1.1_portuguese` + - ENV variables: + - CM_MODEL_ZOO_STUB: `pierreguillou/bert-base-cased-squad-v1.1-portuguese` + * `_prune` + - ENV variables: + - CM_MODEL_TASK: `prune` + +
    + + + * Group "**download-type**" +
    + Click here to expand this section. + + * `_clone-repo` + - ENV variables: + - CM_GIT_CLONE_REPO: `yes` + +
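+
+    Beyond the named variations, the `_model-stub.#` wildcard above accepts an arbitrary Hugging Face stub. A sketch using a stub taken from the variation list above, assuming the `cmind` package is installed and the repository has been pulled:
+
+    ```python
+    import cmind
+
+    # Download a specific Hugging Face model by its stub (stub from the list above)
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,huggingface,zoo,_model-stub.pierreguillou/bert-base-cased-squad-v1.1-portuguese',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```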
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--download_path=value` → `CM_DOWNLOAD_PATH=value` + * `--env_key=value` → `CM_MODEL_ZOO_ENV_KEY=value` + * `--full_subfolder=value` → `CM_HF_FULL_SUBFOLDER=value` + * `--model_filename=value` → `CM_MODEL_ZOO_FILENAME=value` + * `--revision=value` → `CM_HF_REVISION=value` + * `--subfolder=value` → `CM_HF_SUBFOLDER=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-huggingface-zoo/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-huggingface-zoo/run.bat) +___ +#### Script output +```bash +cmr "get ml-model huggingface zoo [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-llama2/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-llama2/index.md new file mode 100644 index 000000000..fe9e5136a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-llama2/index.md @@ -0,0 +1,161 @@ +# get-ml-model-llama2 +Automatically generated README for this automation recipe: **get-ml-model-llama2** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-llama2/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get raw ml-model language-processing llama2 llama2-70b text-summarization" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get raw ml-model language-processing llama2 llama2-70b text-summarization [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get raw ml-model language-processing llama2 llama2-70b text-summarization[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_batch_size.#` + - ENV variables: + - CM_ML_MODEL_BATCH_SIZE: `#` + +
    + + + * Group "**framework**" +
    + Click here to expand this section. + + * **`_pytorch`** (default) + - ENV variables: + - CM_ML_MODEL_FRAMEWORK: `pytorch` + +
    + + + * Group "**huggingface-stub**" +
    + Click here to expand this section. + + * **`_meta-llama/Llama-2-70b-chat-hf`** (default) + - ENV variables: + - CM_GIT_CHECKOUT_FOLDER: `Llama-2-70b-chat-hf` + - CM_MODEL_ZOO_ENV_KEY: `LLAMA2` + * `_meta-llama/Llama-2-7b-chat-hf` + - ENV variables: + - CM_GIT_CHECKOUT_FOLDER: `Llama-2-7b-chat-hf` + - CM_MODEL_ZOO_ENV_KEY: `LLAMA2` + * `_stub.#` + - ENV variables: + - CM_MODEL_ZOO_ENV_KEY: `LLAMA2` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * **`_fp32`** (default) + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32` + - CM_ML_MODEL_PRECISION: `fp32` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32` + * `_int8` + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `int8` + - CM_ML_MODEL_PRECISION: `int8` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int8` + * `_uint8` + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `uint8` + - CM_ML_MODEL_PRECISION: `uint8` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `uint8` + +
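+
+    For instance, the smaller 7B checkpoint can be selected by swapping the huggingface-stub variation above. This is a sketch rather than a definitive invocation, and it assumes the repository above has been pulled:
+
+    ```python
+    import cmind
+
+    # Fetch the 7B chat checkpoint instead of the default 70B one
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization,_meta-llama/Llama-2-7b-chat-hf',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```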
    + + + ##### Default variations + + `_fp32,_meta-llama/Llama-2-70b-chat-hf,_pytorch` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--checkpoint=value` → `LLAMA2_CHECKPOINT_PATH=value` + + + + +___ +#### Script output +```bash +cmr "get raw ml-model language-processing llama2 llama2-70b text-summarization [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-mobilenet/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-mobilenet/index.md new file mode 100644 index 000000000..e34f128e8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-mobilenet/index.md @@ -0,0 +1,288 @@ +# get-ml-model-mobilenet +Automatically generated README for this automation recipe: **get-ml-model-mobilenet** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-mobilenet/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-mobilenet/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ml-model mobilenet raw ml-model-mobilenet image-classification" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ml-model,mobilenet,raw,ml-model-mobilenet,image-classification[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ml-model mobilenet raw ml-model-mobilenet image-classification [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,ml-model,mobilenet,raw,ml-model-mobilenet,image-classification' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ml-model mobilenet raw ml-model-mobilenet image-classification[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_tflite` + +
    + + + * Group "**framework**" +
    + Click here to expand this section. + + * `_onnx` + - ENV variables: + - CM_ML_MODEL_DATA_LAYOUT: `NCHW` + - CM_ML_MODEL_FRAMEWORK: `onnx` + * **`_tf`** (default) + - ENV variables: + - CM_ML_MODEL_DATA_LAYOUT: `NHWC` + - CM_ML_MODEL_NORMALIZE_DATA: `yes` + - CM_ML_MODEL_SUBTRACT_MEANS: `no` + - CM_ML_MODEL_INPUT_LAYER_NAME: `input` + +
    + + + * Group "**kind**" +
    + Click here to expand this section. + + * `_large` + - ENV variables: + - CM_ML_MODEL_MOBILENET_KIND: `large` + * `_large-minimalistic` + - ENV variables: + - CM_ML_MODEL_MOBILENET_KIND: `large-minimalistic` + * `_small` + - ENV variables: + - CM_ML_MODEL_MOBILENET_KIND: `small` + * `_small-minimalistic` + - ENV variables: + - CM_ML_MODEL_MOBILENET_KIND: `small-minimalistic` + +
    + + + * Group "**multiplier**" +
    + Click here to expand this section. + + * `_multiplier-0.25` + - ENV variables: + - CM_ML_MODEL_MOBILENET_MULTIPLIER: `0.25` + - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: `25` + * `_multiplier-0.35` + - ENV variables: + - CM_ML_MODEL_MOBILENET_MULTIPLIER: `0.35` + - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: `35` + * `_multiplier-0.5` + - ENV variables: + - CM_ML_MODEL_MOBILENET_MULTIPLIER: `0.5` + - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: `50` + * `_multiplier-0.75` + - ENV variables: + - CM_ML_MODEL_MOBILENET_MULTIPLIER: `0.75` + - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: `75` + * `_multiplier-1.0` + - ENV variables: + - CM_ML_MODEL_MOBILENET_MULTIPLIER: `1.0` + - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: `100` + +
    + + + * Group "**opset-version**" +
    + Click here to expand this section. + + * `_opset-11` + - ENV variables: + - CM_ML_MODEL_ONNX_OPSET: `11` + * `_opset-8` + - ENV variables: + - CM_ML_MODEL_ONNX_OPSET: `8` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * **`_fp32`** (default) + - ENV variables: + - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_PRECISION: `fp32` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_MOBILENET_PRECISION: `float` + * `_int8` + - ENV variables: + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8` + - CM_ML_MODEL_PRECISION: `int8` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` + - CM_ML_MODEL_MOBILENET_PRECISION: `int8` + * `_uint8` + - ENV variables: + - CM_ML_MODEL_INPUTS_DATA_TYPE: `uint8` + - CM_ML_MODEL_PRECISION: `uint8` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `uint8` + - CM_ML_MODEL_MOBILENET_PRECISION: `uint8` + +
    + + + * Group "**resolution**" +
    + Click here to expand this section. + + * `_resolution-128` + - ENV variables: + - CM_ML_MODEL_MOBILENET_RESOLUTION: `128` + - CM_ML_MODEL_IMAGE_HEIGHT: `128` + - CM_ML_MODEL_IMAGE_WIDTH: `128` + - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.128` + * `_resolution-160` + - ENV variables: + - CM_ML_MODEL_MOBILENET_RESOLUTION: `160` + - CM_ML_MODEL_IMAGE_HEIGHT: `160` + - CM_ML_MODEL_IMAGE_WIDTH: `160` + - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.160` + * `_resolution-192` + - ENV variables: + - CM_ML_MODEL_MOBILENET_RESOLUTION: `192` + - CM_ML_MODEL_IMAGE_HEIGHT: `192` + - CM_ML_MODEL_IMAGE_WIDTH: `192` + - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.192` + * `_resolution-224` + - ENV variables: + - CM_ML_MODEL_MOBILENET_RESOLUTION: `224` + - CM_ML_MODEL_IMAGE_HEIGHT: `224` + - CM_ML_MODEL_IMAGE_WIDTH: `224` + - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.224` + +
    + + + * Group "**source**" +
    + Click here to expand this section. + + * `_from.google` + - ENV variables: + - CM_DOWNLOAD_SOURCE: `google` + * `_from.zenodo` + - ENV variables: + - CM_DOWNLOAD_SOURCE: `zenodo` + +
    + + + * Group "**version**" +
    + Click here to expand this section. + + * `_v1` + - ENV variables: + - CM_ML_MODEL_MOBILENET_VERSION: `1` + - CM_ML_MODEL_FULL_NAME: `mobilenet-v1-precision_<<>>-<<>>-<<>>` + * `_v2` + - ENV variables: + - CM_ML_MODEL_MOBILENET_VERSION: `2` + - CM_ML_MODEL_VER: `2` + - CM_ML_MODEL_FULL_NAME: `mobilenet-v2-precision_<<>>-<<>>-<<>>` + * **`_v3`** (default) + - ENV variables: + - CM_ML_MODEL_MOBILENET_VERSION: `3` + - CM_ML_MODEL_VER: `3` + - CM_ML_MODEL_FULL_NAME: `mobilenet-v3-precision_<<>>-<<>>-<<>>` + +
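+
+    Since one variation may be picked per group, a specific MobileNet build is requested by listing one tag from each group of interest. An illustrative sketch (groups not listed fall back to the defaults below):
+
+    ```python
+    import cmind
+
+    # MobileNet-v2, width multiplier 1.0, 224x224 input; framework and precision
+    # fall back to the _tf and _fp32 defaults
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,mobilenet,raw,ml-model-mobilenet,image-classification,_v2,_multiplier-1.0,_resolution-224',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```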
    + + + ##### Default variations + + `_fp32,_tf,_v3` +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_ML_MODEL: `mobilenet` + * CM_ML_MODEL_DATASET: `imagenet2012-val` + * CM_ML_MODEL_RETRAINING: `no` + * CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `no` + * CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` + * CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32` + * CM_ML_MODEL_MOBILENET_NAME_SUFFIX: `` + + + +___ +#### Script output +```bash +cmr "get ml-model mobilenet raw ml-model-mobilenet image-classification [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-neuralmagic-zoo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-neuralmagic-zoo/index.md new file mode 100644 index 000000000..ddbfc6af0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-neuralmagic-zoo/index.md @@ -0,0 +1,271 @@ +# get-ml-model-neuralmagic-zoo +Automatically generated README for this automation recipe: **get-ml-model-neuralmagic-zoo** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-neuralmagic-zoo/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ml-model,model,zoo,deepsparse,model-zoo,sparse-zoo,neuralmagic,neural-magic[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,ml-model,model,zoo,deepsparse,model-zoo,sparse-zoo,neuralmagic,neural-magic' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_bert-base-pruned90-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none` + - CM_ML_MODEL_FULL_NAME: `bert-base-pruned90-none-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-base-uncased` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_RETRAINING: `no` + * `_bert-base-pruned95_obs_quant-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none` + - CM_ML_MODEL_FULL_NAME: `bert-base-pruned95_obs_quant-none-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-base-uncased` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64` + - CM_ML_MODEL_RETRAINING: `yes` + * `_bert-base_cased-pruned90-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none` + - CM_ML_MODEL_FULL_NAME: `bert-base_cased-pruned90-none-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-base-cased` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_RETRAINING: `no` + * `_bert-large-base-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none` + - CM_ML_MODEL_FULL_NAME: `bert-large-base-none-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_RETRAINING: `no` + * `_bert-large-pruned80_quant-none-vnni` + - Aliases: `_model-stub.zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni` + - CM_ML_MODEL_FULL_NAME: `bert-large-pruned80_quant-none-vnni-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64` + - CM_ML_MODEL_RETRAINING: `no` + * `_mobilebert-14layer_pruned50-none-vnni` + - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni` + - CM_ML_MODEL_FULL_NAME: `mobilebert-14layer_pruned50-none-vnni-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: 
`https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_RETRAINING: `no` + * `_mobilebert-14layer_pruned50_quant-none-vnni` + - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni` + - CM_ML_MODEL_FULL_NAME: `mobilebert-14layer_pruned50_quant-none-vnni-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64` + - CM_ML_MODEL_RETRAINING: `yes` + * `_mobilebert-base_quant-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none` + - CM_ML_MODEL_FULL_NAME: `mobilebert-base_quant-none-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64` + - CM_ML_MODEL_RETRAINING: `yes` + * `_mobilebert-none-base-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none` + - CM_ML_MODEL_FULL_NAME: `mobilebert-none-base-none-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_RETRAINING: `no` + * `_model-stub.#` + - ENV variables: + - CM_MODEL_ZOO_STUB: `#` + * `_obert-base-pruned90-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none` + - CM_ML_MODEL_FULL_NAME: `obert-base-pruned90-none-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_RETRAINING: `no` + * `_obert-large-base-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none` + - CM_ML_MODEL_FULL_NAME: `obert-large-base-none-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32` + - 
CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_RETRAINING: `no` + * `_obert-large-pruned95-none-vnni` + - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni` + - CM_ML_MODEL_FULL_NAME: `obert-large-pruned95-none-vnni-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_RETRAINING: `no` + * `_obert-large-pruned95_quant-none-vnni` + - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni` + - CM_ML_MODEL_FULL_NAME: `obert-large-pruned95_quant-none-vnni-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64` + - CM_ML_MODEL_RETRAINING: `yes` + * `_obert-large-pruned97-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none` + - CM_ML_MODEL_FULL_NAME: `obert-large-pruned97-none-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_RETRAINING: `no` + * `_obert-large-pruned97-quant-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none` + - CM_ML_MODEL_FULL_NAME: `obert-large-pruned97-quant-none-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64` + - CM_ML_MODEL_RETRAINING: `no` + * `_oberta-base-pruned90-quant-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none` + - CM_ML_MODEL_FULL_NAME: `oberta-base-pruned90-quant-none-bert-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/roberta-base` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64` + - CM_ML_MODEL_RETRAINING: `no` + * `_roberta-base-pruned85-quant-none` + - Aliases: `_model-stub.zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none` + - ENV variables: + - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none` + - CM_ML_MODEL_FULL_NAME: `roberta-base-pruned85-quant-none-bert-99` + - 
CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/roberta-base` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64` + - CM_ML_MODEL_RETRAINING: `no` + +
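+
+    Each SparseZoo entry above is exposed both as a named variation and as a `_model-stub.<stub>` alias. A minimal sketch fetching one of the quantized MobileBERT entries, assuming the `cmind` package is installed:
+
+    ```python
+    import cmind
+
+    # Pull a quantized, pruned MobileBERT from the Neural Magic SparseZoo by variation name
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,model,zoo,deepsparse,model-zoo,sparse-zoo,neuralmagic,neural-magic,_mobilebert-14layer_pruned50_quant-none-vnni',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```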
    + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-neuralmagic-zoo/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-neuralmagic-zoo/run.bat) +___ +#### Script output +```bash +cmr "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-resnet50/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-resnet50/index.md new file mode 100644 index 000000000..0f2ff13a7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-resnet50/index.md @@ -0,0 +1,228 @@ +# get-ml-model-resnet50 +Automatically generated README for this automation recipe: **get-ml-model-resnet50** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-resnet50/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-resnet50/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get raw ml-model resnet50 ml-model-resnet50 image-classification" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,raw,ml-model,resnet50,ml-model-resnet50,image-classification[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get raw ml-model resnet50 ml-model-resnet50 image-classification [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,raw,ml-model,resnet50,ml-model-resnet50,image-classification' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get raw ml-model resnet50 ml-model-resnet50 image-classification[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_batch_size.#` + - ENV variables: + - CM_ML_MODEL_BATCH_SIZE: `#` + * `_batch_size.1` + - ENV variables: + - CM_ML_MODEL_BATCH_SIZE: `1` + * `_fix-input-shape` + * `_from-tf` + * `_huggingface_default` + - ENV variables: + - CM_PACKAGE_URL: `https://huggingface.co/ctuning/mlperf-inference-resnet50-onnx-fp32-imagenet2012-v1.0/resolve/main/resnet50_v1.onnx` + +
    + + + * Group "**framework**" +
    + Click here to expand this section. + + * `_ncnn` + - ENV variables: + - CM_ML_MODEL_FRAMEWORK: `ncnn` + * **`_onnx`** (default) + - Aliases: `_onnxruntime` + - ENV variables: + - CM_ML_MODEL_DATA_LAYOUT: `NCHW` + - CM_ML_MODEL_FRAMEWORK: `onnx` + - CM_ML_MODEL_INPUT_LAYERS: `input_tensor:0` + - CM_ML_MODEL_INPUT_LAYER_NAME: `input_tensor:0` + - CM_ML_MODEL_INPUT_SHAPES: `\"input_tensor:0\": (BATCH_SIZE, 3, 224, 224)` + - CM_ML_MODEL_OUTPUT_LAYERS: `softmax_tensor:0` + - CM_ML_MODEL_OUTPUT_LAYER_NAME: `softmax_tensor:0` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `<<>>` + - CM_ML_MODEL_VER: `1.5` + * `_pytorch` + - ENV variables: + - CM_ML_MODEL_DATA_LAYOUT: `NCHW` + - CM_ML_MODEL_FRAMEWORK: `pytorch` + - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: `?` + - CM_ML_MODEL_INPUT_LAYER_NAME: `input_tensor:0` + - CM_ML_MODEL_INPUT_SHAPES: `\"input_tensor:0\": [BATCH_SIZE, 3, 224, 224]` + - CM_ML_MODEL_OUTPUT_LAYERS: `output` + - CM_ML_MODEL_OUTPUT_LAYER_NAME: `?` + - CM_ML_STARTING_WEIGHTS_FILENAME: `<<>>` + * `_tensorflow` + - Aliases: `_tf` + - ENV variables: + - CM_ML_MODEL_ACCURACY: `76.456` + - CM_ML_MODEL_DATA_LAYOUT: `NHWC` + - CM_ML_MODEL_FRAMEWORK: `tensorflow` + - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: `123.68 116.78 103.94` + - CM_ML_MODEL_INPUT_LAYERS: `input_tensor` + - CM_ML_MODEL_INPUT_LAYER_NAME: `input_tensor` + - CM_ML_MODEL_INPUT_SHAPES: `\"input_tensor:0\": (BATCH_SIZE, 3, 224, 224)` + - CM_ML_MODEL_NORMALIZE_DATA: `0` + - CM_ML_MODEL_OUTPUT_LAYERS: `softmax_tensor` + - CM_ML_MODEL_OUTPUT_LAYER_NAME: `softmax_tensor` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `<<>>` + - CM_ML_MODEL_SUBTRACT_MEANS: `YES` + - CM_PACKAGE_URL: `https://zenodo.org/record/2535873/files/resnet50_v1.pb` + * `_tflite` + - ENV variables: + - CM_ML_MODEL_ACCURACY: `76.456` + - CM_ML_MODEL_DATA_LAYOUT: `NHWC` + - CM_ML_MODEL_FRAMEWORK: `tflite` + - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: `123.68 116.78 103.94` + - CM_ML_MODEL_INPUT_LAYERS: `input_tensor` + - CM_ML_MODEL_INPUT_LAYER_NAME: `input_tensor` + - CM_ML_MODEL_INPUT_SHAPES: `\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)` + - CM_ML_MODEL_NORMALIZE_DATA: `0` + - CM_ML_MODEL_OUTPUT_LAYERS: `softmax_tensor` + - CM_ML_MODEL_OUTPUT_LAYER_NAME: `softmax_tensor` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `<<>>` + - CM_ML_MODEL_SUBTRACT_MEANS: `YES` + +
    + + + * Group "**model-output**" +
    + Click here to expand this section. + + * **`_argmax`** (default) + - ENV variables: + - CM_ML_MODEL_OUTPUT_LAYER_ARGMAX: `yes` + * `_no-argmax` + - ENV variables: + - CM_ML_MODEL_OUTPUT_LAYER_ARGMAX: `no` + +
    + + + * Group "**opset-version**" +
    + Click here to expand this section. + + * `_opset-11` + - ENV variables: + - CM_ML_MODEL_ONNX_OPSET: `11` + * `_opset-8` + - ENV variables: + - CM_ML_MODEL_ONNX_OPSET: `8` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * **`_fp32`** (default) + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32` + - CM_ML_MODEL_PRECISION: `fp32` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32` + * `_int8` + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `int8` + - CM_ML_MODEL_PRECISION: `int8` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int8` + * `_uint8` + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `uint8` + - CM_ML_MODEL_PRECISION: `uint8` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `uint8` + +
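+
+    As an example, the ONNX model can be pinned together with an explicit opset and output-layer behaviour. A sketch (the combination shown simply spells out the defaults):
+
+    ```python
+    import cmind
+
+    # ResNet50 in ONNX with opset 11 and the argmax output layer (all defaults, made explicit)
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,raw,ml-model,resnet50,ml-model-resnet50,image-classification,_onnx,_opset-11,_argmax',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```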
    + + + ##### Default variations + + `_argmax,_fp32,_onnx` + +#### Native script being run +=== "Linux/macOS" + * [run-fix-input.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-resnet50/run-fix-input.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get raw ml-model resnet50 ml-model-resnet50 image-classification [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-retinanet-nvidia/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-retinanet-nvidia/index.md new file mode 100644 index 000000000..aa0894064 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-retinanet-nvidia/index.md @@ -0,0 +1,111 @@ +# get-ml-model-retinanet-nvidia +Automatically generated README for this automation recipe: **get-ml-model-retinanet-nvidia** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-retinanet-nvidia/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ml-model nvidia-retinanet nvidia" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ml-model,nvidia-retinanet,nvidia[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ml-model nvidia-retinanet nvidia [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,ml-model,nvidia-retinanet,nvidia' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ml-model nvidia-retinanet nvidia[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_efficient-nms` + - ENV variables: + - CM_NVIDIA_EFFICIENT_NMS: `yes` + +
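+
+    A sketch enabling the EfficientNMS variation above; the `env` override follows the mechanism described in the default-environment tab below, and the value `cuda` is an assumption rather than a documented option:
+
+    ```python
+    import cmind
+
+    # Build the NVIDIA RetinaNet with EfficientNMS; CM_TORCH_DEVICE defaults to 'cpu'
+    # ('cuda' here is an illustrative, unverified override)
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,nvidia-retinanet,nvidia,_efficient-nms',
+                      'env': {'CM_TORCH_DEVICE': 'cuda'},
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```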
    + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_TORCH_DEVICE: `cpu` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-retinanet-nvidia/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get ml-model nvidia-retinanet nvidia [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-retinanet/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-retinanet/index.md new file mode 100644 index 000000000..db0a15981 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-retinanet/index.md @@ -0,0 +1,140 @@ +# get-ml-model-retinanet +Automatically generated README for this automation recipe: **get-ml-model-retinanet** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-retinanet/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-retinanet/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ml-model raw resnext50 retinanet object-detection" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ml-model,raw,resnext50,retinanet,object-detection[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ml-model raw resnext50 retinanet object-detection [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,ml-model,raw,resnext50,retinanet,object-detection' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ml-model raw resnext50 retinanet object-detection[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_no-nms` + - ENV variables: + - CM_TMP_ML_MODEL_RETINANET_NO_NMS: `yes` + - CM_ML_MODEL_RETINANET_NO_NMS: `yes` + - CM_QAIC_PRINT_NODE_PRECISION_INFO: `yes` + * `_weights` + - ENV variables: + - CM_MODEL_WEIGHTS_FILE: `yes` + +
    + + + * Group "**framework**" +
    + Click here to expand this section. + + * **`_onnx`** (default) + - ENV variables: + - CM_ML_MODEL_DATA_LAYOUT: `NCHW` + - CM_ML_MODEL_FRAMEWORK: `onnx` + * `_pytorch` + - ENV variables: + - CM_ML_MODEL_DATA_LAYOUT: `NCHW` + - CM_ML_MODEL_FRAMEWORK: `pytorch` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * **`_fp32`** (default) + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32` + - CM_ML_MODEL_PRECISION: `fp32` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32` + +
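+
+    For example, the PyTorch flavour can be requested together with the `_weights` variation above. A hedged sketch, assuming the repository above has been pulled:
+
+    ```python
+    import cmind
+
+    # PyTorch RetinaNet with the weights file; precision stays at the _fp32 default
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,raw,resnext50,retinanet,object-detection,_pytorch,_weights',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```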
    + + + ##### Default variations + + `_fp32,_onnx` + +#### Native script being run +=== "Linux/macOS" + * [run-no-nms.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-retinanet/run-no-nms.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get ml-model raw resnext50 retinanet object-detection [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-rnnt/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-rnnt/index.md new file mode 100644 index 000000000..82a0da040 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-rnnt/index.md @@ -0,0 +1,133 @@ +# get-ml-model-rnnt +Automatically generated README for this automation recipe: **get-ml-model-rnnt** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-rnnt/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ml-model rnnt raw librispeech speech-recognition" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ml-model,rnnt,raw,librispeech,speech-recognition[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ml-model rnnt raw librispeech speech-recognition [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,ml-model,rnnt,raw,librispeech,speech-recognition' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ml-model rnnt raw librispeech speech-recognition[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_weights` + - ENV variables: + - CM_MODEL_WEIGHTS_FILE: `yes` + +
    + + + * Group "**download-src**" +
    + Click here to expand this section. + + * `_amazon-s3` + * **`_zenodo`** (default) + +
    + + + * Group "**framework**" +
    + Click here to expand this section. + + * **`_pytorch`** (default) + - ENV variables: + - CM_ML_MODEL_FRAMEWORK: `pytorch` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * **`_fp32`** (default) + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32` + - CM_ML_MODEL_PRECISION: `fp32` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32` + +
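+
+    Switching the download source is a matter of selecting the other variation in the download-src group above. An illustrative sketch:
+
+    ```python
+    import cmind
+
+    # Fetch the RNN-T checkpoint from Amazon S3 instead of the default Zenodo source
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,ml-model,rnnt,raw,librispeech,speech-recognition,_amazon-s3',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```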
    + + + ##### Default variations + + `_fp32,_pytorch,_zenodo` + +___ +#### Script output +```bash +cmr "get ml-model rnnt raw librispeech speech-recognition [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-stable-diffusion/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-stable-diffusion/index.md new file mode 100644 index 000000000..e488ff105 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-stable-diffusion/index.md @@ -0,0 +1,177 @@ +# get-ml-model-stable-diffusion +Automatically generated README for this automation recipe: **get-ml-model-stable-diffusion** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-stable-diffusion/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get raw ml-model stable-diffusion sdxl text-to-image" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,raw,ml-model,stable-diffusion,sdxl,text-to-image[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get raw ml-model stable-diffusion sdxl text-to-image [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,raw,ml-model,stable-diffusion,sdxl,text-to-image' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get raw ml-model stable-diffusion sdxl text-to-image[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_batch_size.#` + - ENV variables: + - CM_ML_MODEL_BATCH_SIZE: `#` + +
    + + + * Group "**download-source**" +
    + Click here to expand this section. + + * `_huggingface` + * **`_mlcommons`** (default) + +
    + + + * Group "**download-tool**" +
    + Click here to expand this section. + + * `_git` + - ENV variables: + - CM_DOWNLOAD_TOOL: `git` + * `_rclone` + - ENV variables: + - CM_RCLONE_CONFIG_CMD: `rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com` + - CM_DOWNLOAD_TOOL: `rclone` + * `_wget` + - ENV variables: + - CM_DOWNLOAD_TOOL: `wget` + +
    + + + * Group "**framework**" +
    + Click here to expand this section. + + * **`_pytorch`** (default) + - ENV variables: + - CM_ML_MODEL_FRAMEWORK: `pytorch` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * `_fp16` + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `fp16` + - CM_ML_MODEL_PRECISION: `fp16` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp16` + * **`_fp32`** (default) + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32` + - CM_ML_MODEL_PRECISION: `fp32` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32` + * `_int8` + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `int8` + - CM_ML_MODEL_PRECISION: `int8` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int8` + * `_uint8` + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `uint8` + - CM_ML_MODEL_PRECISION: `uint8` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `uint8` + +
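+
+    A sketch requesting the fp16 checkpoint from the MLCommons source via rclone and redirecting the download; the `to` input key mirrors the `--to` flag in the table below, and the target path is illustrative:
+
+    ```python
+    import cmind
+
+    # fp16 SDXL via rclone from the default MLCommons source;
+    # 'to' maps to CM_DOWNLOAD_PATH (path is an illustrative assumption)
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,raw,ml-model,stable-diffusion,sdxl,text-to-image,_fp16,_rclone',
+                      'to': '/data/models/sdxl',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```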
    + + + ##### Default variations + + `_fp32,_mlcommons,_pytorch` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--checkpoint=value` → `SDXL_CHECKPOINT_PATH=value` + * `--download_path=value` → `CM_DOWNLOAD_PATH=value` + * `--to=value` → `CM_DOWNLOAD_PATH=value` + + + + +___ +#### Script output +```bash +cmr "get raw ml-model stable-diffusion sdxl text-to-image [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-tiny-resnet/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-tiny-resnet/index.md new file mode 100644 index 000000000..cb4084c9f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-tiny-resnet/index.md @@ -0,0 +1,153 @@ +# get-ml-model-tiny-resnet +Automatically generated README for this automation recipe: **get-ml-model-tiny-resnet** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-tiny-resnet/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,raw,ml-model,resnet,pretrained,tiny,model,ic,ml-model-tiny-resnet,image-classification[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,raw,ml-model,resnet,pretrained,tiny,model,ic,ml-model-tiny-resnet,image-classification' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_batch_size.#` + - ENV variables: + - CM_ML_MODEL_BATCH_SIZE: `#` + +
    + + + * Group "**framework**" +
    + Click here to expand this section. + + * `_onnx` + - ENV variables: + - CM_TMP_ML_MODEL_TF2ONNX: `yes` + * **`_tflite`** (default) + - ENV variables: + - CM_ML_MODEL_ACCURACY: `85` + - CM_ML_MODEL_DATA_LAYOUT: `NHWC` + - CM_ML_MODEL_FRAMEWORK: `tflite` + - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: `` + - CM_ML_MODEL_INPUT_LAYERS: `` + - CM_ML_MODEL_INPUT_LAYER_NAME: `` + - CM_ML_MODEL_INPUT_SHAPES: `` + - CM_ML_MODEL_NORMALIZE_DATA: `0` + - CM_ML_MODEL_OUTPUT_LAYERS: `` + - CM_ML_MODEL_OUTPUT_LAYER_NAME: `` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `<<>>` + - CM_ML_MODEL_SUBTRACT_MEANS: `YES` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * `_fp32` + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32` + - CM_ML_MODEL_PRECISION: `fp32` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32` + * **`_int8`** (default) + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `int8` + - CM_ML_MODEL_PRECISION: `int8` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int8` + * `_uint8` + - ENV variables: + - CM_ML_MODEL_INPUT_DATA_TYPES: `uint8` + - CM_ML_MODEL_PRECISION: `uint8` + - CM_ML_MODEL_WEIGHT_DATA_TYPES: `uint8` + +
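+
+    As an illustration, the `_onnx` framework variation above can be combined with the default int8 precision; judging by `CM_TMP_ML_MODEL_TF2ONNX`, this appears to trigger a TensorFlow-to-ONNX conversion, though that reading is an inference from the variable name:
+
+    ```python
+    import cmind
+
+    # Request the ONNX flavour of the tiny int8 ResNet
+    # (sets CM_TMP_ML_MODEL_TF2ONNX=yes per the framework group above)
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,raw,ml-model,resnet,pretrained,tiny,model,ic,ml-model-tiny-resnet,image-classification,_onnx,_int8',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```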
    + + + ##### Default variations + + `_int8,_tflite` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-tiny-resnet/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-using-imagenet-from-model-zoo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-using-imagenet-from-model-zoo/index.md new file mode 100644 index 000000000..27bce3765 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-ml-model-using-imagenet-from-model-zoo/index.md @@ -0,0 +1,95 @@ +# get-ml-model-using-imagenet-from-model-zoo +Automatically generated README for this automation recipe: **get-ml-model-using-imagenet-from-model-zoo** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-using-imagenet-from-model-zoo/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ml-model model-zoo zoo imagenet image-classification" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ml-model,model-zoo,zoo,imagenet,image-classification[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ml-model model-zoo zoo imagenet image-classification [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,ml-model,model-zoo,zoo,imagenet,image-classification' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ml-model model-zoo zoo imagenet image-classification[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**model-source**" +
    + Click here to expand this section. + + * `_model.#` + * `_model.resnet101-pytorch-base` + * `_model.resnet50-pruned95-uniform-quant` + +
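+ As an illustrative sketch (assuming the model-source stubs listed above are valid), one variation from this group can be appended directly to the quoted tags: + + ```bash + # select a specific model-source variation from the group above (illustration only) + cmr "get ml-model model-zoo zoo imagenet image-classification _model.resnet50-pruned95-uniform-quant" + ```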
    + + +___ +#### Script output +```bash +cmr "get ml-model model-zoo zoo imagenet image-classification [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-tvm-model/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-tvm-model/index.md new file mode 100644 index 000000000..4cff76283 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/get-tvm-model/index.md @@ -0,0 +1,188 @@ +# get-tvm-model +Automatically generated README for this automation recipe: **get-tvm-model** + +Category: **[AI/ML models](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm-model/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm-model/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ml-model-tvm tvm-model" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ml-model-tvm,tvm-model[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ml-model-tvm tvm-model [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,ml-model-tvm,tvm-model' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ml-model-tvm tvm-model[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_tune-model` + - ENV variables: + - CM_TUNE_TVM_MODEL: `yes` + +
    + + + * Group "**batchsize**" +
    + Click here to expand this section. + + * `_batch_size.#` + - ENV variables: + - CM_ML_MODEL_MAX_BATCH_SIZE: `#` + +
    + + + * Group "**frontend**" +
    + Click here to expand this section. + + * **`_onnx`** (default) + - ENV variables: + - CM_TVM_FRONTEND_FRAMEWORK: `onnx` + * `_pytorch` + - Aliases: `_torch` + - ENV variables: + - CM_TVM_FRONTEND_FRAMEWORK: `pytorch` + * `_tensorflow` + - Aliases: `_tf` + - ENV variables: + - CM_TVM_FRONTEND_FRAMEWORK: `tensorflow` + * `_tflite` + - ENV variables: + - CM_TVM_FRONTEND_FRAMEWORK: `tflite` + +
    + + + * Group "**model**" +
    + Click here to expand this section. + + * `_model.#` + - ENV variables: + - CM_ML_MODEL: `#` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * **`_fp32`** (default) + * `_int8` + * `_uint8` + +
    + + + * Group "**runtime**" +
    + Click here to expand this section. + + * `_graph_executor` + - ENV variables: + - CM_TVM_USE_VM: `no` + * **`_virtual_machine`** (default) + - ENV variables: + - CM_TVM_USE_VM: `yes` + +
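+ Variations within one group are mutually exclusive, while variations from different groups can be combined. A minimal sketch that picks at most one variation per group above might look like: + + ```bash + # fetch a TVM model via the PyTorch frontend in fp32 using the graph executor (illustration only) + cm run script --tags=get,ml-model-tvm,tvm-model,_pytorch,_fp32,_graph_executor + ```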
    + + + ##### Default variations + + `_fp32,_onnx,_virtual_machine` +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_ML_MODEL_MAX_BATCH_SIZE: `1` + * CM_TUNE_TVM_MODEL: `no` + * CM_TVM_USE_VM: `yes` + * CM_TVM_FRONTEND_FRAMEWORK: `onnx` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm-model/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get ml-model-tvm tvm-model [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/index.md new file mode 100644 index 000000000..c3c12890a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-models/index.md @@ -0,0 +1,21 @@ +* [convert-ml-model-huggingface-to-onnx](convert-ml-model-huggingface-to-onnx/index.md) +* [get-bert-squad-vocab](get-bert-squad-vocab/index.md) +* [get-dlrm](get-dlrm/index.md) +* [get-ml-model-3d-unet-kits19](get-ml-model-3d-unet-kits19/index.md) +* [get-ml-model-bert-base-squad](get-ml-model-bert-base-squad/index.md) +* [get-ml-model-bert-large-squad](get-ml-model-bert-large-squad/index.md) +* [get-ml-model-dlrm-terabyte](get-ml-model-dlrm-terabyte/index.md) +* [get-ml-model-efficientnet-lite](get-ml-model-efficientnet-lite/index.md) +* [get-ml-model-gptj](get-ml-model-gptj/index.md) +* [get-ml-model-huggingface-zoo](get-ml-model-huggingface-zoo/index.md) +* [get-ml-model-llama2](get-ml-model-llama2/index.md) +* [get-ml-model-mobilenet](get-ml-model-mobilenet/index.md) +* [get-ml-model-neuralmagic-zoo](get-ml-model-neuralmagic-zoo/index.md) +* [get-ml-model-resnet50](get-ml-model-resnet50/index.md) +* [get-ml-model-retinanet](get-ml-model-retinanet/index.md) +* [get-ml-model-retinanet-nvidia](get-ml-model-retinanet-nvidia/index.md) +* [get-ml-model-rnnt](get-ml-model-rnnt/index.md) +* [get-ml-model-stable-diffusion](get-ml-model-stable-diffusion/index.md) +* [get-ml-model-tiny-resnet](get-ml-model-tiny-resnet/index.md) +* [get-ml-model-using-imagenet-from-model-zoo](get-ml-model-using-imagenet-from-model-zoo/index.md) +* [get-tvm-model](get-tvm-model/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/calibrate-model-for.qaic/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/calibrate-model-for.qaic/index.md new file mode 100644 index 000000000..9c61d1124 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/calibrate-model-for.qaic/index.md @@ -0,0 +1,186 @@ +# calibrate-model-for.qaic +Automatically generated README for this automation recipe: **calibrate-model-for.qaic** + +Category: **[AI/ML optimization](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/calibrate-model-for.qaic/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "qaic calibrate profile qaic-profile qaic-calibrate" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=qaic,calibrate,profile,qaic-profile,qaic-calibrate[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "qaic calibrate profile qaic-profile qaic-calibrate [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'qaic,calibrate,profile,qaic-profile,qaic-calibrate', + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print(r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "qaic calibrate profile qaic-profile qaic-calibrate[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +&#10;
    + Click here to expand this section. + + * `_first.#` + +
    + + + * Group "**batch-size**" +
    + Click here to expand this section. + + * `_bs.#` + - ENV variables: + - CM_QAIC_MODEL_BATCH_SIZE: `#` + - CM_CREATE_INPUT_BATCH: `yes` + * `_bs.1` + - ENV variables: + - CM_QAIC_MODEL_BATCH_SIZE: `1` + - CM_CREATE_INPUT_BATCH: `yes` + +
    + + + * Group "**calib-dataset-filter-size**" +
    + Click here to expand this section. + + * `_filter-size.#` + +
    + + + * Group "**calibration-option**" +
    + Click here to expand this section. + + * `_mlperf.option1` + * `_mlperf.option2` + +
    + + + * Group "**model**" +
    + Click here to expand this section. + + * `_bert-99` + - ENV variables: + - CM_CALIBRATE_SQUAD: `yes` + - CM_QAIC_COMPILER_ARGS: `` + - CM_QAIC_COMPILER_PARAMS: `-onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,<<>> -input-list-file=<<>> -num-histogram-bins=512 -profiling-threads=<<>>` + - CM_QAIC_MODEL_TO_CONVERT: `calibrate_bert_mlperf` + * `_resnet50` + - ENV variables: + - CM_QAIC_MODEL_NAME: `resnet50` + - CM_CALIBRATE_IMAGENET: `yes` + - CM_QAIC_COMPILER_ARGS: `` + - CM_QAIC_COMPILER_PARAMS: `-output-node-name=ArgMax -profiling-threads=<<>>` + - CM_QAIC_OUTPUT_NODE_NAME: `-output-node-name=ArgMax` + - CM_QAIC_MODEL_TO_CONVERT: `calibrate_resnet50_tf` + * `_retinanet` + - ENV variables: + - CM_QAIC_MODEL_NAME: `retinanet` + - CM_CALIBRATE_OPENIMAGES: `yes` + - CM_QAIC_COMPILER_ARGS: `` + - CM_QAIC_COMPILER_PARAMS: `-enable-channelwise -profiling-threads=<<>> -onnx-define-symbol=batch_size,<<>> -node-precision-info=<<>>` + - CM_QAIC_MODEL_TO_CONVERT: `calibrate_retinanet_no_nms_mlperf` + +
    + + + * Group "**model-framework**" +
    + Click here to expand this section. + + * `_tf` + +
    + + + * Group "**seq-length**" +
+ Click here to expand this section. + + * `_seq.#` + - ENV variables: + - CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: `#` + * `_seq.384` + - ENV variables: + - CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: `384` + +&#10;
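+ As a hedged example of combining one variation per group above, a BERT calibration with a 384-token sequence length might be invoked as: + + ```bash + # calibrate BERT for QAIC with the SQuAD 384-token profile (illustration only) + cmr "qaic calibrate profile qaic-profile qaic-calibrate _bert-99,_seq.384,_mlperf.option1" + ```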
    + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/calibrate-model-for.qaic/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "qaic calibrate profile qaic-profile qaic-calibrate [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/compile-model-for.qaic/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/compile-model-for.qaic/index.md new file mode 100644 index 000000000..4bc9d3db7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/compile-model-for.qaic/index.md @@ -0,0 +1,216 @@ +# compile-model-for.qaic +Automatically generated README for this automation recipe: **compile-model-for.qaic** + +Category: **[AI/ML optimization](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-model-for.qaic/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "qaic compile model model-compile qaic-compile" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=qaic,compile,model,model-compile,qaic-compile[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "qaic compile model model-compile qaic-compile [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'qaic,compile,model,model-compile,qaic-compile' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "qaic compile model model-compile qaic-compile[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_bert-99` + - ENV variables: + - CM_COMPILE_BERT: `on` + - CM_QAIC_MODEL_TO_CONVERT: `calibrate_bert_mlperf` + - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: `-aic-hw -aic-hw-version=2.0 -execute-nodes-in-fp16=Add,Div,Erf,Softmax -quantization-schema=symmetric_with_uint8 -quantization-precision=Int8 -quantization-precision-bias=Int32 -vvv -compile-only -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,384 -multicast-weights -combine-inputs=false -combine-outputs=false` + - CM_QAIC_MODEL_COMPILER_ARGS: `` + * `_bert-99.9` + - ENV variables: + - CM_COMPILE_BERT: `on` + - CM_QAIC_MODEL_TO_CONVERT: `bert_mlperf` + - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: `-aic-hw -aic-hw-version=2.0 -convert-to-fp16 -vvv -compile-only -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,384 -combine-inputs=false -combine-outputs=false` + - CM_QAIC_MODEL_COMPILER_ARGS: `` + * `_resnet50` + - ENV variables: + - CM_COMPILE_RESNET: `on` + - CM_QAIC_MODEL_TO_CONVERT: `compile_resnet50_tf` + - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: `-aic-hw -aic-hw-version=2.0 -quantization-schema=symmetric_with_uint8 -quantization-precision=Int8 -output-node-name=ArgMax -vvv -compile-only -use-producer-dma=1` + * `_retinanet` + - ENV variables: + - CM_COMPILE_RETINANET: `on` + - CM_QAIC_MODEL_TO_CONVERT: `calibrate_retinanet_no_nms_mlperf` + - CM_QAIC_MODEL_COMPILER_ARGS: `-aic-enable-depth-first` + - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: `-aic-hw -aic-hw-version=2.0 -compile-only -enable-channelwise -onnx-define-symbol=batch_size,1 -node-precision-info=<<>> -quantization-schema-constants=symmetric_with_uint8 -quantization-schema-activations=asymmetric -quantization-calibration=None` + +
    + + + * Group "**batch-size**" +
    + Click here to expand this section. + + * `_bs.#` + - ENV variables: + - CM_QAIC_MODEL_BATCH_SIZE: `#` + * `_bs.1` + - ENV variables: + - CM_QAIC_MODEL_BATCH_SIZE: `1` + +
    + + + * Group "**calib-dataset-filter-size**" +
    + Click here to expand this section. + + * `_filter-size.#` + +
    + + + * Group "**mlperf-scenario**" +
    + Click here to expand this section. + + * `_multistream` + * `_offline` + * `_server` + * **`_singlestream`** (default) + +
    + + + * Group "**model-framework**" +
    + Click here to expand this section. + + * `_tf` + +
    + + + * Group "**nsp**" +
    + Click here to expand this section. + + * `_nsp.14` + * `_nsp.16` + * `_nsp.8` + * `_nsp.9` + +
    + + + * Group "**percentile-calibration**" +
    + Click here to expand this section. + + * `_pc.#` + - ENV variables: + - CM_QAIC_MODEL_COMPILER_PERCENTILE_CALIBRATION_VALUE: `#` + - CM_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS: `-quantization-calibration=Percentile -percentile-calibration-value=<<>>` + +
    + + + * Group "**quantization**" +
    + Click here to expand this section. + + * `_no-quantized` + - ENV variables: + - CM_QAIC_MODEL_QUANTIZATION: `no` + * **`_quantized`** (default) + - ENV variables: + - CM_QAIC_MODEL_QUANTIZATION: `yes` + +
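+ Putting the groups above together, an illustrative compile command that selects one variation per group could be: + + ```bash + # compile quantized ResNet-50 for the offline scenario on a 14-NSP device (illustration only) + cmr "qaic compile model model-compile qaic-compile _resnet50,_offline,_nsp.14,_quantized" + ```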
    + + + ##### Default variations + + `_quantized,_singlestream` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--register=value` → `CM_REGISTER_CACHE=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-model-for.qaic/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "qaic compile model model-compile qaic-compile [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/index.md new file mode 100644 index 000000000..c1a250851 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/index.md @@ -0,0 +1,3 @@ +* [calibrate-model-for.qaic](calibrate-model-for.qaic/index.md) +* [compile-model-for.qaic](compile-model-for.qaic/index.md) +* [prune-bert-models](prune-bert-models/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/prune-bert-models/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/prune-bert-models/index.md new file mode 100644 index 000000000..bf9821a7e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/AI-ML-optimization/prune-bert-models/index.md @@ -0,0 +1,132 @@ +# prune-bert-models +Automatically generated README for this automation recipe: **prune-bert-models** + +Category: **[AI/ML optimization](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-bert-models/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-bert-models/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "prune bert-models bert-prune prune-bert-models" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=prune,bert-models,bert-prune,prune-bert-models[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "prune bert-models bert-prune prune-bert-models [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'prune,bert-models,bert-prune,prune-bert-models' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "prune bert-models bert-prune prune-bert-models[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_model.#` + - ENV variables: + - CM_BERT_PRUNE_MODEL_NAME: `#` + - CM_MODEL_ZOO_STUB: `#` + * `_path.#` + - ENV variables: + - CM_BERT_PRUNE_CKPT_PATH: `#` + * `_task.#` + - ENV variables: + - CM_BERT_PRUNE_TASK: `#` + +
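+ A minimal sketch combining the `_model.#` wildcard with the `--constraint` flag documented below (the stub name and value are illustrative): + + ```bash + # prune a zoo model with a 0.5 sparsity constraint (illustration only) + cmr "prune bert-models bert-prune prune-bert-models _model.bert-large-uncased" --constraint=0.5 + ```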
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--constraint=value` → `CM_BERT_PRUNE_CONSTRAINT=value` + * `--output_dir=value` → `CM_BERT_PRUNE_OUTPUT_DIR=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_BERT_PRUNE_TASK: `squad` + * CM_BERT_PRUNE_MODEL_NAME: `bert-large-uncased` + * CM_MODEL_ZOO_STUB: `bert-large-uncased` + * CM_BERT_PRUNE_CONSTRAINT: `0.5` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-bert-models/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "prune bert-models bert-prune prune-bert-models [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-Interface/get-cache-dir/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-Interface/get-cache-dir/index.md new file mode 100644 index 000000000..6c62118e8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-Interface/get-cache-dir/index.md @@ -0,0 +1,95 @@ +# get-cache-dir +Automatically generated README for this automation recipe: **get-cache-dir** + +Category: **[CM Interface](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cache-dir/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get cache dir directory" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,cache,dir,directory[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get cache dir directory [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,cache,dir,directory' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get cache dir directory[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_name.#` + - ENV variables: + - CM_CACHE_DIR_NAME: `#` + +
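+ For example (the cache entry name is illustrative), the resolved directory can then be read from the JSON that the `-j` flag prints: + + ```bash + # create or reuse a named cache directory and print the script output as JSON (illustration only) + cmr "get cache dir directory _name.my-experiments" -j + ```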
+ + +___ +#### Script output +```bash +cmr "get cache dir directory [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-Interface/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-Interface/index.md new file mode 100644 index 000000000..32d34042d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-Interface/index.md @@ -0,0 +1 @@ +* [get-cache-dir](get-cache-dir/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-automation/create-custom-cache-entry/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-automation/create-custom-cache-entry/index.md new file mode 100644 index 000000000..178195e07 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-automation/create-custom-cache-entry/index.md @@ -0,0 +1,92 @@ +# create-custom-cache-entry +Automatically generated README for this automation recipe: **create-custom-cache-entry** + +Category: **[CM automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/create-custom-cache-entry/_cm.yaml)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "create custom cache entry" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=create,custom,cache,entry [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "create custom cache entry " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'create,custom,cache,entry', + 'out':'con', + ... + (other input keys for this script) + ... &#10; + }) + + if r['return']>0: + print(r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "create custom cache entry" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--env_key=value` → `CM_CUSTOM_CACHE_ENTRY_ENV_KEY=value` + * `--env_key2=value` → `CM_CUSTOM_CACHE_ENTRY_ENV_KEY2=value` + * `--path=value` → `CM_CUSTOM_CACHE_ENTRY_PATH=value` + * `--to=value` → `CM_CUSTOM_CACHE_ENTRY_PATH=value` + + + + +___ +#### Script output +```bash +cmr "create custom cache entry " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-automation/index.md new file mode 100644 index 000000000..996533c19 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-automation/index.md @@ -0,0 +1 @@ +* [create-custom-cache-entry](create-custom-cache-entry/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/index.md new file mode 100644 index 000000000..927cf1b1a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/index.md @@ -0,0 +1,2 @@ +* [test-debug](test-debug/index.md) +* [test-mlperf-inference-retinanet](test-mlperf-inference-retinanet/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/test-debug/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/test-debug/index.md new file mode 100644 index 000000000..0e848be1a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/test-debug/index.md @@ -0,0 +1,87 @@ +# test-debug +Automatically generated README for this automation recipe: **test-debug** + +Category: **[CM interface prototyping](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-debug/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-debug/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "test cm-debug" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=test,cm-debug + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "test cm-debug " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'test,cm-debug', + 'out':'con', + ... + (other input keys for this script) + ... &#10; + }) + + if r['return']>0: + print(r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "test cm-debug" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-debug/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-debug/run.bat) +___ +#### Script output +```bash +cmr "test cm-debug " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/test-mlperf-inference-retinanet/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/test-mlperf-inference-retinanet/index.md new file mode 100644 index 000000000..406c3a9ad --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CM-interface-prototyping/test-mlperf-inference-retinanet/index.md @@ -0,0 +1,86 @@ +# test-mlperf-inference-retinanet +Automatically generated README for this automation recipe: **test-mlperf-inference-retinanet** + +Category: **[CM interface prototyping](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/test-mlperf-inference-retinanet/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "test mlperf-inference-win retinanet windows" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=test,mlperf-inference-win,retinanet,windows + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "test mlperf-inference-win retinanet windows " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'test,mlperf-inference-win,retinanet,windows', + 'out':'con', + ... + (other input keys for this script) + ... &#10; + }) + + if r['return']>0: + print(r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "test mlperf-inference-win retinanet windows" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-mlperf-inference-retinanet/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-mlperf-inference-retinanet/run.bat) +___ +#### Script output +```bash +cmr "test mlperf-inference-win retinanet windows " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cuda-devices/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cuda-devices/index.md new file mode 100644 index 000000000..7fddb5a55 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cuda-devices/index.md @@ -0,0 +1,86 @@ +# get-cuda-devices +Automatically generated README for this automation recipe: **get-cuda-devices** + +Category: **[CUDA automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda-devices/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get cuda-devices" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,cuda-devices + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get cuda-devices " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,cuda-devices', + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print(r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get cuda-devices" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda-devices/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda-devices/run.bat) +___ +#### Script output +```bash +cmr "get cuda-devices " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cuda/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cuda/index.md new file mode 100644 index 000000000..5e789f3e6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cuda/index.md @@ -0,0 +1,158 @@ +# get-cuda +Automatically generated README for this automation recipe: **get-cuda** + +Category: **[CUDA automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda/README-extra.md) + + +--- + +# System dependencies + +* Download [CUDA toolkit](https://developer.nvidia.com/cuda-toolkit). +* Download [cuDNN](https://developer.nvidia.com/rdp/cudnn-download). &#10;
+* Download [TensorRT](https://developer.nvidia.com/nvidia-tensorrt-8x-download). + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda/_cm.yaml)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda 46d133d9ef92422d" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,cuda,cuda-compiler,cuda-lib,toolkit,lib,nvcc,get-nvcc,get-cuda,46d133d9ef92422d[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda 46d133d9ef92422d [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,cuda,cuda-compiler,cuda-lib,toolkit,lib,nvcc,get-nvcc,get-cuda,46d133d9ef92422d' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda 46d133d9ef92422d[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_cudnn` + - ENV variables: + - CM_CUDA_NEEDS_CUDNN: `yes` + * `_package-manager` + - ENV variables: + - CM_CUDA_PACKAGE_MANAGER_INSTALL: `yes` + +
    + + + * Group "**installation-mode**" +
    + Click here to expand this section. + + * `_lib-only` + - ENV variables: + - CM_CUDA_FULL_TOOLKIT_INSTALL: `no` + - CM_TMP_FILE_TO_CHECK_UNIX: `libcudart.so` + - CM_TMP_FILE_TO_CHECK_WINDOWS: `libcudart.dll` + * **`_toolkit`** (default) + - ENV variables: + - CM_CUDA_FULL_TOOLKIT_INSTALL: `yes` + - CM_TMP_FILE_TO_CHECK_UNIX: `nvcc` + - CM_TMP_FILE_TO_CHECK_WINDOWS: `nvcc.exe` + +
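+ As a sketch, the non-default installation mode is selected like any other group variation: + + ```bash + # detect only the CUDA runtime library instead of the full toolkit (illustration only) + cmr "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda _lib-only" + ```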
    + + + ##### Default variations + + `_toolkit` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--cudnn_tar_file=value` → `CM_CUDNN_TAR_FILE_PATH=value` + * `--cudnn_tar_path=value` → `CM_CUDNN_TAR_FILE_PATH=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_CUDA_PATH_LIB_CUDNN_EXISTS: `no` + * CM_REQUIRE_INSTALL: `no` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda/run.bat) +___ +#### Script output +```bash +cmr "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda 46d133d9ef92422d [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cudnn/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cudnn/index.md new file mode 100644 index 000000000..76655cd84 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-cudnn/index.md @@ -0,0 +1,115 @@ +# get-cudnn +Automatically generated README for this automation recipe: **get-cudnn** + +Category: **[CUDA automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cudnn/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cudnn/_cm.yaml)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get cudnn nvidia" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,cudnn,nvidia [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get cudnn nvidia " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,cudnn,nvidia' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get cudnn nvidia" [--input_flags] + ``` +___ + +=== "Input Flags" + + + #### Input Flags + + * --**input:** Full path to the installed cuDNN library + * --**tar_file:** Full path to the cuDNN Tar file downloaded from Nvidia website (https://developer.nvidia.com/cudnn) +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--input=value` → `CM_INPUT=value` + * `--tar_file=value` → `CM_CUDNN_TAR_FILE_PATH=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_INPUT: `` + * CM_SUDO: `sudo` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cudnn/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get cudnn nvidia " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-tensorrt/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-tensorrt/index.md new file mode 100644 index 000000000..afa872119 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/get-tensorrt/index.md @@ -0,0 +1,119 @@ +# get-tensorrt +Automatically generated README for this automation recipe: **get-tensorrt** + +Category: **[CUDA automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tensorrt/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tensorrt/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get tensorrt nvidia" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,tensorrt,nvidia[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get tensorrt nvidia [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,tensorrt,nvidia' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get tensorrt nvidia[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_dev` + - ENV variables: + - CM_TENSORRT_REQUIRE_DEV: `yes` + +
    + +=== "Input Flags" + + + #### Input Flags + + * --**input:** Full path to the installed TensorRT library (nvinfer) + * --**tar_file:** Full path to the TensorRT Tar file downloaded from the Nvidia website (https://developer.nvidia.com/tensorrt) +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--input=value` → `CM_INPUT=value` + * `--tar_file=value` → `CM_TENSORRT_TAR_FILE_PATH=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tensorrt/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get tensorrt nvidia [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/index.md new file mode 100644 index 000000000..335dc83a5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/index.md @@ -0,0 +1,6 @@ +* [get-cuda](get-cuda/index.md) +* [get-cuda-devices](get-cuda-devices/index.md) +* [get-cudnn](get-cudnn/index.md) +* [get-tensorrt](get-tensorrt/index.md) +* [install-cuda-package-manager](install-cuda-package-manager/index.md) +* [install-cuda-prebuilt](install-cuda-prebuilt/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/install-cuda-package-manager/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/install-cuda-package-manager/index.md new file mode 100644 index 000000000..84b7b3e48 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/install-cuda-package-manager/index.md @@ -0,0 +1,87 @@ +# install-cuda-package-manager +Automatically generated README for this automation recipe: **install-cuda-package-manager** + +Category: **[CUDA automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-package-manager/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install package-manager cuda package-manager-cuda install-pm-cuda" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,package-manager,cuda,package-manager-cuda,install-pm-cuda + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install package-manager cuda package-manager-cuda install-pm-cuda " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,package-manager,cuda,package-manager-cuda,install-pm-cuda' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install package-manager cuda package-manager-cuda install-pm-cuda" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-package-manager/run-ubuntu.sh) + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-package-manager/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install package-manager cuda package-manager-cuda install-pm-cuda " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/install-cuda-prebuilt/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/install-cuda-prebuilt/index.md new file mode 100644 index 000000000..674817343 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/CUDA-automation/install-cuda-prebuilt/index.md @@ -0,0 +1,138 @@ +# install-cuda-prebuilt +Automatically generated README for this automation recipe: **install-cuda-prebuilt** + +Category: **[CUDA automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-prebuilt/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-prebuilt/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,prebuilt,cuda,prebuilt-cuda,install-prebuilt-cuda[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,prebuilt,cuda,prebuilt-cuda,install-prebuilt-cuda' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**install-driver**" +
    + Click here to expand this section. + + * `_driver` + - ENV variables: + - CM_CUDA_INSTALL_DRIVER: `yes` + * **`_no-driver`** (default) + - ENV variables: + - CM_CUDA_INSTALL_DRIVER: `no` + +
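+ An illustrative run that also installs the NVIDIA driver and pins one of the versions listed below: + + ```bash + # install prebuilt CUDA 11.8.0 together with the driver (illustration only) + cm run script --tags=install,prebuilt,cuda,prebuilt-cuda,install-prebuilt-cuda,_driver --version=11.8.0 + ```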
    + + + ##### Default variations + + `_no-driver` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--local_run_file_path=value` → `CUDA_RUN_FILE_LOCAL_PATH=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_SUDO: `sudo` + + +#### Versions +Default version: `11.8.0` + +* `11.7.0` +* `11.8.0` +* `12.0.0` +* `12.1.1` +* `12.2.0` +* `12.3.2` +* `12.4.1` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-prebuilt/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/destroy-terraform/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/destroy-terraform/index.md new file mode 100644 index 000000000..0cdd8886a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/destroy-terraform/index.md @@ -0,0 +1,87 @@ +# destroy-terraform +Automatically generated README for this automation recipe: **destroy-terraform** + +Category: **[Cloud automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/destroy-terraform/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/destroy-terraform/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "destroy terraform cmd" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=destroy,terraform,cmd + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "destroy terraform cmd " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'destroy,terraform,cmd' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "destroy terraform cmd" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/destroy-terraform/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/destroy-terraform/run.bat) +___ +#### Script output +```bash +cmr "destroy terraform cmd " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/get-aws-cli/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/get-aws-cli/index.md new file mode 100644 index 000000000..9e06d804b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/get-aws-cli/index.md @@ -0,0 +1,87 @@ +# get-aws-cli +Automatically generated README for this automation recipe: **get-aws-cli** + +Category: **[Cloud automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aws-cli/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aws-cli/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get aws-cli aws cli" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,aws-cli,aws,cli + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get aws-cli aws cli " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,aws-cli,aws,cli' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get aws-cli aws cli" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aws-cli/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get aws-cli aws cli " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/get-terraform/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/get-terraform/index.md new file mode 100644 index 000000000..18c91c264 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/get-terraform/index.md @@ -0,0 +1,87 @@ +# get-terraform +Automatically generated README for this automation recipe: **get-terraform** + +Category: **[Cloud automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-terraform/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-terraform/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get terraform get-terraform" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,terraform,get-terraform + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get terraform get-terraform " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,terraform,get-terraform' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get terraform get-terraform" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-terraform/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get terraform get-terraform " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/index.md new file mode 100644 index 000000000..84fc1dc1a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/index.md @@ -0,0 +1,6 @@ +* [destroy-terraform](destroy-terraform/index.md) +* [get-aws-cli](get-aws-cli/index.md) +* [get-terraform](get-terraform/index.md) +* [install-aws-cli](install-aws-cli/index.md) +* [install-terraform-from-src](install-terraform-from-src/index.md) +* [run-terraform](run-terraform/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/install-aws-cli/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/install-aws-cli/index.md new file mode 100644 index 000000000..5973d9c9c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/install-aws-cli/index.md @@ -0,0 +1,86 @@ +# install-aws-cli +Automatically generated README for this automation recipe: **install-aws-cli** + +Category: **[Cloud automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-aws-cli/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install script aws-cli aws cli" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,script,aws-cli,aws,cli + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install script aws-cli aws cli " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,script,aws-cli,aws,cli' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install script aws-cli aws cli" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-aws-cli/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install script aws-cli aws cli " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/install-terraform-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/install-terraform-from-src/index.md new file mode 100644 index 000000000..d1cba41e1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/install-terraform-from-src/index.md @@ -0,0 +1,90 @@ +# install-terraform-from-src +Automatically generated README for this automation recipe: **install-terraform-from-src** + +Category: **[Cloud automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-terraform-from-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install terraform from-src" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,terraform,from-src + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install terraform from-src " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,terraform,from-src' + 'out':'con', + ... + (other input keys for this script) + ... 
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+    ```
+
+
+=== "Docker"
+    ##### Run this script via Docker (beta)
+
+    ```bash
+    cm docker script "install terraform from-src"
+    ```
+___
+
+#### Versions
+Default version: `main`
+
+* `main`
+
+#### Native script being run
+=== "Linux/macOS"
+     * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-terraform-from-src/run.sh)
+=== "Windows"
+
+    No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install terraform from-src " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/run-terraform/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/run-terraform/index.md
new file mode 100644
index 000000000..f164a7352
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Cloud-automation/run-terraform/index.md
@@ -0,0 +1,388 @@
+# run-terraform
+Automatically generated README for this automation recipe: **run-terraform**
+
+Category: **[Cloud automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-terraform/README-extra.md)
+
+
+---
+
+## Setup for Google Cloud Instances
+```
+sudo snap install google-cloud-cli --classic
+gcloud auth application-default login
+```
+
+The two commands above install google-cloud-cli and authorize the user to access it. Once done, you can create a GCP instance with a CM command like the one below. To destroy an instance, repeat the same command with the `--destroy` option.
+
+```
+cm run script --tags=run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit
+```
+Here, `mlperf-inference-tests` is the name of the Google Cloud project created in the [Google Cloud console](https://console.cloud.google.com/apis/dashboard).
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-terraform/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "run terraform" --help````
+
+#### Run this script
+
+=== "CLI"
+    ##### Run this script via CLI
+
+    ```bash
+    cm run script --tags=run,terraform[,variations] [--input_flags]
+    ```
+=== "CLI Alt"
+    ##### Run this script via CLI (alternative)
+
+
+    ```bash
+    cmr "run terraform [variations]" [--input_flags]
+    ```
+
+=== "Python"
+    ##### Run this script from Python
+
+
+    ```python
+
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'run,terraform',
+                      'out':'con',
+                      ...
+                      (other input keys for this script)
+                      ...
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+    ```
+
+
+=== "Docker"
+    ##### Run this script via Docker (beta)
+
+    ```bash
+    cm docker script "run terraform[variations]" [--input_flags]
+    ```
+___
+
+=== "Variations"
+
+
+    #### Variations
+
+    * *No group (any combination of variations can be selected)*
    + Click here to expand this section. + + * `_amazon-linux-2-kernel.#` + - ENV variables: + - TF_VAR_INSTANCE_IMAGE_OS: `amazon-linux-2-kernel.#` + * `_graviton` + - ENV variables: + - CM_TERRAFORM_AWS_GRAVITON_INSTANCE: `yes` + * `_inferentia` + - ENV variables: + - CM_TERRAFORM_AWS_INFERENTIA_INSTANCE: `yes` + * `_rhel.#` + - ENV variables: + - TF_VAR_INSTANCE_IMAGE_OS: `rhel.#` + * `_ubuntu.#` + - ENV variables: + - TF_VAR_INSTANCE_IMAGE_OS: `ubuntu.#` + +
    + + + * Group "**aws-instance-image**" +
    + Click here to expand this section. + + * `_amazon-linux-2-kernel.510,arm64,us-west-2` + - ENV variables: + - TF_VAR_INSTANCE_IMAGE: `ami-0f1a5f5ada0e7da53` + * `_aws_instance_image.#` + - ENV variables: + - TF_VAR_INSTANCE_IMAGE: `#` + * `_aws_instance_image.ami-0735c191cf914754d` + - ENV variables: + - TF_VAR_INSTANCE_IMAGE: `ami-0735c191cf914754d` + * `_aws_instance_image.ami-0a0d8589b597d65b3` + - ENV variables: + - TF_VAR_INSTANCE_IMAGE: `ami-0a0d8589b597d65b3` + * `_rhel.9,x86,us-west-2` + - ENV variables: + - TF_VAR_INSTANCE_IMAGE: `ami-0dda7e535b65b6469` + * `_ubuntu.2204,arm64,us-west-2` + - ENV variables: + - TF_VAR_INSTANCE_IMAGE: `ami-079f51a7bcca65b92` + * `_ubuntu.2204,x86,us-west-2` + - ENV variables: + - TF_VAR_INSTANCE_IMAGE: `ami-0735c191cf914754d` + +
    + + + * Group "**aws-instance-type**" +
    + Click here to expand this section. + + * `_a1.2xlarge` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `a1.2xlarge` + * `_a1.metal` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `a1.metal` + * `_a1.xlarge` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `a1.xlarge` + * `_aws_instance_type.#` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `#` + * `_c5.12xlarge` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `c5.12xlarge` + * `_c5.4xlarge` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `c5.4xlarge` + * `_c5d.9xlarge` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `c5d.9xlarge` + * `_g4dn.xlarge` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `g4dn.xlarge` + * `_inf1.2xlarge` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `inf1.2xlarge` + * `_inf1.xlarge` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `inf1.xlarge` + * `_inf2.8xlarge` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `inf2.8xlarge` + * `_inf2.xlarge` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `inf2.xlarge` + * `_m7g.2xlarge` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `m7g.2xlarge` + * `_m7g.xlarge` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `m7g.xlarge` + * `_t2.#` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `t2.#` + * `_t2.2xlarge` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `t2.2xlarge` + * `_t2.large` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `t2.large` + * `_t2.medium` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `t2.medium` + * `_t2.micro` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `t2.micro` + * `_t2.nano` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `t2.nano` + * `_t2.small` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `t2.small` + * `_t2.xlarge` + - ENV variables: + - TF_VAR_INSTANCE_TYPE: `t2.xlarge` + +
    + + + * Group "**cloud-provider**" +
    + Click here to expand this section. + + * **`_aws`** (default) + - ENV variables: + - CM_TERRAFORM_CONFIG_DIR_NAME: `aws` + * `_gcp` + - ENV variables: + - CM_TERRAFORM_CONFIG_DIR_NAME: `gcp` + +
    + + + * Group "**gcp-instance-image**" +
    + Click here to expand this section. + + * `_debian-cloud/debian-11` + - ENV variables: + - TF_VAR_INSTANCE_IMAGE: `debian-cloud/debian-11` + * `_gcp_instance_image.#` + - ENV variables: + - TF_VAR_INSTANCE_IMAGE: `#` + * `_ubuntu-2204-jammy-v20230114` + - ENV variables: + - TF_VAR_INSTANCE_IMAGE: `ubuntu-2204-jammy-v20230114` + +
    + + + * Group "**gcp-instance-type**" +
    + Click here to expand this section.
+
+    * `_f1-micro`
+      - ENV variables:
+          - TF_VAR_INSTANCE_TYPE: `f1-micro`
+    * `_gcp_instance_type.#`
+      - ENV variables:
+          - TF_VAR_INSTANCE_TYPE: `#`
+    * `_n1-highmem.#`
+      - ENV variables:
+          - TF_VAR_INSTANCE_TYPE: `n1-highmem-#`
+    * `_n1-standard.#`
+      - ENV variables:
+          - TF_VAR_INSTANCE_TYPE: `n1-standard-#`
+
    + + + * Group "**gcp-project**" +
    + Click here to expand this section. + + * `_gcp_project.#` + - ENV variables: + - TF_VAR_GCP_PROJECT: `#` + +
    + + + * Group "**instance-name**" +
    + Click here to expand this section. + + * `_instance_name.#` + - ENV variables: + - TF_VAR_INSTANCE_NAME: `#` + +
    + + + * Group "**platform**" +
    + Click here to expand this section. + + * `_arm64` + - ENV variables: + - CM_INSTANCE_PLATFORM: `arm64` + * **`_x86`** (default) + - ENV variables: + - CM_INSTANCE_PLATFORM: `x86` + +
    + + + * Group "**region**" +
    + Click here to expand this section. + + * `_region.#` + - ENV variables: + - TF_VAR_INSTANCE_REGION: `#` + * `_us-west-2` + - ENV variables: + - TF_VAR_INSTANCE_REGION: `us-west-2` + +
    + + + * Group "**storage-size**" +
    + Click here to expand this section. + + * `_storage_size.#` + - ENV variables: + - TF_VAR_DISK_GBS: `#` + * `_storage_size.8` + - ENV variables: + - TF_VAR_DISK_GBS: `8` + +
    + + + * Group "**zone**" +
    + Click here to expand this section. + + * `_zone.#` + - ENV variables: + - TF_VAR_INSTANCE_ZONE: `#` + +
    + + + ##### Default variations + + `_aws,_x86` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--cminit=value` → `CM_TERRAFORM_CM_INIT=value` + * `--destroy=value` → `CM_DESTROY_TERRAFORM=value` + * `--gcp_credentials_json_file=value` → `CM_GCP_CREDENTIALS_JSON_PATH=value` + * `--key_file=value` → `CM_SSH_KEY_FILE=value` + * `--run_cmds=value` → `CM_TERRAFORM_RUN_COMMANDS=value` + * `--ssh_key_file=value` → `CM_SSH_KEY_FILE=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * TF_VAR_SECURITY_GROUP_ID: `sg-0783752c97d2e011d` + * TF_VAR_CPU_COUNT: `1` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-terraform/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "run terraform [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Collective-benchmarking/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Collective-benchmarking/index.md new file mode 100644 index 000000000..71dc75a6b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Collective-benchmarking/index.md @@ -0,0 +1 @@ +* [launch-benchmark](launch-benchmark/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Collective-benchmarking/launch-benchmark/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Collective-benchmarking/launch-benchmark/index.md new file mode 100644 index 000000000..4ad86bd2a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Collective-benchmarking/launch-benchmark/index.md @@ -0,0 +1,81 @@ +# launch-benchmark +Automatically generated README for this automation recipe: **launch-benchmark** + +Category: **[Collective benchmarking](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/launch-benchmark/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/launch-benchmark/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "launch benchmark" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=launch,benchmark + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "launch benchmark " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'launch,benchmark' + 'out':'con', + ... + (other input keys for this script) + ... 
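+                      # hypothetical example of an extra key (verify with the --help
+                      # command above): 'quiet':True to reduce console output,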
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+    ```
+
+
+=== "Docker"
+    ##### Run this script via Docker (beta)
+
+    ```bash
+    cm docker script "launch benchmark"
+    ```
+___
+
+
+___
+#### Script output
+```bash
+cmr "launch benchmark " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-aocl/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-aocl/index.md
new file mode 100644
index 000000000..7ff7292fb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-aocl/index.md
@@ -0,0 +1,92 @@
+# get-aocl
+Automatically generated README for this automation recipe: **get-aocl**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aocl/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aocl/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get lib aocl amd-optimized amd" --help````
+
+#### Run this script
+
+=== "CLI"
+    ##### Run this script via CLI
+
+    ```bash
+    cm run script --tags=get,lib,aocl,amd-optimized,amd
+    ```
+=== "CLI Alt"
+    ##### Run this script via CLI (alternative)
+
+
+    ```bash
+    cmr "get lib aocl amd-optimized amd "
+    ```
+
+=== "Python"
+    ##### Run this script from Python
+
+
+    ```python
+
+    import cmind
+
+    r = cmind.access({'action':'run',
+                      'automation':'script',
+                      'tags':'get,lib,aocl,amd-optimized,amd',
+                      'out':'con',
+                      ...
+                      (other input keys for this script)
+                      ...
+                     })
+
+    if r['return']>0:
+        print(r['error'])
+
+    ```
+
+
+=== "Docker"
+    ##### Run this script via Docker (beta)
+
+    ```bash
+    cm docker script "get lib aocl amd-optimized amd"
+    ```
+___
+
+#### Versions
+Default version: `4.0`
+
+* `4.0`
+* `master`
+
+#### Native script being run
+=== "Linux/macOS"
+     * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aocl/run.sh)
+=== "Windows"
+
+    No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "get lib aocl amd-optimized amd " -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-cl/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-cl/index.md
new file mode 100644
index 000000000..fd2dc6cef
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-cl/index.md
@@ -0,0 +1,87 @@
+# Detect or install Microsoft C compiler
+Automatically generated README for this automation recipe: **get-cl**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cl/README-extra.md)
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cl/_cm.json)*
+* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get cl compiler c-compiler cpp-compiler get-cl" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,cl,compiler,c-compiler,cpp-compiler,get-cl + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get cl compiler c-compiler cpp-compiler get-cl " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,cl,compiler,c-compiler,cpp-compiler,get-cl' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get cl compiler c-compiler cpp-compiler get-cl" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + No run file exists for Linux/macOS +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cl/run.bat) +___ +#### Script output +```bash +cmr "get cl compiler c-compiler cpp-compiler get-cl " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-compiler-flags/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-compiler-flags/index.md new file mode 100644 index 000000000..b1b46b23e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-compiler-flags/index.md @@ -0,0 +1,80 @@ +# get-compiler-flags +Automatically generated README for this automation recipe: **get-compiler-flags** + +Category: **[Compiler automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-compiler-flags/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get compiler-flags" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,compiler-flags + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get compiler-flags " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,compiler-flags' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get compiler-flags" + ``` +___ + + +___ +#### Script output +```bash +cmr "get compiler-flags " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-compiler-rust/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-compiler-rust/index.md new file mode 100644 index 000000000..90844ce50 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-compiler-rust/index.md @@ -0,0 +1,86 @@ +# get-compiler-rust +Automatically generated README for this automation recipe: **get-compiler-rust** + +Category: **[Compiler automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-compiler-rust/_cm.yaml)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get rust-compiler" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,rust-compiler + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get rust-compiler " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,rust-compiler' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get rust-compiler" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-compiler-rust/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get rust-compiler " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-gcc/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-gcc/index.md new file mode 100644 index 000000000..06913a2fc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-gcc/index.md @@ -0,0 +1,87 @@ +# Detect or install GCC compiler +Automatically generated README for this automation recipe: **get-gcc** + +Category: **[Compiler automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-gcc/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-gcc/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get gcc compiler c-compiler cpp-compiler get-gcc" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,gcc,compiler,c-compiler,cpp-compiler,get-gcc + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get gcc compiler c-compiler cpp-compiler get-gcc " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,gcc,compiler,c-compiler,cpp-compiler,get-gcc' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get gcc compiler c-compiler cpp-compiler get-gcc" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-gcc/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-gcc/run.bat) +___ +#### Script output +```bash +cmr "get gcc compiler c-compiler cpp-compiler get-gcc " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-go/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-go/index.md new file mode 100644 index 000000000..7d691b01a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-go/index.md @@ -0,0 +1,87 @@ +# get-go +Automatically generated README for this automation recipe: **get-go** + +Category: **[Compiler automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-go/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-go/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get tool go get-go" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,tool,go,get-go + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get tool go get-go " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,tool,go,get-go' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get tool go get-go" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-go/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get tool go get-go " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-llvm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-llvm/index.md new file mode 100644 index 000000000..8c5855c88 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/get-llvm/index.md @@ -0,0 +1,101 @@ +# Detect or install LLVM compiler +Automatically generated README for this automation recipe: **get-llvm** + +Category: **[Compiler automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-llvm/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-llvm/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get llvm compiler c-compiler cpp-compiler get-llvm" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,llvm,compiler,c-compiler,cpp-compiler,get-llvm[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get llvm compiler c-compiler cpp-compiler get-llvm [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,llvm,compiler,c-compiler,cpp-compiler,get-llvm' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get llvm compiler c-compiler cpp-compiler get-llvm[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_from-prebuilt` + * `_from-src` + +
    + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-llvm/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-llvm/run.bat) +___ +#### Script output +```bash +cmr "get llvm compiler c-compiler cpp-compiler get-llvm [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/index.md new file mode 100644 index 000000000..d24e5e703 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/index.md @@ -0,0 +1,18 @@ +* [get-aocl](get-aocl/index.md) +* [get-cl](get-cl/index.md) +* [get-compiler-flags](get-compiler-flags/index.md) +* [get-compiler-rust](get-compiler-rust/index.md) +* [get-gcc](get-gcc/index.md) +* [get-go](get-go/index.md) +* [get-llvm](get-llvm/index.md) +* [install-gcc-src](install-gcc-src/index.md) +* [install-ipex-from-src](install-ipex-from-src/index.md) +* [install-llvm-prebuilt](install-llvm-prebuilt/index.md) +* [install-llvm-src](install-llvm-src/index.md) +* [install-onednn-from-src](install-onednn-from-src/index.md) +* [install-onnxruntime-from-src](install-onnxruntime-from-src/index.md) +* [install-pytorch-from-src](install-pytorch-from-src/index.md) +* [install-pytorch-kineto-from-src](install-pytorch-kineto-from-src/index.md) +* [install-torchvision-from-src](install-torchvision-from-src/index.md) +* [install-tpp-pytorch-extension](install-tpp-pytorch-extension/index.md) +* [install-transformers-from-src](install-transformers-from-src/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-gcc-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-gcc-src/index.md new file mode 100644 index 000000000..54724e8a7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-gcc-src/index.md @@ -0,0 +1,90 @@ +# install-gcc-src +Automatically generated README for this automation recipe: **install-gcc-src** + +Category: **[Compiler automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-gcc-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install src gcc src-gcc" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,src,gcc,src-gcc + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install src gcc src-gcc " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,src,gcc,src-gcc' + 'out':'con', + ... + (other input keys for this script) + ... 
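+                      # e.g. one of the versions documented below could be requested
+                      # as 'version':'master', (illustrative; see --help above)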
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install src gcc src-gcc" + ``` +___ + +#### Versions +Default version: `12` + +* `master` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-gcc-src/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install src gcc src-gcc " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-ipex-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-ipex-from-src/index.md new file mode 100644 index 000000000..673ca8376 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-ipex-from-src/index.md @@ -0,0 +1,128 @@ +# Build IPEX from sources +Automatically generated README for this automation recipe: **install-ipex-from-src** + +Category: **[Compiler automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-ipex-from-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install get src from.src ipex src-ipex" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,get,src,from.src,ipex,src-ipex[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install get src from.src ipex src-ipex [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,get,src,from.src,ipex,src-ipex' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install get src from.src ipex src-ipex[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_branch.#` + - ENV variables: + - CM_GIT_CHECKOUT: `#` + * `_for-intel-mlperf-inference-v3.1-gptj` + - ENV variables: + - CM_CONDA_ENV: `yes` + * `_sha.#` + - ENV variables: + - CM_GIT_CHECKOUT_SHA: `#` + * `_tag.#` + - ENV variables: + - CM_GIT_CHECKOUT_TAG: `#` + +
    + + + * Group "**repo**" +
    + Click here to expand this section. + + * `_repo.#` + - ENV variables: + - CM_GIT_URL: `#` + * **`_repo.https://github.com/intel/intel-extension-for-pytorch`** (default) + - ENV variables: + - CM_GIT_URL: `https://github.com/intel/intel-extension-for-pytorch` + +
    + + + ##### Default variations + + `_repo.https://github.com/intel/intel-extension-for-pytorch` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-ipex-from-src/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install get src from.src ipex src-ipex [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-llvm-prebuilt/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-llvm-prebuilt/index.md new file mode 100644 index 000000000..96038406e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-llvm-prebuilt/index.md @@ -0,0 +1,90 @@ +# Install prebuilt LLVM compiler +Automatically generated README for this automation recipe: **install-llvm-prebuilt** + +Category: **[Compiler automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-prebuilt/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-prebuilt/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,prebuilt,llvm,prebuilt-llvm,install-prebuilt-llvm + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,prebuilt,llvm,prebuilt-llvm,install-prebuilt-llvm' + 'out':'con', + ... + (other input keys for this script) + ... 
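+                      # e.g. 'version':'15.0.6' to request the default version
+                      # documented below (illustrative; confirm with --help above),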
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm" + ``` +___ + +#### Versions +Default version: `15.0.6` + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-prebuilt/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-prebuilt/run.bat) +___ +#### Script output +```bash +cmr "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-llvm-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-llvm-src/index.md new file mode 100644 index 000000000..655046f73 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-llvm-src/index.md @@ -0,0 +1,160 @@ +# Build LLVM compiler from sources (can take >30 min) +Automatically generated README for this automation recipe: **install-llvm-src** + +Category: **[Compiler automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install src llvm from.src src-llvm" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,src,llvm,from.src,src-llvm[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install src llvm from.src src-llvm [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,src,llvm,from.src,src-llvm' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install src llvm from.src src-llvm[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_branch.#` + - ENV variables: + - CM_GIT_CHECKOUT: `#` + * `_for-intel-mlperf-inference-v3.1-bert` + - ENV variables: + - CM_LLVM_CONDA_ENV: `yes` + * `_for-intel-mlperf-inference-v3.1-gptj` + - ENV variables: + - CM_LLVM_CONDA_ENV: `yes` + - CM_LLVM_16_INTEL_MLPERF_INFERENCE: `yes` + - USE_CUDA: `0` + - CUDA_VISIBLE_DEVICES: `` + * `_full-history` + * `_runtimes.#` + - ENV variables: + - CM_LLVM_ENABLE_RUNTIMES: `#` + * `_sha.#` + - ENV variables: + - CM_GIT_CHECKOUT_SHA: `#` + * `_tag.#` + - ENV variables: + - CM_GIT_CHECKOUT_TAG: `#` + +
    + + + * Group "**build-type**" +
    + Click here to expand this section. + + * `_debug` + - ENV variables: + - CM_LLVM_BUILD_TYPE: `debug` + * **`_release`** (default) + - ENV variables: + - CM_LLVM_BUILD_TYPE: `release` + +
    + + + * Group "**clang**" +
    + Click here to expand this section. + + * **`_clang`** (default) + - ENV variables: + - CM_LLVM_ENABLE_PROJECTS: `clang` + +
    + + + * Group "**repo**" +
    + Click here to expand this section. + + * `_repo.#` + - ENV variables: + - CM_GIT_URL: `#` + +
    + + + ##### Default variations + + `_clang,_release` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-src/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install src llvm from.src src-llvm [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-onednn-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-onednn-from-src/index.md new file mode 100644 index 000000000..49bb4844b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-onednn-from-src/index.md @@ -0,0 +1,129 @@ +# Build oneDNN from sources +Automatically generated README for this automation recipe: **install-onednn-from-src** + +Category: **[Compiler automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-onednn-from-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install get src from.src onednn src-onednn" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,get,src,from.src,onednn,src-onednn[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install get src from.src onednn src-onednn [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,get,src,from.src,onednn,src-onednn' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install get src from.src onednn src-onednn[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_branch.#` + - ENV variables: + - CM_GIT_CHECKOUT: `#` + * `_for-intel-mlperf-inference-v3.1-bert` + - ENV variables: + - CM_CONDA_ENV: `yes` + - CM_FOR_INTEL_MLPERF_INFERENCE: `yes` + * `_sha.#` + - ENV variables: + - CM_GIT_CHECKOUT_SHA: `#` + * `_tag.#` + - ENV variables: + - CM_GIT_CHECKOUT_TAG: `#` + +
    + + + * Group "**repo**" +
    + Click here to expand this section. + + * `_repo.#` + - ENV variables: + - CM_GIT_URL: `#` + * **`_repo.https://github.com/oneapi-src/oneDNN`** (default) + - ENV variables: + - CM_GIT_URL: `https://github.com/oneapi-src/oneDNN` + +
    + + + ##### Default variations + + `_repo.https://github.com/oneapi-src/oneDNN` + +#### Native script being run +=== "Linux/macOS" + * [run-intel-mlperf-inference.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-onednn-from-src/run-intel-mlperf-inference.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install get src from.src onednn src-onednn [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-onnxruntime-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-onnxruntime-from-src/index.md new file mode 100644 index 000000000..011956f1d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-onnxruntime-from-src/index.md @@ -0,0 +1,125 @@ +# Build onnxruntime from sources +Automatically generated README for this automation recipe: **install-onnxruntime-from-src** + +Category: **[Compiler automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-onnxruntime-from-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install get src from.src onnxruntime src-onnxruntime" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,get,src,from.src,onnxruntime,src-onnxruntime[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install get src from.src onnxruntime src-onnxruntime [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,get,src,from.src,onnxruntime,src-onnxruntime' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install get src from.src onnxruntime src-onnxruntime[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_branch.#` + - ENV variables: + - CM_GIT_CHECKOUT: `#` + * `_cuda` + - ENV variables: + - CM_ONNXRUNTIME_GPU: `yes` + * `_sha.#` + - ENV variables: + - CM_GIT_CHECKOUT_SHA: `#` + * `_tag.#` + - ENV variables: + - CM_GIT_CHECKOUT_TAG: `#` + +
    + + + * Group "**repo**" +
    + Click here to expand this section. + + * **`_repo.https://github.com/Microsoft/onnxruntime`** (default) + - ENV variables: + - CM_GIT_URL: `https://github.com/Microsoft/onnxruntime` + +
    + + + ##### Default variations + + `_repo.https://github.com/Microsoft/onnxruntime` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-onnxruntime-from-src/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install get src from.src onnxruntime src-onnxruntime [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-pytorch-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-pytorch-from-src/index.md new file mode 100644 index 000000000..4c7c18512 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-pytorch-from-src/index.md @@ -0,0 +1,143 @@ +# Build pytorch from sources +Automatically generated README for this automation recipe: **install-pytorch-from-src** + +Category: **[Compiler automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-pytorch-from-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install get src from.src pytorch src-pytorch" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,get,src,from.src,pytorch,src-pytorch[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install get src from.src pytorch src-pytorch [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,get,src,from.src,pytorch,src-pytorch' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install get src from.src pytorch src-pytorch[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_branch.#` + - ENV variables: + - CM_GIT_CHECKOUT: `#` + * `_cuda` + - ENV variables: + - CUDA_HOME: `<<>>` + - CUDNN_LIBRARY_PATH: `<<>>` + - CUDNN_INCLUDE_PATH: `<<>>` + - CUDA_NVCC_EXECUTABLE: `<<>>` + - USE_CUDA: `1` + - USE_CUDNN: `1` + - TORCH_CUDA_ARCH_LIST: `Ampere Ada Hopper` + - TORCH_CXX_FLAGS: `-D_GLIBCXX_USE_CXX11_ABI=1` + * `_for-intel-mlperf-inference-v3.1-bert` + - ENV variables: + - CM_CONDA_ENV: `yes` + - CM_MLPERF_INFERENCE_INTEL: `yes` + - USE_CUDA: `0` + * `_for-nvidia-mlperf-inference-v3.1` + * `_for-nvidia-mlperf-inference-v4.0` + * `_sha.#` + - ENV variables: + - CM_GIT_CHECKOUT_SHA: `#` + * `_tag.#` + - ENV variables: + - CM_GIT_CHECKOUT_TAG: `#` + +
    + + + * Group "**repo**" +
    + Click here to expand this section. + + * `_repo.#` + - ENV variables: + - CM_GIT_URL: `#` + * **`_repo.https://github.com/pytorch/pytorch`** (default) + - ENV variables: + - CM_GIT_URL: `https://github.com/pytorch/pytorch` + +
    + + + ##### Default variations + + `_repo.https://github.com/pytorch/pytorch` + +#### Native script being run +=== "Linux/macOS" + * [run-intel-mlperf-inference-v3_1.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh) + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-pytorch-from-src/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install get src from.src pytorch src-pytorch [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-pytorch-kineto-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-pytorch-kineto-from-src/index.md new file mode 100644 index 000000000..99cb8893f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-pytorch-kineto-from-src/index.md @@ -0,0 +1,135 @@ +# Build pytorch kineto from sources +Automatically generated README for this automation recipe: **install-pytorch-kineto-from-src** + +Category: **[Compiler automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-pytorch-kineto-from-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install get src from.src pytorch-kineto kineto src-pytorch-kineto" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,get,src,from.src,pytorch-kineto,kineto,src-pytorch-kineto[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install get src from.src pytorch-kineto kineto src-pytorch-kineto [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,get,src,from.src,pytorch-kineto,kineto,src-pytorch-kineto' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install get src from.src pytorch-kineto kineto src-pytorch-kineto[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_branch.#` + - ENV variables: + - CM_GIT_CHECKOUT: `#` + * `_cuda` + - ENV variables: + - CUDA_HOME: `<<>>` + - CUDA_NVCC_EXECUTABLE: `<<>>` + - CUDNN_INCLUDE_PATH: `<<>>` + - CUDNN_LIBRARY_PATH: `<<>>` + - TORCH_CUDA_ARCH_LIST: `Ampere Ada Hopper` + - TORCH_CXX_FLAGS: `-D_GLIBCXX_USE_CXX11_ABI=1` + - USE_CUDA: `1` + - USE_CUDNN: `1` + * `_sha.#` + - ENV variables: + - CM_GIT_CHECKOUT_SHA: `#` + * `_tag.#` + - ENV variables: + - CM_GIT_CHECKOUT_TAG: `#` + +
    + + + * Group "**repo**" +
    + Click here to expand this section. + + * `_repo.#` + - ENV variables: + - CM_GIT_URL: `#` + * **`_repo.https://github.com/pytorch/kineto`** (default) + - ENV variables: + - CM_GIT_URL: `https://github.com/pytorch/kineto` + +
    + + + ##### Default variations + + `_repo.https://github.com/pytorch/kineto` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-pytorch-kineto-from-src/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install get src from.src pytorch-kineto kineto src-pytorch-kineto [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-torchvision-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-torchvision-from-src/index.md new file mode 100644 index 000000000..296969afb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-torchvision-from-src/index.md @@ -0,0 +1,137 @@ +# Build pytorchvision from sources +Automatically generated README for this automation recipe: **install-torchvision-from-src** + +Category: **[Compiler automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-torchvision-from-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install get src from.src pytorchvision torchvision src-pytorchvision" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install get src from.src pytorchvision torchvision src-pytorchvision [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install get src from.src pytorchvision torchvision src-pytorchvision[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_branch.#` + - ENV variables: + - CM_GIT_CHECKOUT: `#` + * `_cuda` + - ENV variables: + - CUDA_HOME: `<<>>` + - CUDA_NVCC_EXECUTABLE: `<<>>` + - CUDNN_INCLUDE_PATH: `<<>>` + - CUDNN_LIBRARY_PATH: `<<>>` + - USE_CUDA: `1` + - USE_CUDNN: `1` + - TORCH_CUDA_ARCH_LIST: `Ampere Ada Hopper` + - TORCH_CXX_FLAGS: `-D_GLIBCXX_USE_CXX11_ABI=1` + * `_for-nvidia-mlperf-inference-v3.1` + * `_for-nvidia-mlperf-inference-v4.0` + * `_sha.#` + - ENV variables: + - CM_GIT_CHECKOUT_SHA: `#` + * `_tag.#` + - ENV variables: + - CM_GIT_CHECKOUT_TAG: `#` + +
    + + + * Group "**repo**" +
    + Click here to expand this section. + + * `_repo.#` + - ENV variables: + - CM_GIT_URL: `#` + * **`_repo.https://github.com/pytorch/vision`** (default) + - ENV variables: + - CM_GIT_URL: `https://github.com/pytorch/vision` + +
+
+
+    ##### Default variations
+
+    `_repo.https://github.com/pytorch/vision`
+
+#### Native script being run
+=== "Linux/macOS"
+    * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-torchvision-from-src/run.sh)
+=== "Windows"
+
+    No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install get src from.src pytorchvision torchvision src-pytorchvision [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-tpp-pytorch-extension/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-tpp-pytorch-extension/index.md
new file mode 100644
index 000000000..2b681138d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-tpp-pytorch-extension/index.md
@@ -0,0 +1,128 @@
+# Build TPP-PEX from sources
+Automatically generated README for this automation recipe: **install-tpp-pytorch-extension**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tpp-pytorch-extension/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install get src from.src tpp-pex src-tpp-pex" --help````
+
+#### Run this script
+
+=== "CLI"
+    ##### Run this script via CLI
+
+    ```bash
+    cm run script --tags=install,get,src,from.src,tpp-pex,src-tpp-pex[,variations]
+    ```
+=== "CLI Alt"
+    ##### Run this script via CLI (alternative)
+
+
+    ```bash
+    cmr "install get src from.src tpp-pex src-tpp-pex [variations]"
+    ```
+
+=== "Python"
+    ##### Run this script from Python
+
+
+    ```python
+
+    import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,get,src,from.src,tpp-pex,src-tpp-pex',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+    if r['return']>0:
+        print(r['error'])
+
+    ```
+
+
+=== "Docker"
+    ##### Run this script via Docker (beta)
+
+    ```bash
+    cm docker script "install get src from.src tpp-pex src-tpp-pex[variations]"
+    ```
+___
+
+=== "Variations"
+
+
+    #### Variations
+
+    * *No group (any combination of variations can be selected)*
    + Click here to expand this section. + + * `_branch.#` + - ENV variables: + - CM_GIT_CHECKOUT: `#` + * `_for-intel-mlperf-inference-v3.1-gptj` + - ENV variables: + - CM_CONDA_ENV: `yes` + * `_sha.#` + - ENV variables: + - CM_GIT_CHECKOUT_SHA: `#` + * `_tag.#` + - ENV variables: + - CM_GIT_CHECKOUT_TAG: `#` + +
    + + + * Group "**repo**" +
    + Click here to expand this section. + + * `_repo.#` + - ENV variables: + - CM_GIT_URL: `#` + * **`_repo.https://github.com/libxsmm/tpp-pytorch-extension`** (default) + - ENV variables: + - CM_GIT_URL: `https://github.com/libxsmm/tpp-pytorch-extension` + +
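+    As an illustration, a minimal sketch of checking out a specific branch via
+    the `_branch.#` variation from Python (the branch name `main` is an
+    assumption for illustration):
+
+    ```python
+    import cmind
+
+    # Build TPP-PEX from a specific branch of the default repository
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'install,get,src,from.src,tpp-pex,src-tpp-pex,_branch.main',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```
+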
+
+
+    ##### Default variations
+
+    `_repo.https://github.com/libxsmm/tpp-pytorch-extension`
+
+#### Native script being run
+=== "Linux/macOS"
+    * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tpp-pytorch-extension/run.sh)
+=== "Windows"
+
+    No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install get src from.src tpp-pex src-tpp-pex [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-transformers-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-transformers-from-src/index.md
new file mode 100644
index 000000000..78b59b731
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Compiler-automation/install-transformers-from-src/index.md
@@ -0,0 +1,128 @@
+# Build transformers from sources
+Automatically generated README for this automation recipe: **install-transformers-from-src**
+
+Category: **[Compiler automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-transformers-from-src/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "install src from.src transformers src-transformers" --help````
+
+#### Run this script
+
+=== "CLI"
+    ##### Run this script via CLI
+
+    ```bash
+    cm run script --tags=install,src,from.src,transformers,src-transformers[,variations]
+    ```
+=== "CLI Alt"
+    ##### Run this script via CLI (alternative)
+
+
+    ```bash
+    cmr "install src from.src transformers src-transformers [variations]"
+    ```
+
+=== "Python"
+    ##### Run this script from Python
+
+
+    ```python
+
+    import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'install,src,from.src,transformers,src-transformers',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+    if r['return']>0:
+        print(r['error'])
+
+    ```
+
+
+=== "Docker"
+    ##### Run this script via Docker (beta)
+
+    ```bash
+    cm docker script "install src from.src transformers src-transformers[variations]"
+    ```
+___
+
+=== "Variations"
+
+
+    #### Variations
+
+    * *No group (any combination of variations can be selected)*
    + Click here to expand this section. + + * `_branch.#` + - ENV variables: + - CM_GIT_CHECKOUT: `#` + * `_for-intel-mlperf-inference-v3.1-bert` + - ENV variables: + - CM_CONDA_ENV: `yes` + * `_sha.#` + - ENV variables: + - CM_GIT_CHECKOUT_SHA: `#` + * `_tag.#` + - ENV variables: + - CM_GIT_CHECKOUT_TAG: `#` + +
    + + + * Group "**repo**" +
+    Click here to expand this section.
+
+    * `_repo.#`
+        - ENV variables:
+            - CM_GIT_URL: `#`
+    * **`_repo.https://github.com/huggingface/transformers`** (default)
+        - ENV variables:
+            - CM_GIT_URL: `https://github.com/huggingface/transformers`
+
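+    For instance, a minimal sketch of building transformers from a pinned tag
+    via the `_tag.#` variation from Python (the tag value `v4.36.0` is only an
+    illustrative assumption):
+
+    ```python
+    import cmind
+
+    # Build transformers from a pinned release tag (tag value assumed for illustration)
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'install,src,from.src,transformers,src-transformers,_tag.v4.36.0',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```
+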
+
+
+    ##### Default variations
+
+    `_repo.https://github.com/huggingface/transformers`
+
+#### Native script being run
+=== "Linux/macOS"
+    * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-transformers-from-src/run.sh)
+=== "Windows"
+
+    No run file exists for Windows
+___
+#### Script output
+```bash
+cmr "install src from.src transformers src-transformers [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Dashboard-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Dashboard-automation/index.md
new file mode 100644
index 000000000..855c981b1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Dashboard-automation/index.md
@@ -0,0 +1 @@
+* [publish-results-to-dashboard](publish-results-to-dashboard/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Dashboard-automation/publish-results-to-dashboard/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Dashboard-automation/publish-results-to-dashboard/index.md
new file mode 100644
index 000000000..e496c921b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Dashboard-automation/publish-results-to-dashboard/index.md
@@ -0,0 +1,86 @@
+# publish-results-to-dashboard
+Automatically generated README for this automation recipe: **publish-results-to-dashboard**
+
+Category: **[Dashboard automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/publish-results-to-dashboard/_cm.json)*
+* Output cached? *False*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "publish-results dashboard" --help````
+
+#### Run this script
+
+=== "CLI"
+    ##### Run this script via CLI
+
+    ```bash
+    cm run script --tags=publish-results,dashboard
+    ```
+=== "CLI Alt"
+    ##### Run this script via CLI (alternative)
+
+
+    ```bash
+    cmr "publish-results dashboard "
+    ```
+
+=== "Python"
+    ##### Run this script from Python
+
+
+    ```python
+
+    import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'publish-results,dashboard',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "publish-results dashboard" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/publish-results-to-dashboard/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/publish-results-to-dashboard/run.bat) +___ +#### Script output +```bash +cmr "publish-results dashboard " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-android-sdk/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-android-sdk/index.md new file mode 100644 index 000000000..916d64295 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-android-sdk/index.md @@ -0,0 +1,109 @@ +# get-android-sdk +Automatically generated README for this automation recipe: **get-android-sdk** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-android-sdk/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-android-sdk/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get android sdk android-sdk" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,android,sdk,android-sdk [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get android sdk android-sdk " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,android,sdk,android-sdk' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get android sdk android-sdk" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--android_cmake_version=value` → `CM_ANDROID_CMAKE_VERSION=value` + * `--android_ndk_version=value` → `CM_ANDROID_NDK_VERSION=value` + * `--android_version=value` → `CM_ANDROID_VERSION=value` + * `--build_tools_version=value` → `CM_ANDROID_BUILD_TOOLS_VERSION=value` + * `--cmdline_tools_version=value` → `CM_ANDROID_CMDLINE_TOOLS_VERSION=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_ANDROID_BUILD_TOOLS_VERSION: `29.0.3` + * CM_ANDROID_CMAKE_VERSION: `3.6.4111459` + * CM_ANDROID_CMDLINE_TOOLS_URL: `https://dl.google.com/android/repository/commandlinetools-${CM_ANDROID_CMDLINE_TOOLS_OS}-${CM_ANDROID_CMDLINE_TOOLS_VERSION}_latest.zip` + * CM_ANDROID_CMDLINE_TOOLS_VERSION: `9123335` + * CM_ANDROID_NDK_VERSION: `21.3.6528147` + * CM_ANDROID_VERSION: `30` + + + +___ +#### Script output +```bash +cmr "get android sdk android-sdk " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-aria2/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-aria2/index.md new file mode 100644 index 000000000..020185dd7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-aria2/index.md @@ -0,0 +1,97 @@ +# get-aria2 +Automatically generated README for this automation recipe: **get-aria2** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aria2/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aria2/_cm.yaml)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get aria2 get-aria2" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,aria2,get-aria2 [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get aria2 get-aria2 " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,aria2,get-aria2' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get aria2 get-aria2" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--install=value` → `CM_FORCE_INSTALL=value` + * `--src=value` → `CM_ARIA2_BUILD_FROM_SRC=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aria2/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aria2/run.bat) +___ +#### Script output +```bash +cmr "get aria2 get-aria2 " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-bazel/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-bazel/index.md new file mode 100644 index 000000000..b891263a8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-bazel/index.md @@ -0,0 +1,87 @@ +# get-bazel +Automatically generated README for this automation recipe: **get-bazel** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-bazel/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-bazel/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get bazel get-bazel" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,bazel,get-bazel + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get bazel get-bazel " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,bazel,get-bazel' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get bazel get-bazel" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-bazel/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-bazel/run.bat) +___ +#### Script output +```bash +cmr "get bazel get-bazel " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-blis/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-blis/index.md new file mode 100644 index 000000000..565ded732 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-blis/index.md @@ -0,0 +1,110 @@ +# get-blis +Automatically generated README for this automation recipe: **get-blis** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-blis/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-blis/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get lib blis" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,lib,blis[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get lib blis [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,lib,blis' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get lib blis[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**source**" +
    + Click here to expand this section. + + * `_amd` + * **`_flame`** (default) + +
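+    For example, a minimal sketch of selecting the AMD fork of BLIS instead of
+    the default `_flame` source via the Python API (both variation tags come
+    from the group above):
+
+    ```python
+    import cmind
+
+    # Detect or build BLIS from the AMD source variation
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,lib,blis,_amd',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```
+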
    + + + ##### Default variations + + `_flame` +#### Versions +Default version: `master` + +* `0.9.0` +* `master` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-blis/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-blis/run.bat) +___ +#### Script output +```bash +cmr "get lib blis [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-brew/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-brew/index.md new file mode 100644 index 000000000..4e31f81c9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-brew/index.md @@ -0,0 +1,86 @@ +# get-brew +Automatically generated README for this automation recipe: **get-brew** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-brew/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get brew" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,brew + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get brew " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,brew' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get brew" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-brew/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get brew " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmake/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmake/index.md new file mode 100644 index 000000000..1b1f97f9c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmake/index.md @@ -0,0 +1,86 @@ +# get-cmake +Automatically generated README for this automation recipe: **get-cmake** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmake/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get cmake get-cmake" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,cmake,get-cmake + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get cmake get-cmake " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,cmake,get-cmake' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get cmake get-cmake" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmake/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmake/run.bat) +___ +#### Script output +```bash +cmr "get cmake get-cmake " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmsis_5/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmsis_5/index.md new file mode 100644 index 000000000..21ab4045c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmsis_5/index.md @@ -0,0 +1,123 @@ +# get-cmsis_5 +Automatically generated README for this automation recipe: **get-cmsis_5** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmsis_5/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmsis_5/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get cmsis cmsis_5 arm-software" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,cmsis,cmsis_5,arm-software[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get cmsis cmsis_5 arm-software [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,cmsis,cmsis_5,arm-software' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get cmsis cmsis_5 arm-software[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_recurse-submodules` + - ENV variables: + - CM_GIT_RECURSE_SUBMODULES: `--recurse-submodules` + * `_short-history` + - ENV variables: + - CM_GIT_DEPTH: `--depth 10` + +
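+    For example, a minimal sketch of cloning CMSIS_5 with submodules and a
+    shallow history by combining the two variations above from Python:
+
+    ```python
+    import cmind
+
+    # Clone CMSIS_5 with --recurse-submodules and a shallow (depth 10) history
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,cmsis,cmsis_5,arm-software,_recurse-submodules,_short-history',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```
+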
    + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_GIT_DEPTH: `` + * CM_GIT_PATCH: `no` + * CM_GIT_URL: `https://github.com/ARM-software/CMSIS_5.git` + + +#### Versions +Default version: `custom` + +* `custom` +* `develop` +* `master` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmsis_5/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get cmsis cmsis_5 arm-software [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-docker/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-docker/index.md new file mode 100644 index 000000000..66e6de1a9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-docker/index.md @@ -0,0 +1,86 @@ +# get-docker +Automatically generated README for this automation recipe: **get-docker** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-docker/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get install docker engine" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,install,docker,engine + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get install docker engine " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,install,docker,engine' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get install docker engine" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-docker/run-ubuntu.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get install docker engine " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-generic-sys-util/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-generic-sys-util/index.md new file mode 100644 index 000000000..72bcf7044 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-generic-sys-util/index.md @@ -0,0 +1,214 @@ +# get-generic-sys-util +Automatically generated README for this automation recipe: **get-generic-sys-util** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-sys-util/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get sys-util generic generic-sys-util" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,sys-util,generic,generic-sys-util[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get sys-util generic generic-sys-util [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,sys-util,generic,generic-sys-util' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get sys-util generic generic-sys-util[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_g++-12` + - ENV variables: + - CM_SYS_UTIL_NAME: `g++12` + * `_gflags-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `gflags-dev` + * `_git-lfs` + - ENV variables: + - CM_SYS_UTIL_NAME: `git-lfs` + * `_glog-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `glog-dev` + * `_libboost-all-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `libboost-all-dev` + * `_libbz2-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `libbz2_dev` + * `_libev-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `libev_dev` + * `_libffi-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `libffi_dev` + * `_libffi7` + - ENV variables: + - CM_SYS_UTIL_NAME: `libffi7` + * `_libgdbm-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `libgdbm_dev` + * `_libgmock-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `libgmock-dev` + * `_liblzma-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `liblzma_dev` + * `_libmpfr-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `libmpfr-dev` + * `_libncurses-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `libncurses_dev` + * `_libnuma-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `libnuma-dev` + * `_libpci-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `libpci-dev` + * `_libre2-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `libre2-dev` + * `_libreadline-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `libreadline_dev` + * `_libsqlite3-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `libsqlite3_dev` + * `_libssl-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `libssl_dev` + * `_libudev-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `libudev-dev` + * `_ninja-build` + - ENV variables: + - CM_SYS_UTIL_NAME: `ninja-build` + * `_nlohmann-json3-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `nlohmann_json3_dev` + * `_ntpdate` + - ENV variables: + - CM_SYS_UTIL_NAME: `ntpdate` + * `_numactl` + - ENV variables: + - CM_SYS_UTIL_NAME: `numactl` + * `_nvidia-cuda-toolkit` + - ENV variables: + - CM_SYS_UTIL_NAME: `nvidia-cuda-toolkit` + * `_rapidjson-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `rapidjson-dev` + * `_rsync` + - ENV variables: + - CM_SYS_UTIL_NAME: `rsync` + * `_screen` + - ENV variables: + - CM_SYS_UTIL_NAME: `screen` + * `_sox` + - ENV variables: + - CM_SYS_UTIL_NAME: `sox` + * `_tk-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `tk_dev` + * `_transmission` + - ENV variables: + - CM_SYS_UTIL_NAME: `transmission` + * `_wget` + - ENV variables: + - CM_SYS_UTIL_NAME: `wget` + * `_zlib` + - ENV variables: + - CM_SYS_UTIL_NAME: `zlib` + * `_zlib1g-dev` + - ENV variables: + - CM_SYS_UTIL_NAME: `zlib1g_dev` + +
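+    For example, a minimal sketch of installing a single utility such as
+    `numactl` by selecting its variation from the list above via Python:
+
+    ```python
+    import cmind
+
+    # Install the numactl system utility via its dedicated variation
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,sys-util,generic,generic-sys-util,_numactl',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```
+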
    + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_CLEAN_DIRS: `bin` + * CM_SUDO: `sudo` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-sys-util/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get sys-util generic generic-sys-util [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-google-test/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-google-test/index.md new file mode 100644 index 000000000..92b5250f1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-google-test/index.md @@ -0,0 +1,89 @@ +# get-google-test +Automatically generated README for this automation recipe: **get-google-test** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-google-test/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get google-test googletest gtest test google" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,google-test,googletest,gtest,test,google + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get google-test googletest gtest test google " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,google-test,googletest,gtest,test,google' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get google-test googletest gtest test google" + ``` +___ + +#### Versions +Default version: `1.14.0` + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-google-test/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get google-test googletest gtest test google " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-java/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-java/index.md new file mode 100644 index 000000000..f0c5f5ac6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-java/index.md @@ -0,0 +1,124 @@ +# get-java +Automatically generated README for this automation recipe: **get-java** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-java/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-java/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get java" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,java[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get java [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,java' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get java[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_install` + - ENV variables: + - CM_JAVA_PREBUILT_INSTALL: `on` + +
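+    For example, a minimal sketch of forcing installation of a prebuilt JDK by
+    selecting the `_install` variation from Python (equivalent, per the flag
+    mapping below, to passing `--install` on the command line):
+
+    ```python
+    import cmind
+
+    # Force installation of a prebuilt JDK instead of detecting an existing one
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,java,_install',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```
+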
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--install=value` → `CM_JAVA_PREBUILT_INSTALL=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_JAVA_PREBUILT_VERSION: `19` + * CM_JAVA_PREBUILT_BUILD: `36` + * CM_JAVA_PREBUILT_URL: `https://download.java.net/openjdk/jdk${CM_JAVA_PREBUILT_VERSION}/ri/` + * CM_JAVA_PREBUILT_FILENAME: `openjdk-${CM_JAVA_PREBUILT_VERSION}+${CM_JAVA_PREBUILT_BUILD}_${CM_JAVA_PREBUILT_HOST_OS}-x64_bin` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-java/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-java/run.bat) +___ +#### Script output +```bash +cmr "get java [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-javac/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-javac/index.md new file mode 100644 index 000000000..fae70fe46 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-javac/index.md @@ -0,0 +1,124 @@ +# get-javac +Automatically generated README for this automation recipe: **get-javac** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-javac/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-javac/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get javac" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,javac[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get javac [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,javac' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get javac[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_install` + - ENV variables: + - CM_JAVAC_PREBUILT_INSTALL: `on` + +
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--install=value` → `CM_JAVAC_PREBUILT_INSTALL=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_JAVAC_PREBUILT_VERSION: `19` + * CM_JAVAC_PREBUILT_BUILD: `36` + * CM_JAVAC_PREBUILT_URL: `https://download.java.net/openjdk/jdk${CM_JAVAC_PREBUILT_VERSION}/ri/` + * CM_JAVAC_PREBUILT_FILENAME: `openjdk-${CM_JAVAC_PREBUILT_VERSION}+${CM_JAVAC_PREBUILT_BUILD}_${CM_JAVAC_PREBUILT_HOST_OS}-x64_bin` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-javac/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-javac/run.bat) +___ +#### Script output +```bash +cmr "get javac [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-armnn/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-armnn/index.md new file mode 100644 index 000000000..99b740e1f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-armnn/index.md @@ -0,0 +1,92 @@ +# get-lib-armnn +Automatically generated README for this automation recipe: **get-lib-armnn** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-armnn/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get lib-armnn lib armnn" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,lib-armnn,lib,armnn + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get lib-armnn lib armnn " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,lib-armnn,lib,armnn' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get lib-armnn lib armnn" + ``` +___ + +#### Versions +Default version: `23.11` + +* `22.11` +* `23.05` +* `23.11` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-armnn/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get lib-armnn lib armnn " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-dnnl/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-dnnl/index.md new file mode 100644 index 000000000..b0860ce56 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-dnnl/index.md @@ -0,0 +1,91 @@ +# get-lib-dnnl +Automatically generated README for this automation recipe: **get-lib-dnnl** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-dnnl/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get lib-dnnl lib dnnl" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,lib-dnnl,lib,dnnl + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get lib-dnnl lib dnnl " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,lib-dnnl,lib,dnnl' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get lib-dnnl lib dnnl" + ``` +___ + +#### Versions +Default version: `dev` + +* `2.2.4` +* `dev` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-dnnl/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get lib-dnnl lib dnnl " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-protobuf/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-protobuf/index.md new file mode 100644 index 000000000..5786390ad --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-protobuf/index.md @@ -0,0 +1,107 @@ +# get-lib-protobuf +Automatically generated README for this automation recipe: **get-lib-protobuf** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-protobuf/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get google-protobuf protobuf lib lib-protobuf google" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,google-protobuf,protobuf,lib,lib-protobuf,google[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get google-protobuf protobuf lib lib-protobuf google [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,google-protobuf,protobuf,lib,lib-protobuf,google' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get google-protobuf protobuf lib lib-protobuf google[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_branch.#` + - ENV variables: + - CM_TMP_GIT_CHECKOUT: `#` + * `_tag.#` + - ENV variables: + - CM_GIT_CHECKOUT_TAG: `#` + +
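+    For example, a minimal sketch of building protobuf from a specific tag via
+    the `_tag.#` variation from Python (the tag value `v3.21.12` is only an
+    illustrative assumption):
+
+    ```python
+    import cmind
+
+    # Build protobuf from a pinned tag (tag value assumed for illustration)
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,google-protobuf,protobuf,lib,lib-protobuf,google,_tag.v3.21.12',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```
+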
    + +#### Versions +Default version: `1.13.0` + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-protobuf/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get google-protobuf protobuf lib lib-protobuf google [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-qaic-api/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-qaic-api/index.md new file mode 100644 index 000000000..012b061ee --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-qaic-api/index.md @@ -0,0 +1,90 @@ +# get-lib-qaic-api +Automatically generated README for this automation recipe: **get-lib-qaic-api** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-qaic-api/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get api lib-qaic-api lib qaic" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,api,lib-qaic-api,lib,qaic + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get api lib-qaic-api lib qaic " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,api,lib-qaic-api,lib,qaic' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get api lib-qaic-api lib qaic" + ``` +___ + +#### Versions +Default version: `master` + +* `master` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-qaic-api/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get api lib-qaic-api lib qaic " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-nvidia-docker/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-nvidia-docker/index.md new file mode 100644 index 000000000..11bd4a211 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-nvidia-docker/index.md @@ -0,0 +1,86 @@ +# get-nvidia-docker +Automatically generated README for this automation recipe: **get-nvidia-docker** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-docker/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get install nvidia nvidia-container-toolkit nvidia-docker engine" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,install,nvidia,nvidia-container-toolkit,nvidia-docker,engine + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get install nvidia nvidia-container-toolkit nvidia-docker engine " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,install,nvidia,nvidia-container-toolkit,nvidia-docker,engine' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get install nvidia nvidia-container-toolkit nvidia-docker engine" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-docker/run-ubuntu.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get install nvidia nvidia-container-toolkit nvidia-docker engine " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-openssl/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-openssl/index.md new file mode 100644 index 000000000..9eda1419e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-openssl/index.md @@ -0,0 +1,87 @@ +# get-openssl +Automatically generated README for this automation recipe: **get-openssl** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-openssl/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-openssl/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get openssl lib lib-openssl" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,openssl,lib,lib-openssl + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get openssl lib lib-openssl " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,openssl,lib,lib-openssl' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get openssl lib lib-openssl" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-openssl/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get openssl lib lib-openssl " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-rclone/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-rclone/index.md new file mode 100644 index 000000000..bf494897c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-rclone/index.md @@ -0,0 +1,107 @@ +# get-rclone +Automatically generated README for this automation recipe: **get-rclone** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-rclone/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get rclone" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,rclone[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get rclone [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,rclone' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get rclone[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_gdrive` + - ENV variables: + - CM_RCLONE_GDRIVE: `yes` + * `_system` + - ENV variables: + - CM_RCLONE_SYSTEM: `yes` + +
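+    For example, a minimal sketch that selects the `_system` variation from the list above (all other tags as documented on this page):
+
+    ```bash
+    # prefer a system-wide rclone installation (sets CM_RCLONE_SYSTEM=yes)
+    cm run script --tags=get,rclone,_system
+    ```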
    + +#### Versions +Default version: `1.65.2` + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-rclone/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-rclone/run.bat) +___ +#### Script output +```bash +cmr "get rclone [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-cm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-cm/index.md new file mode 100644 index 000000000..558aa7601 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-cm/index.md @@ -0,0 +1,115 @@ +# get-sys-utils-cm +Automatically generated README for this automation recipe: **get-sys-utils-cm** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/_cm.yaml)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get sys-utils-cm" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,sys-utils-cm[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get sys-utils-cm [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,sys-utils-cm' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get sys-utils-cm[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_user` + - ENV variables: + - CM_PYTHON_PIP_USER: `--user` + +
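+    As an illustration of the list above, a sketch that picks the `_user` variation so pip packages are installed per-user:
+
+    ```bash
+    # install Python packages with pip --user (sets CM_PYTHON_PIP_USER=--user)
+    cm run script --tags=get,sys-utils-cm,_user
+    ```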
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--skip=value` → `CM_SKIP_SYS_UTILS=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run-arch.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-arch.sh) + * [run-debian.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-debian.sh) + * [run-macos.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-macos.sh) + * [run-rhel.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-rhel.sh) + * [run-sles.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-sles.sh) + * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-ubuntu.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get sys-utils-cm [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-min/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-min/index.md new file mode 100644 index 000000000..7166bf7a2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-min/index.md @@ -0,0 +1,80 @@ +# get-sys-utils-min +Automatically generated README for this automation recipe: **get-sys-utils-min** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-min/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get sys-utils-min" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,sys-utils-min + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get sys-utils-min " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,sys-utils-min' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get sys-utils-min" + ``` +___ + + +___ +#### Script output +```bash +cmr "get sys-utils-min " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-xilinx-sdk/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-xilinx-sdk/index.md new file mode 100644 index 000000000..147e88815 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-xilinx-sdk/index.md @@ -0,0 +1,99 @@ +# get-xilinx-sdk +Automatically generated README for this automation recipe: **get-xilinx-sdk** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-xilinx-sdk/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get xilinx sdk" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,xilinx,sdk [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get xilinx sdk " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,xilinx,sdk' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get xilinx sdk" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--input=value` → `CM_XILINX_SDK_FILE_PATH=value` + + + +#### Versions +Default version: `2019.1` + +* `2019.1` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-xilinx-sdk/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get xilinx sdk " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-zendnn/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-zendnn/index.md new file mode 100644 index 000000000..58d04192e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-zendnn/index.md @@ -0,0 +1,86 @@ +# get-zendnn +Automatically generated README for this automation recipe: **get-zendnn** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zendnn/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get zendnn amd from.src" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,zendnn,amd,from.src + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get zendnn amd from.src " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,zendnn,amd,from.src' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get zendnn amd from.src" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zendnn/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zendnn/run.bat) +___ +#### Script output +```bash +cmr "get zendnn amd from.src " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/index.md new file mode 100644 index 000000000..2f5ec2c54 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/index.md @@ -0,0 +1,30 @@ +* [get-android-sdk](get-android-sdk/index.md) +* [get-aria2](get-aria2/index.md) +* [get-bazel](get-bazel/index.md) +* [get-blis](get-blis/index.md) +* [get-brew](get-brew/index.md) +* [get-cmake](get-cmake/index.md) +* [get-cmsis_5](get-cmsis_5/index.md) +* [get-docker](get-docker/index.md) +* [get-generic-sys-util](get-generic-sys-util/index.md) +* [get-google-test](get-google-test/index.md) +* [get-java](get-java/index.md) +* [get-javac](get-javac/index.md) +* [get-lib-armnn](get-lib-armnn/index.md) +* [get-lib-dnnl](get-lib-dnnl/index.md) +* [get-lib-protobuf](get-lib-protobuf/index.md) +* [get-lib-qaic-api](get-lib-qaic-api/index.md) +* [get-nvidia-docker](get-nvidia-docker/index.md) +* [get-openssl](get-openssl/index.md) +* [get-rclone](get-rclone/index.md) +* [get-sys-utils-cm](get-sys-utils-cm/index.md) +* [get-sys-utils-min](get-sys-utils-min/index.md) +* [get-xilinx-sdk](get-xilinx-sdk/index.md) +* [get-zendnn](get-zendnn/index.md) +* [install-bazel](install-bazel/index.md) +* [install-cmake-prebuilt](install-cmake-prebuilt/index.md) +* [install-gflags](install-gflags/index.md) +* [install-github-cli](install-github-cli/index.md) +* [install-intel-neural-speed-from-src](install-intel-neural-speed-from-src/index.md) +* [install-numactl-from-src](install-numactl-from-src/index.md) +* [install-openssl](install-openssl/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-bazel/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-bazel/index.md new file mode 100644 index 000000000..d9dee3a52 --- /dev/null +++ 
b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-bazel/index.md @@ -0,0 +1,90 @@ +# install-bazel +Automatically generated README for this automation recipe: **install-bazel** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-bazel/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install script bazel" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,script,bazel + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install script bazel " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,script,bazel' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install script bazel" + ``` +___ + +#### Versions +Default version: `7.0.2` + + +#### Native script being run +=== "Linux/macOS" + * [run-aarch64.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-bazel/run-aarch64.sh) + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-bazel/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/install-bazel/run.bat) +___ +#### Script output +```bash +cmr "install script bazel " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-cmake-prebuilt/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-cmake-prebuilt/index.md new file mode 100644 index 000000000..b02d49ed1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-cmake-prebuilt/index.md @@ -0,0 +1,89 @@ +# install-cmake-prebuilt +Automatically generated README for this automation recipe: **install-cmake-prebuilt** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cmake-prebuilt/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,prebuilt,cmake,prebuilt-cmake,install-prebuilt-cmake + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,prebuilt,cmake,prebuilt-cmake,install-prebuilt-cmake' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake" + ``` +___ + +#### Versions +Default version: `3.28.3` + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cmake-prebuilt/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-gflags/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-gflags/index.md new file mode 100644 index 000000000..adc3b0922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-gflags/index.md @@ -0,0 +1,90 @@ +# install-gflags +Automatically generated README for this automation recipe: **install-gflags** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-gflags/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install src get gflags" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,src,get,gflags + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install src get gflags " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,src,get,gflags' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install src get gflags" + ``` +___ + +#### Versions +Default version: `2.2.2` + +* `2.2.2` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-gflags/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install src get gflags " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-github-cli/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-github-cli/index.md new file mode 100644 index 000000000..36276fc96 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-github-cli/index.md @@ -0,0 +1,88 @@ +# install-github-cli +Automatically generated README for this automation recipe: **install-github-cli** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-github-cli/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install gh github cli github-cli" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,gh,github,cli,github-cli + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install gh github cli github-cli " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,gh,github,cli,github-cli' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install gh github cli github-cli" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run-macos.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-github-cli/run-macos.sh) + * [run-rhel.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-github-cli/run-rhel.sh) + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-github-cli/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install gh github cli github-cli " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-intel-neural-speed-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-intel-neural-speed-from-src/index.md new file mode 100644 index 000000000..36266b661 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-intel-neural-speed-from-src/index.md @@ -0,0 +1,126 @@ +# Build Intel Neural Speed from sources +Automatically generated README for this automation recipe: **install-intel-neural-speed-from-src** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-intel-neural-speed-from-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install src from.src neural-speed intel-neural-speed" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,src,from.src,neural-speed,intel-neural-speed[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install src from.src neural-speed intel-neural-speed [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,src,from.src,neural-speed,intel-neural-speed' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install src from.src neural-speed intel-neural-speed[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_branch.#` + - ENV variables: + - CM_GIT_CHECKOUT: `#` + * `_for-intel-mlperf-inference-v4.0-gptj` + * `_sha.#` + - ENV variables: + - CM_GIT_CHECKOUT_SHA: `#` + * `_tag.#` + - ENV variables: + - CM_GIT_CHECKOUT_TAG: `#` + +
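+    For instance, a hedged sketch that pins the build to a Git branch via `_branch.#` (`main` is only an assumed value for `#`):
+
+    ```bash
+    # check out an assumed branch "main" (sets CM_GIT_CHECKOUT=main)
+    cm run script --tags=install,src,from.src,neural-speed,intel-neural-speed,_branch.main
+    ```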
    + + + * Group "**repo**" +
    + Click here to expand this section. + + * `_repo.#` + - ENV variables: + - CM_GIT_URL: `#` + * **`_repo.https://github.com/intel/neural-speed`** (default) + - ENV variables: + - CM_GIT_URL: `https://github.com/intel/neural-speed` + +
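+    A sketch of overriding the default repository through `_repo.#` (the fork URL below is a hypothetical placeholder):
+
+    ```bash
+    # build from a hypothetical fork instead of intel/neural-speed (sets CM_GIT_URL)
+    cm run script --tags=install,src,from.src,neural-speed,intel-neural-speed,_repo.https://github.com/myorg/neural-speed
+    ```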
    + + + ##### Default variations + + `_repo.https://github.com/intel/neural-speed` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-intel-neural-speed-from-src/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install src from.src neural-speed intel-neural-speed [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-numactl-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-numactl-from-src/index.md new file mode 100644 index 000000000..6c2808bea --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-numactl-from-src/index.md @@ -0,0 +1,125 @@ +# Build numactl from sources +Automatically generated README for this automation recipe: **install-numactl-from-src** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-numactl-from-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install src from.src numactl src-numactl" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,src,from.src,numactl,src-numactl[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install src from.src numactl src-numactl [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,src,from.src,numactl,src-numactl' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install src from.src numactl src-numactl[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_branch.#` + - ENV variables: + - CM_GIT_CHECKOUT: `#` + * `_sha.#` + - ENV variables: + - CM_GIT_CHECKOUT_SHA: `#` + * `_tag.#` + - ENV variables: + - CM_GIT_CHECKOUT_TAG: `#` + +
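+    As a sketch of the list above, a specific commit can be requested via `_sha.#` (the SHA below is a made-up placeholder):
+
+    ```bash
+    # build numactl from an assumed commit (sets CM_GIT_CHECKOUT_SHA)
+    cm run script --tags=install,src,from.src,numactl,src-numactl,_sha.0123abcd
+    ```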
    + + + * Group "**repo**" +
    + Click here to expand this section. + + * `_repo.#` + - ENV variables: + - CM_GIT_URL: `#` + * **`_repo.https://github.com/numactl/numactl`** (default) + - ENV variables: + - CM_GIT_URL: `https://github.com/numactl/numactl` + +
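+    Similarly, a hypothetical mirror could replace the default repository via `_repo.#` (the URL below is a placeholder):
+
+    ```bash
+    # clone from a hypothetical mirror instead of numactl/numactl (sets CM_GIT_URL)
+    cm run script --tags=install,src,from.src,numactl,src-numactl,_repo.https://example.com/mirrors/numactl
+    ```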
    + + + ##### Default variations + + `_repo.https://github.com/numactl/numactl` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-numactl-from-src/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install src from.src numactl src-numactl [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-openssl/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-openssl/index.md new file mode 100644 index 000000000..1e41f8cc6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-openssl/index.md @@ -0,0 +1,90 @@ +# install-openssl +Automatically generated README for this automation recipe: **install-openssl** + +Category: **[Detection or installation of tools and artifacts](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-openssl/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install src openssl openssl-lib" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,src,openssl,openssl-lib + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install src openssl openssl-lib " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,src,openssl,openssl-lib' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install src openssl openssl-lib" + ``` +___ + +#### Versions +Default version: `1.1.1` + +* `1.1.1` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-openssl/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install src openssl openssl-lib " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/benchmark-program/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/benchmark-program/index.md new file mode 100644 index 000000000..0c940eff9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/benchmark-program/index.md @@ -0,0 +1,114 @@ +# benchmark-program +Automatically generated README for this automation recipe: **benchmark-program** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "benchmark program" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=benchmark,program[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "benchmark program [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'benchmark,program' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "benchmark program[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_numactl` + * `_numactl-interleave` + * `_profile` + +
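+    For example, a minimal sketch that selects the `_numactl` variation from the list above:
+
+    ```bash
+    # run the benchmarked program under numactl
+    cm run script --tags=benchmark,program,_numactl
+    ```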
    + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_ENABLE_NUMACTL: `0` + * CM_ENABLE_PROFILING: `0` + + + +#### Native script being run +=== "Linux/macOS" + * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program/run-ubuntu.sh) + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program/run.bat) +___ +#### Script output +```bash +cmr "benchmark program [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/compile-program/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/compile-program/index.md new file mode 100644 index 000000000..51e8e7ece --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/compile-program/index.md @@ -0,0 +1,97 @@ +# compile-program +Automatically generated README for this automation recipe: **compile-program** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-program/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-program/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=compile,program,c-program,cpp-program,compile-program,compile-c-program,compile-cpp-program + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'compile,program,c-program,cpp-program,compile-program,compile-c-program,compile-cpp-program' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program" + ``` +___ + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * SKIP_RECOMPILE: `no` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-program/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-program/run.bat) +___ +#### Script output +```bash +cmr "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/convert-csv-to-md/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/convert-csv-to-md/index.md new file mode 100644 index 000000000..30ee7342f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/convert-csv-to-md/index.md @@ -0,0 +1,96 @@ +# convert-csv-to-md +Automatically generated README for this automation recipe: **convert-csv-to-md** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/convert-csv-to-md/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "csv-to-md convert to-md from-csv" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=csv-to-md,convert,to-md,from-csv [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "csv-to-md convert to-md from-csv " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'csv-to-md,convert,to-md,from-csv' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "csv-to-md convert to-md from-csv" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--csv_file=value` → `CM_CSV_FILE=value` + * `--md_file=value` → `CM_MD_FILE=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/convert-csv-to-md/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/convert-csv-to-md/run.bat) +___ +#### Script output +```bash +cmr "csv-to-md convert to-md from-csv " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/copy-to-clipboard/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/copy-to-clipboard/index.md new file mode 100644 index 000000000..3612d0dc5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/copy-to-clipboard/index.md @@ -0,0 +1,98 @@ +# copy-to-clipboard +Automatically generated README for this automation recipe: **copy-to-clipboard** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/copy-to-clipboard/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "copy to clipboard copy-to-clipboard" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=copy,to,clipboard,copy-to-clipboard [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "copy to clipboard copy-to-clipboard " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'copy,to,clipboard,copy-to-clipboard' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "copy to clipboard copy-to-clipboard" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--add_quotes=value` → `CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES=value` + * `--q=value` → `CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES=value` + * `--t=value` → `CM_COPY_TO_CLIPBOARD_TEXT=value` + * `--text=value` → `CM_COPY_TO_CLIPBOARD_TEXT=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/copy-to-clipboard/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/copy-to-clipboard/run.bat) +___ +#### Script output +```bash +cmr "copy to clipboard copy-to-clipboard " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/create-conda-env/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/create-conda-env/index.md new file mode 100644 index 000000000..bef5e7517 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/create-conda-env/index.md @@ -0,0 +1,101 @@ +# create-conda-env +Automatically generated README for this automation recipe: **create-conda-env** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/create-conda-env/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "create get env conda-env conda-environment create-conda-environment" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=create,get,env,conda-env,conda-environment,create-conda-environment[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "create get env conda-env conda-environment create-conda-environment [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'create,get,env,conda-env,conda-environment,create-conda-environment' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "create get env conda-env conda-environment create-conda-environment[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_name.#` + - ENV variables: + - CM_CONDA_ENV_NAME: `#` + +
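+    For instance (a sketch; `myenv` is only an assumed value for `#`):
+
+    ```bash
+    # create a conda environment named "myenv" (sets CM_CONDA_ENV_NAME=myenv)
+    cm run script --tags=create,get,env,conda-env,conda-environment,create-conda-environment,_name.myenv
+    ```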
    + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/create-conda-env/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "create get env conda-env conda-environment create-conda-environment [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/create-patch/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/create-patch/index.md new file mode 100644 index 000000000..05d7c4279 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/create-patch/index.md @@ -0,0 +1,92 @@ +# create-patch +Automatically generated README for this automation recipe: **create-patch** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/create-patch/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/create-patch/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "create patch" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=create,patch [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "create patch " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'create,patch' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "create patch" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--exclude=value` → `CM_CREATE_PATCH_EXCLUDE=value` + * `--new=value` → `CM_CREATE_PATCH_NEW=value` + * `--old=value` → `CM_CREATE_PATCH_OLD=value` + + + + +___ +#### Script output +```bash +cmr "create patch " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/detect-sudo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/detect-sudo/index.md new file mode 100644 index 000000000..9bb3a47f0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/detect-sudo/index.md @@ -0,0 +1,86 @@ +# detect-sudo +Automatically generated README for this automation recipe: **detect-sudo** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-sudo/_cm.yaml)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "detect sudo access" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=detect,sudo,access + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "detect sudo access " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'detect,sudo,access' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "detect sudo access" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-sudo/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "detect sudo access " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-and-extract/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-and-extract/index.md new file mode 100644 index 000000000..1bb91aa3f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-and-extract/index.md @@ -0,0 +1,145 @@ +# download-and-extract +Automatically generated README for this automation recipe: **download-and-extract** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/download-and-extract/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/download-and-extract/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "download-and-extract file" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=download-and-extract,file[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "download-and-extract file [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'download-and-extract,file' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "download-and-extract file[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_extract` + - ENV variables: + - CM_DAE_EXTRACT_DOWNLOADED: `yes` + * `_keep` + - ENV variables: + - CM_EXTRACT_REMOVE_EXTRACTED: `no` + * `_no-remove-extracted` + - ENV variables: + - CM_EXTRACT_REMOVE_EXTRACTED: `no` + * `_url.#` + - ENV variables: + - CM_DAE_URL: `#` + +
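+    As a hedged sketch, `_extract` can be combined with the `--url` flag documented below (the archive URL is a placeholder):
+
+    ```bash
+    # download a hypothetical archive and extract it (sets CM_DAE_EXTRACT_DOWNLOADED=yes)
+    cm run script --tags=download-and-extract,file,_extract --url=https://example.com/data.tar.gz
+    ```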
    + + + * Group "**download-tool**" +
    + Click here to expand this section. + + * **`_cmutil`** (default) + * `_curl` + * `_gdown` + * `_rclone` + * `_torrent` + - ENV variables: + - CM_DAE_DOWNLOAD_USING_TORRENT: `yes` + - CM_TORRENT_DOWNLOADED_FILE_NAME: `<<>>` + - CM_TORRENT_DOWNLOADED_PATH_ENV_KEY: `CM_DAE_FILEPATH` + - CM_TORRENT_WAIT_UNTIL_COMPLETED: `yes` + * `_wget` + +
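+    The download tool from the group above can also be forced explicitly, e.g. wget instead of the default `_cmutil` (URL is again a placeholder):
+
+    ```bash
+    # same action, but downloading with wget
+    cm run script --tags=download-and-extract,file,_wget,_extract --url=https://example.com/data.tar.gz
+    ```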
    + + + ##### Default variations + + `_cmutil` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--download_path=value` → `CM_DOWNLOAD_PATH=value` + * `--extra_folder=value` → `CM_EXTRACT_TO_FOLDER=value` + * `--extract_path=value` → `CM_EXTRACT_PATH=value` + * `--from=value` → `CM_DOWNLOAD_LOCAL_FILE_PATH=value` + * `--local_path=value` → `CM_DOWNLOAD_LOCAL_FILE_PATH=value` + * `--store=value` → `CM_DOWNLOAD_PATH=value` + * `--to=value` → `CM_EXTRACT_PATH=value` + * `--url=value` → `CM_DAE_URL=value` + * `--verify=value` → `CM_VERIFY_SSL=value` + + + + +___ +#### Script output +```bash +cmr "download-and-extract file [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-file/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-file/index.md new file mode 100644 index 000000000..2899d4941 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-file/index.md @@ -0,0 +1,156 @@ +# download-file +Automatically generated README for this automation recipe: **download-file** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/download-file/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/download-file/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "download file" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=download,file[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "download file [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'download,file' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "download file[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_url.#` + - ENV variables: + - CM_DOWNLOAD_URL: `#` + +
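+    For example, a minimal sketch using the `--url` and `--md5sum` flags documented below (both values are placeholders):
+
+    ```bash
+    # fetch a hypothetical file and verify its MD5 checksum (CM_DOWNLOAD_CHECKSUM)
+    cm run script --tags=download,file --url=https://example.com/model.onnx --md5sum=0123456789abcdef0123456789abcdef
+    ```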
    + + + * Group "**download-tool**" +
    + Click here to expand this section. + + * **`_cmutil`** (default) + - ENV variables: + - CM_DOWNLOAD_TOOL: `cmutil` + * `_curl` + - ENV variables: + - CM_DOWNLOAD_TOOL: `curl` + * `_gdown` + - ENV variables: + - CM_DOWNLOAD_TOOL: `gdown` + * `_rclone` + - ENV variables: + - CM_DOWNLOAD_TOOL: `rclone` + * `_wget` + - ENV variables: + - CM_DOWNLOAD_TOOL: `wget` + +
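+    Selecting `_wget` from the group above switches `CM_DOWNLOAD_TOOL` away from the default `cmutil` (URL is a placeholder):
+
+    ```bash
+    # download the same hypothetical file with wget
+    cm run script --tags=download,file,_wget --url=https://example.com/model.onnx
+    ```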
    + + + ##### Default variations + + `_cmutil` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--download_path=value` → `CM_DOWNLOAD_PATH=value` + * `--from=value` → `CM_DOWNLOAD_LOCAL_FILE_PATH=value` + * `--local_path=value` → `CM_DOWNLOAD_LOCAL_FILE_PATH=value` + * `--md5sum=value` → `CM_DOWNLOAD_CHECKSUM=value` + * `--output_file=value` → `CM_DOWNLOAD_FILENAME=value` + * `--store=value` → `CM_DOWNLOAD_PATH=value` + * `--url=value` → `CM_DOWNLOAD_URL=value` + * `--verify=value` → `CM_VERIFY_SSL=value` + * `--verify_ssl=value` → `CM_VERIFY_SSL=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_RCLONE_COPY_USING: `sync` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/download-file/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/download-file/run.bat) +___ +#### Script output +```bash +cmr "download file [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-torrent/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-torrent/index.md new file mode 100644 index 000000000..3d2aecbdb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/download-torrent/index.md @@ -0,0 +1,120 @@ +# download-torrent +Automatically generated README for this automation recipe: **download-torrent** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/download-torrent/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "download torrent download-torrent" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=download,torrent,download-torrent[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "download torrent download-torrent [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'download,torrent,download-torrent' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "download torrent download-torrent[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_torrent.#`
+      - ENV variables:
+        - CM_TORRENT_FILE: `#`
+
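+    A hedged sketch (the `.torrent` path is a placeholder) that blocks until the download completes via the `--wait` flag mapped below:
+
+    ```bash
+    # Hypothetical: point CM_TORRENT_FILE at a local .torrent file and wait for completion
+    cmr "download torrent download-torrent _torrent./tmp/dataset.torrent" --wait=yes -j
+    ```
+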
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--wait=value` → `CM_TORRENT_WAIT_UNTIL_COMPLETED=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_TORRENT_WAIT_UNTIL_COMPLETED: `no` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/download-torrent/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "download torrent download-torrent [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/extract-file/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/extract-file/index.md new file mode 100644 index 000000000..c58463bb8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/extract-file/index.md @@ -0,0 +1,120 @@ +# extract-file +Automatically generated README for this automation recipe: **extract-file** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/extract-file/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/extract-file/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "extract file" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=extract,file[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "extract file [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'extract,file' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "extract file[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_keep`
+      - ENV variables:
+        - CM_EXTRACT_REMOVE_EXTRACTED: `no`
+    * `_no-remove-extracted`
+      - ENV variables:
+        - CM_EXTRACT_REMOVE_EXTRACTED: `no`
+    * `_path.#`
+      - ENV variables:
+        - CM_EXTRACT_FILEPATH: `#`
+
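+    For instance, a sketch (placeholder archive and output paths) of extracting while keeping the original archive, using the flags mapped below:
+
+    ```bash
+    # Hypothetical: extract /tmp/archive.tar.gz to /tmp/out and keep the archive
+    cmr "extract file _keep" --input=/tmp/archive.tar.gz --to=/tmp/out -j
+    ```
+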
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--extra_folder=value` → `CM_EXTRACT_TO_FOLDER=value` + * `--extract_path=value` → `CM_EXTRACT_PATH=value` + * `--input=value` → `CM_EXTRACT_FILEPATH=value` + * `--to=value` → `CM_EXTRACT_PATH=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/extract-file/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/extract-file/run.bat) +___ +#### Script output +```bash +cmr "extract file [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/fail/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/fail/index.md new file mode 100644 index 000000000..811924c37 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/fail/index.md @@ -0,0 +1,96 @@ +# fail +Automatically generated README for this automation recipe: **fail** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/fail/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/fail/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "fail filter" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=fail,filter[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "fail filter [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'fail,filter' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "fail filter[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_windows`
+      - ENV variables:
+        - CM_FAIL_WINDOWS: `True`
+
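+    A minimal sketch of using this recipe inside a workflow:
+
+    ```bash
+    # Hypothetical: intended to fail on Windows hosts (CM_FAIL_WINDOWS=True)
+    cmr "fail filter _windows" -j
+    ```
+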
    + + +___ +#### Script output +```bash +cmr "fail filter [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-conda/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-conda/index.md new file mode 100644 index 000000000..904deffb8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-conda/index.md @@ -0,0 +1,115 @@ +# get-conda +Automatically generated README for this automation recipe: **get-conda** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-conda/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get conda get-conda" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,conda,get-conda[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get conda get-conda [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,conda,get-conda' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get conda get-conda[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_name.#`
+      - ENV variables:
+        - CM_CONDA_PREFIX_NAME: `#`
+
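+    A hedged sketch (the prefix name `my-env` is a placeholder):
+
+    ```bash
+    # Hypothetical: detect or install conda under a named prefix
+    cmr "get conda get-conda _name.my-env" -j
+    ```
+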
    + + + * Group "**conda-python**" +
+    * `_python-3.#`
+      - ENV variables:
+        - CM_CONDA_PYTHON_VERSION: `3.#`
+    * `_python-3.8`
+      - ENV variables:
+        - CM_CONDA_PYTHON_VERSION: `3.8`
+
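+    For example, a sketch pinning the conda Python version via the wildcard variation:
+
+    ```bash
+    # `3.#` expands from the tag suffix, e.g. _python-3.10 sets CM_CONDA_PYTHON_VERSION=3.10
+    cmr "get conda get-conda _python-3.10" -j
+    ```
+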
    + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-conda/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-conda/run.bat) +___ +#### Script output +```bash +cmr "get conda get-conda [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-git-repo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-git-repo/index.md new file mode 100644 index 000000000..8108b2915 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-git-repo/index.md @@ -0,0 +1,187 @@ +# get-git-repo +Automatically generated README for this automation recipe: **get-git-repo** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-git-repo/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-git-repo/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get git repo repository clone" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,git,repo,repository,clone[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get git repo repository clone [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,git,repo,repository,clone' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get git repo repository clone[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_lfs`
+      - ENV variables:
+        - CM_GIT_REPO_NEEDS_LFS: `yes`
+    * `_no-recurse-submodules`
+      - ENV variables:
+        - CM_GIT_RECURSE_SUBMODULES: ``
+    * `_patch`
+      - ENV variables:
+        - CM_GIT_PATCH: `yes`
+    * `_submodules.#`
+      - ENV variables:
+        - CM_GIT_SUBMODULES: `#`
+
+    (A combined usage sketch follows the variation groups below.)
+
    + + + * Group "**checkout**" +
+    * `_branch.#`
+      - ENV variables:
+        - CM_GIT_BRANCH: `#`
+    * `_sha.#`
+      - ENV variables:
+        - CM_GIT_SHA: `#`
+    * `_tag.#`
+      - ENV variables:
+        - CM_GIT_CHECKOUT_TAG: `#`
+
    + + + * Group "**git-history**" +
+    * `_full-history`
+      - ENV variables:
+        - CM_GIT_DEPTH: ``
+    * **`_short-history`** (default)
+      - ENV variables:
+        - CM_GIT_DEPTH: `--depth 5`
+
    + + + * Group "**repo**" +
+    * `_repo.#`
+      - ENV variables:
+        - CM_GIT_URL: `#`
+
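+    As referenced above, a combined sketch (repository URL and branch name are placeholders) that mixes variations from several groups:
+
+    ```bash
+    # Hypothetical: shallow-clone a given repo on a given branch with Git LFS support
+    cmr "get git repo repository clone _repo.https://github.com/mlcommons/inference _branch.master _lfs" -j
+    ```
+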
    + + + ##### Default variations + + `_short-history` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--branch=value` → `CM_GIT_CHECKOUT=value` + * `--depth=value` → `CM_GIT_DEPTH=value` + * `--env_key=value` → `CM_GIT_ENV_KEY=value` + * `--folder=value` → `CM_GIT_CHECKOUT_FOLDER=value` + * `--patch=value` → `CM_GIT_PATCH=value` + * `--pull=value` → `CM_GIT_REPO_PULL=value` + * `--submodules=value` → `CM_GIT_RECURSE_SUBMODULES=value` + * `--update=value` → `CM_GIT_REPO_PULL=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_GIT_DEPTH: `--depth 4` + * CM_GIT_CHECKOUT_FOLDER: `repo` + * CM_GIT_PATCH: `no` + * CM_GIT_RECURSE_SUBMODULES: ` --recurse-submodules` + * CM_GIT_URL: `https://github.com/mlcommons/ck.git` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-git-repo/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-git-repo/run.bat) +___ +#### Script output +```bash +cmr "get git repo repository clone [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-github-cli/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-github-cli/index.md new file mode 100644 index 000000000..06d0a33f0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/get-github-cli/index.md @@ -0,0 +1,86 @@ +# get-github-cli +Automatically generated README for this automation recipe: **get-github-cli** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-github-cli/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get gh gh-cli github cli github-cli" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,gh,gh-cli,github,cli,github-cli + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get gh gh-cli github cli github-cli " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,gh,gh-cli,github,cli,github-cli' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get gh gh-cli github cli github-cli" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-github-cli/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-github-cli/run.bat) +___ +#### Script output +```bash +cmr "get gh gh-cli github cli github-cli " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/index.md new file mode 100644 index 000000000..94dd95d37 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/index.md @@ -0,0 +1,22 @@ +* [benchmark-program](benchmark-program/index.md) +* [compile-program](compile-program/index.md) +* [convert-csv-to-md](convert-csv-to-md/index.md) +* [copy-to-clipboard](copy-to-clipboard/index.md) +* [create-conda-env](create-conda-env/index.md) +* [create-patch](create-patch/index.md) +* [detect-sudo](detect-sudo/index.md) +* [download-and-extract](download-and-extract/index.md) +* [download-file](download-file/index.md) +* [download-torrent](download-torrent/index.md) +* [extract-file](extract-file/index.md) +* [fail](fail/index.md) +* [get-conda](get-conda/index.md) +* [get-git-repo](get-git-repo/index.md) +* [get-github-cli](get-github-cli/index.md) +* [pull-git-repo](pull-git-repo/index.md) +* [push-csv-to-spreadsheet](push-csv-to-spreadsheet/index.md) +* [set-device-settings-qaic](set-device-settings-qaic/index.md) +* [set-echo-off-win](set-echo-off-win/index.md) +* [set-performance-mode](set-performance-mode/index.md) +* [set-sqlite-dir](set-sqlite-dir/index.md) +* [tar-my-folder](tar-my-folder/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/pull-git-repo/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/pull-git-repo/index.md new file mode 100644 index 000000000..e600e5129 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/pull-git-repo/index.md @@ -0,0 +1,95 @@ +# pull-git-repo +Automatically generated README for this automation recipe: **pull-git-repo** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/pull-git-repo/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "pull git repo repository" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=pull,git,repo,repository [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "pull git repo repository " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'pull,git,repo,repository' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "pull git repo repository" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--path=value` → `CM_GIT_CHECKOUT_PATH=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/pull-git-repo/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "pull git repo repository " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/push-csv-to-spreadsheet/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/push-csv-to-spreadsheet/index.md new file mode 100644 index 000000000..1ea013cb0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/push-csv-to-spreadsheet/index.md @@ -0,0 +1,107 @@ +# push-csv-to-spreadsheet +Automatically generated README for this automation recipe: **push-csv-to-spreadsheet** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/push-csv-to-spreadsheet/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "push google-spreadsheet spreadsheet push-to-google-spreadsheet" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=push,google-spreadsheet,spreadsheet,push-to-google-spreadsheet [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "push google-spreadsheet spreadsheet push-to-google-spreadsheet " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'push,google-spreadsheet,spreadsheet,push-to-google-spreadsheet' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "push google-spreadsheet spreadsheet push-to-google-spreadsheet" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--csv_file=value` → `CM_CSV_FILE_PATH=value` + * `--sheet_name=value` → `CM_GOOGLE_SHEET_NAME=value` + * `--spreadsheet_id=value` → `CM_GOOGLE_SPREADSHEET_ID=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_GOOGLE_SPREADSHEET_ID: `1gMHjXmFmwZR4-waPPyxy5Pc3VARqX3kKUWxkP97Xa6Y` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/push-csv-to-spreadsheet/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "push google-spreadsheet spreadsheet push-to-google-spreadsheet " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-device-settings-qaic/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-device-settings-qaic/index.md new file mode 100644 index 000000000..149675edd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-device-settings-qaic/index.md @@ -0,0 +1,114 @@ +# set-device-settings-qaic +Automatically generated README for this automation recipe: **set-device-settings-qaic** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/set-device-settings-qaic/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "set device qaic ai100 cloud performance power setting mode vc ecc" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=set,device,qaic,ai100,cloud,performance,power,setting,mode,vc,ecc[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "set device qaic ai100 cloud performance power setting mode vc ecc [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'set,device,qaic,ai100,cloud,performance,power,setting,mode,vc,ecc' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "set device qaic ai100 cloud performance power setting mode vc ecc[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_ecc`
+      - ENV variables:
+        - CM_QAIC_ECC: `yes`
+    * `_vc.#`
+      - ENV variables:
+        - CM_QAIC_VC: `#`
+
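+    A hedged sketch (the `vc` value `16` is a placeholder) of combining both variations:
+
+    ```bash
+    # Hypothetical: enable ECC and set CM_QAIC_VC=16 on the default device (CM_QAIC_DEVICES=0)
+    cmr "set device qaic ai100 cloud performance power setting mode vc ecc _ecc _vc.16" -j
+    ```
+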
    + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_QAIC_DEVICES: `0` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/set-device-settings-qaic/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "set device qaic ai100 cloud performance power setting mode vc ecc [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-echo-off-win/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-echo-off-win/index.md new file mode 100644 index 000000000..52ff14aa9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-echo-off-win/index.md @@ -0,0 +1,80 @@ +# set-echo-off-win +Automatically generated README for this automation recipe: **set-echo-off-win** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/set-echo-off-win/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "set echo off win echo-off-win echo-off" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=set,echo,off,win,echo-off-win,echo-off + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "set echo off win echo-off-win echo-off " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'set,echo,off,win,echo-off-win,echo-off' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "set echo off win echo-off-win echo-off" + ``` +___ + + +___ +#### Script output +```bash +cmr "set echo off win echo-off-win echo-off " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-performance-mode/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-performance-mode/index.md new file mode 100644 index 000000000..3a1c6de33 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-performance-mode/index.md @@ -0,0 +1,139 @@ +# set-performance-mode +Automatically generated README for this automation recipe: **set-performance-mode** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/set-performance-mode/_cm.json)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "set system performance power mode" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=set,system,performance,power,mode[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "set system performance power mode [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'set,system,performance,power,mode' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "set system performance power mode[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_reproducibility`
+      - ENV variables:
+        - CM_SET_OS_PERFORMANCE_REPRODUCIBILITY_MODE: `yes`
+
    + + + * Group "**device**" +
+    * **`_cpu`** (default)
+      - ENV variables:
+        - CM_SET_PERFORMANCE_MODE_OF: `cpu`
+
    + + + * Group "**performance-mode**" +
+    * **`_performance`** (default)
+      - ENV variables:
+        - CM_SET_PERFORMANCE_MODE: `performance`
+
    + + + * Group "**power**" +
+    * `_power`
+      - ENV variables:
+        - CM_SET_PERFORMANCE_MODE: `power`
+
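+    For instance, a minimal sketch of switching to the power-saving profile instead of the default `_cpu,_performance` combination:
+
+    ```bash
+    # Selects the power profile from the "power" group; _cpu stays the default device
+    cmr "set system performance power mode _power" -j
+    ```
+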
    + + + ##### Default variations + + `_cpu,_performance` + +#### Native script being run +=== "Linux/macOS" + * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/set-performance-mode/run-ubuntu.sh) + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/set-performance-mode/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/set-performance-mode/run.bat) +___ +#### Script output +```bash +cmr "set system performance power mode [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-sqlite-dir/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-sqlite-dir/index.md new file mode 100644 index 000000000..69229f604 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/set-sqlite-dir/index.md @@ -0,0 +1,95 @@ +# set-sqlite-dir +Automatically generated README for this automation recipe: **set-sqlite-dir** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/set-sqlite-dir/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "set sqlite dir sqlite-dir" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=set,sqlite,dir,sqlite-dir [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "set sqlite dir sqlite-dir " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'set,sqlite,dir,sqlite-dir' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "set sqlite dir sqlite-dir" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--path=value` → `CM_SQLITE_PATH=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/set-sqlite-dir/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/set-sqlite-dir/run.bat) +___ +#### Script output +```bash +cmr "set sqlite dir sqlite-dir " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/tar-my-folder/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/tar-my-folder/index.md new file mode 100644 index 000000000..91b8bcaf0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/DevOps-automation/tar-my-folder/index.md @@ -0,0 +1,92 @@ +# tar-my-folder +Automatically generated README for this automation recipe: **tar-my-folder** + +Category: **[DevOps automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/tar-my-folder/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/tar-my-folder/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "run tar" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=run,tar [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "run tar " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'run,tar' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "run tar" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--input_dir=value` → `CM_TAR_INPUT_DIR=value` + * `--outfile=value` → `CM_TAR_OUTFILE=value` + * `--output_dir=value` → `CM_TAR_OUTPUT_DIR=value` + + + + +___ +#### Script output +```bash +cmr "run tar " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/build-docker-image/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/build-docker-image/index.md new file mode 100644 index 000000000..979bdc8a1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/build-docker-image/index.md @@ -0,0 +1,120 @@ +# build-docker-image +Automatically generated README for this automation recipe: **build-docker-image** + +Category: **[Docker automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "build docker image docker-image dockerimage" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=build,docker,image,docker-image,dockerimage [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "build docker image docker-image dockerimage " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'build,docker,image,docker-image,dockerimage' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "build docker image docker-image dockerimage" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--cache=value` → `CM_DOCKER_CACHE=value` + * `--cm_repo=value` → `CM_MLOPS_REPO=value` + * `--docker_os=value` → `CM_DOCKER_OS=value` + * `--docker_os_version=value` → `CM_DOCKER_OS_VERSION=value` + * `--dockerfile=value` → `CM_DOCKERFILE_WITH_PATH=value` + * `--gh_token=value` → `CM_GH_TOKEN=value` + * `--image_name=value` → `CM_DOCKER_IMAGE_NAME=value` + * `--image_repo=value` → `CM_DOCKER_IMAGE_REPO=value` + * `--image_tag=value` → `CM_DOCKER_IMAGE_TAG=value` + * `--post_run_cmds=value` → `CM_DOCKER_POST_RUN_COMMANDS=value` + * `--pre_run_cmds=value` → `CM_DOCKER_PRE_RUN_COMMANDS=value` + * `--push_image=value` → `CM_DOCKER_PUSH_IMAGE=value` + * `--real_run=value` → `CM_REAL_RUN=value` + * `--script_tags=value` → `CM_DOCKER_RUN_SCRIPT_TAGS=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_DOCKER_IMAGE_REPO: `local` + * CM_DOCKER_IMAGE_TAG: `latest` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image/run.bat) +___ +#### Script output +```bash +cmr "build docker image docker-image dockerimage " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/build-dockerfile/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/build-dockerfile/index.md new file mode 100644 index 000000000..7e4ea3639 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/build-dockerfile/index.md @@ -0,0 +1,145 @@ +# build-dockerfile +Automatically generated README for this automation recipe: **build-dockerfile** + +Category: **[Docker automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/build-dockerfile/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/build-dockerfile/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "build dockerfile" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=build,dockerfile[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "build dockerfile [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'build,dockerfile' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "build dockerfile[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    * `_slim`
+      - ENV variables:
+        - CM_DOCKER_BUILD_SLIM: `yes`
+
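+    A hedged sketch (the OS version is a placeholder) of generating a slim Dockerfile and building the image in one go via the flags mapped below:
+
+    ```bash
+    # Hypothetical: slim Dockerfile for Ubuntu 22.04, then build it (--build maps to CM_BUILD_DOCKER_IMAGE)
+    cmr "build dockerfile _slim" --docker_os=ubuntu --docker_os_version=22.04 --build=yes -j
+    ```
+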
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--build=value` → `CM_BUILD_DOCKER_IMAGE=value` + * `--cache=value` → `CM_DOCKER_CACHE=value` + * `--cm_repo=value` → `CM_MLOPS_REPO=value` + * `--cm_repo_flags=value` → `CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO=value` + * `--cm_repos=value` → `CM_DOCKER_EXTRA_CM_REPOS=value` + * `--comments=value` → `CM_DOCKER_RUN_COMMENTS=value` + * `--copy_files=value` → `CM_DOCKER_COPY_FILES=value` + * `--docker_base_image=value` → `CM_DOCKER_IMAGE_BASE=value` + * `--docker_os=value` → `CM_DOCKER_OS=value` + * `--docker_os_version=value` → `CM_DOCKER_OS_VERSION=value` + * `--extra_sys_deps=value` → `CM_DOCKER_EXTRA_SYS_DEPS=value` + * `--fake_docker_deps=value` → `CM_DOCKER_FAKE_DEPS=value` + * `--fake_run_option=value` → `CM_DOCKER_FAKE_RUN_OPTION=value` + * `--file_path=value` → `CM_DOCKERFILE_WITH_PATH=value` + * `--gh_token=value` → `CM_GH_TOKEN=value` + * `--image_repo=value` → `CM_DOCKER_IMAGE_REPO=value` + * `--image_tag=value` → `CM_DOCKER_IMAGE_TAG=value` + * `--package_manager_update_cmd=value` → `CM_PACKAGE_MANAGER_UPDATE_CMD=value` + * `--pip_extra_flags=value` → `CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS=value` + * `--post_file=value` → `DOCKER_IMAGE_POST_FILE=value` + * `--post_run_cmds=value` → `CM_DOCKER_POST_RUN_COMMANDS=value` + * `--pre_run_cmds=value` → `CM_DOCKER_PRE_RUN_COMMANDS=value` + * `--push_image=value` → `CM_DOCKER_PUSH_IMAGE=value` + * `--real_run=value` → `CM_REAL_RUN=value` + * `--run_cmd=value` → `CM_DOCKER_RUN_CMD=value` + * `--run_cmd_extra=value` → `CM_DOCKER_RUN_CMD_EXTRA=value` + * `--script_tags=value` → `CM_DOCKER_RUN_SCRIPT_TAGS=value` + * `--skip_cm_sys_upgrade=value` → `CM_DOCKER_SKIP_CM_SYS_UPGRADE=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_DOCKER_BUILD_SLIM: `no` + * CM_DOCKER_IMAGE_EOL: ` +` + * CM_DOCKER_OS: `ubuntu` + + + +___ +#### Script output +```bash +cmr "build dockerfile [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/index.md new file mode 100644 index 000000000..ec6c83374 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/index.md @@ -0,0 +1,4 @@ +* [build-docker-image](build-docker-image/index.md) +* [build-dockerfile](build-dockerfile/index.md) +* [prune-docker](prune-docker/index.md) +* [run-docker-container](run-docker-container/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/prune-docker/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/prune-docker/index.md new file mode 100644 index 000000000..e1025b409 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/prune-docker/index.md @@ -0,0 +1,86 @@ +# prune-docker +Automatically generated README for this automation recipe: **prune-docker** + +Category: **[Docker automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-docker/_cm.json)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "prune docker" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=prune,docker + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "prune docker " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'prune,docker' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "prune docker" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-docker/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-docker/run.bat) +___ +#### Script output +```bash +cmr "prune docker " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/run-docker-container/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/run-docker-container/index.md new file mode 100644 index 000000000..68266dfa5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Docker-automation/run-docker-container/index.md @@ -0,0 +1,130 @@ +# run-docker-container +Automatically generated README for this automation recipe: **run-docker-container** + +Category: **[Docker automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-docker-container/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/run-docker-container/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "run docker container" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=run,docker,container [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "run docker container " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'run,docker,container' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "run docker container" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--all_gpus=value` → `CM_DOCKER_ADD_ALL_GPUS=value` + * `--base=value` → `CM_DOCKER_IMAGE_BASE=value` + * `--cache=value` → `CM_DOCKER_CACHE=value` + * `--cm_repo=value` → `CM_MLOPS_REPO=value` + * `--detached=value` → `CM_DOCKER_DETACHED_MODE=value` + * `--device=value` → `CM_DOCKER_ADD_DEVICE=value` + * `--docker_image_base=value` → `CM_DOCKER_IMAGE_BASE=value` + * `--docker_os=value` → `CM_DOCKER_OS=value` + * `--docker_os_version=value` → `CM_DOCKER_OS_VERSION=value` + * `--extra_run_args=value` → `CM_DOCKER_EXTRA_RUN_ARGS=value` + * `--fake_run_option=value` → `CM_DOCKER_FAKE_RUN_OPTION=value` + * `--gh_token=value` → `CM_GH_TOKEN=value` + * `--image_name=value` → `CM_DOCKER_IMAGE_NAME=value` + * `--image_repo=value` → `CM_DOCKER_IMAGE_REPO=value` + * `--image_tag=value` → `CM_DOCKER_IMAGE_TAG=value` + * `--image_tag_extra=value` → `CM_DOCKER_IMAGE_TAG_EXTRA=value` + * `--interactive=value` → `CM_DOCKER_INTERACTIVE_MODE=value` + * `--it=value` → `CM_DOCKER_INTERACTIVE=value` + * `--mounts=value` → `CM_DOCKER_VOLUME_MOUNTS=value` + * `--num_gpus=value` → `CM_DOCKER_ADD_NUM_GPUS=value` + * `--pass_user_group=value` → `CM_DOCKER_PASS_USER_GROUP=value` + * `--port_maps=value` → `CM_DOCKER_PORT_MAPS=value` + * `--post_run_cmds=value` → `CM_DOCKER_POST_RUN_COMMANDS=value` + * `--pre_run_cmds=value` → `CM_DOCKER_PRE_RUN_COMMANDS=value` + * `--real_run=value` → `CM_REAL_RUN=value` + * `--recreate=value` → `CM_DOCKER_IMAGE_RECREATE=value` + * `--run_cmd=value` → `CM_DOCKER_RUN_CMD=value` + * `--run_cmd_extra=value` → `CM_DOCKER_RUN_CMD_EXTRA=value` + * `--save_script=value` → `CM_DOCKER_SAVE_SCRIPT=value` + * `--script_tags=value` → `CM_DOCKER_RUN_SCRIPT_TAGS=value` + * `--shm_size=value` → `CM_DOCKER_SHM_SIZE=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_DOCKER_DETACHED_MODE: `yes` + + + +___ +#### Script output +```bash +cmr "run docker container " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/GUI/gui/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/GUI/gui/index.md new file mode 100644 index 000000000..65f72b8c8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/GUI/gui/index.md @@ -0,0 +1,174 @@ +# gui +Automatically generated README for this automation recipe: **gui** + +Category: **[GUI](..)** + +License: **Apache 2.0** + +Developers: [Grigori Fursin](https://cKnowledge.org/gfursin) + + +--- + +This CM script provides a unified GUI to run CM scripts using [Streamlit library](https://streamlit.io). + +If you want to run it in a cloud (Azure, AWS, GCP), you need to open some port and test that you can reach it from outside. + +By default, streamlit uses port 8501 but you can change it as follows: + +```bash +cm run script "cm gui" --port 80 +``` + +If you have troubles accessing this port, use this simple python module to test if your port is open: +```bash +python3 -m http.server 80 +``` + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/gui/_cm.yaml)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "cm gui cm-gui script-gui cm-script-gui streamlit" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=cm,gui,cm-gui,script-gui,cm-script-gui,streamlit[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "cm gui cm-gui script-gui cm-script-gui streamlit [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'cm,gui,cm-gui,script-gui,cm-script-gui,streamlit' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "cm gui cm-gui script-gui cm-script-gui streamlit[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**app**" +
+    * `_chatgpt`
+      - ENV variables:
+        - CM_GUI_APP: `chatgpt`
+    * `_graph`
+      - ENV variables:
+        - CM_GUI_APP: `graph`
+    * `_main`
+      - ENV variables:
+        - CM_GUI_APP: `app`
+    * `_playground`
+      - ENV variables:
+        - CM_GUI_APP: `playground`
+
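+    For example, a sketch (the port number is arbitrary) of opening the playground app on a custom port via the flags mapped below:
+
+    ```bash
+    # Selects CM_GUI_APP=playground; --port maps to CM_GUI_PORT (streamlit default is 8501)
+    cmr "cm gui cm-gui script-gui cm-script-gui streamlit _playground" --port=8080 -j
+    ```
+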
    + +=== "Input Flags" + + + #### Input Flags + + * --**script:** script tags + * --**app:** gui app +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--address=value` → `CM_GUI_ADDRESS=value` + * `--app=value` → `CM_GUI_APP=value` + * `--exp_key_c=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_C=value` + * `--exp_key_s=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_S=value` + * `--exp_key_x=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_X=value` + * `--exp_key_y=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_Y=value` + * `--exp_max_results=value` → `CM_GUI_GRAPH_EXPERIMENT_MAX_RESULTS=value` + * `--exp_name=value` → `CM_GUI_GRAPH_EXPERIMENT_NAME=value` + * `--exp_tags=value` → `CM_GUI_GRAPH_EXPERIMENT_TAGS=value` + * `--exp_title=value` → `CM_GUI_GRAPH_EXPERIMENT_TITLE=value` + * `--exp_uid=value` → `CM_GUI_GRAPH_EXPERIMENT_RESULT_UID=value` + * `--no_browser=value` → `CM_GUI_NO_BROWSER=value` + * `--no_run=value` → `CM_GUI_NO_RUN=value` + * `--port=value` → `CM_GUI_PORT=value` + * `--prefix=value` → `CM_GUI_SCRIPT_PREFIX_LINUX=value` + * `--script=value` → `CM_GUI_SCRIPT_TAGS=value` + * `--title=value` → `CM_GUI_TITLE=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_GUI_EXTRA_CMD: `` + * CM_GUI_SCRIPT_PREFIX_LINUX: `gnome-terminal --` + * CM_GUI_APP: `app` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/gui/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/gui/run.bat) +___ +#### Script output +```bash +cmr "cm gui cm-gui script-gui cm-script-gui streamlit [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/GUI/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/GUI/index.md new file mode 100644 index 000000000..b30ad2181 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/GUI/index.md @@ -0,0 +1 @@ +* [gui](gui/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/get-ck-repo-mlops/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/get-ck-repo-mlops/index.md new file mode 100644 index 000000000..4c43e6df2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/get-ck-repo-mlops/index.md @@ -0,0 +1,86 @@ +# get-ck-repo-mlops +Automatically generated README for this automation recipe: **get-ck-repo-mlops** + +Category: **[Legacy CK support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck-repo-mlops/_cm.json)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ck-repo mlops ck-repo-mlops" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ck-repo,mlops,ck-repo-mlops + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ck-repo mlops ck-repo-mlops " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,ck-repo,mlops,ck-repo-mlops' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ck-repo mlops ck-repo-mlops" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck-repo-mlops/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck-repo-mlops/run.bat) +___ +#### Script output +```bash +cmr "get ck-repo mlops ck-repo-mlops " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/get-ck/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/get-ck/index.md new file mode 100644 index 000000000..954ae2c2e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/get-ck/index.md @@ -0,0 +1,86 @@ +# get-ck +Automatically generated README for this automation recipe: **get-ck** + +Category: **[Legacy CK support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ck ck-framework" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ck,ck-framework + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ck ck-framework " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,ck,ck-framework' + 'out':'con', + ... + (other input keys for this script) + ... 
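+                  # any other input keys documented for this script can be
+                  # added here as additional, comma-separated dict entries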
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ck ck-framework" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck/run.bat) +___ +#### Script output +```bash +cmr "get ck ck-framework " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/index.md new file mode 100644 index 000000000..7f099c797 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Legacy-CK-support/index.md @@ -0,0 +1,2 @@ +* [get-ck](get-ck/index.md) +* [get-ck-repo-mlops](get-ck-repo-mlops/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/add-custom-nvidia-system/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/add-custom-nvidia-system/index.md new file mode 100644 index 000000000..50a57acce --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/add-custom-nvidia-system/index.md @@ -0,0 +1,109 @@ +# add-custom-nvidia-system +Automatically generated README for this automation recipe: **add-custom-nvidia-system** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/add-custom-nvidia-system/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/add-custom-nvidia-system/_cm.yaml)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "add custom system nvidia" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=add,custom,system,nvidia[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "add custom system nvidia [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'add,custom,system,nvidia' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "add custom system nvidia[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**code**" +
    + Click here to expand this section. + + * `_ctuning` + * `_custom` + * `_go` + * `_mlcommons` + * `_nvidia-only` + +
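+For example, a minimal sketch that selects the `_mlcommons` code variation from the group above:
+
+```bash
+cm run script --tags=add,custom,system,nvidia,_mlcommons
+```
+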
    + +#### Versions +* `r2.1` +* `r3.0` +* `r3.1` +* `r4.0` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/add-custom-nvidia-system/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "add custom system nvidia [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/benchmark-any-mlperf-inference-implementation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/benchmark-any-mlperf-inference-implementation/index.md new file mode 100644 index 000000000..471ff7f8d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/benchmark-any-mlperf-inference-implementation/index.md @@ -0,0 +1,192 @@ +# benchmark-any-mlperf-inference-implementation +Automatically generated README for this automation recipe: **benchmark-any-mlperf-inference-implementation** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-any-mlperf-inference-implementation/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**implementation**" +
    + Click here to expand this section. + + * `_deepsparse` + - ENV variables: + - DIVISION: `open` + - IMPLEMENTATION: `deepsparse` + * `_intel` + - ENV variables: + - IMPLEMENTATION: `intel` + * `_mil` + - ENV variables: + - IMPLEMENTATION: `mil` + * `_nvidia` + - ENV variables: + - IMPLEMENTATION: `nvidia-original` + * `_qualcomm` + - ENV variables: + - IMPLEMENTATION: `qualcomm` + * `_reference` + - ENV variables: + - IMPLEMENTATION: `reference` + * `_tflite-cpp` + - ENV variables: + - IMPLEMENTATION: `tflite_cpp` + +
    + + + * Group "**power**" +
    + Click here to expand this section. + + * **`_performance-only`** (default) + * `_power` + - ENV variables: + - POWER: `True` + +
    + + + * Group "**sut**" +
    + Click here to expand this section. + + * `_aws-dl2q.24xlarge` + * `_macbookpro-m1` + - ENV variables: + - CATEGORY: `edge` + - DIVISION: `closed` + * `_mini` + * `_orin` + * `_orin.32g` + - ENV variables: + - CATEGORY: `edge` + - DIVISION: `closed` + * `_phoenix` + - ENV variables: + - CATEGORY: `edge` + - DIVISION: `closed` + * `_rb6` + * `_rpi4` + * `_sapphire-rapids.24c` + - ENV variables: + - CATEGORY: `edge` + - DIVISION: `closed` + +
    + + + ##### Default variations + + `_performance-only` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--backends=value` → `BACKENDS=value` + * `--category=value` → `CATEGORY=value` + * `--devices=value` → `DEVICES=value` + * `--division=value` → `DIVISION=value` + * `--extra_args=value` → `EXTRA_ARGS=value` + * `--models=value` → `MODELS=value` + * `--power_server=value` → `POWER_SERVER=value` + * `--power_server_port=value` → `POWER_SERVER_PORT=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * DIVISION: `open` + * CATEGORY: `edge` + + + +#### Native script being run +=== "Linux/macOS" + * [run-template.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-any-mlperf-inference-implementation/run-template.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/build-mlperf-inference-server-nvidia/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/build-mlperf-inference-server-nvidia/index.md new file mode 100644 index 000000000..a6c9522ce --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/build-mlperf-inference-server-nvidia/index.md @@ -0,0 +1,164 @@ +# build-mlperf-inference-server-nvidia +Automatically generated README for this automation recipe: **build-mlperf-inference-server-nvidia** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/build-mlperf-inference-server-nvidia/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/build-mlperf-inference-server-nvidia/_cm.yaml)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "build mlcommons mlperf inference inference-server server nvidia-harness nvidia" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=build,mlcommons,mlperf,inference,inference-server,server,nvidia-harness,nvidia[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "build mlcommons mlperf inference inference-server server nvidia-harness nvidia [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'build,mlcommons,mlperf,inference,inference-server,server,nvidia-harness,nvidia' + 'out':'con', + ... + (other input keys for this script) + ... 
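+                  # the script flags documented below can be passed here as
+                  # comma-separated entries, e.g. 'clean':'yes', 'custom_system':'no'
+                  # (example values only)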
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "build mlcommons mlperf inference inference-server server nvidia-harness nvidia[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**code**" +
    + Click here to expand this section. + + * **`_ctuning`** (default) + * `_custom` + * `_go` + * `_mlcommons` + * `_nvidia-only` + +
    + + + * Group "**device**" +
    + Click here to expand this section. + + * `_cpu` + - ENV variables: + - CM_MLPERF_DEVICE: `cpu` + * **`_cuda`** (default) + - ENV variables: + - CM_MLPERF_DEVICE: `cuda` + - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart` + * `_inferentia` + - ENV variables: + - CM_MLPERF_DEVICE: `inferentia` + +
    + + + * Group "**version**" +
    + Click here to expand this section. + + * `_r4.0` + +
    + + + ##### Default variations + + `_ctuning,_cuda` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--clean=value` → `CM_MAKE_CLEAN=value` + * `--custom_system=value` → `CM_CUSTOM_SYSTEM_NVIDIA=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_MAKE_BUILD_COMMAND: `build` + * CM_MAKE_CLEAN: `no` + * CM_CUSTOM_SYSTEM_NVIDIA: `yes` + + +#### Versions +Default version: `r3.1` + +* `r2.1` +* `r3.0` +* `r3.1` +* `r4.0` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/build-mlperf-inference-server-nvidia/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "build mlcommons mlperf inference inference-server server nvidia-harness nvidia [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-submission/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-submission/index.md new file mode 100644 index 000000000..566e49acc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-submission/index.md @@ -0,0 +1,122 @@ +# generate-mlperf-inference-submission +Automatically generated README for this automation recipe: **generate-mlperf-inference-submission** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-inference-submission/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-inference-submission/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=generate,submission,mlperf,mlperf-inference,inference,mlcommons,inference-submission,mlperf-inference-submission,mlcommons-inference-submission [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'generate,submission,mlperf,mlperf-inference,inference,mlcommons,inference-submission,mlperf-inference-submission,mlcommons-inference-submission' + 'out':'con', + ... + (other input keys for this script) + ... 
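+                  # e.g. documented script flags as comma-separated entries
+                  # (values below are only examples):
+                  # 'division':'open', 'submitter':'MLCommons',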
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--analyzer_settings_file=value` → `CM_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH=value` + * `--category=value` → `CM_MLPERF_SUBMISSION_CATEGORY=value` + * `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value` + * `--dashboard=value` → `CM_MLPERF_DASHBOARD=value` + * `--dashboard_wb_project=value` → `CM_MLPERF_DASHBOARD_WANDB_PROJECT=value` + * `--device=value` → `CM_MLPERF_DEVICE=value` + * `--division=value` → `CM_MLPERF_SUBMISSION_DIVISION=value` + * `--duplicate=value` → `CM_MLPERF_DUPLICATE_SCENARIO_RESULTS=value` + * `--hw_name=value` → `CM_HW_NAME=value` + * `--hw_notes_extra=value` → `CM_MLPERF_SUT_HW_NOTES_EXTRA=value` + * `--infer_scenario_results=value` → `CM_MLPERF_DUPLICATE_SCENARIO_RESULTS=value` + * `--power_settings_file=value` → `CM_MLPERF_POWER_SETTINGS_FILE_PATH=value` + * `--preprocess=value` → `CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR=value` + * `--preprocess_submission=value` → `CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR=value` + * `--results_dir=value` → `CM_MLPERF_INFERENCE_RESULTS_DIR_=value` + * `--run_checker=value` → `CM_RUN_SUBMISSION_CHECKER=value` + * `--run_style=value` → `CM_MLPERF_RUN_STYLE=value` + * `--skip_truncation=value` → `CM_SKIP_TRUNCATE_ACCURACY=value` + * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` + * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` + * `--sw_notes_extra=value` → `CM_MLPERF_SUT_SW_NOTES_EXTRA=value` + * `--tar=value` → `CM_TAR_SUBMISSION_DIR=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_RUN_MLPERF_ACCURACY: `on` + * CM_MLPERF_RUN_STYLE: `valid` + + + +___ +#### Script output +```bash +cmr "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-user-conf/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-user-conf/index.md new file mode 100644 index 000000000..c56840eb3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-user-conf/index.md @@ -0,0 +1,122 @@ +# generate-mlperf-inference-user-conf +Automatically generated README for this automation recipe: **generate-mlperf-inference-user-conf** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-inference-user-conf/_cm.yaml)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "generate mlperf inference user-conf inference-user-conf" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=generate,mlperf,inference,user-conf,inference-user-conf [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "generate mlperf inference user-conf inference-user-conf " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'generate,mlperf,inference,user-conf,inference-user-conf' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "generate mlperf inference user-conf inference-user-conf" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` + * `--hw_name=value` → `CM_HW_NAME=value` + * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` + * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` + * `--num_threads=value` → `CM_NUM_THREADS=value` + * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` + * `--output_dir=value` → `OUTPUT_BASE_DIR=value` + * `--performance_sample_count=value` → `CM_MLPERF_PERFORMANCE_SAMPLE_COUNT=value` + * `--power=value` → `CM_MLPERF_POWER=value` + * `--regenerate_files=value` → `CM_REGENERATE_MEASURE_FILES=value` + * `--rerun=value` → `CM_RERUN=value` + * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` + * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` + * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` + * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` + * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` + * `--test_query_count=value` → `CM_TEST_QUERY_COUNT=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_MLPERF_LOADGEN_MODE: `accuracy` + * CM_MLPERF_LOADGEN_SCENARIO: `Offline` + * CM_OUTPUT_FOLDER_NAME: `test_results` + * CM_MLPERF_RUN_STYLE: `test` + * CM_TEST_QUERY_COUNT: `10` + * CM_FAST_FACTOR: `5` + * CM_MLPERF_QUANTIZATION: `False` + + + +___ +#### Script output +```bash +cmr "generate mlperf inference user-conf inference-user-conf " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-report/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-report/index.md new file mode 100644 index 000000000..74555e500 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-report/index.md @@ -0,0 +1,107 @@ +# generate-mlperf-tiny-report +Automatically generated README for this automation recipe: **generate-mlperf-tiny-report** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +Developers: [Grigori Fursin](https://cKnowledge.org/gfursin) +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-report/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-report/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "generate mlperf tiny mlperf-tiny report" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=generate,mlperf,tiny,mlperf-tiny,report [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "generate mlperf tiny mlperf-tiny report " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'generate,mlperf,tiny,mlperf-tiny,report' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "generate mlperf tiny mlperf-tiny report" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--repo_tags=value` → `CM_IMPORT_TINYMLPERF_REPO_TAGS=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_IMPORT_TINYMLPERF_REPO_TAGS: `1.1-private` + + + +#### Native script being run +=== "Linux/macOS" + * [run_submission_checker.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-report/run_submission_checker.sh) +=== "Windows" + + * [run_submission_checker.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-report/run_submission_checker.bat) +___ +#### Script output +```bash +cmr "generate mlperf tiny mlperf-tiny report " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-submission/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-submission/index.md new file mode 100644 index 000000000..3f583de51 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-submission/index.md @@ -0,0 +1,81 @@ +# generate-mlperf-tiny-submission +Automatically generated README for this automation recipe: **generate-mlperf-tiny-submission** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-submission/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-submission/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=generate,submission,mlperf,mlperf-tiny,tiny,mlcommons,tiny-submission,mlperf-tiny-submission,mlcommons-tiny-submission + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'generate,submission,mlperf,mlperf-tiny,tiny,mlcommons,tiny-submission,mlperf-tiny-submission,mlcommons-tiny-submission' + 'out':'con', + ... + (other input keys for this script) + ... 
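+                  # other input keys for this script go here as
+                  # comma-separated dict entries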
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission" + ``` +___ + + +___ +#### Script output +```bash +cmr "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-nvidia-engine/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-nvidia-engine/index.md new file mode 100644 index 000000000..05f7576e2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/generate-nvidia-engine/index.md @@ -0,0 +1,165 @@ +# generate-nvidia-engine +Automatically generated README for this automation recipe: **generate-nvidia-engine** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + + +--- + +This CM script is in draft stage + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-nvidia-engine/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "generate engine mlperf inference nvidia" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=generate,engine,mlperf,inference,nvidia[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "generate engine mlperf inference nvidia [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'generate,engine,mlperf,inference,nvidia' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "generate engine mlperf inference nvidia[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_batch_size.#` + - ENV variables: + - CM_MODEL_BATCH_SIZE: `None` + * `_copy_streams.#` + - ENV variables: + - CM_GPU_COPY_STREAMS: `None` + * `_cuda` + - ENV variables: + - CM_MLPERF_DEVICE: `gpu` + - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart` + +
    + + + * Group "**device**" +
    + Click here to expand this section. + + * **`_cpu`** (default) + - ENV variables: + - CM_MLPERF_DEVICE: `cpu` + +
    + + + * Group "**model**" +
    + Click here to expand this section. + + * **`_resnet50`** (default) + - ENV variables: + - CM_MODEL: `resnet50` + * `_retinanet` + - ENV variables: + - CM_MODEL: `retinanet` + +
    + + + ##### Default variations + + `_cpu,_resnet50` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_BATCH_COUNT: `1` + * CM_BATCH_SIZE: `1` + * CM_LOADGEN_SCENARIO: `Offline` + * CM_GPU_COPY_STREAMS: `1` + * CM_TENSORRT_WORKSPACE_SIZE: `4194304` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-nvidia-engine/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "generate engine mlperf inference nvidia [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-intel-scratch-space/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-intel-scratch-space/index.md new file mode 100644 index 000000000..4267c8146 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-intel-scratch-space/index.md @@ -0,0 +1,117 @@ +# get-mlperf-inference-intel-scratch-space +Automatically generated README for this automation recipe: **get-mlperf-inference-intel-scratch-space** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-intel-scratch-space/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get mlperf inference intel scratch space" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,mlperf,inference,intel,scratch,space[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get mlperf inference intel scratch space [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,mlperf,inference,intel,scratch,space' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get mlperf inference intel scratch space[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**version**" +
    + Click here to expand this section. + + * `_version.#` + - ENV variables: + - CM_INTEL_SCRATCH_SPACE_VERSION: `#` + * **`_version.4_0`** (default) + - ENV variables: + - CM_INTEL_SCRATCH_SPACE_VERSION: `4_0` + +
    + + + ##### Default variations + + `_version.4_0` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--scratch_path=value` → `MLPERF_INTEL_SCRATCH_PATH=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-intel-scratch-space/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-intel-scratch-space/run.bat) +___ +#### Script output +```bash +cmr "get mlperf inference intel scratch space [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-loadgen/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-loadgen/index.md new file mode 100644 index 000000000..85084ac54 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-loadgen/index.md @@ -0,0 +1,144 @@ +# get-mlperf-inference-loadgen +Automatically generated README for this automation recipe: **get-mlperf-inference-loadgen** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-loadgen/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-loadgen/_cm.yaml)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get loadgen inference inference-loadgen mlperf mlcommons" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,loadgen,inference,inference-loadgen,mlperf,mlcommons[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get loadgen inference inference-loadgen mlperf mlcommons [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,loadgen,inference,inference-loadgen,mlperf,mlcommons' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get loadgen inference inference-loadgen mlperf mlcommons[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_copy` + * `_custom-python` + - ENV variables: + - CM_TMP_USE_CUSTOM_PYTHON: `on` + * `_download` + - ENV variables: + - CM_DOWNLOAD_CHECKSUM: `af3f9525965b2c1acc348fb882a5bfd1` + - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: `YES` + - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: `https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0` + - CM_MLPERF_INFERENCE_LOADGEN_VERSION: `v3.1` + - CM_VERIFY_SSL: `False` + * `_download_v3.1` + - ENV variables: + - CM_DOWNLOAD_CHECKSUM: `af3f9525965b2c1acc348fb882a5bfd1` + - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: `YES` + - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: `https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0` + - CM_MLPERF_INFERENCE_LOADGEN_VERSION: `v3.1` + - CM_VERIFY_SSL: `False` + * `_download_v4.0` + - ENV variables: + - CM_DOWNLOAD_CHECKSUM: `b4d97525d9ad0539a64667f2a3ca20c5` + - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: `YES` + - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: `https://www.dropbox.com/scl/fi/gk5e9kziju5t56umxyzyx/loadgen.zip?rlkey=vsie4xnzml1inpjplm5cg7t54&dl=0` + - CM_MLPERF_INFERENCE_LOADGEN_VERSION: `v4.0` + - CM_VERIFY_SSL: `False` + +
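+For example, a sketch that fetches the prebuilt loadgen archive (CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD=YES) via the `_download_v4.0` variation above:
+
+```bash
+cmr "get loadgen inference inference-loadgen mlperf mlcommons _download_v4.0"
+```
+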
    + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_SHARED_BUILD: `no` + + +#### Versions +Default version: `master` + +* `custom` +* `main` +* `master` +* `pybind_fix` +* `r2.1` +* `r3.0` +* `r3.1` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-loadgen/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-loadgen/run.bat) +___ +#### Script output +```bash +cmr "get loadgen inference inference-loadgen mlperf mlcommons [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-common-code/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-common-code/index.md new file mode 100644 index 000000000..ca4cb291c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-common-code/index.md @@ -0,0 +1,105 @@ +# get-mlperf-inference-nvidia-common-code +Automatically generated README for this automation recipe: **get-mlperf-inference-nvidia-common-code** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-common-code/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-common-code/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get nvidia mlperf inference common-code" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,nvidia,mlperf,inference,common-code[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get nvidia mlperf inference common-code [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,nvidia,mlperf,inference,common-code' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get nvidia mlperf inference common-code[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**repo-owner**" +
    + Click here to expand this section. + + * `_ctuning` + * `_custom` + * `_go` + * `_mlcommons` + * `_nvidia-only` + +
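+For example, a sketch that pins one of the releases listed below (assuming the generic `--version` flag of CM scripts) for the `_mlcommons` repo variant:
+
+```bash
+cmr "get nvidia mlperf inference common-code _mlcommons" --version=r4.0
+```
+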
    + +#### Versions +Default version: `r3.1` + +* `r2.1` +* `r3.0` +* `r3.1` +* `r4.0` + +___ +#### Script output +```bash +cmr "get nvidia mlperf inference common-code [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-scratch-space/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-scratch-space/index.md new file mode 100644 index 000000000..6f7fd5229 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-scratch-space/index.md @@ -0,0 +1,118 @@ +# get-mlperf-inference-nvidia-scratch-space +Automatically generated README for this automation recipe: **get-mlperf-inference-nvidia-scratch-space** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-scratch-space/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-scratch-space/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get mlperf inference nvidia scratch space" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,mlperf,inference,nvidia,scratch,space[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get mlperf inference nvidia scratch space [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,mlperf,inference,nvidia,scratch,space' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get mlperf inference nvidia scratch space[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**version**" +
    + Click here to expand this section. + + * `_version.#` + - ENV variables: + - CM_NVIDIA_SCRATCH_SPACE_VERSION: `#` + * **`_version.4_0`** (default) + - ENV variables: + - CM_NVIDIA_SCRATCH_SPACE_VERSION: `4_0` + +
    + + + ##### Default variations + + `_version.4_0` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--scratch_path=value` → `CM_NVIDIA_MLPERF_SCRATCH_PATH=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-scratch-space/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-scratch-space/run.bat) +___ +#### Script output +```bash +cmr "get mlperf inference nvidia scratch space [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results-dir/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results-dir/index.md new file mode 100644 index 000000000..b67ffcfda --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results-dir/index.md @@ -0,0 +1,111 @@ +# get-mlperf-inference-results-dir +Automatically generated README for this automation recipe: **get-mlperf-inference-results-dir** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-results-dir/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get mlperf inference results dir directory" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,mlperf,inference,results,dir,directory[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get mlperf inference results dir directory [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,mlperf,inference,results,dir,directory' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get mlperf inference results dir directory[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**version**" +
    + Click here to expand this section. + + * `_version.#` + - ENV variables: + - CM_MLPERF_INFERENCE_RESULTS_VERSION: `#` + * **`_version.4_0`** (default) + - ENV variables: + - CM_MLPERF_INFERENCE_RESULTS_VERSION: `4_0` + +
    + + + ##### Default variations + + `_version.4_0` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--results_dir=value` → `CM_MLPERF_INFERENCE_RESULTS_DIR=value` + + + + +___ +#### Script output +```bash +cmr "get mlperf inference results dir directory [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results/index.md new file mode 100644 index 000000000..9297150e2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results/index.md @@ -0,0 +1,132 @@ +# get-mlperf-inference-results +Automatically generated README for this automation recipe: **get-mlperf-inference-results** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-results/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-results/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get results inference inference-results mlcommons mlperf" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,results,inference,inference-results,mlcommons,mlperf[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get results inference inference-results mlcommons mlperf [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,results,inference,inference-results,mlcommons,mlperf' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get results inference inference-results mlcommons mlperf[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**source-repo**" +
    + Click here to expand this section. + + * `_ctuning` + - ENV variables: + - GITHUB_REPO_OWNER: `ctuning` + * `_custom` + - ENV variables: + - GITHUB_REPO_OWNER: `arjunsuresh` + * `_go` + - ENV variables: + - GITHUB_REPO_OWNER: `GATEOverflow` + * **`_mlcommons`** (default) + - ENV variables: + - GITHUB_REPO_OWNER: `mlcommons` + * `_nvidia-only` + - ENV variables: + - GITHUB_REPO_OWNER: `GATEOverflow` + - NVIDIA_ONLY: `yes` + +
    + + + ##### Default variations + + `_mlcommons` +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_GIT_CHECKOUT: `master` + * CM_GIT_DEPTH: `--depth 1` + * CM_GIT_PATCH: `no` + + +#### Versions +Default version: `v3.1` + +* `v2.1` +* `v3.0` +* `v3.1` +* `v4.0` + +___ +#### Script output +```bash +cmr "get results inference inference-results mlcommons mlperf [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-src/index.md new file mode 100644 index 000000000..44b4f4b4f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-src/index.md @@ -0,0 +1,192 @@ +# get-mlperf-inference-src +Automatically generated README for this automation recipe: **get-mlperf-inference-src** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-src/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get src source inference inference-src inference-source mlperf mlcommons" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,src,source,inference,inference-src,inference-source,mlperf,mlcommons[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get src source inference inference-src inference-source mlperf mlcommons [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,src,source,inference,inference-src,inference-source,mlperf,mlcommons' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get src source inference inference-src inference-source mlperf mlcommons[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_3d-unet` + - ENV variables: + - CM_SUBMODULE_3D_UNET: `yes` + * `_deeplearningexamples` + - ENV variables: + - CM_SUBMODULE_DEEPLEARNINGEXAMPLES: `yes` + * `_deepsparse` + - ENV variables: + - CM_GIT_CHECKOUT: `deepsparse` + - CM_GIT_URL: `https://github.com/neuralmagic/inference` + - CM_MLPERF_LAST_RELEASE: `v4.0` + * `_gn` + - ENV variables: + - CM_SUBMODULE_GN: `yes` + * `_no-recurse-submodules` + - ENV variables: + - CM_GIT_RECURSE_SUBMODULES: `` + * `_nvidia-pycocotools` + - ENV variables: + - CM_GIT_PATCH_FILENAME: `coco.patch` + * `_octoml` + - ENV variables: + - CM_GIT_URL: `https://github.com/octoml/inference` + * `_openimages-nvidia-pycocotools` + - ENV variables: + - CM_GIT_PATCH_FILENAME: `openimages-pycocotools.patch` + * `_patch` + - ENV variables: + - CM_GIT_PATCH: `yes` + * `_pybind` + - ENV variables: + - CM_SUBMODULE_PYBIND: `yes` + * `_recurse-submodules` + - ENV variables: + - CM_GIT_RECURSE_SUBMODULES: ` --recurse-submodules` + * `_repo.#` + - ENV variables: + - CM_GIT_URL: `#` + * `_submodules.#` + - ENV variables: + - CM_GIT_SUBMODULES: `#` + +
    + + + * Group "**checkout**" +
    + Click here to expand this section. + + * `_branch.#` + - ENV variables: + - CM_GIT_CHECKOUT: `#` + * `_sha.#` + - ENV variables: + - CM_GIT_SHA: `#` + +
    + + + * Group "**git-history**" +
    + Click here to expand this section. + + * `_full-history` + - ENV variables: + - CM_GIT_DEPTH: `` + * **`_short-history`** (default) + - ENV variables: + - CM_GIT_DEPTH: `--depth 10` + +
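+
+    For example, a minimal Python sketch that pins the sources to a specific commit; `<sha>` is a placeholder for a real commit hash, and `_full-history` is selected alongside it because a shallow clone may not contain an arbitrary commit:
+
+    ```python
+    import cmind
+
+    # Sketch: replace <sha> with a real commit hash before running.
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,src,source,inference,inference-src,'
+                              'inference-source,mlperf,mlcommons,'
+                              '_sha.<sha>,_full-history',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```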
    + + + ##### Default variations + + `_short-history` +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_GIT_CHECKOUT_FOLDER: `inference` + * CM_GIT_DEPTH: `--depth 4` + * CM_GIT_PATCH: `no` + * CM_GIT_RECURSE_SUBMODULES: `` + * CM_GIT_URL: `https://github.com/mlcommons/inference.git` + + +#### Versions +Default version: `master` + +* `custom` +* `deepsparse` +* `main` +* `master` +* `pybind_fix` +* `r2.1` +* `r3.0` +* `r3.1` +* `tvm` + +___ +#### Script output +```bash +cmr "get src source inference inference-src inference-source mlperf mlcommons [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-submission-dir/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-submission-dir/index.md new file mode 100644 index 000000000..d6375c0dc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-submission-dir/index.md @@ -0,0 +1,111 @@ +# get-mlperf-inference-submission-dir +Automatically generated README for this automation recipe: **get-mlperf-inference-submission-dir** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-submission-dir/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get mlperf inference submission dir directory" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,mlperf,inference,submission,dir,directory[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get mlperf inference submission dir directory [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,mlperf,inference,submission,dir,directory' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get mlperf inference submission dir directory[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**version**" +
    + Click here to expand this section. + + * `_version.#` + - ENV variables: + - CM_MLPERF_INFERENCE_SUBMISSION_VERSION: `#` + * **`_version.4_0`** (default) + - ENV variables: + - CM_MLPERF_INFERENCE_SUBMISSION_VERSION: `4_0` + +
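+
+    As an illustration, a minimal Python sketch that selects `_version.4_0` explicitly and passes a placeholder path via the `submission_dir` input key (mapped to `CM_MLPERF_INFERENCE_SUBMISSION_DIR`, see the flag mapping below):
+
+    ```python
+    import cmind
+
+    # Sketch: the path is a placeholder for your own submission tree.
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,mlperf,inference,submission,dir,'
+                              'directory,_version.4_0',
+                      'out': 'con',
+                      'submission_dir': '/path/to/mlperf-submissions'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```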
    + + + ##### Default variations + + `_version.4_0` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` + + + + +___ +#### Script output +```bash +cmr "get mlperf inference submission dir directory [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-configs/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-configs/index.md new file mode 100644 index 000000000..3aa4926b3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-configs/index.md @@ -0,0 +1,103 @@ +# get-mlperf-inference-sut-configs +Automatically generated README for this automation recipe: **get-mlperf-inference-sut-configs** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-sut-configs/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-sut-configs/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get mlperf inference sut configs sut-configs" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,mlperf,inference,sut,configs,sut-configs [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get mlperf inference sut configs sut-configs " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,mlperf,inference,sut,configs,sut-configs' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get mlperf inference sut configs sut-configs" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--configs_git_url=value` → `CM_GIT_URL=value` + * `--repo_path=value` → `CM_SUT_CONFIGS_PATH=value` + * `--run_config=value` → `CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_SUT_CONFIGS_PATH: `` + * CM_GIT_URL: `` + + + +___ +#### Script output +```bash +cmr "get mlperf inference sut configs sut-configs " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-description/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-description/index.md new file mode 100644 index 000000000..7082c8a80 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-description/index.md @@ -0,0 +1,100 @@ +# get-mlperf-inference-sut-description +Automatically generated README for this automation recipe: **get-mlperf-inference-sut-description** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-sut-description/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get mlperf sut description system-under-test system-description" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,mlperf,sut,description,system-under-test,system-description [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get mlperf sut description system-under-test system-description " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,mlperf,sut,description,system-under-test,system-description' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get mlperf sut description system-under-test system-description" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--name=value` → `CM_HW_NAME=value` + * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_SUT_DESC_CACHE: `no` + + + +___ +#### Script output +```bash +cmr "get mlperf sut description system-under-test system-description " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-logging/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-logging/index.md new file mode 100644 index 000000000..ce64cb510 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-logging/index.md @@ -0,0 +1,81 @@ +# get-mlperf-logging +Automatically generated README for this automation recipe: **get-mlperf-logging** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-logging/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-logging/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get mlperf logging mlperf-logging" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,mlperf,logging,mlperf-logging + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get mlperf logging mlperf-logging " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,mlperf,logging,mlperf-logging' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get mlperf logging mlperf-logging" + ``` +___ + + +___ +#### Script output +```bash +cmr "get mlperf logging mlperf-logging " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-power-dev/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-power-dev/index.md new file mode 100644 index 000000000..d3cfef35e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-power-dev/index.md @@ -0,0 +1,134 @@ +# get-mlperf-power-dev +Automatically generated README for this automation recipe: **get-mlperf-power-dev** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-power-dev/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get src source power power-dev mlperf mlcommons" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,src,source,power,power-dev,mlperf,mlcommons[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get src source power power-dev mlperf mlcommons [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,src,source,power,power-dev,mlperf,mlcommons' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get src source power power-dev mlperf mlcommons[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**checkout**" +
    + Click here to expand this section. + + * `_branch.#` + - ENV variables: + - CM_GIT_CHECKOUT: `#` + * `_sha.#` + - ENV variables: + - CM_GIT_SHA: `#` + * `_tag.#` + - ENV variables: + - CM_GIT_CHECKOUT_TAG: `#` + +
    + + + * Group "**repo**" +
    + Click here to expand this section. + + * **`_mlcommons`** (default) + - ENV variables: + - CM_GIT_URL: `https://github.com/mlcommons/power-dev.git` + * `_octoml` + - ENV variables: + - CM_GIT_URL: `https://github.com/octoml/power-dev.git` + * `_repo.#` + - ENV variables: + - CM_GIT_URL: `#` + +
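+
+    For instance, a minimal Python sketch that keeps the default `_mlcommons` repository but overrides the clone depth through the `env` dictionary; an empty `CM_GIT_DEPTH` is assumed to disable the shallow clone, mirroring the `_full-history` pattern used by sibling scripts:
+
+    ```python
+    import cmind
+
+    # Sketch: override a default environment key for this run only.
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,src,source,power,power-dev,mlperf,'
+                              'mlcommons,_mlcommons',
+                      'env': {'CM_GIT_DEPTH': ''},
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```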
    + + + ##### Default variations + + `_mlcommons` +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_GIT_DEPTH: `--depth 1` + * CM_GIT_PATCH: `no` + * CM_GIT_CHECKOUT_FOLDER: `power-dev` + + + +___ +#### Script output +```bash +cmr "get src source power power-dev mlperf mlcommons [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-eembc-energy-runner-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-eembc-energy-runner-src/index.md new file mode 100644 index 000000000..dab580fbd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-eembc-energy-runner-src/index.md @@ -0,0 +1,99 @@ +# get-mlperf-tiny-eembc-energy-runner-src +Automatically generated README for this automation recipe: **get-mlperf-tiny-eembc-energy-runner-src** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-eembc-energy-runner-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,src,source,eembc,energyrunner,energy-runner,eembc-energy-runner,tinymlperf-energy-runner + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,src,source,eembc,energyrunner,energy-runner,eembc-energy-runner,tinymlperf-energy-runner' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner" + ``` +___ + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_GIT_CHECKOUT: `main` + * CM_GIT_PATCH: `no` + * CM_GIT_RECURSE_SUBMODULES: `` + * CM_GIT_URL: `https://github.com/eembc/energyrunner` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-eembc-energy-runner-src/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-eembc-energy-runner-src/run.bat) +___ +#### Script output +```bash +cmr "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-src/index.md new file mode 100644 index 000000000..9c76d468e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-src/index.md @@ -0,0 +1,99 @@ +# get-mlperf-tiny-src +Automatically generated README for this automation recipe: **get-mlperf-tiny-src** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,src,source,tiny,tiny-src,tiny-source,tinymlperf,tinymlperf-src,mlperf,mlcommons + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,src,source,tiny,tiny-src,tiny-source,tinymlperf,tinymlperf-src,mlperf,mlcommons' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons" + ``` +___ + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_GIT_CHECKOUT: `master` + * CM_GIT_PATCH: `no` + * CM_GIT_RECURSE_SUBMODULES: `` + * CM_GIT_URL: `https://github.com/mlcommons/tiny.git` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-src/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-src/run.bat) +___ +#### Script output +```bash +cmr "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-nvidia-code/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-nvidia-code/index.md new file mode 100644 index 000000000..3ee1a15ac --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-nvidia-code/index.md @@ -0,0 +1,112 @@ +# get-mlperf-training-nvidia-code +Automatically generated README for this automation recipe: **get-mlperf-training-nvidia-code** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-training-nvidia-code/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get nvidia mlperf training code training-code" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,nvidia,mlperf,training,code,training-code[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get nvidia mlperf training code training-code [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,nvidia,mlperf,training,code,training-code' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get nvidia mlperf training code training-code[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**repo-owner**" +
    + Click here to expand this section. + + * `_ctuning` + - ENV variables: + - CM_TMP_TRAINING_SRC: `ctuning` + * `_custom` + * **`_mlcommons`** (default) + - ENV variables: + - CM_TMP_TRAINING_SRC: `mlcommons` + * `_nvidia-only` + - ENV variables: + - CM_TMP_TRAINING_SRC: `GATEOverflow` + +
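+
+    As an example, a minimal Python sketch that selects the `_nvidia-only` fork; passing `version` here is assumed to mirror the `--version` flag on the CLI:
+
+    ```python
+    import cmind
+
+    # Sketch: pick a repo-owner variation and one of the listed versions.
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,nvidia,mlperf,training,code,'
+                              'training-code,_nvidia-only',
+                      'version': 'r3.1',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```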
    + + + ##### Default variations + + `_mlcommons` +#### Versions +Default version: `r3.0` + +* `r2.1` +* `r3.0` +* `r3.1` + +___ +#### Script output +```bash +cmr "get nvidia mlperf training code training-code [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-src/index.md new file mode 100644 index 000000000..ac0c7803e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-src/index.md @@ -0,0 +1,181 @@ +# get-mlperf-training-src +Automatically generated README for this automation recipe: **get-mlperf-training-src** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-training-src/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-training-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get src source training training-src training-source mlperf mlcommons" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,src,source,training,training-src,training-source,mlperf,mlcommons[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get src source training training-src training-source mlperf mlcommons [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,src,source,training,training-src,training-source,mlperf,mlcommons' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get src source training training-src training-source mlperf mlcommons[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_no-recurse-submodules` + - ENV variables: + - CM_GIT_RECURSE_SUBMODULES: `` + * `_nvidia-retinanet` + - ENV variables: + - CM_GIT_PATCH_FILENAMES: `nvidia-retinanet.patch,cpu_load.patch` + * `_patch` + - ENV variables: + - CM_GIT_PATCH: `yes` + +
    + + + * Group "**checkout**" +
    + Click here to expand this section. + + * `_branch.#` + - ENV variables: + - CM_GIT_CHECKOUT: `#` + * `_sha.#` + - ENV variables: + - CM_GIT_SHA: `#` + * `_tag.#` + - ENV variables: + - CM_GIT_CHECKOUT_TAG: `#` + +
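+
+    For example, a minimal Python sketch that checks the training sources out at a tag; `<tag>` is a placeholder for a real git tag:
+
+    ```python
+    import cmind
+
+    # Sketch: replace <tag> with a real tag before running.
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,src,source,training,training-src,'
+                              'training-source,mlperf,mlcommons,_tag.<tag>',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```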
    + + + * Group "**git-history**" +
    + Click here to expand this section. + + * `_full-history` + - ENV variables: + - CM_GIT_DEPTH: `` + * **`_short-history`** (default) + - ENV variables: + - CM_GIT_DEPTH: `--depth 5` + +
    + + + * Group "**repo**" +
    + Click here to expand this section. + + * `_repo.#` + - ENV variables: + - CM_GIT_URL: `#` + +
    + + + * Group "**src**" +
    + Click here to expand this section. + + * **`_cknowledge`** (default) + - ENV variables: + - CM_GIT_URL: `https://github.com/cknowledge/training.git` + * `_mlcommons` + - ENV variables: + - CM_GIT_URL: `https://github.com/mlcommons/training.git` + +
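+
+    Variations from different groups can be combined in one tag string. For instance, a minimal Python sketch that overrides the default `_cknowledge` source with the upstream MLCommons repository and keeps the full git history:
+
+    ```python
+    import cmind
+
+    # Sketch: one variation per group, joined in the same tag string.
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'get,src,source,training,training-src,'
+                              'training-source,mlperf,mlcommons,'
+                              '_mlcommons,_full-history',
+                      'out': 'con'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```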
    + + + ##### Default variations + + `_cknowledge,_short-history` +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_GIT_CHECKOUT: `master` + * CM_GIT_DEPTH: `--depth 4` + * CM_GIT_PATCH: `no` + * CM_GIT_RECURSE_SUBMODULES: ` --recurse-submodules` + * CM_GIT_CHECKOUT_FOLDER: `training` + + +#### Versions +Default version: `master` + +* `custom` +* `master` + +___ +#### Script output +```bash +cmr "get src source training training-src training-source mlperf mlcommons [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-nvidia-mitten/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-nvidia-mitten/index.md new file mode 100644 index 000000000..8746bdac3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-nvidia-mitten/index.md @@ -0,0 +1,90 @@ +# get-nvidia-mitten +Automatically generated README for this automation recipe: **get-nvidia-mitten** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-mitten/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-mitten/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get nvidia mitten nvidia-mitten" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,nvidia,mitten,nvidia-mitten + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get nvidia mitten nvidia-mitten " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,nvidia,mitten,nvidia-mitten' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get nvidia mitten nvidia-mitten" + ``` +___ + +#### Versions +Default version: `master` + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-mitten/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-mitten/run.bat) +___ +#### Script output +```bash +cmr "get nvidia mitten nvidia-mitten " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-spec-ptd/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-spec-ptd/index.md new file mode 100644 index 000000000..f2c9e85c7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/get-spec-ptd/index.md @@ -0,0 +1,121 @@ +# get-spec-ptd +Automatically generated README for this automation recipe: **get-spec-ptd** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-spec-ptd/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-spec-ptd/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,spec,ptd,ptdaemon,power,daemon,power-daemon,mlperf,mlcommons [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,spec,ptd,ptdaemon,power,daemon,power-daemon,mlperf,mlcommons' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons" [--input_flags] + ``` +___ + +=== "Input Flags" + + + #### Input Flags + + * --**input:** Path to SPEC PTDaemon (Optional) +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--input=value` → `CM_INPUT=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_GIT_CHECKOUT: `main` + * CM_GIT_DEPTH: `--depth 1` + * CM_GIT_PATCH: `no` + * CM_GIT_RECURSE_SUBMODULES: ` ` + * CM_GIT_URL: `https://github.com/mlcommons/power.git` + + +#### Versions +Default version: `main` + +* `custom` +* `main` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-spec-ptd/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-inference-to-experiment/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-inference-to-experiment/index.md new file mode 100644 index 000000000..8beb80672 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-inference-to-experiment/index.md @@ -0,0 +1,107 @@ +# import-mlperf-inference-to-experiment +Automatically generated README for this automation recipe: **import-mlperf-inference-to-experiment** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +Developers: [Grigori Fursin](https://cKnowledge.org/gfursin) +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-inference-to-experiment/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-inference-to-experiment/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "import mlperf inference mlperf-inference experiment 2experiment to-experiment" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=import,mlperf,inference,mlperf-inference,experiment,2experiment,to-experiment[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "import mlperf inference mlperf-inference experiment 2experiment to-experiment [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'import,mlperf,inference,mlperf-inference,experiment,2experiment,to-experiment' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "import mlperf inference mlperf-inference experiment 2experiment to-experiment[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_skip_checker` + - ENV variables: + - CM_SKIP_SUBMISSION_CHECKER: `True` + +
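+
+    For instance, a minimal Python sketch that imports results while skipping the submission checker; `target_repo` (see the flag mapping below) is given a placeholder value:
+
+    ```python
+    import cmind
+
+    # Sketch: replace the placeholder with your results repository.
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'import,mlperf,inference,mlperf-inference,'
+                              'experiment,2experiment,to-experiment,'
+                              '_skip_checker',
+                      'out': 'con',
+                      'target_repo': '<repo-with-mlperf-results>'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```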
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` + * `--target_repo=value` → `CM_IMPORT_MLPERF_INFERENCE_TARGET_REPO=value` + + + + +___ +#### Script output +```bash +cmr "import mlperf inference mlperf-inference experiment 2experiment to-experiment [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-tiny-to-experiment/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-tiny-to-experiment/index.md new file mode 100644 index 000000000..ee0aa4edd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-tiny-to-experiment/index.md @@ -0,0 +1,91 @@ +# import-mlperf-tiny-to-experiment +Automatically generated README for this automation recipe: **import-mlperf-tiny-to-experiment** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +Developers: [Grigori Fursin](https://cKnowledge.org/gfursin) +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-tiny-to-experiment/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-tiny-to-experiment/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=import,mlperf,tiny,mlperf-tiny,experiment,2experiment,to-experiment [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'import,mlperf,tiny,mlperf-tiny,experiment,2experiment,to-experiment' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--target_repo=value` → `CM_IMPORT_TINYMLPERF_TARGET_REPO=value` + + + + +___ +#### Script output +```bash +cmr "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-training-to-experiment/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-training-to-experiment/index.md new file mode 100644 index 000000000..edda35499 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/import-mlperf-training-to-experiment/index.md @@ -0,0 +1,97 @@ +# import-mlperf-training-to-experiment +Automatically generated README for this automation recipe: **import-mlperf-training-to-experiment** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +Developers: [Grigori Fursin](https://cKnowledge.org/gfursin) +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-training-to-experiment/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-training-to-experiment/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "import mlperf training mlperf-training experiment 2experiment to-experiment" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=import,mlperf,training,mlperf-training,experiment,2experiment,to-experiment [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "import mlperf training mlperf-training experiment 2experiment to-experiment " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'import,mlperf,training,mlperf-training,experiment,2experiment,to-experiment' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "import mlperf training mlperf-training experiment 2experiment to-experiment" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--target_repo=value` → `CM_IMPORT_MLPERF_TRAINING_TARGET_REPO=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run_mlperf_logger.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-training-to-experiment/run_mlperf_logger.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "import mlperf training mlperf-training experiment 2experiment to-experiment " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/index.md new file mode 100644 index 000000000..b4011e7ce --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/index.md @@ -0,0 +1,41 @@ +* [add-custom-nvidia-system](add-custom-nvidia-system/index.md) +* [benchmark-any-mlperf-inference-implementation](benchmark-any-mlperf-inference-implementation/index.md) +* [build-mlperf-inference-server-nvidia](build-mlperf-inference-server-nvidia/index.md) +* [generate-mlperf-inference-submission](generate-mlperf-inference-submission/index.md) +* [generate-mlperf-inference-user-conf](generate-mlperf-inference-user-conf/index.md) +* [generate-mlperf-tiny-report](generate-mlperf-tiny-report/index.md) +* [generate-mlperf-tiny-submission](generate-mlperf-tiny-submission/index.md) +* [generate-nvidia-engine](generate-nvidia-engine/index.md) +* [get-mlperf-inference-intel-scratch-space](get-mlperf-inference-intel-scratch-space/index.md) +* [get-mlperf-inference-loadgen](get-mlperf-inference-loadgen/index.md) +* [get-mlperf-inference-nvidia-common-code](get-mlperf-inference-nvidia-common-code/index.md) +* [get-mlperf-inference-nvidia-scratch-space](get-mlperf-inference-nvidia-scratch-space/index.md) +* [get-mlperf-inference-results](get-mlperf-inference-results/index.md) +* [get-mlperf-inference-results-dir](get-mlperf-inference-results-dir/index.md) +* [get-mlperf-inference-src](get-mlperf-inference-src/index.md) +* [get-mlperf-inference-submission-dir](get-mlperf-inference-submission-dir/index.md) +* [get-mlperf-inference-sut-configs](get-mlperf-inference-sut-configs/index.md) +* [get-mlperf-inference-sut-description](get-mlperf-inference-sut-description/index.md) +* [get-mlperf-logging](get-mlperf-logging/index.md) +* [get-mlperf-power-dev](get-mlperf-power-dev/index.md) +* [get-mlperf-tiny-eembc-energy-runner-src](get-mlperf-tiny-eembc-energy-runner-src/index.md) +* [get-mlperf-tiny-src](get-mlperf-tiny-src/index.md) +* [get-mlperf-training-nvidia-code](get-mlperf-training-nvidia-code/index.md) +* [get-mlperf-training-src](get-mlperf-training-src/index.md) +* [get-nvidia-mitten](get-nvidia-mitten/index.md) +* [get-spec-ptd](get-spec-ptd/index.md) +* [import-mlperf-inference-to-experiment](import-mlperf-inference-to-experiment/index.md) +* [import-mlperf-tiny-to-experiment](import-mlperf-tiny-to-experiment/index.md) +* [import-mlperf-training-to-experiment](import-mlperf-training-to-experiment/index.md) +* [install-mlperf-logging-from-src](install-mlperf-logging-from-src/index.md) +* [prepare-training-data-bert](prepare-training-data-bert/index.md) +* 
[prepare-training-data-resnet](prepare-training-data-resnet/index.md) +* [preprocess-mlperf-inference-submission](preprocess-mlperf-inference-submission/index.md) +* [process-mlperf-accuracy](process-mlperf-accuracy/index.md) +* [push-mlperf-inference-results-to-github](push-mlperf-inference-results-to-github/index.md) +* [run-mlperf-inference-mobilenet-models](run-mlperf-inference-mobilenet-models/index.md) +* [run-mlperf-inference-submission-checker](run-mlperf-inference-submission-checker/index.md) +* [run-mlperf-power-client](run-mlperf-power-client/index.md) +* [run-mlperf-power-server](run-mlperf-power-server/index.md) +* [run-mlperf-training-submission-checker](run-mlperf-training-submission-checker/index.md) +* [truncate-mlperf-inference-accuracy-log](truncate-mlperf-inference-accuracy-log/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/install-mlperf-logging-from-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/install-mlperf-logging-from-src/index.md new file mode 100644 index 000000000..5b673d37d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/install-mlperf-logging-from-src/index.md @@ -0,0 +1,89 @@ +# install-mlperf-logging-from-src +Automatically generated README for this automation recipe: **install-mlperf-logging-from-src** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/install-mlperf-logging-from-src/_cm.yaml)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install mlperf logging from.src" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,mlperf,logging,from.src + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install mlperf logging from.src " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,mlperf,logging,from.src' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install mlperf logging from.src" + ``` +___ + +#### Versions +* `master` +* `v3.1` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-mlperf-logging-from-src/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install mlperf logging from.src " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/prepare-training-data-bert/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/prepare-training-data-bert/index.md new file mode 100644 index 000000000..9b3b8d1bc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/prepare-training-data-bert/index.md @@ -0,0 +1,120 @@ +# prepare-training-data-bert +Automatically generated README for this automation recipe: **prepare-training-data-bert** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-bert/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "prepare mlperf training data input bert" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=prepare,mlperf,training,data,input,bert[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "prepare mlperf training data input bert [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'prepare,mlperf,training,data,input,bert' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "prepare mlperf training data input bert[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**implementation**" +
    + Click here to expand this section. + + * **`_nvidia`** (default) + - ENV variables: + - CM_TMP_VARIATION: `nvidia` + * `_reference` + - ENV variables: + - CM_TMP_VARIATION: `reference` + +
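+
+    As an illustration, a minimal Python sketch that prepares the BERT training data with the `_reference` implementation; the data directory is a placeholder and maps to `CM_DATA_DIR` (see the flag mapping below):
+
+    ```python
+    import cmind
+
+    # Sketch: point the script at your own data directory.
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'prepare,mlperf,training,data,input,bert,'
+                              '_reference',
+                      'out': 'con',
+                      'data_dir': '/path/to/bert-training-data'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```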
    + + + ##### Default variations + + `_nvidia` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--clean=value` → `CM_MLPERF_TRAINING_CLEAN_TFRECORDS=value` + * `--data_dir=value` → `CM_DATA_DIR=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run-nvidia.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-bert/run-nvidia.sh) + * [run-reference.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-bert/run-reference.sh) + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-bert/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "prepare mlperf training data input bert [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/prepare-training-data-resnet/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/prepare-training-data-resnet/index.md new file mode 100644 index 000000000..1f4f11347 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/prepare-training-data-resnet/index.md @@ -0,0 +1,129 @@ +# prepare-training-data-resnet +Automatically generated README for this automation recipe: **prepare-training-data-resnet** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-resnet/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "prepare mlperf training data input resnet" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=prepare,mlperf,training,data,input,resnet[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "prepare mlperf training data input resnet [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'prepare,mlperf,training,data,input,resnet' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "prepare mlperf training data input resnet[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_mxnet.#` + - ENV variables: + - CM_MXNET_VERSION: `#` + +
    + + + * Group "**implementation**" +
    + Click here to expand this section. + + * **`_nvidia`** (default) + - ENV variables: + - CM_TMP_VARIATION: `nvidia` + * `_reference` + - ENV variables: + - CM_TMP_VARIATION: `reference` + +
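+
+    For example, a minimal Python sketch that keeps the default `_nvidia` implementation and fills the `_mxnet.#` placeholder; the MXNet version and data directory below are illustrative only:
+
+    ```python
+    import cmind
+
+    # Sketch: `_mxnet.1.9.1` sets CM_MXNET_VERSION to the given value.
+    r = cmind.access({'action': 'run',
+                      'automation': 'script',
+                      'tags': 'prepare,mlperf,training,data,input,resnet,'
+                              '_nvidia,_mxnet.1.9.1',
+                      'out': 'con',
+                      'data_dir': '/path/to/resnet-training-data'})
+    if r['return'] > 0:
+        print(r['error'])
+    ```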
    + + + ##### Default variations + + `_nvidia` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--data_dir=value` → `CM_DATA_DIR=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run-nvidia.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-resnet/run-nvidia.sh) + * [run-reference.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-resnet/run-reference.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "prepare mlperf training data input resnet [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/preprocess-mlperf-inference-submission/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/preprocess-mlperf-inference-submission/index.md new file mode 100644 index 000000000..79f70a3c0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/preprocess-mlperf-inference-submission/index.md @@ -0,0 +1,96 @@ +# preprocess-mlperf-inference-submission +Automatically generated README for this automation recipe: **preprocess-mlperf-inference-submission** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/preprocess-mlperf-inference-submission/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=run,mlc,mlcommons,mlperf,inference,submission,mlperf-inference,processor,preprocessor,preprocess [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'run,mlc,mlcommons,mlperf,inference,submission,mlperf-inference,processor,preprocessor,preprocess' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` + * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/preprocess-mlperf-inference-submission/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/process-mlperf-accuracy/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/process-mlperf-accuracy/index.md new file mode 100644 index 000000000..bd5afa8b2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/process-mlperf-accuracy/index.md @@ -0,0 +1,177 @@ +# process-mlperf-accuracy +Automatically generated README for this automation recipe: **process-mlperf-accuracy** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/process-mlperf-accuracy/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "run mlperf mlcommons accuracy mlc process process-accuracy" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=run,mlperf,mlcommons,accuracy,mlc,process,process-accuracy[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "run mlperf mlcommons accuracy mlc process process-accuracy [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'run,mlperf,mlcommons,accuracy,mlc,process,process-accuracy' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "run mlperf mlcommons accuracy mlc process process-accuracy[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**coco-evaluation-tool**" +
    + Click here to expand this section. + + * **`_default-pycocotools`** (default) + * `_nvidia-pycocotools` + +
    + + + * Group "**dataset**" +
+     Click here to expand this section.
+
+    * `_cnndm`
+        - ENV variables:
+            - CM_DATASET: `cnndm`
+    * `_coco2014`
+        - ENV variables:
+            - CM_DATASET: `coco2014`
+    * **`_imagenet`** (default)
+        - ENV variables:
+            - CM_DATASET: `imagenet`
+    * `_kits19`
+        - ENV variables:
+            - CM_DATASET: `kits19`
+    * `_librispeech`
+        - ENV variables:
+            - CM_DATASET: `librispeech`
+    * `_open-orca`
+        - ENV variables:
+            - CM_DATASET: `openorca`
+    * `_openimages`
+        - ENV variables:
+            - CM_DATASET: `openimages`
+    * `_squad`
+        - ENV variables:
+            - CM_DATASET: `squad`
+    * `_terabyte`
+        - ENV variables:
+            - CM_DATASET: `terabyte`
+
+    </details>
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * `_float16` + - ENV variables: + - CM_ACCURACY_DTYPE: `float16` + * **`_float32`** (default) + - ENV variables: + - CM_ACCURACY_DTYPE: `float32` + * `_float64` + - ENV variables: + - CM_ACCURACY_DTYPE: `float64` + * `_int16` + - ENV variables: + - CM_ACCURACY_DTYPE: `int16` + * `_int32` + - ENV variables: + - CM_ACCURACY_DTYPE: `int32` + * `_int64` + - ENV variables: + - CM_ACCURACY_DTYPE: `int64` + * `_int8` + - ENV variables: + - CM_ACCURACY_DTYPE: `int8` + +
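+
+    For example, the dataset and precision groups above can be combined in a single invocation. This is a hedged sketch; the result directory path is illustrative and should be replaced with your own:
+
+    ```bash
+    cmr "run mlperf mlcommons accuracy mlc process process-accuracy _openimages _float16" \
+         --result_dir=$HOME/mlperf_accuracy_results
+    ```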
    + + + ##### Default variations + + `_default-pycocotools,_float32,_imagenet` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--result_dir=value` → `CM_MLPERF_ACCURACY_RESULTS_DIR=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/process-mlperf-accuracy/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/process-mlperf-accuracy/run.bat) +___ +#### Script output +```bash +cmr "run mlperf mlcommons accuracy mlc process process-accuracy [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/push-mlperf-inference-results-to-github/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/push-mlperf-inference-results-to-github/index.md new file mode 100644 index 000000000..2f3245a0b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/push-mlperf-inference-results-to-github/index.md @@ -0,0 +1,109 @@ +# push-mlperf-inference-results-to-github +Automatically generated README for this automation recipe: **push-mlperf-inference-results-to-github** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/push-mlperf-inference-results-to-github/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "push mlperf mlperf-inference-results publish-results inference submission github" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=push,mlperf,mlperf-inference-results,publish-results,inference,submission,github [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "push mlperf mlperf-inference-results publish-results inference submission github " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'push,mlperf,mlperf-inference-results,publish-results,inference,submission,github' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "push mlperf mlperf-inference-results publish-results inference submission github" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--branch=value` → `CM_GIT_BRANCH=value` + * `--commit_message=value` → `CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE=value` + * `--repo_branch=value` → `CM_GIT_BRANCH=value` + * `--repo_url=value` → `CM_MLPERF_RESULTS_GIT_REPO_URL=value` + * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_MLPERF_RESULTS_GIT_REPO_URL: `https://github.com/ctuning/mlperf_inference_submissions_v4.0` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/push-mlperf-inference-results-to-github/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "push mlperf mlperf-inference-results publish-results inference submission github " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-mobilenet-models/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-mobilenet-models/index.md new file mode 100644 index 000000000..35bd027de --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-mobilenet-models/index.md @@ -0,0 +1,326 @@ +# run-mlperf-inference-mobilenet-models +Automatically generated README for this automation recipe: **run-mlperf-inference-mobilenet-models** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + + +--- + +## Set up + +We need to get imagenet full dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register in CM. + +
+Click here to set up Docker (optional).
+
+### Docker Setup
+
+CM commands are expected to run natively, but if you prefer not to modify the host system, you can run the command below to set up a Docker container.
+
+```
+cm docker script --tags=run,mobilenet-models,_tflite,_accuracy-only \
+--adr.compiler.tags=gcc \
+--docker_cm_repo=mlcommons@cm4mlops \
+--imagenet_path=$HOME/imagenet-2012-val \
+--results_dir=$HOME/mobilenet_results \
+--submission_dir=$HOME/inference_submission_3.1 \
+--docker_skip_run_cmd
+```
+
+This command builds a Docker container and gives you an interactive shell from which you can execute the CM run commands below.
+* `results_dir`, `submission_dir` and `imagenet_path` are mounted from the host system.
+* `results_dir` and `submission_dir` are expected to be empty directories that will be populated by the Docker container.
+* `imagenet_path` should point to the ImageNet folder containing the 50,000 validation images.
+
+</details>
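+
+Before building the container, a quick sanity check of the dataset path can save a long rebuild. This assumes ImageNet was downloaded to `$HOME/imagenet-2012-val` as in the command above:
+
+```
+ls $HOME/imagenet-2012-val | wc -l   # expect 50000 validation images
+```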
+
+## Run Commands
+
+Since the runs can take many hours, you may want to install `screen` as follows if you are running remotely. You can omit `screen` from the commands below if you are running directly on the host system.
+```
+cmr "get generic-sys-util _screen"
+```
+### Default TFLite
+
+
+#### Do a full accuracy run for all the models (can take almost a day)
+
+```
+screen cmr "run mobilenet-models _tflite _accuracy-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+#### Do a full performance run for all the models (can take almost a day)
+```
+screen cmr "run mobilenet-models _tflite _performance-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+#### Generate README files for all the runs
+```
+cmr "run mobilenet-models _tflite _populate-readme" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+#### Generate the actual submission tree
+
+Use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cmr "generate inference submission" \
+--results_dir=$HOME/mobilenet_results/valid_results \
+--submission_dir=$HOME/mobilenet_submission_tree \
+--clean \
+--infer_scenario_results=yes \
+--adr.compiler.tags=gcc --adr.inference-src.version=master \
+--run-checker \
+--submitter=cTuning \
+--hw_notes_extra="Result taken by NAME"
+```
+* Use `--hw_name="My system name"` to give a meaningful system name. Examples can be seen [here](https://github.com/mlcommons/inference_results_v3.0/tree/main/open/cTuning/systems).
+
+#### Push the results to the GitHub repo
+
+First, create a fork of [this repo](https://github.com/ctuning/mlperf_inference_submissions_v3.1/). Then run the following command after replacing `--repo_url` with your fork URL.
+```
+cmr "push github mlperf inference submission" \
+--submission_dir=$HOME/mobilenet_submission_tree \
+--repo_url=https://github.com/ctuning/mlperf_inference_submissions_v3.1/ \
+--commit_message="Mobilenet results added"
+```
+
+Create a PR to the [cTuning repo](https://github.com/ctuning/mlperf_inference_submissions_v3.1/).
+
+### Using ARMNN with NEON
+
+Follow the same procedure as above, but add `_armnn,_neon` to the tags for the first three experiment runs. For example:
+```
+cmr "run mobilenet-models _tflite _armnn _neon _accuracy-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+`results_dir` and `submission_dir` can be the same as before, since the results will go to different subfolders.
+
+### Using ARMNN with OpenCL
+Follow the same procedure as above, but add `_armnn,_opencl` to the tags for the first three experiment runs. For example:
+```
+cmr "run mobilenet-models _tflite _armnn _opencl _accuracy-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+`results_dir` and `submission_dir` can be the same as before, since the results will go to different subfolders.
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-mobilenet-models/_cm.json)*
+* Output cached?
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "run mobilenet models image-classification mobilenet-models mlperf inference" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=run,mobilenet,models,image-classification,mobilenet-models,mlperf,inference[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "run mobilenet models image-classification mobilenet-models mlperf inference [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'run,mobilenet,models,image-classification,mobilenet-models,mlperf,inference' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "run mobilenet models image-classification mobilenet-models mlperf inference[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_armnn` + - ENV variables: + - CM_MLPERF_USE_ARMNN_LIBRARY: `yes` + * `_neon` + - Aliases: `_use-neon` + - ENV variables: + - CM_MLPERF_USE_NEON: `yes` + * `_only-fp32` + - ENV variables: + - CM_MLPERF_RUN_INT8: `no` + * `_only-int8` + - ENV variables: + - CM_MLPERF_RUN_FP32: `no` + * `_opencl` + - ENV variables: + - CM_MLPERF_USE_OPENCL: `yes` + +
    + + + * Group "**base-framework**" +
    + Click here to expand this section. + + * **`_tflite`** (default) + +
    + + + * Group "**model-selection**" +
    + Click here to expand this section. + + * **`_all-models`** (default) + - ENV variables: + - CM_MLPERF_RUN_MOBILENETS: `yes` + - CM_MLPERF_RUN_EFFICIENTNETS: `yes` + * `_efficientnet` + - ENV variables: + - CM_MLPERF_RUN_EFFICIENTNETS: `yes` + * `_mobilenet` + - ENV variables: + - CM_MLPERF_RUN_MOBILENETS: `yes` + +
    + + + * Group "**optimization**" +
    + Click here to expand this section. + + * **`_tflite-default`** (default) + - ENV variables: + - CM_MLPERF_TFLITE_DEFAULT_MODE: `yes` + +
    + + + * Group "**run-mode**" +
    + Click here to expand this section. + + * `_accuracy-only` + - ENV variables: + - CM_MLPERF_FIND_PERFORMANCE_MODE: `no` + - CM_MLPERF_ACCURACY_MODE: `yes` + - CM_MLPERF_SUBMISSION_MODE: `no` + * `_find-performance` + - ENV variables: + - CM_MLPERF_FIND_PERFORMANCE_MODE: `yes` + - CM_MLPERF_SUBMISSION_MODE: `no` + * `_performance-only` + - ENV variables: + - CM_MLPERF_FIND_PERFORMANCE_MODE: `no` + - CM_MLPERF_PERFORMANCE_MODE: `yes` + - CM_MLPERF_SUBMISSION_MODE: `no` + * `_populate-readme` + - ENV variables: + - CM_MLPERF_FIND_PERFORMANCE_MODE: `no` + - CM_MLPERF_POPULATE_README: `yes` + * `_submission` + - ENV variables: + - CM_MLPERF_FIND_PERFORMANCE_MODE: `no` + - CM_MLPERF_SUBMISSION_MODE: `yes` + +
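+
+    As an illustration, the groups above can be combined for a quick test run. This sketch mirrors the tags and paths used in the Set up section; adjust them for your system:
+
+    ```bash
+    cmr "run mobilenet models image-classification mobilenet-models mlperf inference _tflite _mobilenet _find-performance" \
+         --adr.compiler.tags=gcc \
+         --results_dir=$HOME/mobilenet_results
+    ```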
    + + + ##### Default variations + + `_all-models,_tflite,_tflite-default` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--find-performance=value` → `CM_MLPERF_FIND_PERFORMANCE_MODE=value` + * `--imagenet_path=value` → `IMAGENET_PATH=value` + * `--no-rerun=value` → `CM_MLPERF_NO_RERUN=value` + * `--power=value` → `CM_MLPERF_POWER=value` + * `--results_dir=value` → `CM_MLPERF_INFERENCE_RESULTS_DIR=value` + * `--submission=value` → `CM_MLPERF_SUBMISSION_MODE=value` + * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_MLPERF_RUN_MOBILENETS: `no` + * CM_MLPERF_RUN_EFFICIENTNETS: `no` + * CM_MLPERF_NO_RERUN: `no` + * CM_MLPERF_RUN_FP32: `yes` + * CM_MLPERF_RUN_INT8: `yes` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-mobilenet-models/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "run mobilenet models image-classification mobilenet-models mlperf inference [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-submission-checker/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-submission-checker/index.md new file mode 100644 index 000000000..0231dcbd9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-submission-checker/index.md @@ -0,0 +1,138 @@ +# run-mlperf-inference-submission-checker +Automatically generated README for this automation recipe: **run-mlperf-inference-submission-checker** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-submission-checker/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-submission-checker/_cm.json)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=run,mlc,mlcommons,mlperf,inference,mlperf-inference,submission,checker,submission-checker,mlc-submission-checker[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'run,mlc,mlcommons,mlperf,inference,mlperf-inference,submission,checker,submission-checker,mlc-submission-checker' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_short-run` + - ENV variables: + - CM_MLPERF_SHORT_RUN: `yes` + +
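+
+    For instance, a short sanity check of a submission tree using the input flags listed below (the directory path is illustrative):
+
+    ```bash
+    cmr "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker _short-run" \
+         --submission_dir=$HOME/inference_submission_tree \
+         --src_version=r4.0
+    ```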
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--extra_args=value` → `CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS=value` + * `--extra_model_benchmark_map=value` → `CM_MLPERF_EXTRA_MODEL_MAPPING=value` + * `--input=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` + * `--power=value` → `CM_MLPERF_POWER=value` + * `--push_to_github=value` → `CM_MLPERF_RESULT_PUSH_TO_GITHUB=value` + * `--skip_compliance=value` → `CM_MLPERF_SKIP_COMPLIANCE=value` + * `--skip_power_check=value` → `CM_MLPERF_SKIP_POWER_CHECK=value` + * `--src_version=value` → `CM_MLPERF_SUBMISSION_CHECKER_VERSION=value` + * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` + * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` + * `--tar=value` → `CM_TAR_SUBMISSION_DIR=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_MLPERF_SHORT_RUN: `no` + + +#### Versions +Default version: `master` + +* `master` +* `r3.0` +* `r3.1` +* `r4.0` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-submission-checker/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-submission-checker/run.bat) +___ +#### Script output +```bash +cmr "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-client/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-client/index.md new file mode 100644 index 000000000..657bf339f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-client/index.md @@ -0,0 +1,119 @@ +# run-mlperf-power-client +Automatically generated README for this automation recipe: **run-mlperf-power-client** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-client/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-client/_cm.json)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "run mlc mlcommons mlperf power client power-client" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=run,mlc,mlcommons,mlperf,power,client,power-client [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "run mlc mlcommons mlperf power client power-client " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'run,mlc,mlcommons,mlperf,power,client,power-client' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "run mlc mlcommons mlperf power client power-client" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--loadgen_logs_dir=value` → `CM_MLPERF_LOADGEN_LOGS_DIR=value` + * `--log_dir=value` → `CM_MLPERF_POWER_LOG_DIR=value` + * `--max_amps=value` → `CM_MLPERF_POWER_MAX_AMPS=value` + * `--max_volts=value` → `CM_MLPERF_POWER_MAX_VOLTS=value` + * `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value` + * `--port=value` → `CM_MLPERF_POWER_SERVER_PORT=value` + * `--power_server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value` + * `--run_cmd=value` → `CM_MLPERF_RUN_CMD=value` + * `--server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value` + * `--server_port=value` → `CM_MLPERF_POWER_SERVER_PORT=value` + * `--timestamp=value` → `CM_MLPERF_POWER_TIMESTAMP=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_MLPERF_POWER_LOG_DIR: `logs` + * CM_MLPERF_RUN_CMD: `` + * CM_MLPERF_POWER_SERVER_ADDRESS: `localhost` + * CM_MLPERF_POWER_NTP_SERVER: `time.google.com` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-client/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "run mlc mlcommons mlperf power client power-client " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-server/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-server/index.md new file mode 100644 index 000000000..be12a1dd3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-server/index.md @@ -0,0 +1,116 @@ +# run-mlperf-power-server +Automatically generated README for this automation recipe: **run-mlperf-power-server** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-server/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-server/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "run mlc mlcommons mlperf power server power-server" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=run,mlc,mlcommons,mlperf,power,server,power-server [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "run mlc mlcommons mlperf power server power-server " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'run,mlc,mlcommons,mlperf,power,server,power-server' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "run mlc mlcommons mlperf power server power-server" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--device_port=value` → `CM_MLPERF_POWER_DEVICE_PORT=value` + * `--device_type=value` → `CM_MLPERF_POWER_DEVICE_TYPE=value` + * `--interface_flag=value` → `CM_MLPERF_POWER_INTERFACE_FLAG=value` + * `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value` + * `--screen=value` → `CM_MLPERF_POWER_SERVER_USE_SCREEN=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_MLPERF_POWER_NTP_SERVER: `time.google.com` + * CM_MLPERF_POWER_INTERFACE_FLAG: `` + * CM_MLPERF_POWER_DEVICE_TYPE: `49` + * CM_MLPERF_POWER_SERVER_ADDRESS: `0.0.0.0` + * CM_MLPERF_POWER_SERVER_PORT: `4950` + * CM_MLPERF_POWER_DEVICE_PORT: `/dev/usbtmc0` + * CM_MLPERF_POWER_SERVER_USE_SCREEN: `no` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-server/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-server/run.bat) +___ +#### Script output +```bash +cmr "run mlc mlcommons mlperf power server power-server " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-training-submission-checker/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-training-submission-checker/index.md new file mode 100644 index 000000000..863aeae5e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/run-mlperf-training-submission-checker/index.md @@ -0,0 +1,135 @@ +# run-mlperf-training-submission-checker +Automatically generated README for this automation recipe: **run-mlperf-training-submission-checker** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-training-submission-checker/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=run,mlc,mlcommons,mlperf,training,train,mlperf-training,submission,checker,submission-checker,mlc-submission-checker[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'run,mlc,mlcommons,mlperf,training,train,mlperf-training,submission,checker,submission-checker,mlc-submission-checker' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_short-run` + - ENV variables: + - CM_MLPERF_SHORT_RUN: `yes` + +
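+
+    For example, to check a training submission tree and tar it afterwards, using the input flags listed below (the path and the `--tar=yes` value are illustrative):
+
+    ```bash
+    cmr "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker _short-run" \
+         --input=$HOME/training_submission_tree \
+         --tar=yes
+    ```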
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--extra_args=value` → `CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS=value` + * `--input=value` → `CM_MLPERF_SUBMISSION_DIR=value` + * `--power=value` → `CM_MLPERF_POWER=value` + * `--push_to_github=value` → `CM_MLPERF_RESULT_PUSH_TO_GITHUB=value` + * `--skip_compliance=value` → `CM_MLPERF_SKIP_COMPLIANCE=value` + * `--skip_power_check=value` → `CM_MLPERF_SKIP_POWER_CHECK=value` + * `--src_version=value` → `CM_MLPERF_SUBMISSION_CHECKER_VERSION=value` + * `--submission_dir=value` → `CM_MLPERF_SUBMISSION_DIR=value` + * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` + * `--tar=value` → `CM_TAR_SUBMISSION_DIR=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_MLPERF_SHORT_RUN: `no` + + +#### Versions +Default version: `master` + +* `master` +* `r3.0` +* `r3.1` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-training-submission-checker/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/truncate-mlperf-inference-accuracy-log/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/truncate-mlperf-inference-accuracy-log/index.md new file mode 100644 index 000000000..d0921c6d5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/MLPerf-benchmark-support/truncate-mlperf-inference-accuracy-log/index.md @@ -0,0 +1,98 @@ +# truncate-mlperf-inference-accuracy-log +Automatically generated README for this automation recipe: **truncate-mlperf-inference-accuracy-log** + +Category: **[MLPerf benchmark support](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/truncate-mlperf-inference-accuracy-log/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/truncate-mlperf-inference-accuracy-log/_cm.json)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=run,mlc,mlcommons,mlperf,inference,mlperf-inference,truncation,truncator,truncate,accuracy,accuracy-log,accuracy-log-trancation,accuracy-log-truncator,mlc-accuracy-log-trancation,mlc-accuracy-log-truncator [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'run,mlc,mlcommons,mlperf,inference,mlperf-inference,truncation,truncator,truncate,accuracy,accuracy-log,accuracy-log-trancation,accuracy-log-truncator,mlc-accuracy-log-trancation,mlc-accuracy-log-truncator' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--input=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` + * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` + * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/truncate-mlperf-inference-accuracy-log/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-onnx-py/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-onnx-py/index.md new file mode 100644 index 000000000..981a09f07 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-onnx-py/index.md @@ -0,0 +1,138 @@ +# app-image-classification-onnx-py +Automatically generated README for this automation recipe: **app-image-classification-onnx-py** + +Category: **[Modular AI/ML application pipeline](..)** + +License: **Apache 2.0** + +* Notes from the 
authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "modular python app image-classification onnx" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=modular,python,app,image-classification,onnx[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "modular python app image-classification onnx [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'modular,python,app,image-classification,onnx' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "modular python app image-classification onnx[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**target**" +
    + Click here to expand this section. + + * **`_cpu`** (default) + - ENV variables: + - USE_CPU: `True` + * `_cuda` + - ENV variables: + - USE_CUDA: `True` + +
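+
+    For example, to classify a local image on GPU using the input flags described below (the image path and output directory are illustrative):
+
+    ```bash
+    cmr "modular python app image-classification onnx _cuda" \
+         --input=$HOME/images/cat.jpg \
+         --output=$HOME/onnx_output
+    ```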
    + + + ##### Default variations + + `_cpu` +=== "Input Flags" + + + #### Input Flags + + * --**input:** Path to JPEG image to classify + * --**output:** Output directory (optional) + * --**j:** Print JSON output +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--input=value` → `CM_IMAGE=value` + * `--output=value` → `CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_BATCH_COUNT: `1` + * CM_BATCH_SIZE: `1` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py/run.bat) +___ +#### Script output +```bash +cmr "modular python app image-classification onnx [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tf-onnx-cpp/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tf-onnx-cpp/index.md new file mode 100644 index 000000000..bdb43e6f0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tf-onnx-cpp/index.md @@ -0,0 +1,98 @@ +# app-image-classification-tf-onnx-cpp +Automatically generated README for this automation recipe: **app-image-classification-tf-onnx-cpp** + +Category: **[Modular AI/ML application pipeline](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tf-onnx-cpp/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tf-onnx-cpp/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "app image-classification cpp tensorflow onnx" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=app,image-classification,cpp,tensorflow,onnx + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "app image-classification cpp tensorflow onnx " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'app,image-classification,cpp,tensorflow,onnx' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "app image-classification cpp tensorflow onnx" + ``` +___ + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_BATCH_COUNT: `1` + * CM_BATCH_SIZE: `1` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tf-onnx-cpp/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "app image-classification cpp tensorflow onnx " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-torch-py/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-torch-py/index.md new file mode 100644 index 000000000..9c96b5ef2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-torch-py/index.md @@ -0,0 +1,113 @@ +# app-image-classification-torch-py +Automatically generated README for this automation recipe: **app-image-classification-torch-py** + +Category: **[Modular AI/ML application pipeline](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-torch-py/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-torch-py/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "app image-classification python torch" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=app,image-classification,python,torch[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "app image-classification python torch [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'app,image-classification,python,torch' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "app image-classification python torch[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_cuda` + - ENV variables: + - USE_CUDA: `yes` + +
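+
+    For example, a CUDA run that also overrides one of the default environment keys listed below (the batch size value is illustrative):
+
+    ```bash
+    cmr "app image-classification python torch _cuda" --env.CM_BATCH_SIZE=2
+    ```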
    + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_BATCH_COUNT: `1` + * CM_BATCH_SIZE: `1` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-torch-py/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-torch-py/run.bat) +___ +#### Script output +```bash +cmr "app image-classification python torch [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tvm-onnx-py/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tvm-onnx-py/index.md new file mode 100644 index 000000000..37f6b98a5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tvm-onnx-py/index.md @@ -0,0 +1,114 @@ +# app-image-classification-tvm-onnx-py +Automatically generated README for this automation recipe: **app-image-classification-tvm-onnx-py** + +Category: **[Modular AI/ML application pipeline](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tvm-onnx-py/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tvm-onnx-py/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "app image-classification python tvm-onnx" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=app,image-classification,python,tvm-onnx[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "app image-classification python tvm-onnx [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'app,image-classification,python,tvm-onnx' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "app image-classification python tvm-onnx[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_cuda` + - ENV variables: + - USE_CUDA: `yes` + * `_llvm` + +
    + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_BATCH_COUNT: `1` + * CM_BATCH_SIZE: `1` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tvm-onnx-py/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "app image-classification python tvm-onnx [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-stable-diffusion-onnx-py/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-stable-diffusion-onnx-py/index.md new file mode 100644 index 000000000..af2093236 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/app-stable-diffusion-onnx-py/index.md @@ -0,0 +1,128 @@ +# app-stable-diffusion-onnx-py +Automatically generated README for this automation recipe: **app-stable-diffusion-onnx-py** + +Category: **[Modular AI/ML application pipeline](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-stable-diffusion-onnx-py/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-stable-diffusion-onnx-py/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "modular python app stable-diffusion onnx" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=modular,python,app,stable-diffusion,onnx[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "modular python app stable-diffusion onnx [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'modular,python,app,stable-diffusion,onnx' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "modular python app stable-diffusion onnx[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**target**" +
    + Click here to expand this section. + + * **`_cpu`** (default) + - ENV variables: + - USE_CPU: `True` + - CM_DEVICE: `cpu` + * `_cuda` + - ENV variables: + - USE_CUDA: `True` + - CM_DEVICE: `cuda:0` + +
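+
+    For example, generating an image on CPU using the input flags described below (the prompt text and output directory are illustrative):
+
+    ```bash
+    cmr "modular python app stable-diffusion onnx _cpu" \
+         --text="a photo of a mountain lake at sunrise" \
+         --output=$HOME/sd_output
+    ```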
    + + + ##### Default variations + + `_cpu` +=== "Input Flags" + + + #### Input Flags + + * --**text:** Text to generate image + * --**output:** Output directory +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--output=value` → `CM_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT=value` + * `--text=value` → `CM_APP_STABLE_DIFFUSION_ONNX_PY_TEXT=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-stable-diffusion-onnx-py/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/app-stable-diffusion-onnx-py/run.bat) +___ +#### Script output +```bash +cmr "modular python app stable-diffusion onnx [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/index.md new file mode 100644 index 000000000..3de2f8ac6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-AI-ML-application-pipeline/index.md @@ -0,0 +1,5 @@ +* [app-image-classification-onnx-py](app-image-classification-onnx-py/index.md) +* [app-image-classification-tf-onnx-cpp](app-image-classification-tf-onnx-cpp/index.md) +* [app-image-classification-torch-py](app-image-classification-torch-py/index.md) +* [app-image-classification-tvm-onnx-py](app-image-classification-tvm-onnx-py/index.md) +* [app-stable-diffusion-onnx-py](app-stable-diffusion-onnx-py/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-dummy/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-dummy/index.md new file mode 100644 index 000000000..b78e6db15 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-dummy/index.md @@ -0,0 +1,235 @@ +# app-mlperf-inference-dummy +Automatically generated README for this automation recipe: **app-mlperf-inference-dummy** + +Category: **[Modular MLPerf benchmarks](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-dummy/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "reproduce mlcommons mlperf inference harness dummy-harness dummy[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**backend**" +
    + Click here to expand this section. + + * **`_pytorch`** (default) + - ENV variables: + - CM_MLPERF_BACKEND: `pytorch` + +
    + + + * Group "**batch-size**" +
    + Click here to expand this section. + + * `_bs.#` + +
    + + + * Group "**device**" +
    + Click here to expand this section. + + * **`_cpu`** (default) + - ENV variables: + - CM_MLPERF_DEVICE: `cpu` + * `_cuda` + - ENV variables: + - CM_MLPERF_DEVICE: `gpu` + - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart` + +
    + + + * Group "**loadgen-scenario**" +
    + Click here to expand this section. + + * `_multistream` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream` + * `_offline` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `Offline` + * `_server` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `Server` + * `_singlestream` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` + +
    + + + * Group "**model**" +
    + Click here to expand this section. + + * `_bert-99` + - ENV variables: + - CM_MODEL: `bert-99` + - CM_SQUAD_ACCURACY_DTYPE: `float32` + * `_bert-99.9` + - ENV variables: + - CM_MODEL: `bert-99.9` + * `_gptj-99` + - ENV variables: + - CM_MODEL: `gptj-99` + - CM_SQUAD_ACCURACY_DTYPE: `float32` + * `_gptj-99.9` + - ENV variables: + - CM_MODEL: `gptj-99.9` + * `_llama2-70b-99` + - ENV variables: + - CM_MODEL: `llama2-70b-99` + * `_llama2-70b-99.9` + - ENV variables: + - CM_MODEL: `llama2-70b-99.9` + * **`_resnet50`** (default) + - ENV variables: + - CM_MODEL: `resnet50` + * `_retinanet` + - ENV variables: + - CM_MODEL: `retinanet` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * `_fp16` + * `_fp32` + * `_uint8` + +
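+
+    Putting the groups above together, a hedged example of an offline accuracy run (the mode, query count and output directory values are illustrative; see the input flags below):
+
+    ```bash
+    cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy _resnet50 _offline _fp32" \
+         --mode=accuracy \
+         --count=100 \
+         --output_dir=$HOME/dummy_results
+    ```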
    + + + ##### Default variations + + `_cpu,_pytorch,_resnet50` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` + * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` + * `--mlperf_conf=value` → `CM_MLPERF_CONF=value` + * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` + * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` + * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` + * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` + * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` + * `--rerun=value` → `CM_RERUN=value` + * `--results_repo=value` → `CM_MLPERF_INFERENCE_RESULTS_REPO=value` + * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` + * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` + * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` + * `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value` + * `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value` + * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` + * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` + * `--user_conf=value` → `CM_MLPERF_USER_CONF=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_MLPERF_LOADGEN_SCENARIO: `Offline` + * CM_MLPERF_LOADGEN_MODE: `performance` + * CM_SKIP_PREPROCESS_DATASET: `no` + * CM_SKIP_MODEL_DOWNLOAD: `no` + * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `dummy_harness` + * CM_MLPERF_SKIP_RUN: `no` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-dummy/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-intel/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-intel/index.md new file mode 100644 index 000000000..6bab8f909 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-intel/index.md @@ -0,0 +1,347 @@ +# app-mlperf-inference-intel +Automatically generated README for this automation recipe: **app-mlperf-inference-intel** + +Category: **[Modular MLPerf benchmarks](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-intel/_cm.yaml)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel,intel-harness,intel[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel,intel-harness,intel', + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return'] > 0: + print(r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_bs.#` + - ENV variables: + - ML_MLPERF_MODEL_BATCH_SIZE: `#` + * `_v3.1` + - ENV variables: + - CM_MLPERF_INFERENCE_CODE_VERSION: `v3.1` + +
    + + + * Group "**device**" +
    + Click here to expand this section. + + * **`_cpu`** (default) + - ENV variables: + - CM_MLPERF_DEVICE: `cpu` + +
    + + + * Group "**framework**" +
    + Click here to expand this section. + + * **`_pytorch`** (default) + - ENV variables: + - CM_MLPERF_BACKEND: `pytorch` + - CM_MLPERF_BACKEND_LIB_NAMESPEC: `pytorch` + +
    + + + * Group "**loadgen-batchsize**" +
    + Click here to expand this section. + + * `_batch_size.#` + - ENV variables: + - CM_MLPERF_LOADGEN_BATCH_SIZE: `#` + +
    + + + * Group "**loadgen-scenario**" +
    + Click here to expand this section. + + * `_multistream` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream` + * `_offline` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `Offline` + * `_server` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `Server` + * `_singlestream` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` + +
    + + + * Group "**model**" +
    + Click here to expand this section. + + * `_bert-99` + - ENV variables: + - CM_MODEL: `bert-99` + - CM_SQUAD_ACCURACY_DTYPE: `float32` + - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx` + * `_bert-99.9` + - ENV variables: + - CM_MODEL: `bert-99.9` + - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3733910/files/model.onnx` + * `_gptj-99` + - ENV variables: + - CM_MODEL: `gptj-99` + - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3733910/files/model.onnx` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8` + * `_gptj-99.9` + - ENV variables: + - CM_MODEL: `gptj-99.9` + - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3733910/files/model.onnx` + * **`_resnet50`** (default) + - ENV variables: + - CM_MODEL: `resnet50` + - dataset_imagenet_preprocessed_input_square_side: `224` + - ml_model_has_background_class: `YES` + - ml_model_image_height: `224` + - loadgen_buffer_size: `1024` + - loadgen_dataset_size: `50000` + - CM_BENCHMARK: `STANDALONE_CLASSIFICATION` + * `_retinanet` + - ENV variables: + - CM_MODEL: `retinanet` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth` + - dataset_imagenet_preprocessed_input_square_side: `224` + - ml_model_image_height: `800` + - ml_model_image_width: `800` + - loadgen_buffer_size: `64` + - loadgen_dataset_size: `24576` + - CM_BENCHMARK: `STANDALONE_OBJECT_DETECTION` + +
    + + + * Group "**network-mode**" +
    + Click here to expand this section. + + * `_network-server` + - ENV variables: + - CM_MLPERF_NETWORK_RUN_MODE: `network-server` + * **`_standalone`** (default) + - ENV variables: + - CM_MLPERF_NETWORK_RUN_MODE: `standalone` + +
    + + + * Group "**network-run-mode**" +
    + Click here to expand this section. + + * `_network-client` + - ENV variables: + - CM_MLPERF_NETWORK_RUN_MODE: `network-client` + +
    + + + * Group "**power-mode**" +
    + Click here to expand this section. + + * `_maxn` + - ENV variables: + - CM_MLPERF_NVIDIA_HARNESS_MAXN: `True` + * `_maxq` + - ENV variables: + - CM_MLPERF_NVIDIA_HARNESS_MAXQ: `True` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * `_fp32` + - ENV variables: + - CM_IMAGENET_ACCURACY_DTYPE: `float32` + * `_int4` + * `_uint8` + +
    + + + * Group "**run-mode**" +
    + Click here to expand this section. + + * `_build-harness` + - ENV variables: + - CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: `build_harness` + * `_calibration` + - ENV variables: + - CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: `calibration` + * **`_run-harness`** (default) + - ENV variables: + - CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: `run_harness` + +
    + + + * Group "**sut**" +
    + Click here to expand this section. + + * `_sapphire-rapids.112c` + - ENV variables: + - WARMUP: ` --warmup` + * `_sapphire-rapids.24c` + +
    + + + * Group "**version**" +
    + Click here to expand this section. + + * **`_v4.0`** (default) + - ENV variables: + - CM_MLPERF_INFERENCE_CODE_VERSION: `v4.0` + +
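+
+    For example, an illustrative sketch (not every combination is implemented in the Intel harness code): the harness is typically built first via the `_build-harness` run-mode and then executed with the default `_run-harness`:
+
+    ```bash
+    # Illustrative: _bert-99 (model), _build-harness (run-mode) and
+    # _sapphire-rapids.112c (sut) are documented in the groups above.
+    cmr "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel _bert-99,_build-harness"
+    cmr "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel _bert-99,_sapphire-rapids.112c" --mode=performance
+    ```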
    + + + ##### Default variations + + `_cpu,_pytorch,_resnet50,_run-harness,_standalone,_v4.0` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` + * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` + * `--mlperf_conf=value` → `CM_MLPERF_CONF=value` + * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` + * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` + * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` + * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` + * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` + * `--rerun=value` → `CM_RERUN=value` + * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` + * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` + * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` + * `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value` + * `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value` + * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` + * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` + * `--user_conf=value` → `CM_MLPERF_USER_CONF=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_BATCH_COUNT: `1` + * CM_BATCH_SIZE: `1` + * CM_FAST_COMPILATION: `yes` + * CM_MLPERF_LOADGEN_SCENARIO: `Offline` + * CM_MLPERF_LOADGEN_MODE: `performance` + * CM_SKIP_PREPROCESS_DATASET: `no` + * CM_SKIP_MODEL_DOWNLOAD: `no` + * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `intel` + * CM_MLPERF_SKIP_RUN: `no` + * verbosity: `1` + * loadgen_trigger_cold_run: `0` + + + +#### Native script being run +=== "Linux/macOS" + * [run_bert_harness.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-intel/run_bert_harness.sh) + * [run_gptj_harness_v3_1.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-intel/run_gptj_harness_v3_1.sh) + * [run_gptj_harness_v4_0.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-intel/run_gptj_harness_v4_0.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-qualcomm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-qualcomm/index.md new file mode 100644 index 000000000..b46ea4677 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-qualcomm/index.md @@ -0,0 +1,368 @@ +# app-mlperf-inference-qualcomm +Automatically generated README for this automation recipe: **app-mlperf-inference-qualcomm** + +Category: **[Modular MLPerf benchmarks](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-qualcomm/_cm.yaml)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,qualcomm-harness,qualcomm,kilt-harness,kilt[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'reproduce,mlcommons,mlperf,inference,harness,qualcomm-harness,qualcomm,kilt-harness,kilt', + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return'] > 0: + print(r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_activation-count.#` + - ENV variables: + - CM_MLPERF_QAIC_ACTIVATION_COUNT: `#` + * `_num-devices.4` + - ENV variables: + - CM_QAIC_DEVICES: `0,1,2,3` + * `_pro` + - ENV variables: + - qaic_queue_length: `10` + +
    + + + * Group "**batch-size**" +
    + Click here to expand this section. + + * `_bs.#` + - ENV variables: + - kilt_model_batch_size: `#` + * `_bs.0` + - ENV variables: + - kilt_model_batch_size: `1` + +
    + + + * Group "**device**" +
    + Click here to expand this section. + + * **`_cpu`** (default) + - ENV variables: + - CM_MLPERF_DEVICE: `cpu` + - kilt_backend_type: `cpu` + * `_cuda` + - ENV variables: + - CM_MLPERF_DEVICE: `gpu` + - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart` + - kilt_backend_type: `gpu` + * `_qaic` + - ENV variables: + - CM_MLPERF_DEVICE: `qaic` + - CM_MLPERF_DEVICE_LIB_NAMESPEC: `QAic` + - kilt_backend_type: `qaic` + +
    + + + * Group "**framework**" +
    + Click here to expand this section. + + * `_glow` + - ENV variables: + - device: `qaic` + - CM_MLPERF_BACKEND: `glow` + - CM_MLPERF_BACKEND_LIB_NAMESPEC: `QAic` + * **`_onnxruntime`** (default) + - ENV variables: + - device: `onnxrt` + - CM_MLPERF_BACKEND: `onnxruntime` + - CM_MLPERF_BACKEND_LIB_NAMESPEC: `onnxruntime` + * `_tensorrt` + - ENV variables: + - CM_MLPERF_BACKEND: `tensorrt` + - device: `tensorrt` + - CM_MLPERF_BACKEND_NAME: `TensorRT` + +
    + + + * Group "**loadgen-batch-size**" +
    + Click here to expand this section. + + * `_loadgen-batch-size.#` + - ENV variables: + - CM_MLPERF_LOADGEN_BATCH_SIZE: `#` + +
    + + + * Group "**loadgen-scenario**" +
    + Click here to expand this section. + + * `_multistream` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream` + * `_offline` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `Offline` + * `_server` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `Server` + * `_singlestream` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` + +
    + + + * Group "**model**" +
    + Click here to expand this section. + + * `_bert-99` + - ENV variables: + - CM_MODEL: `bert-99` + - CM_SQUAD_ACCURACY_DTYPE: `float32` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx` + * `_bert-99.9` + - ENV variables: + - CM_MODEL: `bert-99.9` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3733910/files/model.onnx` + * **`_resnet50`** (default) + - ENV variables: + - CM_MODEL: `resnet50` + - kilt_model_name: `resnet50` + - kilt_input_count: `1` + - kilt_output_count: `1` + - kilt_input_format: `FLOAT32,-1,224,224,3` + - kilt_output_format: `INT64,-1` + - dataset_imagenet_preprocessed_input_square_side: `224` + - ml_model_has_background_class: `YES` + - ml_model_image_height: `224` + - loadgen_buffer_size: `1024` + - loadgen_dataset_size: `50000` + - CM_BENCHMARK: `STANDALONE_CLASSIFICATION` + * `_retinanet` + - ENV variables: + - CM_MODEL: `retinanet` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth` + - kilt_model_name: `retinanet` + - kilt_input_count: `1` + - kilt_model_max_detections: `600` + - kilt_output_count: `1` + - kilt_input_format: `FLOAT32,-1,3,800,800` + - kilt_output_format: `INT64,-1` + - dataset_imagenet_preprocessed_input_square_side: `224` + - ml_model_image_height: `800` + - ml_model_image_width: `800` + - loadgen_buffer_size: `64` + - loadgen_dataset_size: `24576` + - CM_BENCHMARK: `STANDALONE_OBJECT_DETECTION` + +
    + + + * Group "**nsp**" +
    + Click here to expand this section. + + * `_nsp.#` + * `_nsp.14` + * `_nsp.16` + +
    + + + * Group "**power-mode**" +
    + Click here to expand this section. + + * `_maxn` + - ENV variables: + - CM_MLPERF_NVIDIA_HARNESS_MAXN: `True` + * `_maxq` + - ENV variables: + - CM_MLPERF_NVIDIA_HARNESS_MAXQ: `True` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * `_fp16` + * `_fp32` + - ENV variables: + - CM_IMAGENET_ACCURACY_DTYPE: `float32` + * `_uint8` + +
    + + + * Group "**run-mode**" +
    + Click here to expand this section. + + * `_network-client` + - ENV variables: + - CM_RUN_MODE: `network-client` + * `_network-server` + - ENV variables: + - CM_RUN_MODE: `network-server` + * **`_standalone`** (default) + - ENV variables: + - CM_RUN_MODE: `standalone` + +
    + + + * Group "**sut**" +
    + Click here to expand this section. + + * `_dl2q.24xlarge` + - ENV variables: + - CM_QAIC_DEVICES: `0,1,2,3,4,5,6,7` + - qaic_queue_length: `4` + * `_rb6` + - ENV variables: + - CM_QAIC_DEVICES: `0` + - qaic_queue_length: `6` + +
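+
+    For instance (illustrative only; assumes a machine with the corresponding Qualcomm Cloud AI cards), the `_dl2q.24xlarge` SUT preset can be combined with the `_qaic` device, with `--devices` from the flag mapping below overriding `CM_QAIC_DEVICES`:
+
+    ```bash
+    # Illustrative: _qaic (device), _dl2q.24xlarge (sut) and _offline
+    # (loadgen-scenario) are documented in the groups above.
+    cmr "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt _qaic,_dl2q.24xlarge,_offline" --devices=0,1,2,3
+    ```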
    + + + ##### Default variations + + `_cpu,_onnxruntime,_resnet50,_standalone` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` + * `--devices=value` → `CM_QAIC_DEVICES=value` + * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` + * `--mlperf_conf=value` → `CM_MLPERF_CONF=value` + * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` + * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` + * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` + * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` + * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` + * `--rerun=value` → `CM_RERUN=value` + * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` + * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` + * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` + * `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value` + * `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value` + * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` + * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` + * `--user_conf=value` → `CM_MLPERF_USER_CONF=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_BATCH_COUNT: `1` + * CM_BATCH_SIZE: `1` + * CM_FAST_COMPILATION: `yes` + * CM_MLPERF_LOADGEN_SCENARIO: `Offline` + * CM_MLPERF_LOADGEN_MODE: `performance` + * CM_SKIP_PREPROCESS_DATASET: `no` + * CM_SKIP_MODEL_DOWNLOAD: `no` + * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `kilt` + * CM_MLPERF_SKIP_RUN: `no` + * CM_KILT_REPO_URL: `https://github.com/GATEOverflow/kilt-mlperf` + * CM_QAIC_DEVICES: `0` + * kilt_max_wait_abs: `10000` + * verbosity: `0` + * loadgen_trigger_cold_run: `0` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-qualcomm/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/index.md new file mode 100644 index 000000000..9675eae16 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-benchmarks/index.md @@ -0,0 +1,3 @@ +* [app-mlperf-inference-dummy](app-mlperf-inference-dummy/index.md) +* [app-mlperf-inference-intel](app-mlperf-inference-intel/index.md) +* [app-mlperf-inference-qualcomm](app-mlperf-inference-qualcomm/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-loadgen-generic-python/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-loadgen-generic-python/index.md new file mode 100644 index 000000000..1a08adcbe --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-loadgen-generic-python/index.md @@ -0,0 +1,213 @@ +# app-loadgen-generic-python +Automatically generated README for this automation recipe: **app-loadgen-generic-python** + 
+Category: **[Modular MLPerf inference benchmark pipeline](..)** + +License: **Apache 2.0** + +Developers: [Gaz Iqbal](https://www.linkedin.com/in/gaziqbal), [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin) +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-loadgen-generic-python/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-loadgen-generic-python/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "python app generic loadgen" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=python,app,generic,loadgen[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "python app generic loadgen [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'python,app,generic,loadgen', + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return'] > 0: + print(r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "python app generic loadgen[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_cmc` + - ENV variables: + - CM_CUSTOM_MODEL_CMC: `True` + * `_huggingface` + - ENV variables: + - CM_CUSTOM_MODEL_SOURCE: `huggingface` + * `_model-stub.#` + - ENV variables: + - CM_ML_MODEL_STUB: `#` + +
    + + + * Group "**backend**" +
    + Click here to expand this section. + + * **`_onnxruntime`** (default) + - ENV variables: + - CM_MLPERF_BACKEND: `onnxruntime` + * `_pytorch` + - ENV variables: + - CM_MLPERF_BACKEND: `pytorch` + +
    + + + * Group "**device**" +
    + Click here to expand this section. + + * **`_cpu`** (default) + - ENV variables: + - CM_MLPERF_DEVICE: `cpu` + - CM_MLPERF_EXECUTION_PROVIDER: `CPUExecutionProvider` + * `_cuda` + - ENV variables: + - CM_MLPERF_DEVICE: `gpu` + - CM_MLPERF_EXECUTION_PROVIDER: `CUDAExecutionProvider` + +
    + + + * Group "**models**" +
    + Click here to expand this section. + + * `_custom` + - ENV variables: + - CM_MODEL: `custom` + * `_resnet50` + - ENV variables: + - CM_MODEL: `resnet50` + * `_retinanet` + - ENV variables: + - CM_MODEL: `retinanet` + +
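+
+    As a sketch, the `_custom` model variation pairs with the `--modelpath` flag documented below (the ONNX path here is a placeholder, not a file shipped with this script):
+
+    ```bash
+    # Illustrative: benchmark a user-supplied ONNX model with LoadGen on CPU.
+    # Replace /path/to/model.onnx with your own model file.
+    cmr "python app generic loadgen _custom,_onnxruntime,_cpu" --modelpath=/path/to/model.onnx --scenario=Offline --samples=2
+    ```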
    + + + ##### Default variations + + `_cpu,_onnxruntime` +=== "Input Flags" + + + #### Input Flags + + * --**modelpath:** Full path to file with model weights + * --**modelcodepath:** (for PyTorch models) Full path to file with model code and cmc.py + * --**modelcfgpath:** (for PyTorch models) Full path to JSON file with model cfg + * --**modelsamplepath:** (for PyTorch models) Full path to file with model sample in pickle format + * --**ep:** ONNX Execution provider + * --**scenario:** MLPerf LoadGen scenario + * --**samples:** Number of samples (*2*) + * --**runner:** MLPerf runner + * --**execmode:** MLPerf exec mode + * --**output_dir:** MLPerf output directory + * --**concurrency:** MLPerf concurrency + * --**intraop:** MLPerf intra op threads + * --**interop:** MLPerf inter op threads +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--concurrency=value` → `CM_MLPERF_CONCURRENCY=value` + * `--ep=value` → `CM_MLPERF_EXECUTION_PROVIDER=value` + * `--execmode=value` → `CM_MLPERF_EXEC_MODE=value` + * `--interop=value` → `CM_MLPERF_INTEROP=value` + * `--intraop=value` → `CM_MLPERF_INTRAOP=value` + * `--loadgen_duration_sec=value` → `CM_MLPERF_LOADGEN_DURATION_SEC=value` + * `--loadgen_expected_qps=value` → `CM_MLPERF_LOADGEN_EXPECTED_QPS=value` + * `--modelcfg=value` → `CM_ML_MODEL_CFG=value` + * `--modelcfgpath=value` → `CM_ML_MODEL_CFG_WITH_PATH=value` + * `--modelcodepath=value` → `CM_ML_MODEL_CODE_WITH_PATH=value` + * `--modelpath=value` → `CM_ML_MODEL_FILE_WITH_PATH=value` + * `--modelsamplepath=value` → `CM_ML_MODEL_SAMPLE_WITH_PATH=value` + * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` + * `--runner=value` → `CM_MLPERF_RUNNER=value` + * `--samples=value` → `CM_MLPERF_LOADGEN_SAMPLES=value` + * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_MLPERF_EXECUTION_MODE: `parallel` + * CM_MLPERF_BACKEND: `onnxruntime` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-loadgen-generic-python/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/app-loadgen-generic-python/run.bat) +___ +#### Script output +```bash +cmr "python app generic loadgen [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-ctuning-cpp-tflite/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-ctuning-cpp-tflite/index.md new file mode 100644 index 000000000..9e1044451 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-ctuning-cpp-tflite/index.md @@ -0,0 +1,236 @@ +# app-mlperf-inference-ctuning-cpp-tflite +Automatically generated README for this automation recipe: **app-mlperf-inference-ctuning-cpp-tflite** + +Category: **[Modular MLPerf inference benchmark pipeline](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.json)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "app mlperf inference tflite-cpp" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=app,mlperf,inference,tflite-cpp[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "app mlperf inference tflite-cpp [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'app,mlperf,inference,tflite-cpp', + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return'] > 0: + print(r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "app mlperf inference tflite-cpp[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_armnn` + - ENV variables: + - CM_MLPERF_TFLITE_USE_ARMNN: `yes` + - CM_TMP_LINK_LIBS: `tensorflowlite,armnn` + +
    + + + * Group "**backend**" +
    + Click here to expand this section. + + * `_tf` + - ENV variables: + - CM_MLPERF_BACKEND: `tf` + * **`_tflite`** (default) + - ENV variables: + - CM_MLPERF_BACKEND: `tflite` + - CM_MLPERF_BACKEND_VERSION: `master` + - CM_TMP_LINK_LIBS: `tensorflowlite` + - CM_TMP_SRC_FOLDER: `src` + +
    + + + * Group "**device**" +
    + Click here to expand this section. + + * **`_cpu`** (default) + - ENV variables: + - CM_MLPERF_DEVICE: `cpu` + * `_gpu` + - ENV variables: + - CM_MLPERF_DEVICE: `gpu` + - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart` + +
    + + + * Group "**loadgen-scenario**" +
    + Click here to expand this section. + + * **`_singlestream`** (default) + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` + +
    + + + * Group "**model**" +
    + Click here to expand this section. + + * `_efficientnet` + - ENV variables: + - CM_MODEL: `efficientnet` + * `_mobilenet` + - ENV variables: + - CM_MODEL: `mobilenet` + * **`_resnet50`** (default) + - ENV variables: + - CM_MODEL: `resnet50` + +
    + + + * Group "**optimization-target**" +
    + Click here to expand this section. + + * `_use-neon` + - ENV variables: + - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: `using_neon` + - CM_MLPERF_TFLITE_USE_NEON: `1` + * `_use-opencl` + - ENV variables: + - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: `using_opencl` + - CM_MLPERF_TFLITE_USE_OPENCL: `1` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * **`_fp32`** (default) + - ENV variables: + - CM_MLPERF_MODEL_PRECISION: `float32` + * `_int8` + - ENV variables: + - CM_DATASET_COMPRESSED: `on` + - CM_MLPERF_MODEL_PRECISION: `int8` + * `_uint8` + - ENV variables: + - CM_DATASET_COMPRESSED: `on` + - CM_MLPERF_MODEL_PRECISION: `uint8` + +
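+
+    For example (illustrative; the ArmNN and NEON variations only make sense on supported Arm hardware), the ArmNN build with NEON kernels and the quantized model can be requested as:
+
+    ```bash
+    # Illustrative: _armnn (no group), _use-neon (optimization-target) and
+    # _uint8 (precision) are documented in the groups above.
+    cmr "app mlperf inference tflite-cpp _armnn,_use-neon,_uint8" --mode=accuracy
+    ```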
    + + + ##### Default variations + + `_cpu,_fp32,_resnet50,_singlestream,_tflite` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--compressed_dataset=value` → `CM_DATASET_COMPRESSED=value` + * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` + * `--mlperf_conf=value` → `CM_MLPERF_CONF=value` + * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` + * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` + * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` + * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` + * `--user_conf=value` → `CM_MLPERF_USER_CONF=value` + * `--verbose=value` → `CM_VERBOSE=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_DATASET_COMPRESSED: `off` + * CM_DATASET_INPUT_SQUARE_SIDE: `224` + * CM_FAST_COMPILATION: `yes` + * CM_LOADGEN_BUFFER_SIZE: `1024` + * CM_MLPERF_LOADGEN_MODE: `accuracy` + * CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` + * CM_MLPERF_LOADGEN_TRIGGER_COLD_RUN: `0` + * CM_MLPERF_OUTPUT_DIR: `.` + * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `tflite_cpp` + * CM_MLPERF_TFLITE_USE_NEON: `0` + * CM_MLPERF_TFLITE_USE_OPENCL: `0` + * CM_ML_MODEL_GIVEN_CHANNEL_MEANS: `123.68 116.78 103.94` + * CM_ML_MODEL_NORMALIZE_DATA: `0` + * CM_ML_MODEL_SUBTRACT_MEANS: `1` + * CM_VERBOSE: `0` + + + +___ +#### Script output +```bash +cmr "app mlperf inference tflite-cpp [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-cpp/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-cpp/index.md new file mode 100644 index 000000000..a2c71b5f5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-cpp/index.md @@ -0,0 +1,204 @@ +# app-mlperf-inference-mlcommons-cpp +Automatically generated README for this automation recipe: **app-mlperf-inference-mlcommons-cpp** + +Category: **[Modular MLPerf inference benchmark pipeline](..)** + +License: **Apache 2.0** + +Developers: [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin) +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-cpp/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-cpp/_cm.yaml)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "app mlcommons mlperf inference cpp" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=app,mlcommons,mlperf,inference,cpp[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "app mlcommons mlperf inference cpp [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'app,mlcommons,mlperf,inference,cpp', + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return'] > 0: + print(r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "app mlcommons mlperf inference cpp[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**batch-size**" +
    + Click here to expand this section. + + * `_batch-size.#` + - ENV variables: + - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: `#` + +
    + + + * Group "**device**" +
    + Click here to expand this section. + + * **`_cpu`** (default) + - ENV variables: + - CM_MLPERF_DEVICE: `cpu` + * `_cuda` + - ENV variables: + - CM_MLPERF_DEVICE: `gpu` + - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart` + +
    + + + * Group "**framework**" +
    + Click here to expand this section. + + * **`_onnxruntime`** (default) + - ENV variables: + - CM_MLPERF_BACKEND: `onnxruntime` + - CM_MLPERF_BACKEND_LIB_NAMESPEC: `onnxruntime` + * `_pytorch` + - ENV variables: + - CM_MLPERF_BACKEND: `pytorch` + * `_tf` + - ENV variables: + - CM_MLPERF_BACKEND: `tf` + * `_tflite` + - ENV variables: + - CM_MLPERF_BACKEND: `tflite` + * `_tvm-onnx` + - ENV variables: + - CM_MLPERF_BACKEND: `tvm-onnx` + +
    + + + * Group "**loadgen-scenario**" +
    + Click here to expand this section. + + * `_multistream` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream` + * **`_offline`** (default) + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `Offline` + * `_server` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `Server` + * `_singlestream` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` + - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: `1` + +
    + + + * Group "**model**" +
    + Click here to expand this section. + + * **`_resnet50`** (default) + - ENV variables: + - CM_MODEL: `resnet50` + * `_retinanet` + - ENV variables: + - CM_MODEL: `retinanet` + +
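+
+    For instance, a sketch assuming a CUDA-capable machine, combining one variation from each of the device, model and loadgen-scenario groups:
+
+    ```bash
+    # Illustrative: RetinaNet on GPU in the SingleStream scenario with a reduced
+    # query count (--count comes from the input flag mapping below).
+    cmr "app mlcommons mlperf inference cpp _cuda,_retinanet,_singlestream" --count=100
+    ```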
    + + + ##### Default variations + + `_cpu,_offline,_onnxruntime,_resnet50` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` + * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` + * `--mlperf_conf=value` → `CM_MLPERF_CONF=value` + * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` + * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` + * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` + * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` + * `--user_conf=value` → `CM_MLPERF_USER_CONF=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_BATCH_COUNT: `1` + * CM_BATCH_SIZE: `1` + * CM_FAST_COMPILATION: `yes` + * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `cpp` + + + +___ +#### Script output +```bash +cmr "app mlcommons mlperf inference cpp [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-python/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-python/index.md new file mode 100644 index 000000000..4206a8ee0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-python/index.md @@ -0,0 +1,392 @@ +# app-mlperf-inference-mlcommons-python +Automatically generated README for this automation recipe: **app-mlperf-inference-mlcommons-python** + +Category: **[Modular MLPerf inference benchmark pipeline](..)** + +License: **Apache 2.0** + +Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin) +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-python/README-extra.md) + + +--- + +This portable CM script is being developed by the [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md) +to modularize the *python reference implementations* of the [MLPerf inference benchmark](https://github.com/mlcommons/inference) +using the [MLCommons CM automation meta-framework](https://github.com/mlcommons/ck). +The goal is to make it easier to run, optimize and reproduce MLPerf benchmarks +across diverse platforms with continuously changing software and hardware. + +See the current coverage of different models, devices and backends [here](README-extra.md#current-coverage). + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-python/_cm.yaml)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "app vision language mlcommons mlperf inference reference ref" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=app,vision,language,mlcommons,mlperf,inference,reference,ref[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "app vision language mlcommons mlperf inference reference ref [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'app,vision,language,mlcommons,mlperf,inference,reference,ref', + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return'] > 0: + print(r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "app vision language mlcommons mlperf inference reference ref[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_3d-unet` + - ENV variables: + - CM_TMP_IGNORE_MLPERF_QUERY_COUNT: `True` + - CM_MLPERF_MODEL_SKIP_BATCHING: `True` + * `_beam_size.#` + - ENV variables: + - GPTJ_BEAM_SIZE: `#` + * `_bert` + - ENV variables: + - CM_MLPERF_MODEL_SKIP_BATCHING: `True` + * `_dlrm` + - ENV variables: + - CM_MLPERF_MODEL_SKIP_BATCHING: `True` + * `_multistream` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream` + * `_offline` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `Offline` + * `_r2.1_default` + - ENV variables: + - CM_RERUN: `yes` + - CM_SKIP_SYS_UTILS: `yes` + - CM_TEST_QUERY_COUNT: `100` + * `_server` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `Server` + * `_singlestream` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` + +
    + + + * Group "**batch-size**" +
    + Click here to expand this section. + + * `_batch_size.#` + - ENV variables: + - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: `#` + +
    + + + * Group "**device**" +
    + Click here to expand this section. + + * **`_cpu`** (default) + - ENV variables: + - CM_MLPERF_DEVICE: `cpu` + - CUDA_VISIBLE_DEVICES: `` + - USE_CUDA: `False` + - USE_GPU: `False` + * `_cuda` + - ENV variables: + - CM_MLPERF_DEVICE: `gpu` + - USE_CUDA: `True` + - USE_GPU: `True` + * `_rocm` + - ENV variables: + - CM_MLPERF_DEVICE: `rocm` + - USE_GPU: `True` + * `_tpu` + - ENV variables: + - CM_MLPERF_DEVICE: `tpu` + +
    + + + * Group "**framework**" +
    + Click here to expand this section. + + * `_deepsparse` + - ENV variables: + - CM_MLPERF_BACKEND: `deepsparse` + - CM_MLPERF_BACKEND_VERSION: `<<>>` + * `_ncnn` + - ENV variables: + - CM_MLPERF_BACKEND: `ncnn` + - CM_MLPERF_BACKEND_VERSION: `<<>>` + - CM_MLPERF_VISION_DATASET_OPTION: `imagenet_pytorch` + * **`_onnxruntime`** (default) + - ENV variables: + - CM_MLPERF_BACKEND: `onnxruntime` + * `_pytorch` + - ENV variables: + - CM_MLPERF_BACKEND: `pytorch` + - CM_MLPERF_BACKEND_VERSION: `<<>>` + * `_ray` + - ENV variables: + - CM_MLPERF_BACKEND: `ray` + - CM_MLPERF_BACKEND_VERSION: `<<>>` + * `_tf` + - Aliases: `_tensorflow` + - ENV variables: + - CM_MLPERF_BACKEND: `tf` + - CM_MLPERF_BACKEND_VERSION: `<<>>` + * `_tflite` + - ENV variables: + - CM_MLPERF_BACKEND: `tflite` + - CM_MLPERF_BACKEND_VERSION: `<<>>` + - CM_MLPERF_VISION_DATASET_OPTION: `imagenet_tflite_tpu` + * `_tvm-onnx` + - ENV variables: + - CM_MLPERF_BACKEND: `tvm-onnx` + - CM_MLPERF_BACKEND_VERSION: `<<>>` + * `_tvm-pytorch` + - ENV variables: + - CM_MLPERF_BACKEND: `tvm-pytorch` + - CM_MLPERF_BACKEND_VERSION: `<<>>` + - CM_PREPROCESS_PYTORCH: `yes` + - MLPERF_TVM_TORCH_QUANTIZED_ENGINE: `qnnpack` + * `_tvm-tflite` + - ENV variables: + - CM_MLPERF_BACKEND: `tvm-tflite` + - CM_MLPERF_BACKEND_VERSION: `<<>>` + +
    + + + * Group "**implementation**" +
    + Click here to expand this section. + + * **`_python`** (default) + - ENV variables: + - CM_MLPERF_PYTHON: `yes` + - CM_MLPERF_IMPLEMENTATION: `reference` + +
    + + + * Group "**models**" +
    + Click here to expand this section. + + * `_3d-unet-99` + - ENV variables: + - CM_MODEL: `3d-unet-99` + * `_3d-unet-99.9` + - ENV variables: + - CM_MODEL: `3d-unet-99.9` + * `_bert-99` + - ENV variables: + - CM_MODEL: `bert-99` + * `_bert-99.9` + - ENV variables: + - CM_MODEL: `bert-99.9` + * `_dlrm-99` + - ENV variables: + - CM_MODEL: `dlrm-99` + * `_dlrm-99.9` + - ENV variables: + - CM_MODEL: `dlrm-99.9` + * `_gptj-99` + - ENV variables: + - CM_MODEL: `gptj-99` + * `_gptj-99.9` + - ENV variables: + - CM_MODEL: `gptj-99.9` + * `_llama2-70b-99` + - ENV variables: + - CM_MODEL: `llama2-70b-99` + * `_llama2-70b-99.9` + - ENV variables: + - CM_MODEL: `llama2-70b-99.9` + * **`_resnet50`** (default) + - ENV variables: + - CM_MODEL: `resnet50` + - CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: `yes` + * `_retinanet` + - ENV variables: + - CM_MODEL: `retinanet` + - CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: `yes` + - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: `1` + * `_rnnt` + - ENV variables: + - CM_MODEL: `rnnt` + - CM_MLPERF_MODEL_SKIP_BATCHING: `True` + - CM_TMP_IGNORE_MLPERF_QUERY_COUNT: `True` + * `_sdxl` + - ENV variables: + - CM_MODEL: `stable-diffusion-xl` + - CM_NUM_THREADS: `1` + +
    + + + * Group "**network**" +
    + Click here to expand this section. + + * `_network-lon` + - ENV variables: + - CM_NETWORK_LOADGEN: `lon` + - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: `network_loadgen` + * `_network-sut` + - ENV variables: + - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: `network_sut` + - CM_NETWORK_LOADGEN: `sut` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * `_bfloat16` + - ENV variables: + - CM_MLPERF_QUANTIZATION: `False` + - CM_MLPERF_MODEL_PRECISION: `bfloat16` + * `_float16` + - ENV variables: + - CM_MLPERF_QUANTIZATION: `False` + - CM_MLPERF_MODEL_PRECISION: `float16` + * **`_fp32`** (default) + - ENV variables: + - CM_MLPERF_QUANTIZATION: `False` + - CM_MLPERF_MODEL_PRECISION: `float32` + * `_int8` + - Aliases: `_quantized` + - ENV variables: + - CM_MLPERF_QUANTIZATION: `True` + - CM_MLPERF_MODEL_PRECISION: `int8` + +
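+
+    As an illustration (dependencies are resolved by CM at run time, and not every model/backend/precision combination exists in the reference code), a quantized BERT accuracy run could be composed as:
+
+    ```bash
+    # Illustrative: _bert-99 (models), _deepsparse (framework), _int8 (precision)
+    # and _offline (a no-group scenario variation) are documented above;
+    # --test_query_count comes from the input flag mapping below.
+    cmr "app vision language mlcommons mlperf inference reference ref _bert-99,_deepsparse,_int8,_offline" --mode=accuracy --test_query_count=10
+    ```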
    + + + ##### Default variations + + `_cpu,_fp32,_onnxruntime,_python,_resnet50` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value` + * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` + * `--dataset=value` → `CM_MLPERF_VISION_DATASET_OPTION=value` + * `--dataset_args=value` → `CM_MLPERF_EXTRA_DATASET_ARGS=value` + * `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value` + * `--hw_name=value` → `CM_HW_NAME=value` + * `--imagenet_path=value` → `IMAGENET_PATH=value` + * `--max_amps=value` → `CM_MLPERF_POWER_MAX_AMPS=value` + * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` + * `--max_volts=value` → `CM_MLPERF_POWER_MAX_VOLTS=value` + * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` + * `--model=value` → `CM_MLPERF_CUSTOM_MODEL_PATH=value` + * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` + * `--network=value` → `CM_NETWORK_LOADGEN=value` + * `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value` + * `--num_threads=value` → `CM_NUM_THREADS=value` + * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` + * `--output_dir=value` → `OUTPUT_BASE_DIR=value` + * `--power=value` → `CM_MLPERF_POWER=value` + * `--power_server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value` + * `--regenerate_files=value` → `CM_REGENERATE_MEASURE_FILES=value` + * `--rerun=value` → `CM_RERUN=value` + * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` + * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` + * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` + * `--sut_servers=value` → `CM_NETWORK_LOADGEN_SUT_SERVERS=value` + * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` + * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` + * `--test_query_count=value` → `CM_TEST_QUERY_COUNT=value` + * `--threads=value` → `CM_NUM_THREADS=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_MLPERF_LOADGEN_MODE: `accuracy` + * CM_MLPERF_LOADGEN_SCENARIO: `Offline` + * CM_OUTPUT_FOLDER_NAME: `test_results` + * CM_MLPERF_RUN_STYLE: `test` + * CM_TEST_QUERY_COUNT: `10` + * CM_MLPERF_QUANTIZATION: `False` + * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `reference` + * CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: `` + + + +___ +#### Script output +```bash +cmr "app vision language mlcommons mlperf inference reference ref [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference/index.md new file mode 100644 index 000000000..aefdce10c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference/index.md @@ -0,0 +1,488 @@ +# app-mlperf-inference +Automatically generated README for this automation recipe: **app-mlperf-inference** + +Category: **[Modular MLPerf inference benchmark pipeline](..)** + +License: **Apache 2.0** + +Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin) +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference/README-extra.md) + + +--- + +This CM script provides a unified interface to prepare and run a modular version of the [MLPerf inference benchmark](https://arxiv.org/abs/1911.02549) +across diverse ML models, data sets, frameworks, libraries, run-time systems and platforms +using the [cross-platform automation meta-framework (MLCommons CM)](https://github.com/mlcommons/ck). + +It is assembled from reusable and interoperable [CM scripts for DevOps and MLOps](../list_of_scripts.md) +being developed by the [open MLCommons taskforce on automation and reproducibility](../mlperf-education-workgroup.md). + +It is a higher-level wrapper to several other CM scripts modularizing the MLPerf inference benchmark: +* [Reference Python implementation](../app-mlperf-inference-reference) +* [Universal C++ implementation](../app-mlperf-inference-cpp) +* [TFLite C++ implementation](../app-mlperf-inference-tflite-cpp) +* [NVidia optimized implementation](app-mlperf-inference-nvidia) + +See [this SCC'23 tutorial](https://github.com/mlcommons/ck/blob/master/docs/tutorials/sc22-scc-mlperf.md) +to use this script to run a reference (unoptimized) Python implementation of the MLPerf object detection benchmark +with RetinaNet model, Open Images dataset, ONNX runtime and CPU target. + +See this [CM script](../run-mlperf-inference-app) to automate and validate your MLPerf inference submission. + +Get in touch with the [open taskforce on automation and reproducibility at MLCommons](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md) +if you need help with your submission or if you would like to participate in further modularization of MLPerf +and collaborative design space exploration and optimization of ML Systems. + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference/_cm.yaml)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "app vision language mlcommons mlperf inference generic" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=app,vision,language,mlcommons,mlperf,inference,generic[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "app vision language mlcommons mlperf inference generic [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'app,vision,language,mlcommons,mlperf,inference,generic', + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return'] > 0: + print(r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "app vision language mlcommons mlperf inference generic[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**implementation**" +
    + Click here to expand this section. + + * `_cpp` + - Aliases: `_mil,_mlcommons-cpp` + - ENV variables: + - CM_MLPERF_CPP: `yes` + - CM_MLPERF_IMPLEMENTATION: `mlcommons_cpp` + - CM_IMAGENET_ACCURACY_DTYPE: `float32` + - CM_OPENIMAGES_ACCURACY_DTYPE: `float32` + * `_intel-original` + - Aliases: `_intel` + - ENV variables: + - CM_MLPERF_IMPLEMENTATION: `intel` + * `_kilt` + - Aliases: `_qualcomm` + - ENV variables: + - CM_MLPERF_IMPLEMENTATION: `qualcomm` + * `_nvidia-original` + - Aliases: `_nvidia` + - ENV variables: + - CM_MLPERF_IMPLEMENTATION: `nvidia` + - CM_SQUAD_ACCURACY_DTYPE: `float16` + - CM_IMAGENET_ACCURACY_DTYPE: `int32` + - CM_CNNDM_ACCURACY_DTYPE: `int32` + - CM_LIBRISPEECH_ACCURACY_DTYPE: `int8` + * **`_reference`** (default) + - Aliases: `_mlcommons-python,_python` + - ENV variables: + - CM_MLPERF_PYTHON: `yes` + - CM_MLPERF_IMPLEMENTATION: `mlcommons_python` + - CM_SQUAD_ACCURACY_DTYPE: `float32` + - CM_IMAGENET_ACCURACY_DTYPE: `float32` + - CM_OPENIMAGES_ACCURACY_DTYPE: `float32` + - CM_LIBRISPEECH_ACCURACY_DTYPE: `float32` + - CM_CNNDM_ACCURACY_DTYPE: `int32` + * `_tflite-cpp` + - Aliases: `_ctuning-cpp-tflite` + - ENV variables: + - CM_MLPERF_TFLITE_CPP: `yes` + - CM_MLPERF_CPP: `yes` + - CM_MLPERF_IMPLEMENTATION: `ctuning_cpp_tflite` + - CM_IMAGENET_ACCURACY_DTYPE: `float32` + +
    + + + * Group "**backend**" +
    + Click here to expand this section. + + * `_deepsparse` + - ENV variables: + - CM_MLPERF_BACKEND: `deepsparse` + * `_glow` + - ENV variables: + - CM_MLPERF_BACKEND: `glow` + * `_ncnn` + - ENV variables: + - CM_MLPERF_BACKEND: `ncnn` + * `_onnxruntime` + - ENV variables: + - CM_MLPERF_BACKEND: `onnxruntime` + * `_pytorch` + - ENV variables: + - CM_MLPERF_BACKEND: `pytorch` + * `_ray` + - ENV variables: + - CM_MLPERF_BACKEND: `ray` + * `_tensorrt` + - ENV variables: + - CM_MLPERF_BACKEND: `tensorrt` + * `_tf` + - ENV variables: + - CM_MLPERF_BACKEND: `tf` + * `_tflite` + - ENV variables: + - CM_MLPERF_BACKEND: `tflite` + * `_tvm-onnx` + - ENV variables: + - CM_MLPERF_BACKEND: `tvm-onnx` + * `_tvm-pytorch` + - ENV variables: + - CM_MLPERF_BACKEND: `tvm-pytorch` + * `_tvm-tflite` + - ENV variables: + - CM_MLPERF_BACKEND: `tvm-tflite` + +
    + + + * Group "**device**" +
    + Click here to expand this section. + + * **`_cpu`** (default) + - ENV variables: + - CM_MLPERF_DEVICE: `cpu` + * `_cuda` + - ENV variables: + - CM_MLPERF_DEVICE: `gpu` + * `_qaic` + - ENV variables: + - CM_MLPERF_DEVICE: `qaic` + * `_rocm` + - ENV variables: + - CM_MLPERF_DEVICE: `rocm` + * `_tpu` + - ENV variables: + - CM_MLPERF_DEVICE: `tpu` + +
    + + + * Group "**model**" +
    + Click here to expand this section. + + * `_3d-unet-99` + - ENV variables: + - CM_MODEL: `3d-unet-99` + * `_3d-unet-99.9` + - ENV variables: + - CM_MODEL: `3d-unet-99.9` + * `_bert-99` + - ENV variables: + - CM_MODEL: `bert-99` + * `_bert-99.9` + - ENV variables: + - CM_MODEL: `bert-99.9` + * `_dlrm-v2-99` + - ENV variables: + - CM_MODEL: `dlrm-v2-99` + * `_dlrm-v2-99.9` + - ENV variables: + - CM_MODEL: `dlrm-v2-99.9` + * `_efficientnet` + - ENV variables: + - CM_MODEL: `efficientnet` + * `_gptj-99` + - ENV variables: + - CM_MODEL: `gptj-99` + * `_gptj-99.9` + - ENV variables: + - CM_MODEL: `gptj-99.9` + * `_llama2-70b-99` + - ENV variables: + - CM_MODEL: `llama2-70b-99` + * `_llama2-70b-99.9` + - ENV variables: + - CM_MODEL: `llama2-70b-99.9` + * `_mobilenet` + - ENV variables: + - CM_MODEL: `mobilenet` + * **`_resnet50`** (default) + - ENV variables: + - CM_MODEL: `resnet50` + * `_retinanet` + - ENV variables: + - CM_MODEL: `retinanet` + * `_rnnt` + - ENV variables: + - CM_MODEL: `rnnt` + * `_sdxl` + - ENV variables: + - CM_MODEL: `stable-diffusion-xl` + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * `_bfloat16` + - ENV variables: + - CM_MLPERF_QUANTIZATION: `False` + - CM_MLPERF_MODEL_PRECISION: `float32` + * `_float16` + - ENV variables: + - CM_MLPERF_QUANTIZATION: `False` + - CM_MLPERF_MODEL_PRECISION: `float32` + * **`_float32`** (default) + - Aliases: `_fp32` + - ENV variables: + - CM_MLPERF_QUANTIZATION: `False` + - CM_MLPERF_MODEL_PRECISION: `float32` + * `_int4` + - ENV variables: + - CM_MLPERF_QUANTIZATION: `True` + - CM_MLPERF_MODEL_PRECISION: `int4` + * `_int8` + - Aliases: `_quantized` + - ENV variables: + - CM_MLPERF_QUANTIZATION: `True` + - CM_MLPERF_MODEL_PRECISION: `int8` + * `_uint8` + - ENV variables: + - CM_MLPERF_QUANTIZATION: `True` + - CM_MLPERF_MODEL_PRECISION: `uint8` + +
    + + + * Group "**execution-mode**" +
    + Click here to expand this section. + + * `_fast` + - ENV variables: + - CM_FAST_FACTOR: `5` + - CM_OUTPUT_FOLDER_NAME: `fast_results` + - CM_MLPERF_RUN_STYLE: `fast` + * **`_test`** (default) + - ENV variables: + - CM_OUTPUT_FOLDER_NAME: `test_results` + - CM_MLPERF_RUN_STYLE: `test` + * `_valid` + - ENV variables: + - CM_OUTPUT_FOLDER_NAME: `valid_results` + - CM_MLPERF_RUN_STYLE: `valid` + +
    + + + * Group "**reproducibility**" +
    + Click here to expand this section. + + * `_r2.1_default` + - ENV variables: + - CM_SKIP_SYS_UTILS: `yes` + - CM_TEST_QUERY_COUNT: `100` + * `_r3.0_default` + - ENV variables: + - CM_SKIP_SYS_UTILS: `yes` + * `_r3.1_default` + * `_r4.0_default` + - ENV variables: + - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: `/opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl` + * `_r4.1_default` + - ENV variables: + - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: `/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl` + +
    + + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_power` + - ENV variables: + - CM_MLPERF_POWER: `yes` + - CM_SYSTEM_POWER: `yes` + +
    + + + * Group "**batch_size**" +
    + Click here to expand this section. + + * `_batch_size.#` + - ENV variables: + - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: `#` + +
    + + + * Group "**loadgen-scenario**" +
    + Click here to expand this section. + + * `_multistream` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream` + * **`_offline`** (default) + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `Offline` + * `_server` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `Server` + * `_singlestream` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` + +
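+
+    As an illustration, variations from different groups combine into a single tag string; the combination below is hypothetical (note that `_fp32` is the documented alias of the default `_float32`, and `_batch_size.8` expands the wildcard into `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=8`):
+
+    ```bash
+    # Quick functional check: ONNX Runtime backend on CPU, ResNet-50, Offline scenario, test mode
+    cmr "app vision language mlcommons mlperf inference generic _onnxruntime,_cpu,_resnet50,_fp32,_test,_offline,_batch_size.8" --test_query_count=100 -j
+    ```
+
+    Groups left unspecified fall back to the default variations listed below.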
    + + + ##### Default variations + + `_cpu,_float32,_offline,_reference,_resnet50,_test` +=== "Input Flags" + + + #### Input Flags + + * --**scenario:** MLPerf inference scenario {Offline,Server,SingleStream,MultiStream} (*Offline*) + * --**mode:** MLPerf inference mode {performance,accuracy} (*accuracy*) + * --**test_query_count:** Specifies the number of samples to be processed during a test run + * --**target_qps:** Target QPS + * --**target_latency:** Target Latency + * --**max_batchsize:** Maximum batchsize to be used + * --**num_threads:** Number of CPU threads to launch the application with + * --**hw_name:** Valid value - any system description which has a config file (under same name) defined [here](https://github.com/mlcommons/cm4mlops/tree/main/script/get-configs-sut-mlperf-inference/configs) + * --**output_dir:** Location where the outputs are produced + * --**rerun:** Redo the run even if previous run files exist (*True*) + * --**regenerate_files:** Regenerates measurement files including accuracy.txt files even if a previous run exists. This option is redundant if `--rerun` is used + * --**adr.python.name:** Python virtual environment name (optional) (*mlperf*) + * --**adr.python.version_min:** Minimal Python version (*3.8*) + * --**adr.python.version:** Force Python version (must have all system deps) + * --**adr.compiler.tags:** Compiler for loadgen (*gcc*) + * --**adr.inference-src-loadgen.env.CM_GIT_URL:** Git URL for MLPerf inference sources to build LoadGen (to enable non-reference implementations) + * --**adr.inference-src.env.CM_GIT_URL:** Git URL for MLPerf inference sources to run benchmarks (to enable non-reference implementations) + * --**quiet:** Quiet run (select default values for all questions) (*False*) + * --**readme:** Generate README with the reproducibility report + * --**debug:** Debug MLPerf script +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value` + * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` + * `--debug=value` → `CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM=value` + * `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value` + * `--gpu_name=value` → `CM_NVIDIA_GPU_NAME=value` + * `--hw_name=value` → `CM_HW_NAME=value` + * `--imagenet_path=value` → `IMAGENET_PATH=value` + * `--max_amps=value` → `CM_MLPERF_POWER_MAX_AMPS=value` + * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` + * `--max_volts=value` → `CM_MLPERF_POWER_MAX_VOLTS=value` + * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` + * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` + * `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value` + * `--num_threads=value` → `CM_NUM_THREADS=value` + * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` + * `--output_dir=value` → `OUTPUT_BASE_DIR=value` + * `--power=value` → `CM_MLPERF_POWER=value` + * `--power_server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value` + * `--readme=value` → `CM_MLPERF_README=value` + * `--regenerate_files=value` → `CM_REGENERATE_MEASURE_FILES=value` + * `--rerun=value` → `CM_RERUN=value` + * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` + * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` + * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` + * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` + * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` + 
* `--test_query_count=value` → `CM_TEST_QUERY_COUNT=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_MLPERF_LOADGEN_MODE: `accuracy` + * CM_MLPERF_LOADGEN_SCENARIO: `Offline` + * CM_OUTPUT_FOLDER_NAME: `test_results` + * CM_MLPERF_RUN_STYLE: `test` + * CM_TEST_QUERY_COUNT: `10` + * CM_MLPERF_QUANTIZATION: `False` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "app vision language mlcommons mlperf inference generic [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/benchmark-program-mlperf/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/benchmark-program-mlperf/index.md new file mode 100644 index 000000000..482ef2c01 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/benchmark-program-mlperf/index.md @@ -0,0 +1,100 @@ +# benchmark-program-mlperf +Automatically generated README for this automation recipe: **benchmark-program-mlperf** + +Category: **[Modular MLPerf inference benchmark pipeline](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program-mlperf/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "mlperf benchmark-mlperf" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=mlperf,benchmark-mlperf[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "mlperf benchmark-mlperf [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'mlperf,benchmark-mlperf' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "mlperf benchmark-mlperf[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**power-mode**" +
    + Click here to expand this section. + + * **`_no-power`** (default) + * `_power` + - ENV variables: + - CM_MLPERF_POWER: `yes` + +
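+
+    For example, power measurement is enabled by selecting the non-default member of this group (a configured power analyzer is assumed):
+
+    ```bash
+    # Overrides the default _no-power variation and sets CM_MLPERF_POWER=yes
+    cmr "mlperf benchmark-mlperf _power"
+    ```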
    + + + ##### Default variations + + `_no-power` + +___ +#### Script output +```bash +cmr "mlperf benchmark-mlperf [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/index.md new file mode 100644 index 000000000..01e67ecc5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/index.md @@ -0,0 +1,7 @@ +* [app-loadgen-generic-python](app-loadgen-generic-python/index.md) +* [app-mlperf-inference](app-mlperf-inference/index.md) +* [app-mlperf-inference-ctuning-cpp-tflite](app-mlperf-inference-ctuning-cpp-tflite/index.md) +* [app-mlperf-inference-mlcommons-cpp](app-mlperf-inference-mlcommons-cpp/index.md) +* [app-mlperf-inference-mlcommons-python](app-mlperf-inference-mlcommons-python/index.md) +* [benchmark-program-mlperf](benchmark-program-mlperf/index.md) +* [run-mlperf-inference-app](run-mlperf-inference-app/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/run-mlperf-inference-app/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/run-mlperf-inference-app/index.md new file mode 100644 index 000000000..c05c90c38 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/run-mlperf-inference-app/index.md @@ -0,0 +1,326 @@ +# run-mlperf-inference-app +Automatically generated README for this automation recipe: **run-mlperf-inference-app** + +Category: **[Modular MLPerf inference benchmark pipeline](..)** + +License: **Apache 2.0** + +Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin) +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-app/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-app/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "run-mlperf,inference" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=run-mlperf,inference[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "run-mlperf,inference [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'run-mlperf,inference' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "run-mlperf,inference[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_all-scenarios` + - ENV variables: + - CM_MLPERF_LOADGEN_ALL_SCENARIOS: `yes` + * `_compliance` + - ENV variables: + - CM_MLPERF_LOADGEN_COMPLIANCE: `yes` + * `_dashboard` + - ENV variables: + - CM_MLPERF_DASHBOARD: `on` + +
    + + + * Group "**benchmark-version**" +
    + Click here to expand this section. + + * `_r2.1` + - ENV variables: + - CM_MLPERF_INFERENCE_VERSION: `2.1` + - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: `r2.1_default` + * `_r3.0` + - ENV variables: + - CM_MLPERF_INFERENCE_VERSION: `3.0` + - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: `r3.0_default` + * `_r3.1` + - ENV variables: + - CM_MLPERF_INFERENCE_VERSION: `3.1` + - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: `r3.1_default` + * `_r4.0` + - ENV variables: + - CM_MLPERF_INFERENCE_VERSION: `4.0` + - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: `r4.0_default` + * `_r4.1` + - ENV variables: + - CM_MLPERF_INFERENCE_VERSION: `4.1` + - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: `r4.1_default` + +
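+
+    For example, to pin the defaults of a specific benchmark round (the round is chosen arbitrarily here):
+
+    ```bash
+    # Sets CM_MLPERF_INFERENCE_VERSION=4.1 and selects the r4.1_default settings
+    cmr "run-mlperf,inference _r4.1"
+    ```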
    + + + * Group "**mode**" +
    + Click here to expand this section. + + * `_all-modes` + - ENV variables: + - CM_MLPERF_LOADGEN_ALL_MODES: `yes` + +
    + + + * Group "**submission-generation**" +
    + Click here to expand this section. + + * `_accuracy-only` + - ENV variables: + - CM_MLPERF_LOADGEN_MODE: `accuracy` + - CM_MLPERF_SUBMISSION_RUN: `yes` + - CM_RUN_MLPERF_ACCURACY: `on` + - CM_RUN_SUBMISSION_CHECKER: `no` + * `_find-performance` + - ENV variables: + - CM_MLPERF_FIND_PERFORMANCE_MODE: `yes` + - CM_MLPERF_LOADGEN_ALL_MODES: `no` + - CM_MLPERF_LOADGEN_MODE: `performance` + - CM_MLPERF_RESULT_PUSH_TO_GITHUB: `False` + * **`_performance-and-accuracy`** (default) + * `_performance-only` + - ENV variables: + - CM_MLPERF_LOADGEN_MODE: `performance` + - CM_MLPERF_SUBMISSION_RUN: `yes` + - CM_RUN_SUBMISSION_CHECKER: `no` + * `_populate-readme` + - ENV variables: + - CM_MLPERF_README: `yes` + - CM_MLPERF_SUBMISSION_RUN: `yes` + - CM_RUN_SUBMISSION_CHECKER: `no` + * `_submission` + - ENV variables: + - CM_MLPERF_LOADGEN_COMPLIANCE: `yes` + - CM_MLPERF_SUBMISSION_RUN: `yes` + - CM_RUN_MLPERF_ACCURACY: `on` + - CM_RUN_SUBMISSION_CHECKER: `yes` + - CM_TAR_SUBMISSION_DIR: `yes` + +
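+
+    One plausible flow with these variations, sketched under the assumption that a performance estimate is wanted before a submission is generated:
+
+    ```bash
+    # 1) Find an achievable performance target (performance mode only, no submission checker)
+    cmr "run-mlperf,inference _find-performance"
+    # 2) Generate the submission, enabling compliance runs, the submission checker and tarring
+    cmr "run-mlperf,inference _submission"
+    ```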
    + + + * Group "**submission-generation-style**" +
    + Click here to expand this section. + + * `_full` + - ENV variables: + - CM_MLPERF_SUBMISSION_GENERATION_STYLE: `full` + - CM_MLPERF_SKIP_SUBMISSION_GENERATION: `yes` + * **`_short`** (default) + - ENV variables: + - CM_MLPERF_SUBMISSION_GENERATION_STYLE: `short` + +
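+
+    Combining the two submission-related groups, an illustrative full-style submission run (the flag values shown are simply the documented defaults made explicit):
+
+    ```bash
+    cmr "run-mlperf,inference _submission,_full" --division=open --category=edge
+    ```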
    + + + ##### Default variations + + `_performance-and-accuracy,_short` +=== "Input Flags" + + + #### Input Flags + + * --**division:** MLPerf division {open,closed} (*open*) + * --**category:** MLPerf category {edge,datacenter,network} (*edge*) + * --**device:** MLPerf device {cpu,cuda,rocm,qaic} (*cpu*) + * --**model:** MLPerf model {resnet50,retinanet,bert-99,bert-99.9,3d-unet-99,3d-unet-99.9,rnnt,dlrm-v2-99,dlrm-v2-99.9,gptj-99,gptj-99.9,sdxl,llama2-70b-99,llama2-70b-99.9,mobilenet,efficientnet} (*resnet50*) + * --**precision:** MLPerf model precision {float32,float16,bfloat16,int8,uint8} + * --**implementation:** MLPerf implementation {mlcommons-python,mlcommons-cpp,nvidia,intel,qualcomm,ctuning-cpp-tflite} (*mlcommons-python*) + * --**backend:** MLPerf framework (backend) {onnxruntime,tf,pytorch,deepsparse,tensorrt,glow,tvm-onnx} (*onnxruntime*) + * --**scenario:** MLPerf scenario {Offline,Server,SingleStream,MultiStream} (*Offline*) + * --**mode:** MLPerf benchmark mode {,accuracy,performance} + * --**execution_mode:** MLPerf execution mode {test,fast,valid} (*test*) + * --**sut:** SUT configuration (if known) + * --**submitter:** Submitter name (without space) (*CTuning*) + * --**results_dir:** Folder path to store results (defaults to the current working directory) + * --**submission_dir:** Folder path to store MLPerf submission tree + * --**adr.compiler.tags:** Compiler for loadgen and any C/C++ part of implementation + * --**adr.inference-src-loadgen.env.CM_GIT_URL:** Git URL for MLPerf inference sources to build LoadGen (to enable non-reference implementations) + * --**adr.inference-src.env.CM_GIT_URL:** Git URL for MLPerf inference sources to run benchmarks (to enable non-reference implementations) + * --**adr.mlperf-inference-implementation.max_batchsize:** Maximum batchsize to be used + * --**adr.mlperf-inference-implementation.num_threads:** Number of threads (reference & C++ implementation only) + * --**adr.python.name:** Python virtual environment name (optional) + * --**adr.python.version:** Force Python version (must have all system deps) + * --**adr.python.version_min:** Minimal Python version (*3.8*) + * --**power:** Measure power {yes,no} (*no*) + * --**adr.mlperf-power-client.power_server:** MLPerf Power server IP address (*192.168.0.15*) + * --**adr.mlperf-power-client.port:** MLPerf Power server port (*4950*) + * --**clean:** Clean run (*False*) + * --**compliance:** Whether to run compliance tests (applicable only for closed division) {yes,no} (*no*) + * --**dashboard_wb_project:** W&B dashboard project (*cm-mlperf-dse-testing*) + * --**dashboard_wb_user:** W&B dashboard user (*cmind*) + * --**hw_name:** MLPerf hardware name (for example "gcp.c3_standard_8", "nvidia_orin", "lenovo_p14s_gen_4_windows_11", "macbook_pro_m1_2", "thundercomm_rb6" ...) 
+ * --**multistream_target_latency:** Set MultiStream target latency + * --**offline_target_qps:** Set LoadGen Offline target QPS + * --**quiet:** Quiet run (select default values for all questions) (*True*) + * --**server_target_qps:** Set Server target QPS + * --**singlestream_target_latency:** Set SingleStream target latency + * --**target_latency:** Set Target latency + * --**target_qps:** Set LoadGen target QPS + * --**j:** Print results dictionary to console at the end of the run (*False*) + * --**repro:** Record input/output/state/info files to make it easier to reproduce results (*False*) + * --**time:** Print script execution time at the end of the run (*True*) + * --**debug:** Debug this script (*False*) +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--backend=value` → `CM_MLPERF_BACKEND=value` + * `--batch_size=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` + * `--beam_size=value` → `GPTJ_BEAM_SIZE=value` + * `--category=value` → `CM_MLPERF_SUBMISSION_SYSTEM_TYPE=value` + * `--clean=value` → `CM_MLPERF_CLEAN_ALL=value` + * `--compliance=value` → `CM_MLPERF_LOADGEN_COMPLIANCE=value` + * `--dashboard_wb_project=value` → `CM_MLPERF_DASHBOARD_WANDB_PROJECT=value` + * `--dashboard_wb_user=value` → `CM_MLPERF_DASHBOARD_WANDB_USER=value` + * `--debug=value` → `CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM=value` + * `--device=value` → `CM_MLPERF_DEVICE=value` + * `--division=value` → `CM_MLPERF_SUBMISSION_DIVISION=value` + * `--docker=value` → `CM_MLPERF_USE_DOCKER=value` + * `--dump_version_info=value` → `CM_DUMP_VERSION_INFO=value` + * `--execution_mode=value` → `CM_MLPERF_RUN_STYLE=value` + * `--find_performance=value` → `CM_MLPERF_FIND_PERFORMANCE_MODE=value` + * `--gpu_name=value` → `CM_NVIDIA_GPU_NAME=value` + * `--hw_name=value` → `CM_HW_NAME=value` + * `--hw_notes_extra=value` → `CM_MLPERF_SUT_SW_NOTES_EXTRA=value` + * `--imagenet_path=value` → `IMAGENET_PATH=value` + * `--implementation=value` → `CM_MLPERF_IMPLEMENTATION=value` + * `--lang=value` → `CM_MLPERF_IMPLEMENTATION=value` + * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` + * `--model=value` → `CM_MLPERF_MODEL=value` + * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` + * `--network=value` → `CM_NETWORK_LOADGEN=value` + * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` + * `--output_dir=value` → `OUTPUT_BASE_DIR=value` + * `--output_summary=value` → `MLPERF_INFERENCE_SUBMISSION_SUMMARY=value` + * `--output_tar=value` → `MLPERF_INFERENCE_SUBMISSION_TAR_FILE=value` + * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` + * `--power=value` → `CM_SYSTEM_POWER=value` + * `--precision=value` → `CM_MLPERF_MODEL_PRECISION=value` + * `--preprocess_submission=value` → `CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR=value` + * `--push_to_github=value` → `CM_MLPERF_RESULT_PUSH_TO_GITHUB=value` + * `--readme=value` → `CM_MLPERF_README=value` + * `--regenerate_accuracy_file=value` → `CM_MLPERF_REGENERATE_ACCURACY_FILE=value` + * `--regenerate_files=value` → `CM_REGENERATE_MEASURE_FILES=value` + * `--rerun=value` → `CM_RERUN=value` + * `--results_dir=value` → `OUTPUT_BASE_DIR=value` + * `--results_git_url=value` → `CM_MLPERF_RESULTS_GIT_REPO_URL=value` + * `--run_checker=value` → `CM_RUN_SUBMISSION_CHECKER=value` + * `--run_style=value` → `CM_MLPERF_RUN_STYLE=value` + * `--save_console_log=value` → `CM_SAVE_CONSOLE_LOG=value` + * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` + * 
`--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` + * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` + * `--skip_submission_generation=value` → `CM_MLPERF_SKIP_SUBMISSION_GENERATION=value` + * `--skip_truncation=value` → `CM_SKIP_TRUNCATE_ACCURACY=value` + * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` + * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` + * `--sut=value` → `CM_MLPERF_INFERENCE_SUT_VARIATION=value` + * `--sut_servers=value` → `CM_NETWORK_LOADGEN_SUT_SERVERS=value` + * `--sw_notes_extra=value` → `CM_MLPERF_SUT_SW_NOTES_EXTRA=value` + * `--system_type=value` → `CM_MLPERF_SUBMISSION_SYSTEM_TYPE=value` + * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` + * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` + * `--test_query_count=value` → `CM_TEST_QUERY_COUNT=value` + * `--threads=value` → `CM_NUM_THREADS=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_MLPERF_IMPLEMENTATION: `reference` + * CM_MLPERF_MODEL: `resnet50` + * CM_MLPERF_RUN_STYLE: `test` + + +#### Versions +* `master` +* `r2.1` + +___ +#### Script output +```bash +cmr "run-mlperf,inference [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-nvidia/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-nvidia/index.md new file mode 100644 index 000000000..676d85570 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-nvidia/index.md @@ -0,0 +1,165 @@ +# app-mlperf-training-nvidia +Automatically generated README for this automation recipe: **app-mlperf-training-nvidia** + +Category: **[Modular MLPerf training benchmark pipeline](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-nvidia/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "app vision language mlcommons mlperf training nvidia" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=app,vision,language,mlcommons,mlperf,training,nvidia[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "app vision language mlcommons mlperf training nvidia [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'app,vision,language,mlcommons,mlperf,training,nvidia' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "app vision language mlcommons mlperf training nvidia[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_bert` + - ENV variables: + - CM_MLPERF_MODEL: `bert` + +
    + + + * Group "**device**" +
    + Click here to expand this section. + + * **`_cuda`** (default) + - ENV variables: + - CM_MLPERF_DEVICE: `cuda` + - USE_CUDA: `True` + * `_tpu` + - ENV variables: + - CM_MLPERF_DEVICE: `tpu` + - CUDA_VISIBLE_DEVICES: `` + - USE_CUDA: `False` + +
    + + + * Group "**framework**" +
    + Click here to expand this section. + + * `_pytorch` + - ENV variables: + - CM_MLPERF_BACKEND: `pytorch` + - CM_MLPERF_BACKEND_VERSION: `<<>>` + * `_tf` + - Aliases: `_tensorflow` + - ENV variables: + - CM_MLPERF_BACKEND: `tf` + - CM_MLPERF_BACKEND_VERSION: `<<>>` + +
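+
+    Putting the groups together, a hypothetical BERT training run with this implementation could be launched as:
+
+    ```bash
+    # _bert selects the model, _cuda the (default) device, _pytorch the framework
+    cmr "app vision language mlcommons mlperf training nvidia _bert,_cuda,_pytorch" -j
+    ```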
    + + + ##### Default variations + + `_cuda` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value` + * `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value` + * `--hw_name=value` → `CM_HW_NAME=value` + * `--model=value` → `CM_MLPERF_CUSTOM_MODEL_PATH=value` + * `--num_threads=value` → `CM_NUM_THREADS=value` + * `--output_dir=value` → `OUTPUT_BASE_DIR=value` + * `--rerun=value` → `CM_RERUN=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `nvidia` + + + +#### Native script being run +=== "Linux/macOS" + * [run-bert-training.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-nvidia/run-bert-training.sh) + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-nvidia/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "app vision language mlcommons mlperf training nvidia [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-reference/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-reference/index.md new file mode 100644 index 000000000..4adad297f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-reference/index.md @@ -0,0 +1,166 @@ +# app-mlperf-training-reference +Automatically generated README for this automation recipe: **app-mlperf-training-reference** + +Category: **[Modular MLPerf training benchmark pipeline](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-reference/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "app vision language mlcommons mlperf training reference ref" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=app,vision,language,mlcommons,mlperf,training,reference,ref[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "app vision language mlcommons mlperf training reference ref [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'app,vision,language,mlcommons,mlperf,training,reference,ref' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "app vision language mlcommons mlperf training reference ref[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_bert` + - ENV variables: + - CM_MLPERF_MODEL: `bert` + +
    + + + * Group "**device**" +
    + Click here to expand this section. + + * **`_cuda`** (default) + - ENV variables: + - CM_MLPERF_DEVICE: `cuda` + - USE_CUDA: `True` + * `_tpu` + - ENV variables: + - CM_MLPERF_DEVICE: `tpu` + - CUDA_VISIBLE_DEVICES: `` + - USE_CUDA: `False` + +
    + + + * Group "**framework**" +
    + Click here to expand this section. + + * `_pytorch` + - ENV variables: + - CM_MLPERF_BACKEND: `pytorch` + - CM_MLPERF_BACKEND_VERSION: `<<>>` + * `_tf` + - Aliases: `_tensorflow` + - ENV variables: + - CM_MLPERF_BACKEND: `tf` + - CM_MLPERF_BACKEND_VERSION: `<<>>` + +
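+
+    The reference implementation is driven the same way; note that `_tensorflow` is an alias of `_tf` (the model and framework below are illustrative choices):
+
+    ```bash
+    cmr "app vision language mlcommons mlperf training reference ref _bert,_cuda,_tensorflow" -j
+    ```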
    + + + ##### Default variations + + `_cuda` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value` + * `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value` + * `--hw_name=value` → `CM_HW_NAME=value` + * `--model=value` → `CM_MLPERF_CUSTOM_MODEL_PATH=value` + * `--num_threads=value` → `CM_NUM_THREADS=value` + * `--output_dir=value` → `OUTPUT_BASE_DIR=value` + * `--rerun=value` → `CM_RERUN=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `reference` + * CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: `` + + + +#### Native script being run +=== "Linux/macOS" + * [run-bert-training.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-reference/run-bert-training.sh) + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-reference/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "app vision language mlcommons mlperf training reference ref [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/index.md new file mode 100644 index 000000000..73140884b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/index.md @@ -0,0 +1,2 @@ +* [app-mlperf-training-nvidia](app-mlperf-training-nvidia/index.md) +* [app-mlperf-training-reference](app-mlperf-training-reference/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-application-pipeline/app-image-corner-detection/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-application-pipeline/app-image-corner-detection/index.md new file mode 100644 index 000000000..1d71d7f6f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-application-pipeline/app-image-corner-detection/index.md @@ -0,0 +1,87 @@ +# app-image-corner-detection +Automatically generated README for this automation recipe: **app-image-corner-detection** + +Category: **[Modular application pipeline](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-corner-detection/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-corner-detection/_cm.yaml)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "app image corner-detection" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=app,image,corner-detection + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "app image corner-detection " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'app,image,corner-detection' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "app image corner-detection" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-corner-detection/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "app image corner-detection " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-application-pipeline/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-application-pipeline/index.md new file mode 100644 index 000000000..96076be6f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Modular-application-pipeline/index.md @@ -0,0 +1 @@ +* [app-image-corner-detection](app-image-corner-detection/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/detect-cpu/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/detect-cpu/index.md new file mode 100644 index 000000000..a45f8a03f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/detect-cpu/index.md @@ -0,0 +1,87 @@ +# detect-cpu +Automatically generated README for this automation recipe: **detect-cpu** + +Category: **[Platform information](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "detect cpu detect-cpu info" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=detect,cpu,detect-cpu,info + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "detect cpu detect-cpu info " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'detect,cpu,detect-cpu,info' + 'out':'con', + ... 
+ (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "detect cpu detect-cpu info" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu/run.bat) +___ +#### Script output +```bash +cmr "detect cpu detect-cpu info " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/detect-os/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/detect-os/index.md new file mode 100644 index 000000000..92e04fa5e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/detect-os/index.md @@ -0,0 +1,86 @@ +# detect-os +Automatically generated README for this automation recipe: **detect-os** + +Category: **[Platform information](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "detect-os detect os info" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=detect-os,detect,os,info + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "detect-os detect os info " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'detect-os,detect,os,info' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "detect-os detect os info" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os/run.bat) +___ +#### Script output +```bash +cmr "detect-os detect os info " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/index.md new file mode 100644 index 000000000..7a25f8c34 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Platform-information/index.md @@ -0,0 +1,2 @@ +* [detect-cpu](detect-cpu/index.md) +* [detect-os](detect-os/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/activate-python-venv/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/activate-python-venv/index.md new file mode 100644 index 000000000..90f6bb959 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/activate-python-venv/index.md @@ -0,0 +1,88 @@ +# Activate virtual Python environment +Automatically generated README for this automation recipe: **activate-python-venv** + +Category: **[Python automation](..)** + +License: **Apache 2.0** + +Developers: [Grigori Fursin](https://cKnowledge.org/gfursin) +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/activate-python-venv/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/activate-python-venv/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "activate python-venv" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=activate,python-venv + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "activate python-venv " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'activate,python-venv' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "activate python-venv" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/activate-python-venv/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/activate-python-venv/run.bat) +___ +#### Script output +```bash +cmr "activate python-venv " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/get-generic-python-lib/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/get-generic-python-lib/index.md new file mode 100644 index 000000000..0cf418eb2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/get-generic-python-lib/index.md @@ -0,0 +1,421 @@ +# get-generic-python-lib +Automatically generated README for this automation recipe: **get-generic-python-lib** + +Category: **[Python automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get generic-python-lib" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,generic-python-lib[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get generic-python-lib [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,generic-python-lib' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get generic-python-lib[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_Pillow` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `Pillow` + * `_anthropic` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `anthropic` + * `_apache-tvm` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `apache-tvm` + - CM_GENERIC_PYTHON_PIP_EXTRA: ` --pre` + * `_apex` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `apex` + * `_async_timeout` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `async_timeout` + * `_attr` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `attr` + * `_attrs` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `attrs` + * `_boto3` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `boto3` + * `_cloudpickle` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `cloudpickle` + * `_cmind` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `cmind` + * `_colored` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `colored` + - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://pypi.ngc.nvidia.com` + * `_conda.#` + * `_cupy` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `cupy` + * `_custom-python` + - ENV variables: + - CM_TMP_USE_CUSTOM_PYTHON: `on` + * `_datasets` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `datasets` + * `_decorator` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `decorator` + * `_deepsparse` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `deepsparse` + * `_dllogger` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `dllogger` + - CM_GENERIC_PYTHON_PIP_URL: `git+https://github.com/NVIDIA/dllogger#egg=dllogger` + * `_fiftyone` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `fiftyone` + * `_google-api-python-client` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `google_api_python_client` + * `_google-auth-oauthlib` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `google_auth_oauthlib` + * `_huggingface_hub` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `huggingface_hub` + * `_inflect` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `inflect` + * `_jax` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `jax` + * `_jax_cuda` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `jax[cuda]` + - CM_GENERIC_PYTHON_PIP_EXTRA: `-f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html` + - CM_JAX_VERSION_EXTRA: `CUDA` + * `_librosa` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `librosa` + * `_matplotlib` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `matplotlib` + * `_mlperf_loadgen` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `mlperf_loadgen` + - CM_GENERIC_PYTHON_PIP_URL: `git+https://github.com/mlcommons/inference.git#subdirectory=loadgen` + * `_mlperf_logging` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `mlperf_logging` + - CM_GENERIC_PYTHON_PIP_URL: `git+https://github.com/mlperf/logging.git` + * `_mpld3` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `mpld3` + * `_nibabel` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `nibabel` + * `_numpy` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `numpy` + * `_nvidia-apex` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `apex` + - CM_GENERIC_PYTHON_PACKAGE_VARIANT: `nvidia-apex` + - CM_GENERIC_PYTHON_PIP_URL: `git+https://github.com/nvidia/apex@0da3ffb92ee6fbe5336602f0e3989db1cd16f880` + * `_nvidia-apex-from-src` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `apex` + - CM_GENERIC_PYTHON_PACKAGE_VARIANT: `nvidia-apex` + * 
`_nvidia-dali` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `nvidia-dali-cuda120` + - CM_GENERIC_PYTHON_PIP_EXTRA: ` --upgrade --default-timeout=900` + - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://developer.download.nvidia.com/compute/redist` + * `_nvidia-pycocotools` + - ENV variables: + - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: `pycocotools` + - CM_GENERIC_PYTHON_PIP_URL: `pycocotools@git+https://github.com/NVIDIA/cocoapi#subdirectory=PythonAPI` + * `_nvidia-pyindex` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `nvidia-pyindex` + * `_nvidia-tensorrt` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `nvidia-tensorrt` + * `_onnx` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `onnx` + * `_onnx-graphsurgeon` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `onnx_graphsurgeon` + * `_onnxruntime` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `onnxruntime` + * `_onnxruntime_gpu` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `onnxruntime_gpu` + - CM_ONNXRUNTIME_VERSION_EXTRA: `GPU` + * `_openai` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `openai` + * `_opencv-python` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `opencv-python` + * `_package.#` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `#` + - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: `` + - CM_GENERIC_PYTHON_PIP_URL: `` + * `_pandas` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `pandas` + * `_path.#` + - ENV variables: + - CM_GENERIC_PYTHON_PIP_URL: `#` + * `_pillow` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `Pillow` + * `_pip` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `pip` + * `_polygraphy` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `polygraphy` + - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://pypi.ngc.nvidia.com` + * `_pre` + - ENV variables: + - CM_GENERIC_PYTHON_DEV_VERSION: `yes` + * `_protobuf` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `protobuf` + * `_psutil` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `psutil` + * `_pycocotools` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `pycocotools` + * `_pycuda` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `pycuda` + * `_ray` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `ray[default]` + * `_requests` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `requests` + * `_rocm` + * `_safetensors` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `safetensors` + * `_scikit-learn` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `scikit-learn` + * `_scipy` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `scipy` + * `_scons` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `scons` + * `_setfit` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `setfit` + * `_setuptools` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `setuptools` + * `_six` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `six` + * `_sklearn` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `sklearn` + * `_sox` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `sox` + * `_sparsezoo` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `sparsezoo` + * `_streamlit` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `streamlit` + * `_streamlit_option_menu` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `streamlit_option_menu` + * `_tensorboard` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `tensorboard` + * `_tensorflow` + - ENV variables: + - 
CM_GENERIC_PYTHON_PACKAGE_NAME: `tensorflow` + * `_tensorrt` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `tensorrt` + - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://download.pytorch.org/whl/<<>>` + - CM_TORCH_VERSION_EXTRA: `CUDA` + * `_tflite` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `tflite` + * `_tflite-runtime` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `tflite-runtime` + * `_tokenization` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `tokenization` + * `_toml` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `toml` + * `_torch` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `torch` + - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://download.pytorch.org/whl/cpu` + * `_torch_cuda` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `torch` + - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL1: `https://download.pytorch.org/whl/<<>>` + - CM_TORCH_VERSION_EXTRA: `CUDA` + * `_torch_tensorrt` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `torch-tensorrt` + - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://download.pytorch.org/whl/<<>>` + - CM_TORCH_VERSION_EXTRA: `CUDA` + * `_torchaudio` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `torchaudio` + - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://download.pytorch.org/whl/cpu` + * `_torchaudio_cuda` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `torchaudio` + - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL1: `https://download.pytorch.org/whl/<<>>` + - CM_TORCHAUDIO_VERSION_EXTRA: `CUDA` + * `_torchvision` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `torchvision` + - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://download.pytorch.org/whl/cpu` + * `_torchvision_cuda` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `torchvision` + - CM_TORCHVISION_VERSION_EXTRA: `CUDA` + * `_tornado` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `tornado` + * `_tqdm` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `tqdm` + * `_transformers` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `transformers` + * `_typing_extensions` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `typing_extensions` + * `_ujson` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `ujson` + * `_unidecode` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `unidecode` + * `_url.#` + - ENV variables: + - CM_GENERIC_PYTHON_PIP_URL: `#` + - CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL: `yes` + * `_wandb` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `wandb` + * `_west` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `west` + * `_xgboost` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `xgboost` + * `_xlsxwriter` + - ENV variables: + - CM_GENERIC_PYTHON_PACKAGE_NAME: `xlsxwriter` + +
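+
+    The `#` in variations such as `_package.#`, `_path.#` and `_url.#` is a wildcard that is substituted into the corresponding environment variable. A sketch with an arbitrary package name; the `--version` pin is the generic CM script option and is assumed, rather than documented above, for this recipe:
+
+    ```bash
+    # _package.tabulate sets CM_GENERIC_PYTHON_PACKAGE_NAME=tabulate and installs it via pip
+    cmr "get generic-python-lib _package.tabulate"
+    # Predefined variation with an (assumed) version pin
+    cmr "get generic-python-lib _numpy" --version=1.26.4
+    ```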
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--extra_index_url=value` → `CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL=value` + * `--force_install=value` → `CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL=value` + * `--index_url=value` → `CM_GENERIC_PYTHON_PIP_INDEX_URL=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib/run.bat) +___ +#### Script output +```bash +cmr "get generic-python-lib [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/get-python3/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/get-python3/index.md new file mode 100644 index 000000000..9544d8790 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/get-python3/index.md @@ -0,0 +1,111 @@ +# get-python3 +Automatically generated README for this automation recipe: **get-python3** + +Category: **[Python automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get python python3 get-python get-python3" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,python,python3,get-python,get-python3[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get python python3 get-python get-python3 [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,python,python3,get-python,get-python3' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get python python3 get-python get-python3[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+    <summary>Click here to expand this section.</summary>
+
+    * `_conda.#`
+      - ENV variables:
+        - CM_PYTHON_CONDA: `yes`
+        - CM_PYTHON_INSTALL_CACHE_TAGS: `_conda.#`
+    * `_custom-path.#`
+      - ENV variables:
+        - CM_PYTHON_BIN_WITH_PATH: `#`
+    * `_lto`
+    * `_optimized`
+    * `_shared`
+    * `_with-custom-ssl`
+    * `_with-ssl`
+
+    </details>
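+
+    For instance, to detect the Python inside a conda environment (the environment name `myenv` is hypothetical):
+
+    ```bash
+    # _conda.myenv sets CM_PYTHON_CONDA=yes and caches the result with tag _conda.myenv
+    cmr "get python python3 get-python get-python3 _conda.myenv"
+    ```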
+
+
+#### Native script being run
+=== "Linux/macOS"
+     * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3/run.sh)
+=== "Windows"
+
+     * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3/run.bat)
+___
+#### Script output
+```bash
+cmr "get python python3 get-python get-python3 [variations]" -j
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/index.md
new file mode 100644
index 000000000..38a4cd7ec
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/index.md
@@ -0,0 +1,6 @@
+* [activate-python-venv](activate-python-venv/index.md)
+* [get-generic-python-lib](get-generic-python-lib/index.md)
+* [get-python3](get-python3/index.md)
+* [install-generic-conda-package](install-generic-conda-package/index.md)
+* [install-python-src](install-python-src/index.md)
+* [install-python-venv](install-python-venv/index.md)
diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-generic-conda-package/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-generic-conda-package/index.md
new file mode 100644
index 000000000..1c663d574
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-generic-conda-package/index.md
@@ -0,0 +1,113 @@
+# install-generic-conda-package
+Automatically generated README for this automation recipe: **install-generic-conda-package**
+
+Category: **[Python automation](..)**
+
+License: **Apache 2.0**
+
+
+* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-generic-conda-package/_cm.json)*
+* Output cached? *True*
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://docs.mlcommons.org/ck/install)
+* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@cm4mlops```
+
+#### Print CM help from the command line
+
+````cmr "get install generic generic-conda-lib conda-lib conda-package generic-conda-package" --help````
+
+#### Run this script
+
+=== "CLI"
+    ##### Run this script via CLI
+
+    ```bash
+    cm run script --tags=get,install,generic,generic-conda-lib,conda-lib,conda-package,generic-conda-package[,variations]
+    ```
+=== "CLI Alt"
+    ##### Run this script via CLI (alternative)
+
+
+    ```bash
+    cmr "get install generic generic-conda-lib conda-lib conda-package generic-conda-package [variations]"
+    ```
+
+=== "Python"
+    ##### Run this script from Python
+
+
+    ```python
+
+    import cmind
+
+    r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'get,install,generic,generic-conda-lib,conda-lib,conda-package,generic-conda-package',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+    if r['return']>0:
+        print (r['error'])
+
+    ```
+
+
+=== "Docker"
+    ##### Run this script via Docker (beta)
+
+    ```bash
+    cm docker script "get install generic generic-conda-lib conda-lib conda-package generic-conda-package[variations]"
+    ```
+___
+
+=== "Variations"
+
+
+    #### Variations
+
+    * *No group (any combination of variations can be selected)*
+    <details>
+ Click here to expand this section. + + * `_name.#` + * `_package.#` + - ENV variables: + - CM_CONDA_PKG_NAME: `#` + +
+ + + * Group "**package-source**" +
+ Click here to expand this section. + + * `_source.#` + - ENV variables: + - CM_CONDA_PKG_SRC: `#` + +
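+
+For instance, installing a specific package from a specific channel might look like this (a hypothetical sketch; `numpy` and `conda-forge` are placeholder values for the `#` wildcards above):
+
+```bash
+cmr "get install generic generic-conda-lib conda-lib conda-package generic-conda-package _package.numpy _source.conda-forge"
+```
+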
+ + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-generic-conda-package/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get install generic generic-conda-lib conda-lib conda-package generic-conda-package [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-python-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-python-src/index.md new file mode 100644 index 000000000..f43cebd9f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-python-src/index.md @@ -0,0 +1,144 @@ +# install-python-src +Automatically generated README for this automation recipe: **install-python-src** + +Category: **[Python automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install src python python3 src-python3 src-python" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,src,python,python3,src-python3,src-python[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install src python python3 src-python3 src-python [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'install,src,python,python3,src-python3,src-python', + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install src python python3 src-python3 src-python[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)*
+ Click here to expand this section. + + * `_lto` + - ENV variables: + - CM_PYTHON_LTO_FLAG: ` --lto` + - CM_PYTHON_INSTALL_CACHE_TAGS: `with-lto` + * `_optimized` + - ENV variables: + - CM_PYTHON_OPTIMIZATION_FLAG: ` --enable-optimizations` + - CM_PYTHON_INSTALL_CACHE_TAGS: `optimized` + * `_shared` + - ENV variables: + - CM_PYTHON_INSTALL_CACHE_TAGS: `shared` + - CM_SHARED_BUILD: `yes` + * `_with-custom-ssl` + - ENV variables: + - CM_CUSTOM_SSL: `yes` + - CM_PYTHON_INSTALL_CACHE_TAGS: `with-custom-ssl` + +
+ + + * Group "**ssl**" +
+ Click here to expand this section. + + * `_with-ssl` + - ENV variables: + - CM_ENABLE_SSL: `yes` + - CM_PYTHON_INSTALL_CACHE_TAGS: `with-ssl` + +
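+
+As an illustration, an optimized shared build with SSL enabled could combine the variations above (a sketch; `--version` is the generic CM flag, and `3.10.13` is the default version listed below):
+
+```bash
+cmr "install src python python3 src-python3 src-python _optimized _shared _with-ssl" --version=3.10.13
+```
+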
    + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_ENABLE_SSL: `no` + * CM_CUSTOM_SSL: `no` + * CM_SHARED_BUILD: `no` + * CM_PYTHON_OPTIMIZATION_FLAG: `` + * CM_PYTHON_LTO_FLAG: `` + * CM_WGET_URL: `https://www.python.org/ftp/python/[PYTHON_VERSION]/Python-[PYTHON_VERSION].tgz` + + +#### Versions +Default version: `3.10.13` + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-src/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "install src python python3 src-python3 src-python [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-python-venv/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-python-venv/index.md new file mode 100644 index 000000000..f097aa196 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Python-automation/install-python-venv/index.md @@ -0,0 +1,103 @@ +# install-python-venv +Automatically generated README for this automation recipe: **install-python-venv** + +Category: **[Python automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-venv/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "install python get-python-venv python-venv" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=install,python,get-python-venv,python-venv[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "install python get-python-venv python-venv [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'install,python,get-python-venv,python-venv' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "install python get-python-venv python-venv[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
+ Click here to expand this section. + + * `_lto` + * `_optimized` + * `_shared` + * `_with-custom-ssl` + * `_with-ssl` + +
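+
+For completeness, here is a runnable version of the Python snippet above with the placeholder keys removed (a minimal sketch, assuming the `cmind` package is installed and the repository has been pulled):
+
+```python
+# Minimal sketch: invoke the install-python-venv recipe via the CM Python API
+import cmind
+
+r = cmind.access({'action': 'run',          # run a CM script
+                  'automation': 'script',   # use the script automation
+                  'tags': 'install,python,get-python-venv,python-venv',
+                  'out': 'con'})            # print output to the console
+
+if r['return'] > 0:
+    print(r['error'])
+```
+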
+ + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-venv/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-venv/run.bat) +___ +#### Script output +```bash +cmr "install python get-python-venv python-venv [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Remote-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Remote-automation/index.md new file mode 100644 index 000000000..754c07ddf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Remote-automation/index.md @@ -0,0 +1 @@ +* [remote-run-commands](remote-run-commands/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Remote-automation/remote-run-commands/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Remote-automation/remote-run-commands/index.md new file mode 100644 index 000000000..afb2ccf0c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Remote-automation/remote-run-commands/index.md @@ -0,0 +1,117 @@ +# remote-run-commands +Automatically generated README for this automation recipe: **remote-run-commands** + +Category: **[Remote automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/remote-run-commands/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/remote-run-commands/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "remote run cmds remote-run remote-run-cmds ssh-run ssh" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=remote,run,cmds,remote-run,remote-run-cmds,ssh-run,ssh [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "remote run cmds remote-run remote-run-cmds ssh-run ssh " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'remote,run,cmds,remote-run,remote-run-cmds,ssh-run,ssh', + 'out':'con', + ... + (other input keys for this script) + ...
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "remote run cmds remote-run remote-run-cmds ssh-run ssh" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--client_refresh=value` → `CM_SSH_CLIENT_REFRESH=value` + * `--host=value` → `CM_SSH_HOST=value` + * `--password=value` → `CM_SSH_PASSWORD=value` + * `--port=value` → `CM_SSH_PORT=value` + * `--run_cmds=value` → `CM_SSH_RUN_COMMANDS=value` + * `--skip_host_verify=value` → `CM_SSH_SKIP_HOST_VERIFY=value` + * `--ssh_key_file=value` → `CM_SSH_KEY_FILE=value` + * `--user=value` → `CM_SSH_USER=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_SSH_PORT: `22` + * CM_SSH_HOST: `localhost` + * CM_SSH_USER: `$USER` + * CM_SSH_CLIENT_REFRESH: `10` + * CM_SSH_KEY_FILE: `$HOME/.ssh/id_rsa` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/remote-run-commands/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/remote-run-commands/run.bat) +___ +#### Script output +```bash +cmr "remote run cmds remote-run remote-run-cmds ssh-run ssh " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/app-mlperf-inference-nvidia/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/app-mlperf-inference-nvidia/index.md new file mode 100644 index 000000000..f44318297 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/app-mlperf-inference-nvidia/index.md @@ -0,0 +1,652 @@ +# app-mlperf-inference-nvidia +Automatically generated README for this automation recipe: **app-mlperf-inference-nvidia** + +Category: **[Reproduce MLPerf benchmarks](..)** + +License: **Apache 2.0** + + + +--- + +This script is a CM wrapper to the official [Nvidia submission code](https://github.com/mlcommons/inference_results_v3.0/tree/master/closed/NVIDIA) used for MLPerf inference submissions. + + + +## Download the needed files + +* Please ask privately in [this discord channel](https://discord.gg/y7hupJsUNb) if you would like to get access to an Amazon S3 bucket containing all the needed files, for convenience. Otherwise, you can download them from the links below. + +For x86 machines, please download the latest install tar files from the sites below: +1. [cuDNN](https://developer.nvidia.com/cudnn) (for cuda 11) +2. [TensorRT](https://developer.nvidia.com/tensorrt) +3. Imagenet validation set (unfortunately not available via public URL) following the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) +
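+
+For reference, after the downloads in this and the next section, your home directory would typically contain files like the following (a sketch; the exact file names and versions may differ):
+
+```bash
+ls $HOME
+# cuda_11.8.0_520.61.05_linux.run
+# TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz
+# cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz
+# imagenet-2012-val/
+```
+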
+ + + +## Using Docker (Recommended on x86 systems) + + +Assuming all the downloaded files are in the user's home directory, please follow these steps: + +1. Download CUDA 11.8 + ``` + wget https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run + ``` +2. [Install docker](https://docs.docker.com/engine/install/) and [Nvidia container toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) + +3. Give Docker permission to the current user + ``` + sudo usermod -aG docker $USER + ``` + Log out and log back in. + Restart Docker if required and confirm that the Nvidia container toolkit is working: + ``` + nvidia-ctk --version + ``` +4. Check that the Nvidia driver is working properly on the host. + ``` + nvidia-smi + ``` + If the above command produces any error, you'll need to install the Nvidia drivers on the host. You can do this via CM if you have sudo access: + ``` + cmr "install cuda prebuilt _driver" --version=11.8.0 + ``` +5. Build the docker container and mount the paths from the host machine. + **You may want to change the `scratch_path` location as it can take 100s of GBs.** + ```bash + cm docker script --tags=build,nvidia,inference,server \ + --cuda_run_file_path=$HOME/cuda_11.8.0_520.61.05_linux.run \ + --tensorrt_tar_file_path=$HOME/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \ + --cudnn_tar_file_path=$HOME/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \ + --imagenet_path=$HOME/imagenet-2012-val \ + --scratch_path=$HOME/mlperf_scratch \ + --docker_cm_repo=mlcommons@cm4mlops \ + --results_dir=$HOME/results_dir \ + --submission_dir=$HOME/submission_dir \ + --adr.compiler.tags=gcc + ``` + * Use `--docker_cache=no` to turn off docker caching. + * Use `--docker_run_cmd_prefix="cm pull repo mlcommons@cm4mlops --checkout=dev"` to update the CM repository when docker caching is used. + * Use `--custom_system=no` if you are using a similar system to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems). + +6. At the end of the build you'll get the following prompt unless you have chosen `--custom_system=no`. Please give a system name and say yes to generating the configuration files. + ### Example output + ``` + ============================================ + => A system ID is a string containing only letters, numbers, and underscores + => that is used as the human-readable name of the system. It is also used as + => the system name when creating the measurements/ and results/ entries. + => This string should also start with a letter to be a valid Python enum member name. + => Specify the system ID to use for the current system: phoenix + => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix + => This script will generate Benchmark Configuration stubs for the detected system. + Continue? [y/n]: y + ``` + Now you'll be inside the CM Nvidia docker container and can run further scripts. + +7. Once the build is complete, you can proceed with any further CM scripts like for MLPerf inference. You can also save the container at this stage using [docker commit](https://docs.docker.com/engine/reference/commandline/commit/) so that it can be launched later without having to go through the previous steps. +
+ +
+ + + +## Without Docker + + +1. Install CUDA + If CUDA is not detected, CM should download and install it automatically when you run the workflow. + **Nvidia drivers are expected to be installed on the system.** + +2. Install cuDNN + ```bash + cmr "get cudnn" --tar_file=<path to the cuDNN tar file> + ``` +3. Install TensorRT + ```bash + cmr "get tensorrt _dev" --tar_file=<path to the TensorRT tar file> + ``` + On non-x86 systems like Nvidia Orin, you can install it via the package manager, and CM should then pick up the installation automatically during the workflow run. + +4. Build the Nvidia inference server + ``` + cmr "build nvidia inference server" \ + --adr.install-cuda-prebuilt.local_run_file_path=/data/cuda_11.8.0_520.61.05_linux.run \ + --adr.tensorrt.tar_file=/data/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \ + --adr.cudnn.tar_file=/data/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \ + --adr.compiler.tags=gcc \ + [--custom_system=no] + ``` + Use `--custom_system=no` if you are using a similar system to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems). + +5. At the end of the build you'll get the following prompt unless you have chosen `--custom_system=no`. Please give a system name and say yes to generating the configuration files. + + ### Example output + ``` + ============================================ + => A system ID is a string containing only letters, numbers, and underscores + => that is used as the human-readable name of the system. It is also used as + => the system name when creating the measurements/ and results/ entries. + => This string should also start with a letter to be a valid Python enum member name. + => Specify the system ID to use for the current system: phoenix + => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix + => This script will generate Benchmark Configuration stubs for the detected system. + Continue? [y/n]: y + ``` +
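+
+Once the server is built, the harness can be invoked through the generic interface documented below. For example (a sketch using the default variations listed further down; the scenario and batch size are placeholder values mapped to the documented flags):
+
+```bash
+cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia _tensorrt _cuda _resnet50 _run_harness" --scenario=Offline --gpu_batch_size=64
+```
+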
+ + +## Acknowledgments + +* A common CM interface and automation for MLPerf inference benchmarks was developed by Arjun Suresh and Grigori Fursin, + sponsored by the [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https://cKnowledge.org). +* Nvidia's MLPerf inference implementation was developed by Zhihan Jiang, Ethan Cheng, Yiheng Zhang and Jinho Suh. + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-nvidia/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia', + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "reproduce mlcommons mlperf inference harness nvidia-harness nvidia[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)*
    + Click here to expand this section. + + * `_run-harness` + * `_v3.1` + - ENV variables: + - CM_MLPERF_INFERENCE_VERSION: `v3.1` + - CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: `GPTJ-07142023.pth` + +
    + + + * Group "**backend**" +
    + Click here to expand this section. + + * **`_tensorrt`** (default) + - ENV variables: + - CM_MLPERF_BACKEND: `tensorrt` + - CM_MLPERF_BACKEND_NAME: `TensorRT` + +
    + + + * Group "**batch-size**" +
    + Click here to expand this section. + + * `_batch_size.#` + - ENV variables: + - CM_MODEL_BATCH_SIZE: `#` + - CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: `#` + +
    + + + * Group "**build-engine-options**" +
    + Click here to expand this section. + + * `_build_engine_options.#` + - ENV variables: + - CM_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS: `#` + +
    + + + * Group "**device**" +
    + Click here to expand this section. + + * `_cpu` + - ENV variables: + - CM_MLPERF_DEVICE: `cpu` + * **`_cuda`** (default) + - ENV variables: + - CM_MLPERF_DEVICE: `gpu` + - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart` + +
    + + + * Group "**device-memory**" +
    + Click here to expand this section. + + * `_gpu_memory.16` + - ENV variables: + - CM_NVIDIA_GPU_MEMORY: `16` + * `_gpu_memory.24` + - ENV variables: + - CM_NVIDIA_GPU_MEMORY: `24` + * `_gpu_memory.32` + - ENV variables: + - CM_NVIDIA_GPU_MEMORY: `32` + * `_gpu_memory.40` + - ENV variables: + - CM_NVIDIA_GPU_MEMORY: `40` + * `_gpu_memory.48` + - ENV variables: + - CM_NVIDIA_GPU_MEMORY: `48` + * `_gpu_memory.8` + - ENV variables: + - CM_NVIDIA_GPU_MEMORY: `8` + * `_gpu_memory.80` + - ENV variables: + - CM_NVIDIA_GPU_MEMORY: `80` + +
    + + + * Group "**dla-batch-size**" +
    + Click here to expand this section. + + * `_dla_batch_size.#` + - ENV variables: + - CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE: `#` + - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2: `dla_batch_size.#` + +
    + + + * Group "**gpu-connection**" +
    + Click here to expand this section. + + * `_pcie` + * `_sxm` + +
    + + + * Group "**gpu-name**" +
    + Click here to expand this section. + + * `_a100` + - ENV variables: + - CM_NVIDIA_CUSTOM_GPU: `yes` + * `_a6000` + - ENV variables: + - CM_NVIDIA_CUSTOM_GPU: `yes` + * `_custom` + - ENV variables: + - CM_NVIDIA_CUSTOM_GPU: `yes` + - CM_MODEL_BATCH_SIZE: `` + - CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: `<<>>` + * `_l4` + - ENV variables: + - CM_NVIDIA_CUSTOM_GPU: `yes` + * `_orin` + - ENV variables: + - CM_NVIDIA_CUSTOM_GPU: `yes` + - CM_MODEL_BATCH_SIZE: `` + - CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: `<<>>` + * `_rtx_4090` + - ENV variables: + - CM_NVIDIA_CUSTOM_GPU: `yes` + * `_rtx_6000_ada` + - ENV variables: + - CM_NVIDIA_CUSTOM_GPU: `yes` + * `_t4` + - ENV variables: + - CM_NVIDIA_CUSTOM_GPU: `yes` + +
    + + + * Group "**loadgen-scenario**" +
    + Click here to expand this section. + + * `_multistream` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream` + * `_offline` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `Offline` + * `_server` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `Server` + * `_singlestream` + - ENV variables: + - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` + - CUDA_VISIBLE_DEVICES_NOT_USED: `0` + +
    + + + * Group "**model**" +
    + Click here to expand this section. + + * `_3d-unet-99` + - ENV variables: + - CM_MODEL: `3d-unet-99` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` + * `_3d-unet-99.9` + - ENV variables: + - CM_MODEL: `3d-unet-99.9` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` + * `_bert-99` + - ENV variables: + - CM_MODEL: `bert-99` + - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int32` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` + * `_bert-99.9` + - ENV variables: + - CM_MODEL: `bert-99.9` + - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3733910/files/model.onnx` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int32` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16` + * `_dlrm-v2-99` + - ENV variables: + - CM_MODEL: `dlrm-v2-99` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `affine fusion` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16` + * `_dlrm-v2-99.9` + - ENV variables: + - CM_MODEL: `dlrm-v2-99.9` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `affine fusion` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16` + * `_gptj-99` + - ENV variables: + - CM_MODEL: `gptj-99` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int32` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16` + * `_gptj-99.9` + - ENV variables: + - CM_MODEL: `gptj-99.9` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int32` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16` + * **`_resnet50`** (default) + - ENV variables: + - CM_MODEL: `resnet50` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` + * `_retinanet` + - ENV variables: + - CM_MODEL: `retinanet` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` + * `_rnnt` + - ENV variables: + - CM_MODEL: `rnnt` + - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt` + - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion` + - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp16` + - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16` + +
    + + + * Group "**num-gpus**" +
    + Click here to expand this section. + + * `_num-gpus.#` + - ENV variables: + - CM_NVIDIA_NUM_GPUS: `#` + * **`_num-gpus.1`** (default) + - ENV variables: + - CM_NVIDIA_NUM_GPUS: `1` + +
    + + + * Group "**power-mode**" +
    + Click here to expand this section. + + * `_maxn` + - ENV variables: + - CM_MLPERF_NVIDIA_HARNESS_MAXN: `True` + * `_maxq` + - ENV variables: + - CM_MLPERF_NVIDIA_HARNESS_MAXQ: `True` + +
    + + + * Group "**run-mode**" +
    + Click here to expand this section. + + * `_build` + - ENV variables: + - MLPERF_NVIDIA_RUN_COMMAND: `build` + - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `build` + * `_build_engine` + - Aliases: `_build-engine` + - ENV variables: + - MLPERF_NVIDIA_RUN_COMMAND: `generate_engines` + - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `generate_engines` + * `_calibrate` + - ENV variables: + - MLPERF_NVIDIA_RUN_COMMAND: `calibrate` + - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `calibrate` + * `_download_model` + - ENV variables: + - MLPERF_NVIDIA_RUN_COMMAND: `download_model` + - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `download_model` + * `_prebuild` + - ENV variables: + - MLPERF_NVIDIA_RUN_COMMAND: `prebuild` + - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `prebuild` + * `_preprocess_data` + - ENV variables: + - MLPERF_NVIDIA_RUN_COMMAND: `preprocess_data` + - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `preprocess_data` + * **`_run_harness`** (default) + - ENV variables: + - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `run_harness` + - MLPERF_NVIDIA_RUN_COMMAND: `run_harness` + - CM_CALL_MLPERF_RUNNER: `yes` + +
    + + + * Group "**triton**" +
    + Click here to expand this section. + + * `_use_triton` + - ENV variables: + - CM_MLPERF_NVIDIA_HARNESS_USE_TRITON: `yes` + - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX3: `using_triton` + +
    + + + * Group "**version**" +
    + Click here to expand this section. + + * **`_v4.0`** (default) + - ENV variables: + - CM_MLPERF_INFERENCE_VERSION: `v4.0` + - CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: `GPTJ-FP8-quantized` + +
    + + + ##### Default variations + + `_cuda,_num-gpus.1,_resnet50,_run_harness,_tensorrt,_v4.0` +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--audio_buffer_num_lines=value` → `CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES=value` + * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` + * `--deque_timeout_usec=value` → `CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC=value` + * `--devices=value` → `CM_MLPERF_NVIDIA_HARNESS_DEVICES=value` + * `--dla_batch_size=value` → `CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE=value` + * `--dla_copy_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS=value` + * `--dla_inference_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS=value` + * `--embedding_weights_on_gpu_part=value` → `CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART=value` + * `--enable_sort=value` → `CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT=value` + * `--end_on_device=value` → `CM_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE=value` + * `--extra_run_options=value` → `CM_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS=value` + * `--gpu_batch_size=value` → `CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE=value` + * `--gpu_copy_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS=value` + * `--gpu_inference_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS=value` + * `--graphs_max_seqlen=value` → `CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN=value` + * `--input_format=value` → `CM_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT=value` + * `--log_dir=value` → `CM_MLPERF_NVIDIA_HARNESS_LOG_DIR=value` + * `--make_cmd=value` → `MLPERF_NVIDIA_RUN_COMMAND=value` + * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` + * `--max_dlas=value` → `CM_MLPERF_NVIDIA_HARNESS_MAX_DLAS=value` + * `--mlperf_conf=value` → `CM_MLPERF_CONF=value` + * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` + * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` + * `--num_issue_query_threads=value` → `CM_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS=value` + * `--num_sort_segments=value` → `CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS=value` + * `--num_warmups=value` → `CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS=value` + * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` + * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` + * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` + * `--power_setting=value` → `CM_MLPERF_NVIDIA_HARNESS_POWER_SETTING=value` + * `--rerun=value` → `CM_RERUN=value` + * `--run_infer_on_copy_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS=value` + * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` + * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` + * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` + * `--skip_postprocess=value` → `CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS=value` + * `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value` + * `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value` + * `--soft_drop=value` → `CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP=value` + * `--start_from_device=value` → `CM_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE=value` + * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` + * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` + * `--use_cuda_thread_per_device=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE=value` + * `--use_deque_limit=value` → 
`CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT=value` + * `--use_fp8=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_FP8=value` + * `--use_graphs=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS=value` + * `--use_small_tile_gemm_plugin=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN=value` + * `--use_triton=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_TRITON=value` + * `--user_conf=value` → `CM_MLPERF_USER_CONF=value` + * `--workspace_size=value` → `CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_BATCH_COUNT: `1` + * CM_BATCH_SIZE: `1` + * CM_FAST_COMPILATION: `yes` + * CM_MLPERF_LOADGEN_SCENARIO: `Offline` + * CM_MLPERF_LOADGEN_MODE: `performance` + * CM_SKIP_PREPROCESS_DATASET: `no` + * CM_SKIP_MODEL_DOWNLOAD: `no` + * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `nvidia_original` + * CM_MLPERF_SKIP_RUN: `no` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-nvidia/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/index.md new file mode 100644 index 000000000..6db8a9a3e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/index.md @@ -0,0 +1,4 @@ +* [app-mlperf-inference-nvidia](app-mlperf-inference-nvidia/index.md) +* [reproduce-mlperf-octoml-tinyml-results](reproduce-mlperf-octoml-tinyml-results/index.md) +* [reproduce-mlperf-training-nvidia](reproduce-mlperf-training-nvidia/index.md) +* [wrapper-reproduce-octoml-tinyml-submission](wrapper-reproduce-octoml-tinyml-submission/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-octoml-tinyml-results/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-octoml-tinyml-results/index.md new file mode 100644 index 000000000..d32b17538 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-octoml-tinyml-results/index.md @@ -0,0 +1,137 @@ +# reproduce-mlperf-octoml-tinyml-results +Automatically generated README for this automation recipe: **reproduce-mlperf-octoml-tinyml-results** + +Category: **[Reproduce MLPerf benchmarks](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-octoml-tinyml-results/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-octoml-tinyml-results/_cm.json)* +* Output cached? 
*True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "reproduce tiny results mlperf octoml mlcommons" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=reproduce,tiny,results,mlperf,octoml,mlcommons[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "reproduce tiny results mlperf octoml mlcommons [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'reproduce,tiny,results,mlperf,octoml,mlcommons', + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "reproduce tiny results mlperf octoml mlcommons[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)*
+ Click here to expand this section. + + * `_NRF` + - ENV variables: + - CM_TINY_BOARD: `NRF5340DK` + * `_NUCLEO` + - ENV variables: + - CM_TINY_BOARD: `NUCLEO_L4R5ZI` + * `_ad` + - ENV variables: + - CM_TINY_MODEL: `ad` + * `_cmsis_nn` + - ENV variables: + - CM_MICROTVM_VARIANT: `microtvm_cmsis_nn` + * `_ic` + - ENV variables: + - CM_TINY_MODEL: `ic` + * `_kws` + - ENV variables: + - CM_TINY_MODEL: `kws` + * `_native` + - ENV variables: + - CM_MICROTVM_VARIANT: `microtvm_native` + * `_vww` + - ENV variables: + - CM_TINY_MODEL: `vww` + +
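+
+For example, reproducing the image-classification benchmark on an ST Nucleo board with the CMSIS-NN variant might look like this (a sketch combining the variations above; `--flash=yes` assumes a board is attached and maps to the flag documented below):
+
+```bash
+cmr "reproduce tiny results mlperf octoml mlcommons _NUCLEO _ic _cmsis_nn" --flash=yes
+```
+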
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--flash=value` → `CM_FLASH_BOARD=value` + * `--recreate_binary=value` → `CM_RECREATE_BINARY=value` + + + +#### Versions +Default version: `r1.0` + +* `r1.0` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-octoml-tinyml-results/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "reproduce tiny results mlperf octoml mlcommons [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-training-nvidia/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-training-nvidia/index.md new file mode 100644 index 000000000..8b461ba10 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-training-nvidia/index.md @@ -0,0 +1,115 @@ +# reproduce-mlperf-training-nvidia +Automatically generated README for this automation recipe: **reproduce-mlperf-training-nvidia** + +Category: **[Reproduce MLPerf benchmarks](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-training-nvidia/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "reproduce mlcommons mlperf train training nvidia-training nvidia" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=reproduce,mlcommons,mlperf,train,training,nvidia-training,nvidia[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "reproduce mlcommons mlperf train training nvidia-training nvidia [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'reproduce,mlcommons,mlperf,train,training,nvidia-training,nvidia' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "reproduce mlcommons mlperf train training nvidia-training nvidia[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**benchmark**" +
+ Click here to expand this section. + + * `_resnet` + - ENV variables: + - CM_MLPERF_TRAINING_BENCHMARK: `resnet` + +
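+
+A typical invocation of the resnet benchmark could look as follows (a sketch; the results path and the system configuration name are placeholder values for the flags documented below, and `--version` is the generic CM flag with one of the versions listed further down):
+
+```bash
+cmr "reproduce mlcommons mlperf train training nvidia-training nvidia _resnet" --results_dir=$HOME/results --system_conf_name=my_system.yaml --version=r3.0
+```
+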
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--results_dir=value` → `CM_MLPERF_RESULTS_DIR=value` + * `--system_conf_name=value` → `CM_MLPERF_NVIDIA_TRAINING_SYSTEM_CONF_NAME=value` + + + +#### Versions +* `r2.1` +* `r3.0` + +#### Native script being run +=== "Linux/macOS" + * [run-resnet.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-training-nvidia/run-resnet.sh) + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-training-nvidia/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "reproduce mlcommons mlperf train training nvidia-training nvidia [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/wrapper-reproduce-octoml-tinyml-submission/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/wrapper-reproduce-octoml-tinyml-submission/index.md new file mode 100644 index 000000000..2a69d5c1c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproduce-MLPerf-benchmarks/wrapper-reproduce-octoml-tinyml-submission/index.md @@ -0,0 +1,101 @@ +# wrapper-reproduce-octoml-tinyml-submission +Automatically generated README for this automation recipe: **wrapper-reproduce-octoml-tinyml-submission** + +Category: **[Reproduce MLPerf benchmarks](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/wrapper-reproduce-octoml-tinyml-submission/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=run,generate-tiny,generate,submission,tiny,generate-tiny-submission,results,mlcommons,mlperf,octoml [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'run,generate-tiny,generate,submission,tiny,generate-tiny-submission,results,mlcommons,mlperf,octoml' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--flash=value` → `CM_FLASH_BOARD=value` + * `--recreate_binary=value` → `CM_RECREATE_BINARY=value` + + + +#### Versions +Default version: `r1.0` + +* `r1.0` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/wrapper-reproduce-octoml-tinyml-submission/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/get-ipol-src/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/get-ipol-src/index.md new file mode 100644 index 000000000..ba254ab2b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/get-ipol-src/index.md @@ -0,0 +1,98 @@ +# get-ipol-src +Automatically generated README for this automation recipe: **get-ipol-src** + +Category: **[Reproducibility and artifact evaluation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ipol-src/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ipol-src/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get ipol journal src ipol-src" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,ipol,journal,src,ipol-src [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get ipol journal src ipol-src " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'get,ipol,journal,src,ipol-src', + 'out':'con', + ... + (other input keys for this script) + ...
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get ipol journal src ipol-src" [--input_flags] + ``` +___ + +=== "Input Flags" + + + #### Input Flags + + * --**number:** IPOL publication number + * --**year:** IPOL publication year +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--number=value` → `CM_IPOL_NUMBER=value` + * `--year=value` → `CM_IPOL_YEAR=value` + + + + +___ +#### Script output +```bash +cmr "get ipol journal src ipol-src " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/index.md new file mode 100644 index 000000000..6803c39f9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/index.md @@ -0,0 +1,4 @@ +* [get-ipol-src](get-ipol-src/index.md) +* [process-ae-users](process-ae-users/index.md) +* [reproduce-ipol-paper-2022-439](reproduce-ipol-paper-2022-439/index.md) +* [reproduce-micro-paper-2023-victima](reproduce-micro-paper-2023-victima/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/process-ae-users/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/process-ae-users/index.md new file mode 100644 index 000000000..51e9a4f91 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/process-ae-users/index.md @@ -0,0 +1,95 @@ +# process-ae-users +Automatically generated README for this automation recipe: **process-ae-users** + +Category: **[Reproducibility and artifact evaluation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/process-ae-users/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "process ae users" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=process,ae,users [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "process ae users " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'process,ae,users', + 'out':'con', + ... + (other input keys for this script) + ...
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "process ae users" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--file=value` → `CM_PROCESS_AE_USERS_INPUT_FILE=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/process-ae-users/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/process-ae-users/run.bat) +___ +#### Script output +```bash +cmr "process ae users " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-ipol-paper-2022-439/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-ipol-paper-2022-439/index.md new file mode 100644 index 000000000..d0298a09c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-ipol-paper-2022-439/index.md @@ -0,0 +1,97 @@ +# reproduce-ipol-paper-2022-439 +Automatically generated README for this automation recipe: **reproduce-ipol-paper-2022-439** + +Category: **[Reproducibility and artifact evaluation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-ipol-paper-2022-439/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-ipol-paper-2022-439/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=app,python,reproduce,project,paper,ipol,journal,repro,reproducibility,pytorch,2022-439 [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439 " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'app,python,reproduce,project,paper,ipol,journal,repro,reproducibility,pytorch,2022-439', + 'out':'con', + ... + (other input keys for this script) + ...
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--image1=value` → `CM_IMAGE_1=value` + * `--image2=value` → `CM_IMAGE_2=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-ipol-paper-2022-439/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-ipol-paper-2022-439/run.bat) +___ +#### Script output +```bash +cmr "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439 " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-micro-paper-2023-victima/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-micro-paper-2023-victima/index.md new file mode 100644 index 000000000..461210341 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-micro-paper-2023-victima/index.md @@ -0,0 +1,123 @@ +# reproduce-micro-paper-2023-victima +Automatically generated README for this automation recipe: **reproduce-micro-paper-2023-victima** + +Category: **[Reproducibility and artifact evaluation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-micro-paper-2023-victima/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-micro-paper-2023-victima/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "reproduce project paper micro micro-2023 victima" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=reproduce,project,paper,micro,micro-2023,victima[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "reproduce project paper micro micro-2023 victima [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run', + 'automation':'script', + 'tags':'reproduce,project,paper,micro,micro-2023,victima', + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "reproduce project paper micro micro-2023 victima[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)*
+ Click here to expand this section. + + * `_install_deps` + * `_plot` + * `_run` + +
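+
+The three variations above map to the usual artifact-evaluation workflow and would likely be chained in this order (a sketch; `--container=docker` mirrors the default environment documented below):
+
+```bash
+cmr "reproduce project paper micro micro-2023 victima _install_deps"
+cmr "reproduce project paper micro micro-2023 victima _run" --container=docker
+cmr "reproduce project paper micro micro-2023 victima _plot"
+```
+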
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--container=value` → `CM_VICTIMA_CONTAINER=value` + * `--job_manager=value` → `CM_VICTIMA_JOB_MANAGER=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_VICTIMA_JOB_MANAGER: `native` + * CM_VICTIMA_CONTAINER: `docker` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-micro-paper-2023-victima/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "reproduce project paper micro micro-2023 victima [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/index.md new file mode 100644 index 000000000..c7d48602b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/index.md @@ -0,0 +1,15 @@ +* [print-any-text](print-any-text/index.md) +* [print-croissant-desc](print-croissant-desc/index.md) +* [print-hello-world](print-hello-world/index.md) +* [print-hello-world-java](print-hello-world-java/index.md) +* [print-hello-world-javac](print-hello-world-javac/index.md) +* [print-hello-world-py](print-hello-world-py/index.md) +* [print-python-version](print-python-version/index.md) +* [run-python](run-python/index.md) +* [test-cm-core](test-cm-core/index.md) +* [test-cm-script-pipeline](test-cm-script-pipeline/index.md) +* [test-deps-conditions](test-deps-conditions/index.md) +* [test-deps-conditions2](test-deps-conditions2/index.md) +* [test-download-and-extract-artifacts](test-download-and-extract-artifacts/index.md) +* [test-set-sys-user-cm](test-set-sys-user-cm/index.md) +* [upgrade-python-pip](upgrade-python-pip/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-any-text/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-any-text/index.md new file mode 100644 index 000000000..3a924de55 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-any-text/index.md @@ -0,0 +1,129 @@ +# print-any-text +Automatically generated README for this automation recipe: **print-any-text** + +Category: **[Tests](..)** + +License: **Apache 2.0** + +Developers: Grigori Fursin + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/print-any-text/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "print any-text" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=print,any-text[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "print any-text [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'print,any-text' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "print any-text[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_cm_env.#` + - ENV variables: + - CM_PRINT_ANY_CM_ENV_KEYS: `#` + * `_os_env.#` + - ENV variables: + - CM_PRINT_ANY_OS_ENV_KEYS: `#` + * `_text.#` + - ENV variables: + - CM_PRINT_ANY_TEXT: `#` + +
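In the variations above, `#` is a placeholder for a user-supplied suffix appended after the dot, which is copied into the corresponding ENV variable. A minimal sketch (`Hello-CM` and `PATH` are arbitrary example values, not part of the script):

```bash
# '_text.#' sets CM_PRINT_ANY_TEXT to the value after the dot:
cmr "print any-text _text.Hello-CM"

# '_os_env.#' sets CM_PRINT_ANY_OS_ENV_KEYS, e.g. to echo the OS variable PATH:
cmr "print any-text _os_env.PATH"
```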
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--cm_env_keys=value` → `CM_PRINT_ANY_CM_ENV_KEYS=value` + * `--os_env_keys=value` → `CM_PRINT_ANY_OS_ENV_KEYS=value` + * `--text=value` → `CM_PRINT_ANY_TEXT=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_PRINT_ANY_TEXT: `` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-any-text/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-any-text/run.bat) +___ +#### Script output +```bash +cmr "print any-text [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-croissant-desc/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-croissant-desc/index.md new file mode 100644 index 000000000..2533d905d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-croissant-desc/index.md @@ -0,0 +1,106 @@ +# print-croissant-desc +Automatically generated README for this automation recipe: **print-croissant-desc** + +Category: **[Tests](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/print-croissant-desc/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/print-croissant-desc/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "print croissant desc" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=print,croissant,desc [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "print croissant desc " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'print,croissant,desc' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "print croissant desc" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--url=value` → `CM_PRINT_CROISSANT_URL=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_PRINT_CROISSANT_URL: `https://raw.githubusercontent.com/mlcommons/croissant/main/datasets/1.0/gpt-3/metadata.json` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-croissant-desc/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-croissant-desc/run.bat) +___ +#### Script output +```bash +cmr "print croissant desc " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-java/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-java/index.md new file mode 100644 index 000000000..56a73326b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-java/index.md @@ -0,0 +1,86 @@ +# print-hello-world-java +Automatically generated README for this automation recipe: **print-hello-world-java** + +Category: **[Tests](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-java/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "print hello world hello-world hello world java" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=print,hello world,hello-world,hello,world,java + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "print hello world hello-world hello world java " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'print,hello world,hello-world,hello,world,java' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "print hello world hello-world hello world java" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-java/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-java/run.bat) +___ +#### Script output +```bash +cmr "print hello world hello-world hello world java " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-javac/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-javac/index.md new file mode 100644 index 000000000..0166b29cd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-javac/index.md @@ -0,0 +1,86 @@ +# print-hello-world-javac +Automatically generated README for this automation recipe: **print-hello-world-javac** + +Category: **[Tests](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-javac/_cm.json)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "print hello world hello-world hello world javac" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=print,hello world,hello-world,hello,world,javac + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "print hello world hello-world hello world javac " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'print,hello world,hello-world,hello,world,javac' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "print hello world hello-world hello world javac" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-javac/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-javac/run.bat) +___ +#### Script output +```bash +cmr "print hello world hello-world hello world javac " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-py/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-py/index.md new file mode 100644 index 000000000..e753b2fd8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world-py/index.md @@ -0,0 +1,86 @@ +# print-hello-world-py +Automatically generated README for this automation recipe: **print-hello-world-py** + +Category: **[Tests](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-py/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "print hello world hello-world hello world python" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=print,hello world,hello-world,hello,world,python + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "print hello world hello-world hello world python " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'print,hello world,hello-world,hello,world,python' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "print hello world hello-world hello world python" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-py/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-py/run.bat) +___ +#### Script output +```bash +cmr "print hello world hello-world hello world python " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world/index.md new file mode 100644 index 000000000..d0bba05ba --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-hello-world/index.md @@ -0,0 +1,123 @@ +# print-hello-world +Automatically generated README for this automation recipe: **print-hello-world** + +Category: **[Tests](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "print hello-world hello world hello world native-script native script" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=print,hello-world,hello world,hello,world,native-script,native,script[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "print hello-world hello world hello world native-script native script [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'print,hello-world,hello world,hello,world,native-script,native,script' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "print hello-world hello world hello world native-script native script[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_skip_print_env` + - ENV variables: + - CM_PRINT_HELLO_WORLD_SKIP_PRINT_ENV: `yes` + * `_text.#` + - ENV variables: + - CM_PRINT_HELLO_WORLD_TEXT: `#` + +
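Since any combination of these variations can be selected, a sketch that prints a custom message while skipping the environment dump could look as follows (assuming this shortened tag list still resolves the script uniquely):

```bash
# Combine '_text.#' and '_skip_print_env' in a single invocation:
cm run script --tags=print,hello-world,native-script,script,_text.HELLO_CM,_skip_print_env
```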
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--test1=value` → `CM_ENV_TEST1=value` + + + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + * CM_ENV_TEST1: `TEST1` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world/run.bat) +___ +#### Script output +```bash +cmr "print hello-world hello world hello world native-script native script [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-python-version/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-python-version/index.md new file mode 100644 index 000000000..6fd14d421 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/print-python-version/index.md @@ -0,0 +1,86 @@ +# print-python-version +Automatically generated README for this automation recipe: **print-python-version** + +Category: **[Tests](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/print-python-version/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "print python version python-version" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=print,python,version,python-version + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "print python version python-version " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'print,python,version,python-version' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "print python version python-version" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-python-version/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-python-version/run.bat) +___ +#### Script output +```bash +cmr "print python version python-version " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/run-python/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/run-python/index.md new file mode 100644 index 000000000..be4b0fa0f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/run-python/index.md @@ -0,0 +1,95 @@ +# run-python +Automatically generated README for this automation recipe: **run-python** + +Category: **[Tests](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-python/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "run python" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=run,python [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "run python " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'run,python' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "run python" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--command=value` → `CM_RUN_PYTHON_CMD=value` + + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-python/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/run-python/run.bat) +___ +#### Script output +```bash +cmr "run python " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-cm-core/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-cm-core/index.md new file mode 100644 index 000000000..d7ad5f859 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-cm-core/index.md @@ -0,0 +1,87 @@ +# test-cm-core +Automatically generated README for this automation recipe: **test-cm-core** + +Category: **[Tests](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-core/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-core/_cm.yaml)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "test cm core" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=test,cm,core + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "test cm core " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'test,cm,core' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "test cm core" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-core/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-core/run.bat) +___ +#### Script output +```bash +cmr "test cm core " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-cm-script-pipeline/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-cm-script-pipeline/index.md new file mode 100644 index 000000000..4ab088034 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-cm-script-pipeline/index.md @@ -0,0 +1,90 @@ +# test-cm-script-pipeline +Automatically generated README for this automation recipe: **test-cm-script-pipeline** + +Category: **[Tests](..)** + +License: **Apache 2.0** + +Developers: Grigori Fursin +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "test cm-script pipeline" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=test,cm-script,pipeline + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "test cm-script pipeline " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'test,cm-script,pipeline' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "test cm-script pipeline" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/run.sh) + * [run2.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/run2.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/run.bat) + * [run2.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/run2.bat) +___ +#### Script output +```bash +cmr "test cm-script pipeline " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-deps-conditions/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-deps-conditions/index.md new file mode 100644 index 000000000..976ddd6b6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-deps-conditions/index.md @@ -0,0 +1,93 @@ +# test-deps-conditions +Automatically generated README for this automation recipe: **test-deps-conditions** + +Category: **[Tests](..)** + +License: **Apache 2.0** + +Developers: Grigori Fursin +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-deps-conditions/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-deps-conditions/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "test deps conditions" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=test,deps,conditions [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "test deps conditions " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'test,deps,conditions' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "test deps conditions" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--test1=value` → `CM_ENV1=value` + * `--test2=value` → `CM_ENV2=value` + * `--test3=value` → `CM_ENV3=value` + + + + +___ +#### Script output +```bash +cmr "test deps conditions " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-deps-conditions2/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-deps-conditions2/index.md new file mode 100644 index 000000000..94ed26f62 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-deps-conditions2/index.md @@ -0,0 +1,91 @@ +# test-deps-conditions2 +Automatically generated README for this automation recipe: **test-deps-conditions2** + +Category: **[Tests](..)** + +License: **Apache 2.0** + +Developers: Grigori Fursin +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-deps-conditions2/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-deps-conditions2/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "test deps conditions2" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=test,deps,conditions2 [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "test deps conditions2 " [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'test,deps,conditions2' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "test deps conditions2" [--input_flags] + ``` +___ + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--test=value` → `TEST=value` + + + + +___ +#### Script output +```bash +cmr "test deps conditions2 " [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-download-and-extract-artifacts/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-download-and-extract-artifacts/index.md new file mode 100644 index 000000000..8e2e0d0d1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-download-and-extract-artifacts/index.md @@ -0,0 +1,87 @@ +# test-download-and-extract-artifacts +Automatically generated README for this automation recipe: **test-download-and-extract-artifacts** + +Category: **[Tests](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-download-and-extract-artifacts/README-extra.md) + +* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-download-and-extract-artifacts/_cm.yaml)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "test download-and-extract-artifacts" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=test,download-and-extract-artifacts + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "test download-and-extract-artifacts " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'test,download-and-extract-artifacts' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "test download-and-extract-artifacts" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-download-and-extract-artifacts/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-download-and-extract-artifacts/run.bat) +___ +#### Script output +```bash +cmr "test download-and-extract-artifacts " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-set-sys-user-cm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-set-sys-user-cm/index.md new file mode 100644 index 000000000..f4448d63b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/test-set-sys-user-cm/index.md @@ -0,0 +1,96 @@ +# test-set-sys-user-cm +Automatically generated README for this automation recipe: **test-set-sys-user-cm** + +Category: **[Tests](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/test-set-sys-user-cm/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "demo set sys-user cm sys-user-cm" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=demo,set,sys-user,cm,sys-user-cm + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "demo set sys-user cm sys-user-cm " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'demo,set,sys-user,cm,sys-user-cm' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "demo set sys-user cm sys-user-cm" + ``` +___ + +=== "Default environment" + + #### Default environment + + + These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
+ + * CM_SUDO: `sudo` + + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-set-sys-user-cm/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "demo set sys-user cm sys-user-cm " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/upgrade-python-pip/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/upgrade-python-pip/index.md new file mode 100644 index 000000000..3e593c727 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/Tests/upgrade-python-pip/index.md @@ -0,0 +1,86 @@ +# upgrade-python-pip +Automatically generated README for this automation recipe: **upgrade-python-pip** + +Category: **[Tests](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/upgrade-python-pip/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "upgrade python pip python-pip" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=upgrade,python,pip,python-pip + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "upgrade python pip python-pip " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'upgrade,python,pip,python-pip' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "upgrade python pip python-pip" + ``` +___ + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/upgrade-python-pip/run.sh) +=== "Windows" + + * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/upgrade-python-pip/run.bat) +___ +#### Script output +```bash +cmr "upgrade python pip python-pip " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/create-fpgaconvnet-app-tinyml/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/create-fpgaconvnet-app-tinyml/index.md new file mode 100644 index 000000000..ae17fabc4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/create-fpgaconvnet-app-tinyml/index.md @@ -0,0 +1,114 @@ +# create-fpgaconvnet-app-tinyml +Automatically generated README for this automation recipe: **create-fpgaconvnet-app-tinyml** + +Category: **[TinyML automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/create-fpgaconvnet-app-tinyml/_cm.json)* +* Output cached? 
*False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "create app fpgaconvnet" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=create,app,fpgaconvnet[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "create app fpgaconvnet [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'create,app,fpgaconvnet' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "create app fpgaconvnet[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**benchmark**" +
    + Click here to expand this section. + + * **`_ic`** (default) + +
    + + + * Group "**board**" +
    + Click here to expand this section. + + * **`_zc706`** (default) + - ENV variables: + - CM_TINY_BOARD: `zc706` + +
    + + + ##### Default variations + + `_ic,_zc706` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/create-fpgaconvnet-app-tinyml/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "create app fpgaconvnet [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/create-fpgaconvnet-config-tinyml/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/create-fpgaconvnet-config-tinyml/index.md new file mode 100644 index 000000000..e1e0bab29 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/create-fpgaconvnet-config-tinyml/index.md @@ -0,0 +1,114 @@ +# create-fpgaconvnet-config-tinyml +Automatically generated README for this automation recipe: **create-fpgaconvnet-config-tinyml** + +Category: **[TinyML automation](..)** + +License: **Apache 2.0** + + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/create-fpgaconvnet-config-tinyml/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "create config fpgaconvnet" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=create,config,fpgaconvnet[,variations] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "create config fpgaconvnet [variations]" + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'create,config,fpgaconvnet' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "create config fpgaconvnet[variations]" + ``` +___ + +=== "Variations" + + + #### Variations + + * Group "**benchmark**" +
    + Click here to expand this section. + + * **`_ic`** (default) + +
    + + + * Group "**board**" +
    + Click here to expand this section. + + * **`_zc706`** (default) + - ENV variables: + - CM_TINY_BOARD: `zc706` + +
    + + + ##### Default variations + + `_ic,_zc706` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/create-fpgaconvnet-config-tinyml/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "create config fpgaconvnet [variations]" -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/flash-tinyml-binary/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/flash-tinyml-binary/index.md new file mode 100644 index 000000000..df6a2e298 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/flash-tinyml-binary/index.md @@ -0,0 +1,119 @@ +# flash-tinyml-binary +Automatically generated README for this automation recipe: **flash-tinyml-binary** + +Category: **[TinyML automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/flash-tinyml-binary/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/flash-tinyml-binary/_cm.json)* +* Output cached? *False* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "flash tiny mlperf mlcommons" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=flash,tiny,mlperf,mlcommons[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "flash tiny mlperf mlcommons [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'flash,tiny,mlperf,mlcommons' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "flash tiny mlperf mlcommons[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_NRF` + * `_NUCLEO` + * `_ad` + * `_cmsis_nn` + * `_ic` + * `_kws` + * `_native` + * `_vww` + +
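These variations appear to select the target platform (e.g. `_NRF`, `_NUCLEO`) and the TinyML benchmark (e.g. `_ad`, `_ic`, `_kws`, `_vww`); a hypothetical invocation flashing the keyword-spotting benchmark to an attached NRF board might look as follows (the `--build_dir` flag is documented in the next tab, and the path shown is only an example):

```bash
# Flash the keyword-spotting (kws) binary to an NRF target board:
cm run script --tags=flash,tiny,mlperf,mlcommons,_NRF,_kws \
    --build_dir=$HOME/tiny_results   # hypothetical path to prebuilt binaries
```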
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--build_dir=value` → `CM_TINY_BUILD_DIR=value` + + + +#### Versions +Default version: `r1.0` + + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/flash-tinyml-binary/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "flash tiny mlperf mlcommons [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-microtvm/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-microtvm/index.md new file mode 100644 index 000000000..b73c5eb0a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-microtvm/index.md @@ -0,0 +1,119 @@ +# get-microtvm +Automatically generated README for this automation recipe: **get-microtvm** + +Category: **[TinyML automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-microtvm/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-microtvm/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get src source microtvm tiny" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,src,source,microtvm,tiny[,variations] [--input_flags] + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get src source microtvm tiny [variations]" [--input_flags] + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,src,source,microtvm,tiny' + 'out':'con', + ... + (other input keys for this script) + ... + }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get src source microtvm tiny[variations]" [--input_flags] + ``` +___ + +=== "Variations" + + + #### Variations + + * *No group (any combination of variations can be selected)* +
    + Click here to expand this section. + + * `_full-history` + - ENV variables: + - CM_GIT_DEPTH: `--depth 10` + * `_short-history` + - ENV variables: + - CM_GIT_DEPTH: `--depth 10` + +
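As a sketch, fetching the microTVM sources with a truncated git history and an explicit version might look as follows (`--version` values are listed further below on this page):

```bash
# Shallow clone of microTVM for the 'main' version:
cm run script --tags=get,src,source,microtvm,tiny,_short-history --version=main
```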
    + +=== "Input Flag Mapping" + + + #### Script flags mapped to environment + + * `--ssh=value` → `CM_GIT_SSH=value` + + + +#### Versions +Default version: `main` + +* `custom` +* `main` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-microtvm/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get src source microtvm tiny [variations]" [--input_flags] -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-zephyr-sdk/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-zephyr-sdk/index.md new file mode 100644 index 000000000..e32311f97 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-zephyr-sdk/index.md @@ -0,0 +1,93 @@ +# get-zephyr-sdk +Automatically generated README for this automation recipe: **get-zephyr-sdk** + +Category: **[TinyML automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr-sdk/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr-sdk/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get zephyr-sdk" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,zephyr-sdk + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get zephyr-sdk " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,zephyr-sdk' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get zephyr-sdk" + ``` +___ + +#### Versions +Default version: `0.13.2` + +* `0.13.1` +* `0.13.2` +* `0.15.0` + +#### Native script being run +=== "Linux/macOS" + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr-sdk/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get zephyr-sdk " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-zephyr/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-zephyr/index.md new file mode 100644 index 000000000..6016b7ecf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/get-zephyr/index.md @@ -0,0 +1,92 @@ +# get-zephyr +Automatically generated README for this automation recipe: **get-zephyr** + +Category: **[TinyML automation](..)** + +License: **Apache 2.0** + +* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr/README-extra.md) + +* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr/_cm.json)* +* Output cached? *True* + +--- +### Reuse this script in your project + +#### Install MLCommons CM automation meta-framework + +* [Install CM](https://docs.mlcommons.org/ck/install) +* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) + +#### Pull CM repository with this automation recipe (CM script) + +```cm pull repo mlcommons@cm4mlops``` + +#### Print CM help from the command line + +````cmr "get zephyr" --help```` + +#### Run this script + +=== "CLI" + ##### Run this script via CLI + + ```bash + cm run script --tags=get,zephyr + ``` +=== "CLI Alt" + ##### Run this script via CLI (alternative) + + + ```bash + cmr "get zephyr " + ``` + +=== "Python" + ##### Run this script from Python + + + ```python + + import cmind + + r = cmind.access({'action':'run' + 'automation':'script', + 'tags':'get,zephyr' + 'out':'con', + ... + (other input keys for this script) + ... 
+ }) + + if r['return']>0: + print (r['error']) + + ``` + + +=== "Docker" + ##### Run this script via Docker (beta) + + ```bash + cm docker script "get zephyr" + ``` +___ + +#### Versions +Default version: `v2.7` + +* `v2.7` + +#### Native script being run +=== "Linux/macOS" + * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr/run-ubuntu.sh) + * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr/run.sh) +=== "Windows" + + No run file exists for Windows +___ +#### Script output +```bash +cmr "get zephyr " -j +``` \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/index.md new file mode 100644 index 000000000..1ac94a64a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/TinyML-automation/index.md @@ -0,0 +1,6 @@ +* [create-fpgaconvnet-app-tinyml](create-fpgaconvnet-app-tinyml/index.md) +* [create-fpgaconvnet-config-tinyml](create-fpgaconvnet-config-tinyml/index.md) +* [flash-tinyml-binary](flash-tinyml-binary/index.md) +* [get-microtvm](get-microtvm/index.md) +* [get-zephyr](get-zephyr/index.md) +* [get-zephyr-sdk](get-zephyr-sdk/index.md) diff --git a/cmx4mlops/cmx4mlops/repo/docs/scripts/index.md b/cmx4mlops/cmx4mlops/repo/docs/scripts/index.md new file mode 100644 index 000000000..cc29ffc3e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/docs/scripts/index.md @@ -0,0 +1,30 @@ +* [AI-ML-datasets](AI-ML-datasets) +* [AI-ML-frameworks](AI-ML-frameworks) +* [AI-ML-models](AI-ML-models) +* [AI-ML-optimization](AI-ML-optimization) +* [Cloud-automation](Cloud-automation) +* [CM-automation](CM-automation) +* [CM-Interface](CM-Interface) +* [CM-interface-prototyping](CM-interface-prototyping) +* [Collective-benchmarking](Collective-benchmarking) +* [Compiler-automation](Compiler-automation) +* [CUDA-automation](CUDA-automation) +* [Dashboard-automation](Dashboard-automation) +* [Detection-or-installation-of-tools-and-artifacts](Detection-or-installation-of-tools-and-artifacts) +* [DevOps-automation](DevOps-automation) +* [Docker-automation](Docker-automation) +* [GUI](GUI) +* [Legacy-CK-support](Legacy-CK-support) +* [MLPerf-benchmark-support](MLPerf-benchmark-support) +* [Modular-AI-ML-application-pipeline](Modular-AI-ML-application-pipeline) +* [Modular-application-pipeline](Modular-application-pipeline) +* [Modular-MLPerf-benchmarks](Modular-MLPerf-benchmarks) +* [Modular-MLPerf-inference-benchmark-pipeline](Modular-MLPerf-inference-benchmark-pipeline) +* [Modular-MLPerf-training-benchmark-pipeline](Modular-MLPerf-training-benchmark-pipeline) +* [Platform-information](Platform-information) +* [Python-automation](Python-automation) +* [Remote-automation](Remote-automation) +* [Reproduce-MLPerf-benchmarks](Reproduce-MLPerf-benchmarks) +* [Reproducibility-and-artifact-evaluation](Reproducibility-and-artifact-evaluation) +* [Tests](Tests) +* [TinyML-automation](TinyML-automation) diff --git a/cmx4mlops/cmx4mlops/repo/mkdocs.yml b/cmx4mlops/cmx4mlops/repo/mkdocs.yml new file mode 100644 index 000000000..4cb295691 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/mkdocs.yml @@ -0,0 +1,77 @@ +site_name: CM Script Automation Documentation +repo_url: https://github.com/mlcommons/cm4mlops +theme: + name: material + logo: img/logo_v2.svg + favicon: img/logo_v2.svg + palette: + primary: deep purple + accent: green + features: + - content.tabs.link + - content.code.copy + - navigation.expand + - navigation.sections + - navigation.indexes + - 
navigation.instant + - navigation.tabs + - navigation.tabs.sticky + - navigation.top + - toc.follow +nav: + - HOME: index.md + - Getting Started: getting-started.md + - CM Scripts: + - scripts/index.md + - Python-automation: scripts/Python-automation/index.md + - MLPerf-benchmark-support: scripts/MLPerf-benchmark-support/index.md + - Modular-AI-ML-application-pipeline: scripts/Modular-AI-ML-application-pipeline/index.md + - Modular-application-pipeline: scripts/Modular-application-pipeline/index.md + - Modular-MLPerf-inference-benchmark-pipeline: scripts/Modular-MLPerf-inference-benchmark-pipeline/index.md + - Modular-MLPerf-benchmarks: scripts/Modular-MLPerf-benchmarks/index.md + - Reproduce-MLPerf-benchmarks: scripts/Reproduce-MLPerf-benchmarks/index.md + - Modular-MLPerf-training-benchmark-pipeline: scripts/Modular-MLPerf-training-benchmark-pipeline/index.md + - DevOps-automation: scripts/DevOps-automation/index.md + - Docker-automation: scripts/Docker-automation/index.md + - AI-ML-optimization: scripts/AI-ML-optimization/index.md + - AI-ML-models: scripts/AI-ML-models/index.md + - CM-automation: scripts/CM-automation/index.md + - TinyML-automation: scripts/TinyML-automation/index.md + - Cloud-automation: scripts/Cloud-automation/index.md + - Platform-information: scripts/Platform-information/index.md + - Detection-or-installation-of-tools-and-artifacts: scripts/Detection-or-installation-of-tools-and-artifacts/index.md + - Compiler-automation: scripts/Compiler-automation/index.md + - CM-Interface: scripts/CM-Interface/index.md + - Legacy-CK-support: scripts/Legacy-CK-support/index.md + - AI-ML-datasets: scripts/AI-ML-datasets/index.md + - CUDA-automation: scripts/CUDA-automation/index.md + - AI-ML-frameworks: scripts/AI-ML-frameworks/index.md + - Reproducibility-and-artifact-evaluation: scripts/Reproducibility-and-artifact-evaluation/index.md + - GUI: scripts/GUI/index.md + - Collective-benchmarking: scripts/Collective-benchmarking/index.md + - Tests: scripts/Tests/index.md + - Dashboard-automation: scripts/Dashboard-automation/index.md + - Remote-automation: scripts/Remote-automation/index.md + - CM-interface-prototyping: scripts/CM-interface-prototyping/index.md + +markdown_extensions: + - pymdownx.tasklist: + custom_checkbox: true + - pymdownx.details + - admonition + - attr_list + - def_list + - footnotes + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:pymdownx.superfences.fence_code_format + - pymdownx.tabbed: + alternate_style: true + - toc: + slugify: !!python/object/apply:pymdownx.slugs.slugify {kwds: {case: lower}} +plugins: + - search + - macros + - caseinsensitivefiles
diff --git a/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-analysis-ctuning/README.md b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-analysis-ctuning/README.md new file mode 100644 index 000000000..9d4b69694 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-analysis-ctuning/README.md @@ -0,0 +1,93 @@ +On this page, we highlight some of the exciting submissions by CTuning in the MLPerf Inference v3.1 round. + +## Top Results in Edge Category + +In the edge category, the Rigel Supercomputer from One Stop Systems achieved the peak offline performance for the four submitted benchmarks: Image Classification (ResNet50), Object Detection (RetinaNet), Language Processing (Bert) and Speech Recognition (RNNT). The graph below compares the peak performance of the bert-99 model among the top 10 performing systems.
+ +![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/9f8e3367-1ca4-4298-8545-285cdedfc991) + + +Nvidia RTX 4090 has the best performance per accelerator, and this accelerator is assembled on a PC made by PCSPECIALIST UK. The graph below compares the performance per accelerator of the bert-99 model among the top 10 performing systems. + +![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/c02120cb-eda9-4eef-9e22-56fff4bf23a7) + + +Nvidia RTX 4090 also wins the latency metric for ResNet50, Bert and 3d-unet in the SingleStream scenario. +![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/6d4b39a0-9f39-474a-ac16-5498e281ebad) + +![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/8afb5609-581d-4ee8-be56-731af731f10f) + +![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/5cb88f53-9255-4a0b-98df-a192ba87b125) + + +## Best energy-efficient results in the Edge category + +For the Speech Recognition model rnnt, CTuning submitted the most power-efficient result on the Nvidia Jetson Orin AGX. + +![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/d485aa50-a0d4-4a40-a805-cc2ddc3e0ca6) + + +For the Medical Imaging model 3d-unet, where the samples per second are quite low, the four best energy-efficient results are from CTuning. + +![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/d15297fb-3eff-47c9-b188-68d438b7f248) + +For the Language Processing model bert-99, the gloria high-end system from Qualcomm tops the energy-efficiency metric, and CTuning's Nvidia Jetson Orin AGX is in second place. + +![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/22c85404-51f5-44b7-b128-8df4579c635c) + + + +## Benchmarking Rigel Supercomputer + +The Rigel Edge Supercomputer from One Stop Systems wins the peak performance for all four submitted models and comfortably beats the second-place system. It also achieves the best latency in the ResNet50 MultiStream scenario. + + +![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/635f5f29-080f-4c7c-85a5-65fcf438f9e1) + +![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/c993c2f5-a8b7-4a11-b89f-35d96e357e42) + + + + + +## Benchmarking MLPerf Inference Reference Implementations + +We compared the performance of the reference implementation with that of the Nvidia-optimized implementation by running both on an Nvidia RTX 4090 GPU. The reference implementation uses fp32 models, whereas the Nvidia implementation uses quantized models. + +![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/b46bc509-f242-4bc6-a9e8-ec318d09616b) + +![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/404b54d2-a04e-4e5e-861d-43c7d940faf8) + +![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/f5a04e85-269f-485a-8839-348dddcd5eb7) + +## Showcasing Apple Metal Performance + +We benchmarked the performance of Apple Metal using tensorflow-metal. The graphs below show the performance benefit of running inference on Apple Metal via tensorflow-metal versus onnxruntime running only on CPUs.
+
+![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/87385e24-b3b5-4694-8106-2c30eeb393de)
+
+![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/c9a38dc9-0986-461e-b81d-988297e1771e)
+
+![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/4b8565b4-7a23-4f29-b450-6eaf00d10f63)
+
+
+
+
+
+## Design Space Exploration For NeuralMagic Deepsparse Library
+
+Using the CM experiment automation, we performed a design space exploration to find the optimal batch size for the bert-99-compatible sparse models.
+
+![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/a18088f2-c864-4c16-b714-5b375cf5fc94)
+
+![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/8bd95c5f-344f-4d9f-9f94-c3024efbce13)
+
+
+## Comparing the performance of Modular MLPerf Inference C++ implementations
+
+Here we compare the performance of the MIL library used by CTuning and the KILT library used by KRAI, both on CPUs and GPUs. This is not an apples-to-apples comparison: KILT used an Nvidia A1000 GPU while MIL was run on an Nvidia RTX 4090 GPU. For CPUs, KILT was run on a [24-core Dell server](https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Krai/systems/7920t-kilt-onnxruntime_cpu.json) with a peak frequency of 4000 MHz, whereas MIL was run on a [16-core PCSPECIALIST custom workstation](https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/CTuning/systems/amd_ryzen_workstation-cpp-cpu-onnxruntime-vdefault-default_config.json) with a peak frequency of 5900 MHz.
+
+![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/6d73360a-27ab-4158-b4cc-a5724d6d4c73)
+
+![image](https://github.com/ctuning/mlcommons-ck/assets/4791823/d6b5516b-4861-4355-badf-65decbf8d3b0)
+
diff --git a/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-analysis-ctuning/_cm.json b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-analysis-ctuning/_cm.json
new file mode 100644
index 000000000..4860af17b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-analysis-ctuning/_cm.json
@@ -0,0 +1,16 @@
+{
+  "alias": "mlperf-inference-v3.1-analysis-ctuning",
+  "automation_alias": "report",
+  "automation_uid": "6462ecdba2054467",
+  "date":"20230917",
+  "title":"cTuning's analysis of MLPerf inference v3.1 community results",
+  "tags": [
+    "mlperf",
+    "inference",
+    "mlperf-inference",
+    "v3.1",
+    "analysis",
+    "ctuning"
+  ],
+  "uid": "ebc483653dbc45b6"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-press-release-ctuning/_cm.json b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-press-release-ctuning/_cm.json
new file mode 100644
index 000000000..99d0370a5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-press-release-ctuning/_cm.json
@@ -0,0 +1,17 @@
+{
+  "alias": "mlperf-inference-v3.1-press-release-ctuning",
+  "automation_alias": "report",
+  "automation_uid": "6462ecdba2054467",
+  "date": "20230913",
+  "redirect": "https://www.linkedin.com/pulse/new-milestone-make-mlperf-benchmarks-accessible-everyone-fursin",
+  "tags": [
+    "mlperf",
+    "inference",
+    "mlperf-inference",
+    "v3.1",
+    "analysis",
+    "ctuning"
+  ],
+  "title": "cTuning press-release about making MLPerf inference accessible to everyone",
+  "uid": "85ff4a6ac203411e"
+}
diff --git a/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-press-release-hpcwire/_cm.json b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-press-release-hpcwire/_cm.json
new file mode 100644
index 000000000..159a98673
--- /dev/null
+++
b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v3.1-press-release-hpcwire/_cm.json @@ -0,0 +1,17 @@ +{ + "alias": "mlperf-inference-v3.1-press-release-hpcwire", + "automation_alias": "report", + "automation_uid": "6462ecdba2054467", + "date": "20230913", + "tags": [ + "mlperf", + "inference", + "mlperf-inference", + "v3.1", + "analysis", + "ctuning" + ], + "redirect": "https://www.hpcwire.com/2023/09/13/mlperf-releases-latest-inference-results-and-new-storage-benchmark", + "title": "HPCWire about MLPerf inference v3.1 and storage results (with cTuning/cKnowledge coverage)", + "uid": "50960565640142d6" +} diff --git a/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v4.0-press-release-ctuning/_cm.json b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v4.0-press-release-ctuning/_cm.json new file mode 100644 index 000000000..15c3fa6c4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/report/mlperf-inference-v4.0-press-release-ctuning/_cm.json @@ -0,0 +1,17 @@ +{ + "alias": "mlperf-inference-v4.0-press-release-ctuning", + "automation_alias": "report", + "automation_uid": "6462ecdba2054467", + "date": "20230913", + "redirect": "https://www.linkedin.com/pulse/new-cm-mlperf-automation-helps-benchmark-commodity-hardware-fursin-61noe", + "tags": [ + "mlperf", + "inference", + "mlperf-inference", + "v4.0", + "analysis", + "ctuning" + ], + "title": "cTuning press-release about a new version of the CM workflow to automate MLPerf", + "uid": "acc35b8e9ed14c98" +} diff --git a/cmx4mlops/cmx4mlops/repo/requirements.txt b/cmx4mlops/cmx4mlops/repo/requirements.txt new file mode 100644 index 000000000..b1eac3c17 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/requirements.txt @@ -0,0 +1,6 @@ +cmind>=2.0.1 +pyyaml +requests +setuptools +giturlparse +tabulate diff --git a/cmx4mlops/cmx4mlops/repo/script/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/README.md b/cmx4mlops/cmx4mlops/repo/script/README.md new file mode 100644 index 000000000..d2667369c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/README.md @@ -0,0 +1,40 @@ +## About + +Portable CM automations for MLOps and MLPerf. + +## License + +[Apache 2.0](../../LICENSE.md) + +## Copyright + +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. + +This file is licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License can be obtained at: + +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software distributed under the License is provided on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. Please refer to the License for the specific language governing permissions and limitations under the License. 
+
+## Maintainer(s)
+
+* MLCommons
+
+## CM author
+
+[Grigori Fursin](https://cKnowledge.org/gfursin)
+
+## CM script developers
+
+Arjun Suresh, Anandhu Sooraj, Grigori Fursin
+
+## Parent project
+
+Visit the [parent Collective Knowledge project](https://github.com/mlcommons/ck) for further details.
+
+## Citing this project
+
+If you found the CM automations helpful, kindly reference this article:
+[ [ArXiv](https://arxiv.org/abs/2406.16791) ]
diff --git a/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/README-extra.md
new file mode 100644
index 000000000..2b61d193c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/README-extra.md
@@ -0,0 +1,7 @@
+# About
+
+Activate a Python virtual environment installed via CM:
+
+```bash
+cm run script "activate python-venv" (--version={python version}) (--name={user-friendly name of the virtual environment})
+```
diff --git a/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/README.md b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/README.md
new file mode 100644
index 000000000..c9c3db32f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Python-automation/activate-python-venv](https://docs.mlcommons.org/cm4mlops/scripts/Python-automation/activate-python-venv) for the documentation of this CM script.
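
For illustration, a concrete invocation of the command documented above might look as follows. This is only a hedged sketch: the `--version` and `--name` values are hypothetical examples, not values taken from this repository. The `--name` input presumably feeds the `CM_NAME` variable read by the script's `customize.py` further below:

```bash
cm run script "activate python-venv" --version=3.10.8 --name=mlperf
```
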
diff --git a/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/_cm.yaml new file mode 100644 index 000000000..ed65cf2f1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/_cm.yaml @@ -0,0 +1,18 @@ +alias: activate-python-venv +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Python automation +developers: '[Grigori Fursin](https://cKnowledge.org/gfursin)' +name: Activate virtual Python environment +prehook_deps: +- names: + - python-venv + reuse_version: true + tags: install,python-venv +tags: +- activate +- python +- activate-python-venv +- python-venv +tags_help: activate python-venv +uid: fcbbb84946f34c55 diff --git a/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/customize.py b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/customize.py new file mode 100644 index 000000000..c22b25b65 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/customize.py @@ -0,0 +1,42 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + name = env.get('CM_NAME', '') + if name != '': + name = name.strip().lower() + + r = automation.update_deps({'deps': meta['prehook_deps'], + 'update_deps': { + 'python-venv': { + 'name': name + } + } + }) + if r['return'] > 0: + return r + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/run.bat b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/run.bat new file mode 100644 index 000000000..5ca2ac0ed --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/run.bat @@ -0,0 +1,7 @@ +echo. +echo call "%CM_VIRTUAL_ENV_SCRIPTS_PATH%\activate.bat && cmd" +echo. +echo Enter exit to exit virtual env. +echo. + +call %CM_VIRTUAL_ENV_SCRIPTS_PATH%\activate.bat && cmd diff --git a/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/run.sh b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/run.sh new file mode 100644 index 000000000..6569b07e5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/activate-python-venv/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +echo "" +echo " bash --init-file ${CM_VIRTUAL_ENV_SCRIPTS_PATH}/activate" +echo "" +echo " Enter exit to exit virtual env." +echo "" + +bash --init-file ${CM_VIRTUAL_ENV_SCRIPTS_PATH}/activate diff --git a/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
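
Note that the `prehook_deps` entry in the activate-python-venv `_cm.yaml` above (tags `install,python-venv`) means the virtual environment is created on demand by a separate CM script before activation. Assuming the standard CM CLI, that dependency can also be invoked directly; the name and version values here are illustrative:

```bash
cm run script "install python-venv" --name=mlperf --version=3.10.8
```
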
diff --git a/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/README-extra.md
new file mode 100644
index 000000000..baa487880
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/README-extra.md
@@ -0,0 +1,2 @@
+# About
+This CM script detects the system details using an Nvidia script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/README.md b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/README.md
new file mode 100644
index 000000000..f10735581
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/add-custom-nvidia-system](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/add-custom-nvidia-system) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/_cm.yaml
new file mode 100644
index 000000000..6dce8414d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/_cm.yaml
@@ -0,0 +1,126 @@
+# Identification of this CM script
+alias: add-custom-nvidia-system
+uid: b2e6c46c6e8745a3
+cache: true
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: "MLPerf benchmark support"
+docker:
+  real_run: False
+
+# User-friendly tags to find this CM script
+tags:
+  - add
+  - custom
+  - system
+  - nvidia
+
+
+# Dependencies on other CM scripts
+
+deps:
+
+  # Detect host OS features
+  - tags: detect,os
+
+  # Detect host CPU features
+  - tags: detect,cpu
+
+  # Install system dependencies on a given host
+  - tags: get,sys-utils-cm
+
+  # Detect python3
+  - tags: get,python3
+    names:
+    - python
+    - python3
+
+  # Detect CUDA
+  - tags: get,cuda,_cudnn
+
+  # Detect TensorRT
+  - tags: get,tensorrt
+
+  # Detect CMake
+  - tags: get,cmake
+
+  # Detect requests
+  - tags: get,generic-python-lib,_requests
+
+  # Detect Google Logger
+  - tags: get,generic,sys-util,_glog-dev
+
+  # Detect GFlags
+  - tags: get,generic,sys-util,_gflags-dev
+
+  # Detect libre2-dev
+  - tags: get,generic,sys-util,_libre2-dev
+
+  # Detect libnuma-dev
+  - tags: get,generic,sys-util,_libnuma-dev
+
+  # Detect libboost-all-dev
+  - tags: get,generic,sys-util,_libboost-all-dev
+
+  # Detect rapidjson-dev
+  - tags: get,generic,sys-util,_rapidjson-dev
+
+  # Download Nvidia Submission Code
+  - tags: get,nvidia,mlperf,inference,common-code
+    names:
+    - nvidia-inference-common-code
+
+  # Detect pycuda
+  - tags: get,generic-python-lib,_pycuda
+
+variations:
+  nvidia-only:
+    group: code
+    add_deps_recursive:
+      nvidia-inference-common-code:
+        tags: _nvidia-only
+  custom:
+    group: code
+    add_deps_recursive:
+      nvidia-inference-common-code:
+        tags: _custom
+  mlcommons:
+    group: code
+    add_deps_recursive:
+      nvidia-inference-common-code:
+        tags: _mlcommons
+  ctuning:
+    group: code
+    add_deps_recursive:
+      nvidia-inference-common-code:
+        tags: _ctuning
+  go:
+    group: code
+    add_deps_recursive:
+      nvidia-inference-common-code:
+        tags: _go
+
+
+
+
+versions:
+  r2.1:
+    add_deps_recursive:
+      nvidia-inference-common-code:
+        version: r2.1
+
+  r3.0:
+    add_deps_recursive:
+      nvidia-inference-common-code:
+        version: r3.0
+
+  r3.1:
+    add_deps_recursive:
+      nvidia-inference-common-code:
+        version: r3.1
+
+  r4.0:
+    add_deps_recursive:
+      nvidia-inference-common-code:
+        version: r4.0
diff --git
a/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/customize.py b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/customize.py new file mode 100644 index 000000000..016d9cdcd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/customize.py @@ -0,0 +1,35 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/run.sh b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/run.sh new file mode 100644 index 000000000..b89617f7f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/add-custom-nvidia-system/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash +CUR=$PWD +cd ${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH} +${CM_PYTHON_BIN_WITH_PATH} scripts/custom_systems/add_custom_system.py +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/README-extra.md new file mode 100644 index 000000000..e379e2544 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/README-extra.md @@ -0,0 +1,17 @@ +# About + +See [this tutorial](https://github.com/mlcommons/ck/blob/master/docs/tutorials/modular-image-classification.md). + +# Collaborative testing + +## Windows 11 + +* CUDA 11.8; cuDNN 8.7.0; ONNX GPU 1.16.1 + +## Windows 10 + +* CUDA 11.6; cuDNN 8.6.0.96; ONNX GPU 1.13.1 + +## Ubuntu 22.04 + +* CUDA 11.3; ONNX 1.12.0 diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/README.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/README.md new file mode 100644 index 000000000..1efef8201 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-onnx-py](https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-onnx-py) for the documentation of this CM script. 
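
Before the metadata below, a brief usage sketch may help. This is a hedged example, not an authoritative command: the tag string comes from `tags_help` in the `_cm.yaml` that follows, `--input` maps to the `CM_IMAGE` variable via its `input_mapping`, and the image path is illustrative (the script ships a sample `img/computer_mouse.jpg`):

```bash
cm run script "modular python app image-classification onnx" --input=computer_mouse.jpg
```
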
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/_cm.yaml new file mode 100644 index 000000000..740a8a18a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/_cm.yaml @@ -0,0 +1,120 @@ +alias: app-image-classification-onnx-py +uid: 3d5e908e472b417e + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular AI/ML application pipeline" + +tags: +- app +- modular +- image-classification +- onnx +- python + +tags_help: "modular python app image-classification onnx" + +default_env: + CM_BATCH_COUNT: '1' + CM_BATCH_SIZE: '1' + + +deps: +- tags: detect,os +#- tags: get,sys-utils-cm +- names: + - python + - python3 + tags: get,python3 + +- tags: get,cuda + names: + - cuda + enable_if_env: + USE_CUDA: + - yes +- tags: get,cudnn + names: + - cudnn + enable_if_env: + USE_CUDA: + - yes + +- tags: get,dataset,imagenet,image-classification,original,_run-during-docker-build + +- tags: get,dataset-aux,imagenet-aux,image-classification +- tags: get,ml-model,resnet50,_onnx,image-classification + names: + - ml-model + +- tags: get,generic-python-lib,_package.Pillow +- tags: get,generic-python-lib,_package.numpy + version_max: "1.99.99" +- tags: get,generic-python-lib,_package.opencv-python + + +- tags: get,generic-python-lib,_onnxruntime + names: + - onnxruntime + skip_if_env: + USE_CUDA: + - yes +- tags: get,generic-python-lib,_onnxruntime_gpu + names: + - onnxruntime + enable_if_env: + USE_CUDA: + - yes + +variations: + cuda: + docker: + all_gpus: 'yes' + group: target + env: + USE_CUDA: yes + + cpu: + group: target + default: yes + env: + USE_CPU: yes + +input_mapping: + input: CM_IMAGE + output: CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT + + +new_env_keys: + - CM_APP_IMAGE_CLASSIFICATION_ONNX_PY* + + +new_state_keys: + - cm_app_image_classification_onnx_py + + +input_description: + input: + desc: "Path to JPEG image to classify" + output: + desc: "Output directory (optional)" + j: + desc: "Print JSON output" + boolean: true + +docker: + skip_run_cmd: 'no' + skip_cm_sys_upgrade: 'yes' + cm_repo_flags: '--branch=dev' + use_host_group_id: 'yes' + image_tag_extra: '-cm-dev' + input_paths: + - input + - env.CM_IMAGE + - output + skip_input_for_fake_run: + - input + - env.CM_IMAGE + - output + - j diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/customize.py new file mode 100644 index 000000000..0ca34fc95 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/customize.py @@ -0,0 +1,77 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + +# print ('') +# print ('Running preprocess function in customize.py ...') + + return {'return': 0} + + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + + +# print ('') +# print ('Running postprocess function in 
customize.py ...') + + # Saving predictions to JSON file to current directory + # Should work with "cm docker script" ? + + data = state.get('cm_app_image_classification_onnx_py', {}) + + fjson = 'cm-image-classification-onnx-py.json' + fyaml = 'cm-image-classification-onnx-py.yaml' + + output = env.get('CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT', '') + if output != '': + if not os.path.exists(output): + os.makedirs(output) + + fjson = os.path.join(output, fjson) + fyaml = os.path.join(output, fyaml) + + try: + import json + with open(fjson, 'w', encoding='utf-8') as f: + json.dump(data, f, ensure_ascii=False, indent=4) + except Exception as e: + print('CM warning: {}'.format(e)) + + try: + import yaml + with open(fyaml, 'w', encoding='utf-8') as f: + yaml.dump(data, f) + except Exception as e: + print('CM warning: {}'.format(e)) + + top_classification = data.get('top_classification', '') + + if env.get('CM_TMP_SILENT', '') != 'yes': + if top_classification != '': + print('') + x = 'Top classification: {}'.format(top_classification) + print('=' * len(x)) + print(x) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/img/computer_mouse.jpg b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/img/computer_mouse.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7f8abb6fe93d18af393ea036b24b907cc48e786 GIT binary patch literal 41154 zcmce-XIxWVvp2kxkWfM|0YZy3LkT^U2pC$ZB3%dqL0V`cO(~Zcs&oNCy3#vH6%|mr zbOX{vR8&MjL_x%^&-S{n`<(l{&-wCvc{jhE_0Q~;*|TTO%$l`k^LzRC8o+5rG9>{J z2m~+z|A5~w#H)-Ve7ym{)KnH=0sw#o;DA8?M%Tb^oFHZf+w0)T4S@j=u>EtQA@IN3 zBoGVzgEK*l{ihEU#LD1r*}=9BJXJu<3%0kwlLz|We0(o6{LN>P-p zIsf7x0baT$oS|G_mN&iMy7ffy+JM}B1`B}mzSYZqGfFYN-LW?}!v;Df^d!P+1l zTJ^8~AQu4k9~k}*MuT+3f6y^iP(To*_5Zmqc>H;e2m}Cdfh`Mo{%<4lztI#xIXuDs zh2Z(8bpU1g)BZXB@cB2r3cSG|x{|7z=KuWs)APR%17#Hp3p0IXO@|Yhe_;b9W#bbd zD@IVp|JDx_9O*wY0~iRv`fm(=3jXpBUjVSt z(ge|e-|bH~(0?HR+!NFpSY8utY{qP0X_alNe_dh( zGr`2v9v1I+{t90AoR_vqm6H_913`^k&lRYbf)vYWY7o{of<+}F&T~uHGtR4b%=GG6 zD=CSl?pRM+y`#Fxq{3Vx1v`nD>R$YD`XYu6QOEd9sO0Q}+ZODIHBlZe~}g)_AAruSinUbMs_m^JnF1RBZH2 zkbGIT!yXp(PF8nJZAxAdyv+LTs>)AtwP$%9F;fvxKgl)NH!O72gjpqMg86EU^<2+> zybMGdoj`NqO~58~uqf zg5>5NTkUXGf}kCns@j*!GOETYpqwsTrQ9qNC^-q^DQgD7`~mr8@o2{NZRH)wyLtS_67W0(z~q4oR58kIDq1 z%6oCXaGF&j+e5w~maZW!rj_n`ldZ(ts|?6?DOTL=2?p6%L#(8U zIB$v#1~Hdl0E!2||1#rC=K>A9l}`?FYS{N0+chOwaP)cuufeZDcfj#e8Q|(og~{{C zdtEj`IYFmd720YXr8yG?$k2*+LSAp6FO4LJK>gNQhe8L5#mS7W!QGq+edWxzJCW>r z`7Ub$Ce@C_+v%FI?y${w$4oo>s#XGdA4$(07Wxw3VUqPFxn$v(WOK7@tRQ!lK9{T^ zgiD-$YsKLihABn=$XKpiFOZij8%0tRonup-C)!_Qy*a8fF6%Iq%kWfNPvuKy_8i@k zR$6Y--k;qYNRJ1Fj}5^r8?+CorNWpHqTrCacFK| z4e(l%j$+82-q|yalEvl4 zC@C5b&m_ujL=-;2GIh)VHyG!4MJxa;SWblU*b0q9jAHX=kxz zdZRD*VU}S(jc&8QYkqRi+@AUs zONE0M=%U%Ept!OQ$NM-(g0Xbg zEhJjR1V-xi$(M_we?+Fg+=9b1Xu{(~r+6D1(Rl7U;e)I|@ds2BFV>p1_Y6T3p@q_( z=N1r*+;0}{{JJXE^YxPN9M7UJF$tbnwlRIjk9ie$@+$E=&O!JjAY!<<7&(A^Jn8C z4$X7Ve0iul?D3H$)0e-e{pZGJ#ZTZH*;#kG?MLA7c+y~8r=A?C@bw8oN91-4H0n3N z?y05{(2&968z?uG@i64C!kMS80z-6ODytT<9>Bx~8a#bZvY^Y3N^ zlZ&LWve9q>jj@t$eTF)8R98)Pj?d-_ za?eR|iT{aXMmUu}jkUSOBj&0}2Ay&kl4@^yE=O*mJaOor1ed+9O$}bPtrcp0$}h}U zhuk7l!DcZP0KC_>&D0!hiB3Cx6E}a(@0-0{kjqU<;5X*8nyrZnd#B6n7 zhVH(y7(-_)3NVq8kPK#eR@Hv7`Y`{Q26s!tqUTdbKigH2S54WT`Z6k9ed=>M4}!n{ zCCPl(=&8hRvC&g6hrT(Swe-N8oV6$VufaZH6w56|&K2ye{!TPRH+)8w@O3mec}mXB z-r?}nhnspIP}gg>Y&A3ky4iAV{B^P4Ca(QJj{S9VOH-F^h!XsGbI`-J2&Z?>G1@Tx zP;V>dXN|wrd&Un70o&EuB!%qL-Rvu!rG25lR`0*wc$le5R^#*%u+hn4J8?WL%c>M>V}!7mk07TwgZREP1P0O}rD8_uQUzT;=4TM>FB{6&7Dl 
z-lumK&)0ugKW@smRI=$mXBM_tibtN{m5rsd+nT4kOsxpL=sneTN-?xgqW<{GCEvcY zp{avQn%b|}Tn`r1ZTuf)2_?ueF%mciIm@5qvu*6uqjC)QyV)$#s&?VIl30QRdROK1EePwnKbIbw!kr^}+Q zUcJdNNXzTf9K**iUKy2ti`83{vGyLU7?EY>`o4Blp{7xemonj_hNnMck7oHyy+-LcUoV^LC#S87STr zs(-5)llbf1()t;-<$I%$W`X(%h1F&(GK5yoe$s@cq>9CEm8gLodLPk4{O0;d@~1aB zkV_sh$B=y|*CZcmcpKrE24QNje8hD&@}$K>7!?6CGI7DkM?u;S2di(>imq@h`6%bdr?wyQU!!(O--i54ch zhfQ^=FoOy0x>;VX!X(4wOBF*QCMKQw`T5cLRo~DjZNGt1j%4)wL>t3di7rl+ETe%` z*WbX4ZJuVvgYoa%gy;ubd)X#-ju}?)jfuLF>yfTC$AwFD;zO?3>v$_P6r^SPr;RCQIUGwnme zb=SDl(gqun@#(hwN*Vn8dr}H~FB+CVdXF4G_;B#>S?JQL){mo80=@PDt_m$jx$GMj z8%CZkueM3A&%Bz;=PY@ZChP92^PvDaOJpCpxA*nkhwBlJQ%@RXab{f(d1?x~1NL^O zb0V1!x+`bctB1Z4r0f~_$q$n+O@A?to!sy#jFNg0Td;BE#+mwONnD-Q3;_#-8?d6!<@tS>>A--I3KtN9^T`ATb;=?6; zLfIM@(!{oK8Lu;ZaoC|s?%G4mq)gk`(3IMnN|I8nc-@Fv=?R`^k-tQH9CP>6+i$tV zU3g%}*CB5kwc}zvc1O%J#$JfyLgBqBrLbUX(P&X_fnBwA%!0abf#`4G*9qSdE-UxF zSl?5}BrL=JugKbwfSR+) zJVsX<>Q5AA(D)?>A1IgH@g&P$Y-r+TezvyBs_sUjJ<*4kxEd!0ywUlETl%3cuQ7>DK;gO%>IQ z<|voUmym12@q|?e3G6QeBVVhd=nNm9r)Ix_hhHZ`x=g)s^htN55-sAq61z9})YS&) zx789zS0gjA#PyH7!00+VW>ubTFSkPeDID>3ip;u(uE- z-rX`MbwMY$TPgba@J;w@!tBS>_BxgizFDmPDDWc)-;l4(^UD1&hG=wo7`mwT;T?aB zP2zS)>V3yYpHj7DE~F`UdcAyzL59-;Uo7`)IEtB0Wk+5OXqM+FG{p=S*lIs~gx6PL zy;ZOt48Ldo8z|h8^(l(-sByj=^6mL$p4yGt!&KuWt<{I5gJVRym#g+6$0r7}bYAEG z22^K+N)GNDgzN92JWe*c$BH4uddI$LFy9iwM|xB# z7ybG&_1JhXOlC{-H(*+zn!aXXEBmx{x9f8M@=xYcijw0GoOXG&&WclVYj2wyW<%1o<9!7kwDzZ7F&5a~ zYtpf=ckkM{z^(S317@Wz%s%_ccgFMfN<AoB zyJ_o-A>k9OWcwnfULm{K3lUw$1)s#bWcB6j4~xDn9PwNDi2u|O`V5e}uhrbLSF|8~ ztIzTr@38jbQnV}HwQANoVp~%iZ(-y!m#kosIv1=Srn1V+`{aj_n|HyxH!rkyk>f&Q znOZgSsA`j^;TI3ze<-Rx?ZX$$WW}@?!%IKuG_bb0rexQe%YTC4@2;RRCJ~?>@4@uo zHNz+;qwP1whmQO>KbpB0;F6GU7vR;mo)-Kx8h!J(|1I~LlcdyJ8EPqZ$IFW*Mv zFuUlj(Mgq;RZGn-cP3MNTtAGzcam5%{25e;XeuAFrAjyQ`8}h!b{DYfP@BoUpJStD z$$7o~edHcPzxhI&n+18~RylIj;!^zvsw?o$w8K$OUXe9MzSFX8lEBg2J?U^;%?Ify z_V;?^3I3m?*O6^6XCyyOeLc&(t7RX&w4c(k_KaWC>-3URt>4t~>JyijMIILToowz# zt}|ct8fc&e+h3@A+I!-yU3TPZkS6eh!-bqaqc)pk=A{f}m}l?_Y(w zuA8h||M1z5+6P;NK$~#lS0ADHVM=hg(sBdNtXZ`>J26?ca%~san04k-;&6v9wDI(l z&=dDLf4z))h&p#3nY{UJ{g#+$+_=LNjilY9NgH1k+^@cJMqVlNwG-}9+U2lTS#mRd zT{t4_En1ATKiAK9?ySIxvW8<1iy}hPf(zO7^lKWHRW6wCUO#nseL82)5~(`aY-T@V zc+5|DV8_@%0(DHxWkaud!$0YWBeaQ0$8gAbsxEBt4RoP*1l?UA-3+kNxk|a@=BRSsaA}xh6S*SCgl9&xIJKx!7p4$mdgxm5 zD-Ks2%zo_H$ZtS#qW;b)^+V&%-#}~WQMPkl@;xLC73eL+qMP{I?*(ZY7nnc9%Srnb zJvzMQ7v>nU@AKDrfUSCFkU8^%>?rdUt@BD2Uc8~5FaeVC1#zN=GC6~FLw;j}D6Pcm z?%1!KtslxC%KE0tX^Iv=IX$Qx8#=>CUz>zM8l3Gm<6SN5L~IO#u(%Ds_ytuvs9#|N zhg}?!*b3cR36Mak6B_t3JQd_Xz_ z{&CY`nduPW@TDcn9A|$U3rQRkT{~j2;uTioqF?&tn>e~Tt zYT%?z-f`t4*;~j$_XdtLcg50^MsT+%F{)F>EWy_VR<7awAkS)UF1my$XuN(~e?=<) zFyTS@{0LU4RrDjipy>&-k-6P)MeIh1MB-noed6n}KFqSDXl__O` z%7vinOP87i%vE1msI-;mSDx_~p;<>ZR1W>6H*7yDfv-DP5OS9HV|VPMS2~RFoY#?l zPZ;B>O@3kS&3^fqsSI;hkh9HQ*E(-*OyW*vk69o+EqJ)xFlym0gns7A0IwPUX*tnq_`M_xzea(34H>d#e|o$jm;Qc0{f z4}^}qMuz9&uw=TzG-6toY}Lu?ftWtB!dklKSj>bvHs$>mK{j$%^88w0Ne`G~@{ae2 zOs88cCK#yG73QYstn?$(Ya+dq$~5o3b>2{5dc$c2Qe2W+*$GqY>5f47@);}1Kb2bU zSAGL+I*W0fY>V}PJ;#r;@h==b{YV-QMgrlKqBSK^?>e?n{MTHuKhsSpeuCZAu9rE| zW(%Caw}B2=b= z7Mas!Tj=s$1@9R~jqKBT1ig>x@hn>JMtyU&Y{?D`37>2#e4LhR!%T3wb}WZ1QWKA{yCX*IgjKpSZ@^E2Jsn zCAe0&&ac=j7^(lk2qYnOpH@5l7*PG1=yaXA*yG3zs&q`dxYOZB==6~y=HjW;YgCkP z`V(T0TW(y-Q;J}Z>G?PhmQf{nZ`B}%>E3vO{^Vr zPGc!$$oh~JZm|GqdQvhSLlJW;M6ZRv!}Qka-a)_o_G4-Zs}+e}!yn`+LgMz3EAoDk zS#DEh8&&lYFx#n`+j^4|{4~5qE?&D|tA2y|Muc=d;^79w>6cQ#=Qzc{vdMlqowBrN z%q5?5apKA3pOnX+bA47=d&k2{R9q$K|?g5Q^QDbD*N%P3Xamv!|$!!%}; zs_x7BMDHrpT<@w58f=$v%8R>D_d;Jr`T)i z_qDgXxwrg2I3pVMd0ug1^6ZVbk@YjdOR?sEl=U-8d)^fHrnfxacB;OOqO9PGn9fRAHV1%`~S$>=%!p~IC 
zX{~UW=eUXbB^6}HK*#x9MTA?-xyBAhVuy)$oXeLe|_2l|^PNV(17$ zO#8CZ0~Vh&F-z)oglAclAMK_5e*9m>*#BR_pE5H57J3nI1_T1E05c+(SU6c&n3-94 z**RD_`FRBd_<2z%w1|`#T1Y||g~G^VB&3hZ$jS(c;goP#B`K^7_U~FXBO@aV6AKRu z3lA2JLSz4*kH2fx4B-0||DRg*-^Jwrs8&NE0E__+$_ZAKO~G3ApJFvwXh$$GK$#f- z6sx(8z?2!_de#VT_dkVd9$s;kHbx13n}JzA)iWWIaW6g?PaY%AIa_tcE+JVNJQ zf~-*gSJ4_|1=7*~6s`ZGY7OB!0v5vmXVv;^9;GX0$og({o_qp_;>E<*ZV;(?dne%3 zPo(SOak5lEKmHD{)!X?D3bV6NZCzt|4zM`fl(IH&b*WxVZw}AE@*Ls2oMHmA%y`X5 zS%|})Dz6LotcV+TF(#G49Hl5*t_l7ajuh;cONyP99DSZ31+W<$N&(osT|$TEA?&In z-g%p#;sh&pt!ffM?~-U6<1Xx}inmPd7}a}t*Yr<|qaP($ig`NZ4kWNwm~>M_0~xPS3u~I??iXI^cK0t|Xo|W# z=_y?QV_IsV6umWnT$$zQW`(xYSpQzIm>5zkhwvWXYpaFOtHtE&KzqXzB>y zm+`w{{tzl`Id9(1Y#Ac|{j6D781>lmI^mTIePKw~%cRnrdkK&Rq>xEEdmv|^vN&Hlq zm`jtOY)oPkOI@8%Uioon_3s#|^M>VN8m=^S2Y%VkyN8CR1|v(kp=Go zqvplM8dAZlYGx}lDXyRqSJ>PHpYn+RIk@^at_Cp+%5)g$2RRO0vlUj*=ETv@1U2I> z$V?7p|4sR0`NRg$xx|f0cMsUyY3N;4W`-x81|x}BqcIx`iH)mUj*{8N>h~C$u1p#> z5JUa38Qw{Y>f>?@kP7?EViK$v>84ukc$&zIN;0cpKj9Oi8{d@OTmacYwa;`>*&s9K z$I3dYeO48YEknw{@o-DPfnJ?iQR{ul3Zl;Ql?#Jh#@`-ydIBKYTFF&Sn~N&^+q8{u zjB8=RyNG1?U&-Iu$|i3^%I=nTn#sl=Na#mr)1q?vbSpfmw051W0$25wR5nNxTT{DE z3Ni9Y=rJ$c_|U0ro*92#cEkhNc?kyq$9>rlm@WD<27X`|fuk=QHZDUP@u1SV+0$o<-h6^( zFhjr5Z|GK_A~HHNnGR``64CiA0cz-s=jH;s9rTGJA(LE2_2fqcYc#BsJ82=ii6#(L zDm%Dz4(7Cr2#@BH-(e&mGek~bfVrjJ7M>cd7&<8l$w`{Uvit1^*z^}kL1wz~loN0Y z(~#6`DIu|j5!Fs=4fB`uzc(!WHcB#ku6=Nk3M>+r$`{Hy85Ni2GkoZqi~5USm^OZn zJ0vZ@TV9-gE8SAz_gUf;9b$O1p!d%5v_pTWclp^uxiZEzM30E*3t!0e_chk-McBzz z_OP)9$7e76)re^1j|awdw(xBrx+Nh@J#L~x|1*YD;TBgnG1T|Xx^e7X7|=-~NbGuE zjFsCYb8ma!-SI`V`)o^RoQUPdUy!X>JO`=1ySy+rX|zwnb}gGaK($qWwFArKs?CtC_=Eh|^(Cp=4!-bJF2;2XUsKwFV`bpnNqOzMuNy)G$tpU-AWiYdG3-{=Ch+Z&3C+<z+pt>}ip%lE&*^GmFe0K3 zXLY!J@*Lcnfn<+Hu3Dr+hji)!RDk2<@Y$<%VxFHJGTg)$q9OXm+J%S2kryw(S7^U^G25zRDg**;uq}wj#rF^KX+Y zI~(AEnJk!*U-)E7(La(BTenIYNw32_VT`a7ajY0S=>-kMoka4`IyL{ra{tU2* zN!JP$B6^&9;=GC@R;@8Wz(wh7KcdGvhYv-Qe0Y1M-GPM-ok(iH@Vd`p>qJi25v0NZ zbiynZYmE(~zzoYfDRAz-2ZmG(y5mfR_IF-Xyg{WEtcBU2QmJgoCOWm%uOA}MRf@%N zBHH3iMX>Uab}6Xqn9hc#8gc`))N831*e)5Nf#H5IK z@{&FvoKZ!h;VAKDgLL>VsxHj9Rd}UBP5mx+H()J0nk`G)0F%s)TLZyF7UUJ{b{*Pl zyM6S)Y-|=HPQ53G#&NDQGaVB`&2SbnWZGD86cLd-Ld6o;C_y1gFgK;%j55IM^1PZx z8^Ic&D$Z}x-JD@p9gLmZy7bNi?iYu5!O7%B*2!ZD*rmJ3)qo6&L`gUDiNwWdcx%}q z(t)t57&9QeGA{5~Dyt=o1SXHmFtfp7qVAHTivFhO%`$MNYP^`*dUk|6)qZsz60LH8 z^P$f(eDZGKqx7S9Q)ew87rYyJ1_TYkn2J9OsgAey-y{Rz6xULQmI7zEzdsOl;6(<8 z^hFY>31V2@mE&0!U?#!|Gi(YVNZ0`tYCE1S1A3s-O8`1`%W|Z$IJ34><#LoVdQ_=o zGrSD$BT4o3ITHn-2M~)UvyAHPX60RmRDje@K)TS6lG?JmuJMik zNdXxEI+|b)+V>9srfyXOh9hv0{>;^s&K{mM?o3dNpX*;=I4{w^a*MweQf58Rq1(^G z+8(ya>~WE%vm+wrQ3-2>m377TU581vLb4C|r*KLY`$I=AFu`~9lv;J48a2vA;`k65 zuglT(5S?igQ_537HZ|ML!-_jaztV~aJMWs*lxwzo;(?+6It0(&YE~vC6f%34W-&ZV z<8`;L17;IIOCH3)D_N4Q{1IG$JU+YKrjLfRmXs!OqK~#d?agWDWu+HM5Q$6H_^Z)0 za=-sw0Wx5Z$h;zt11BFl6*a*Brj2rID^iad%}ryKsNh7T7+|0#88$-8IKZDSzx5N^ z&lhLzuHsP#&9~|(^JY|!qo)(8Y@S|;j|iB@jla)1t~dG8vMe-WK$J9u z>)-fbe7hj@0MM4w6FHA`A&Qp!%iUgap(-AOBa%nwn247+({bS}-xJTPSeSt6_Sd2e z{R7|=PP%EH47FEbEvA`epR;R)X!P6pVWs=)o&xdKVk6}~^7Ngu4Vwla~$*fBj8 zKqQMR9f2EeGi;l26_vs4{CEtbX>ttvnHf%ilqWTik`c95S**&()xdGItO+d}N)xoC?;!Y(|A=eW&m3nE| zY32%30ESCg1m~SMl4d&^v`z?BhtUwTv8V*5#^8d|L4T1nxCqGO9GA(kQfPNa9mrl{ z*&=Tnpt6a8LuN&OQ18%;Je%2&Qh?oj3^E zi?kdbFi}VXX0arH0H)GuhRr>5Amu_se-$iuVC_eq0cUR)8jh<`#LQz}`i3@S8Ix?xF-_ORClK7s&=PzgWGrYnRGOOvMoPdnp$yNJcF6<6 zJnshM<)_Xo#@>SW@ES&esSYNUi6#dG!TY?N@%X#o9|tC5epTNY;6QV}hX`o_%p9T#3%NxVp8bJkKjf1X*sOluaqIY5V);2fVViqJT2gA-6S zF{dAp59G=Uji6lcF^z^62(Pyf;hNc+62H^Lh85k_%vjjm=~8`<=S-E%&&9exzh|0~ z$3z|0P;sM&NZ4*YOI-@3CR_GU+2&zA6+7Euv~6`Hf4<(cli^oV6Dq?9(ZgR(9YCZ{ 
zQ0h`LE4F=F^)}D+Wf*a$8~!vwVhXVpDl92?X6{}{(gW<|)7l$zGE9)VJ41o@n zc21O;5RypjitRL{$%QcQT9%>P)#Si6llQURUZ( ze6bU|WfS+dRTQLraoAIMWgAVepwY>4$+C zw1q8Tst+nC!LOAW&Bv8%hDjlkl9z(HJ_8Ga*OL3CH*KvD?cQmI;6q+^VK#!clX}>8 z%rFdo4|dG}e7c;pW#00_Q%YsAV=F$s>y!n2I6lo0N%$JVD6FLQ842czEn%oIcPc>l z8>O4DsT{ZNiTi z*Q3G$G;Zj1qA?R4G)tK`?kbS+VK(xyS9M2OSk~xdLf0yelKe)}a;QwrioBY^wks%~ zeR?xK8sSW1l13#6mUmK_~|p~($tnyzb9>-P8O#9 z!l2?E$KFCP96-!vQ@LeU_5k?+6=JAl(l0$Qgf6+awrX`b$2k&AxW93R`VGj7V7UQ! zN)8c`r4;5z7Z83<=@ESqQ+9D9-LC0=eU*h!&7gC@iCNO2a$VO)s9>Hf{#2VG>C-k@ z3Sx35mXOs{8p5C|qXKRZLYQm;3pafTo4z|Nj6*Mnrs$!P#eDxxnjthC&qNcgmZ^ie zWH^uq8Vb0J(HU~GgRQ3BJMcgm_$0lQsoyMQb8{)W!hsdOe|0jb4cs-7a-~Il%urGb zFYf6FWGFbwmZKD?RO?0SF)2H}ndBM55AV#YzSA08R|n@eaL*rab#H%QY>i$%onu@- z|ME-9r}R@59SNVppM7ipLMifJ9j|Q_)zIO6R&yz`9(*+>_Mv@XW8M7?599l12M;^w zKkxOr;(hkUJ*G61cV8)nsYQ1iQa@R5-2XnU-dB3vP%AUx?OxS)ZFuBKEY5RwK0U9d z=m4KUq+cJdz-E`jcOcOW7v+yYz>Kc7wak~V5Cd~$B!TO%tCE)`QGk1NrZXd$wgiM` zFEl|NVK=r>N-EHO{!c>704laa$PsA*e1qB>_{H_F(;c4y-)NHjtp$$oBz^xnC&2wo zJ|*^ZrFKzXetlbd-q|w=hJ|-o(yiG@vQVLN69(UcDyXVTr+!Kt?>xg1ll~-`yX#CI z*0kY@S8buc3wI{8SN zQ5mLl#kXeM$QpLl$&QtsIkilKuqZ*QlO&2=7cS0DD0s%I=M zinMnnisbzDb$y=hMA%-tcQ0J}ZQGNRm#v@6Z@$!fX-rafcK)Q8yZb8Zc2>kLCYM8M znEMJv+sQykxEeN27RV~e9(L0tFV;NW=QS@*s~A4F;{qgoqlxDcE~v2cPGZc_Jl^Xc~!+$<=Bfm zwOoU*TrS?a@zMCB;x6M>q3lEc_*b0~&3K$ly<&rQaUU(iGlMw`po_jSE`cNpo@eXS z*cQYfrP6M@_9JepfoX6+Z0wB5757#pq}DiPvwskFpw$&$2@~(9rrR&TU;hS($T8O; zf4br_d(2w)LRDxE=wf>e`Jt6N2ypd*N;=#@(I9!J>bA4UBiW~K)V#Q4P`Qn0x4XF6 z>4*C(Lp(Z9V2stDgqO6=8E9-fH8xhkwPyG>mjNsj^rJD!{ppF`+q48Z^?rl19+)G2 zW@AfM%AzBm452D~MYJS2CGHF(JOtb6Cu<}qKYLw#qI)@K5SCq6{b#jl9hZS_mBlNG zlT1ueG&OvJ{v;E7RQf0FAg;rVt&w@cy;mFBlLqEw0{3FN7w+9o1dCP4`a$)E>C*5t z3&HQ~wFePfSlLw3%p2F#zhw-^$0wh*+&w~-_|s0qE7;jF!P=KO)$AkolhN8Kh8wRo z^Er%begbc`5;w` z({byf7thVR4d05{JS^!cJ$>zpz_sg*kDtHDZ+mHp-RtVC)a;U?ay%ya`8|?5E;0~` zJL@7P;{7SY|HtzYj?hoSihQ-hzN1kgHffK`j`j6FxVxH;)rmXgK~8?cO3Mf!oCcL!HY^g3jGPS8jOGZ2XZtd&m>a)}1*g2aW>odxw2#4qkT}+NrL$jIJl1yi>s4 z9@vdLkLNYp@!PSbFa?EL7wRcTfkkOH@eh)5>`fcx_TWm@`;k%^H??3TbjO2$Y@W;OR5_#-;33u_c*Gt_li9`OS+0>Xux?cZGZH4ljL*|vGNNL! 
zrPZ9bV+SfruAMwmSxk)BCsN-_G8@J}_+tdG&5o5+b;CwV{uqSWQ9&XHd$4xo@W)!7 zgqMton%{(dcqZpF-!Ko$4b7*15mv)@-8p-6<=kf+YB$PlLG*0ZNm0L8vFg{oL+0sp z+edky544|d{t%Dq6}|sV)BOIc%cc^av>OY3_wLG`yB0OFTR=YKkmr5^>seO$gk^|H z^gqT)>no6ITWN;REwkG#lL_7N#aIqtH;m#h3F#GnUOwAW77dnQJSgtOYtkq~LD+Ht zCX~7{Gq?Pbj&LozNcvT)gp}wnaD);Gkl4>d+5(NoiVR(=Jsl%(5LJ@0lVN3;EdBt> zd4ea*S`JaMOJTnL8pHV{uK?=R+S+Q?O+(@-O^KW%cnB*vs_9@#k?bdv#hoyl-{x!r zJDPx+dYT=}1}^d$L#oL5^H8h0PQNZEqJF0=xDzPh$2?(Kk#}n*_2`TH)ZXtqYWIgU zO=I@`K2_aM(zz%8A;xWV_0z|gGq0+^H`Ue7DuMq)xBYNp`)e(BYind8)K59p;farn zY0zfO)x%2@&$&h)7QQw9Om>SmTl~2FOKg7VWxC|}PrwfCdecrT}dIQaauaoLd+R~ygZX4p z;ib@|3hg_?udq)a^SzkXy`21}zW=SRTvGUtz0KJ-GeMm-$=b4h zgO)kmTf5SJ5?Qa*I|%?B{+k-NzQADbmzNVwg%GpqUaF4;as8vpS)5JtCkjbl3Lj@| z(^3SgqGPd7k@FeglDT1kC@JHJ$hUPcC*7oRx03Fb2Y}lV<=pLDZ2I=z6jTw8aK{UfV`PbL6nRI$4WE+|^=on^GX|q(j(3jzzUtGm(on6h)=PXa$OXfKB zGBc=lToZI9Y4CZYcQo{%&&XYcK748TRG`v@26R7PuXuitk~}}c{{M_H{cu0b;?Du zr_k&f*^v5HbG|-txNdzG4ekShSu6UO8jALkP9FGEN$}no^&R=x@gK>)eV}t({fYwrSsKbdfE@p&HT?uiO0duBrIKBdCZnRp1j$=ckq{9 zOZE46STo%M9g&ymUbVYgm$=HO%EA8=d}|+6^ZAQ#HOH3>)3CVxA5LG{#oSI^p0}kJ zCoE2j-F$tQ{wmUcG2zEb!=-y^Pc@bnRBE(cs4^p?elI^0_FZ*Pb8y~viF7TwKY8^h zo8}{JS2-hp{agM%kMqR39~G6@>ifLWJ~nl|%8jGtH*k^Fr~aGdJGlPQlEUoRnkKft zl6v4}FrtqI{4;j_5E&tav`f}%6%i2Ku}6G|->Z1CEKh4SUp{%RH4gkgIn@$yw^EV) zF56G`pgKs)fQNe|pbdF$J^me@aP!{oG%4o)kaU$%QNB-m=>};KWRdPgdTCHtx*I74 zmTr{pkZurI>6C8i?vxS`=?3X8-{<##Kd{epIQs$4ea*}@GuO<0?fCX1!^o+5YkCeN zS#&fiP`EKJ?5>QFp-8_a=^`d+2*8Roea-$OA%K&j!K$Q$)r}`t@SPQw(eN27s3C~r zo8;I6+kuVbOMdJaNwO&EGJcR%gIq&m;VeA^8&7Pn+;>*8kOVD9Q-U;_@G*ZYKe~tc zy6t7^e<0_ge;{whM~%NnaX)V#`V>>0;)2&L{(&gJ*Zw+%bvBz|^CaAn!Ea5{bTy7)`8a`JpF-Y|Nr%}in44J;^N z>9L9pcz3g%kR+W9IXPLS zi3bs79n)oSi~C2bYQUA59soWo>YddAfW9QDL_m9Ch0lqWUnXf4oE*;(TPtaWuOXQz z!snXEvdQ3@cqszF@r$S@ebRqEE)iug5z0T)ye048=iiU`^$1C8C_0RoR{=b*!Xd51 zn@tD%n^@*!1ETg<%(qL=to~Dvo%!sIf3Gb6B-Hbpp5MXIxJ062lZ~V-U5?@;zB&nu z`#Lr+8K`idH~GO8o^!n3PHa88t_khghUIeA|!t;##|G@ISHs=)VC-|LxC6{aJq63dKn+pd1UqCU_Yxt=T3KCf%( z=V05?3q7x7JU^B=+FD&WX!9a9Y4X;y87|}(`jb*IMR+VPGOVXS4-h5VwD^&hNTYxo z(6EE6_bU_4S*aRGx`6V= z&VCHt{Fb7hVu1VI8!!EM)P74)a``N(3_qzkrVWuErx%EbO`1)HkgmXBC=rtti|(fe zGJ{t(v1YXCob!A=5?K){me9p{bqK4J}fnw`pO-gN@!##P`(e9pO$c5bAWMy zm0fALz-PiW6%_o>@4+UY@4dh3ww+Eg#c`~e_4E{1Fz?J~04HrxD`C+Kgi5|d-Pz4i z57oLLMA`;CTPjT$=%;^~q7j5>m-X*;9O_7rs$Sm5ls8DJ9yMnZFyFaD2(7{G@?NsmYs5UB^S zy3XK|C>lYZ{1YRz@1XZBOw-UnLD;f-;yZz`>fq0D{HF?6^RHx`&Z-!ijJZ!gvGu_ zuV{2fHjArVHX710)HQ5cnGF$bklfO~vkLf;*=^PvUmmXs$d|dzAI&xgB8V08OYxZX zW8G)UZkUR?qOrpEoKmSt+8LPBz{cP-MzfUuLc^$0JZ^(is1n)Uo++=Y{b;iL0*}SI z9ooIABkpx{MEKX|jmWx3h4^%I+Uzy4QJl%T+)6gZgQtJ#QSmJ4rZNqfvhcb^^)6L? 
zclfiAylxJx{&W?5{V0*>c$47)?{HX>z{sz-!H45%KfnK@xbW13SJLpBENfdAb|A~@ z7{C*KjioQFCS~fcKEH1BCGQ%ABu$MQ$4Gy|Xpo8gK+?GTgRgWKE)jT$Xc^z}wVewL zN{g~U=ZSI&T2+e)ZCxPfBpFV!v?q5ZD$A7|@GG)>0!0s4vJ5bHb(uSUh{sj^aAe9K zUmuOSA52n+x{Cajj%uOaX6QvS8#n5ZY!1-aHKCg#0^&W3Ri3(a%4;?B$VQelryx9E zdvUO|*V2sIOi;7@<=a_=+dAun0(WHN$}pJitSVxAoZq77G|_pjepUGj?Ek?qOjLs6 zHq2)?szi{6Sa}{Ha$k3zQPVz$4SjxMvnUcoY7C zBAq3CHgbK)qC0U7{(%H+i)#;jZBtJDJbjCb1|x>=K4pZZ#>QrZ6BHyy>%Q|#eQ86e z9y`Qqqs{TMT9^?PJz~~P0i|mvA?*+1a&7`oMf{K*VC?#X#xLe)KI6SBNH3;}g2j!v zcBx98^xB9?+Ec|y((+$t>Cam0>7zkB1dQL zhTNMkAxiv~1&}V(V$()*s3MtSnc~{vBJbfk(k9YHji7+l+5?FExod`Y{z^y&(hBDe z5p^L|bPaL@0)6cuX2Z}mf=;?Rt}ua^R^B%OL$kKr#JQzuxdGn{1qn- zj>|MuKfaDUSj`h}UY}4b>~hNR0b^Hi94MDh7#WI!F>gIxvdD)HHDl%`8uRv_Q}#NF zPn!KPfB5n(Jt{GZQ%MWIRCsgy&C1O(n9EH;SXV<0C6~nw(3j4u5(Jxk-sKH_K8c(u z+-KRnblmd%Ykms-?&YPnVVapu!2dn*sL6!XDvCyFQ^N|6Mx`a{d;(8M*}(R@5TE*R zPQAMoCy5J?jv1i5qb>;g`7@^;=vVW(Tj9NMu_A)$pkuHC6uJUg5A(8vX~%h6H2=#+ z1?RiA4Ed|f-w%%jUq)0lT(a^VG(Pe}>;=Sr>O%E8h6`|;@{5DQ>o#iwkLo@5wUflOnQKmGM{8wBw*e_W% zw5#vnIJw=YHB;-;v5}7*9lQFYN53@_AB1~-#l{SToO$C2$e9`Hv;JO+8*Ws(bD8z| z{tj;{%GA60{rnG<`?n3gA)>kFq0f{A5YUs-+Z0G$RMo_dZ+v7+t$O5iRQQ_VR+E_bw-b_y_6k1NSFOb(5rUFpv z1$4u}Sjp6tQ0nc2fI-^!A(UgXfo{&T0ErxyS&K#`d#YS)N2k)}_Az)Jio=L<)Q@*> zO8QBL26C9TIgJwBfzq#w+UpjQp*;1HDzx$dWtm)a+Iql%_K@Vh(=C*ekW{k`Ta_Xa zagsDF1gQ^3Zt!`C(|A3q!>3GVnnSl2E3oLeFZ4Y_K46UAX0aYSdLhXy+< z**N64e44-KZaz%I(l!Z54=y}M--fqyp+g7{C3K(S1`llv@rw#~OE)L1nSK{o=(yb2 zuQSeN!%lU>Rv|$fw?ih}b9zz#K;2RM$M=t|S>%C_e3#FY=@*!O_xkIX1)X%ypAUc6 zT+wKDY1`OvByhTI`(RH8A%1PZYjbtDZ7@kJ!+x??OESJim*)HvfTUJ75WA1RNP3 z@JYzra_Rv?`k%?T{`cTSa|WoJ{&?Vs-2aT=QM2n zAIQeXd%c+{4GTkaqk;E%VfV^*i}1XI+1N*yw5;m;@2mpE)~e1Q#EGjt*s1z=W%MSV zGE_y}gOzFKz&AVI*^62gGLwcqJ#${X9jB`0qgti;$HOt};5^3bGZ&32t0P!T5n~Zv zO3A!aQPK2k_K_gEO=Tk+r~Ai<5xjZxHmyHle%-8a^oms3kEz9f^-_WW3LV~Vys;b0 z&XaGJZ~X1ZS;tiRy_uJ5^or?uTQ_0UL;h^}QygCLT?3=;qC0$gyBO~ki`70`7((kq zJAuqvmf^K1-(A@EM@?fRh(p!A&!lf@s@}NA(wDQMIEcDW+)FD0a^2DmW%Zo{c2OfW zSPXwsbi#__FJ{V|Dx+N=2=C0Cp=U~V>w4kNjM5{7C)g>v<;btB_YwB{R9K^}P^sEm zR=WBUSpb%#Xu1O8z}2`%WkZSxjVB{J75rgR4HGZ$jtIR+b4l?IjrqQSOOFT`=t4F1 z3pO+L{ReV$^o@@EOC?)873Y#r+4a$}`Fj&PGSNBXfdUF8MalO*$XnEplysNKq#P9e zRLkRJk(mks$sY=Akkze9sY>2OlD=$7Wyycd7|bXw7o=X&np)$SgB~Zd;Yj3!h>{;r zwmDPw!y|dFsufd>dbBU>Vm(Ea4(diO8D#eo98ab=BXo+1E5wZK1|I~j9*tnd4(2Q{HLX6VZsn)=8 zwCrUTqE+5PB7M6;?#zcNju=f*L*UnWN7>pbQ?0)c`dE@?qMYpX2#;FoAORdY*^y)vxF0gSk=>T6Age;%IDf&)o@otjgQ;cXKz#^|x(k#PiCq z&n#;_TB2M>XJK*Q>jFff4tN{1t7Q7@mzT^|M)UP_s+uVtl^`xZA*S! 
zIKSUy`j6EelN|65eJP9JA4QgG`s2rLZmy=}@o#vzF-@dv^-;~dNurUvIm3RAw?Bkp z(Y|S~%At@ObWHWTIWHvVlDeCy7J&7ZinXd#1I zsIY=Jg?A<}@OLWrMSniT^i4?_qs)tkh!0K-!s#u=X3kv=+Ui5XLmk%=*gd~FwKIorCMPd8)`;6sJnO2(MUzsZqIVC zX0~&h1fLKd^TQvRw(Rr4Uzei(1TIE?-h)r8((Ad{UQ2UdAjkLgM~M8Cx8Xp&P#hgc z6{f6lc>H9@W^nGQ$`zNU2!knG@9h^-46r3C=1FMw?p3_vr^q?=;dA4gme=2!G6z(G z+)`Gtd6mix!t^meI=4SxRvoBW&1zHhx5%5A>jaP}ydunF;Za)x25E3$vR^O+;&8so zI>lxa=TgnDZ(k>vjI~rn)_WemPHd&k$c9&7It%k$?A!D!Knkf}c6omEdau(7$=-7I zx+j!M9!9cW6CI9E~vW3(-4U>9gqUo%)9T+l=tYpp~DpTU=&e z(cx)aD@z!hdm2ZZEwGVV`Q0{X+CS1{?17HJc1=ZQrxHH!_O$^ z{#!vlCQMa@jXUzZr1Hx;PtAJB*PskDSszO)TfFGl|J;raARzQ1KO6`LP2y}V>cu8wUAaE?U5In}s8W@28tJ<` zI==JB$Vi+;6qhoyj&e5H2i6HxQWwhMC4{!N&{q5w?a&^aSF)ci7+mioqMvi4TBcj-eh^ zFo5TzWT9_(;><8%ubK;ps;Ud0$1-Xf&cI}F7N1bqZvJ!ne^?fI93n{tIxk5T7+8Ea zLiOqcazw&HW%fqZd>E__SoGxy6LR<(P`SSVqPN-qfCd_mOdMbK z7oDdK{Jh==hdfn?Qvs9Tm*Vq;X~fra`Rqk0eh~N2%hzCr*|<~(F=6p_>y=)Ffemj| zRM|*Q!*s9JulMvi|3IZ?XlMv$W+DqoO6d_d zxJ&E2(GwR!W5LmrWQrS}yuUHxxJ7;6hD_4I@gzk(x7JH)_B@HYd)3iAGQO#TwazWS zPlDAoaA-uYyZ3)QI&(hI?7$0mB%bL^KoXuK+dJ!StM}?V?Qtf<*^e{;VN?(cW|az3 zvLgm@%eh>k289GOR)3{H%XE22fBX`3jnhkv+b+2+*8TRYzGUhP#vj{_pmQX@0P7E7 zJwGv451ShW`aM1L1o&sV1^7vD61>_pqCZj|q4Tg=Vip;i&drvtNdCc*-P8V- zAWODm49L;(ju_qvl}WydCLFngQFqPV2O>96kgxat!^w|1jj0}~5MMG(7WBHL{>1y1 z1VuKN0zmn@92{9vqJZdRV45s6I>r=92Po>i$RUdj1f;+;kXOBx^fH5En|!mm{B7oq z@UPd<;TI0%CMS>JB?UH7+01j>xAke4-c@g-H(@aqP?u*;=W6AB{;dtV)U$nv`@R0r zZ95BusWK|Mp8w-cj7a6-!)GmR4^E(kJ9lZzvP&DQ0UbJIaE+%fa7kE#u{scLIvXtc zh=&z*w9WF>Z)~%QB^s$_pW|V9^!9lzAI8l1uP?KoJ1x2B_4Yn@g5Orf3OzY^j$it` z87WwD(b%}6ul)y7?Y+vsMqr@ikKE$X;DkoTrY36)>MqBK;(HG_e}>LP3q6w&J85GL zo=&jW+7=fL?)YDMj?kCxJUH+V-WeD8*F1PqZ($T|`^2TWe64DBYH|8z>B!?x<~bug zap)QyBs8`z7JDHy#2(TpH1=04mWOtoH>~($TP*K^`lhn^dg<~xlXE&oOzD2}0NfM7 zM7n81BT%I|GCLw_q*Na2aV`c1)=q7}kG;p0vVy<(`Oz8IvVuFdah?ejpGq#W2*^_h zM|<{*7ir$d0}4AHZm1mYKG&dNHPSU|ldu?2*uN5#CVe4`4n>y4qR$joWEPx4qE%b! 
zqUj=&uKj$BvHCqtdL!D>&VIa>$4IF(Fje`I$LJe9H?wkNqX1!~0N!kYVZis20$*TU zgP-(8DU!ObxecdzUIfSH2b~ww1Cn!AM~NY#GX-fb(n3u&7bZ*LQCT7I;XA)VTY0}{ zXa5w5b>s5jndfOA3oeTaal=t}_Tb|Xn>SDiUPP9ELB%WfuXQF?P}juyYRmmeS)z0IRg zTa38TADmp^w~FEGm-AU+8EPRw{W3UsN=;WEx7BHhT?O+*RuMT5PbQyk3SeRC>Oi;t zMZcgL9%ba09xN7ffL?#32dU{T(MzHSzXoJqJp)sE)b4Zz8Me4*o|a92__H}Ts2&>g zFPzT3W&VMf4~P2=gu*n9KSCr@u9n+~I-E^tqnkaSMC%ci~!qXBOd&CUQShhKP@z(P~OS_!E^ zXJJt=upRM?*TCzgx+^04U0nJtJZ%1~De|iM7Z`)au&0wNHuE!3Au;;M1HRe33I@;2rgjb9U(xoRStQx6WIp9h}^?%^UKBe<7sA z9z8)MB9ADN8h~RRPfQDq<%j$nfcq-P64RdTHuw0y9ZKIEV;`6kHPZ1kXpbY-0^Z%p z9@xcb5--jSoDa6*90x~7d&eIMj7DBxw~flUsdR&-*69t@VV26I23F-c+@o~y6x3T} zPzNk-6qIFqq$f%I@}bGS54h*Gl-^M}7S+@%e*(^-yqOC*9bL9Lixc{dk_I@0FHuf* zWZB$K?Wr6F_TW=rudvHXsRToU^t;G9W48(9JCx;pdsNRJu_dlYkLf;ah^v$3x437s;4{bI@|82>=l*-=l0x4AC( z`Zo?_wvUPtQ3mcEh)*uhtCw6w68xJhwpAO(-nr|ss_Zic*=NLtP|jzG3k{V3&)6BEq2R&U z<#ma-Lm8sJk~95tgCBe6;3?ldyqddc>M|yBDQ-Twoff5Ke^u}wChp#Gy|Ars)RTC- z_6K_^t4u$s3tSRek}sznFzjHBbc7LDEf=}d%{kC>AA|yJpNKKt8A4eO^-h{aT|vHP zN9h4cXvHe+bA%?NW4!fN6XroC3+d^4xK~MUlzn#?{9!TPH_8tCJbKtuFo()zF|8&| zjo&ilOM4_?O9UZFUID`wy?}gW38gm-@7%r+C~GwO3|_Zxpw_RC2%$*3|lYcj1ydq=A`23aX%zXjpe*jJ1W6 zaBeS##dz~gVfI(reP`OD{RNu$Gr8=W=#Q8s1FS6l91M-HruF)^nxjc@=uBfPO%Geb zi@a}mgjh)GN?K@w%HuL96#bYX3$kqVZZQrSHn?7n(tl1o*u1nu@wP)T^Rg9V$s0^uLsjJKzt&Syv%73$T21tf;`Vx#X zZ~#tu12{)Zj8|`?0l>+zBVu`khqfW#>6+!KEk7lUSirCl=AW|$<3wLLWs3T$?D{HC zQfY^JWK8-EhW062_h|YOZrk$ybTwvF@Q0J%e(Ws6zPgW@RPU(BmfE70_;ZWLqN6d{ ze0wa_Ee?r|vb}K{6%Q{2L~b%{?LqytZfvN5udCbQU2O%(+h0*F?=mj4@FH5i(WuxV zcby7g*_8(kFQE))-cddT_;#ID1ESOvx8)o;PBVN-Z*zRRXeEdX>CGcy=JG(8T-is8 zyvyYf&d(^k^pxCetT-|_!+H%+D zavI6?U^UB6%Uc9KWG})P-a86pbN>oT4afRg)HLhuQ~L~Cyfkb2eUt6+4i)+7Tvh_uN$w9efDO*^U?57&I>YAL#QKy|_ebMZ-pwrm}yj zyK%|QdkA^Y*`x5UHTT?#g8Tj$A{gbB@Yp6W@X1vSyeY43asm`_lfL{0D#xB2tD$vt zNRRjemD}VkZ-gr4M_&u60#O^QmVm}rl~~6xOE!P`RA}64-@18xR!z5R2ca=F?S8i% zy65NKRLYl^X}|vJ$GE5S%V|W_@0!1DoTD8VeZC!ek1zbg=fbk{PEWYrGh+dZ_(eNO zvP+L`*Ysp$S?T!`a};U1KcoR8x7?$4SbNdTjj78u~bW%Rw&d1+x8X32BK%T^;l+vQLs%VWwgYF5J;pg23L zNQ69yX={g4y%8B3zkDe!vSYH|LAB|gbEw{XwEnzr=GSy8_MB0k{pP9G731260EPT+ z>#C@ojQrqP)}}*Wuol|n$f4;ULA`Y@X>|2ETLVe;*Yjwz!Hzg%SYv_ck|^DSwefr> z+uh79I8J=-fn__}^>lT`vP?57JM&p@-PfldcFdP>bZd3@!9!HkK7X-WBky%?&g&c^ zHaa$JnRjfMh0F(Pta4g}R>SVB>gpx6BU*_>Fnc_qPQfe%EJo%2;kJsz!;YtbGpl4z z1PqUpQ+YP;yl{2ROe77#vy1|f@K?g>mP_Q{TlsJ&DQK(QbIdE0I zQ3G&y=?h|AKyuqmZmGMhqJa}I)>q~eJf^OxgREQo^wA5{{2vNRw;ns+oIck7NE>ib z{@it@yqv7NHVYKS{Y&pluY{B$9}7eQjoY}@ZB(!qfkmISA1Bn`G2-}As1h7KV}M`+ zlt=gdi!%{ZGxH(2WoSZed~?~Uo1*N6#ow>)R643qD%!0(yPh^M_L<6DLnfti2V6Vw z?u7RC&R*L70};LX^mMQ~b#Vzz+>khBT3S8I$o^^JqYg_Fo6w-&sAo9*OJea@_Xyb< zTe8KI_yC&VZM_t3g-$vFh!rokZmA1dptkf_s$#a6XM{&U2Q*6RS8cJ^%Qz=TE-D58 zHu%K*w}y$Wx-Pjs^IKUzMZZ!pyr)0^+muQse2Nggtz>#BY|G58!4*v-;9o*yxHI=bJ+JR;Soz#uM#nln{V=EAoFKK(%%`P; zc3%SZzVnmXSp+l7{(&rKiD%pJkMjBlVY{dK@GrOa;+oFWNmZ`@#qT;W+4;0jsGF&`U|9*x?FaFX=CH>f>8AX?m7b#_~ZuY^< zlnkAT>;(lCb*y6v8lnJ7y@gE#hvIuv7j*3kxFk#~ZPP->5tb84LW-4=!~nmuoZvCn zWvvTYoVQySARJU@{uGDtW&N_4ac4&PGR4kw=99+-%CxHktmlGN`n^^|A*axt8KfcU zymDlPKo^3msM7Iw%%dHmDwco6181$CxsX^F{jK`aXH}mcHJ@hS1hpgz!i=Ojb>@ z?;7VFT0(L(K|6d)qvDG-MH%XFonfHPjw3oZMM)TPE^Sq=!mAX9R6i+|{$sVgfp*{u z8+_40ZL3q^r7A0FjXV0gD($AU)N_C7+a}$kHvX}KZxXEsy3acPqBbYdgzvXopl`-- zgLo-^BVEQVOkx)fT;Q`-onNMA_#gZK10@|A80PQ(1J&Z86|WyNYjYep)95_Wll}u; z!>4}uPlUqirnO1jBPOU`5x$5oXOYW&4czZVnu11x8X1&shpLvB6OzTF)uTl?4JqB) zw`~`;uAjdZeb<-n=D8#)W5ZgIoPXzReJ|lUFrRp#IJ$Fgcz)MDq2SK`4pHS?MCaVu zm#KxpSsDE&G(r7{j2#E?&5~w>K}t%vw8Mxk)?pdL_UXYgpe`6|9}9zWqsD4}OoGbc zyTVytF*gsGx+V#Ye`A>7HFeDtQaa=fS=plAn&5@Y|L5$A&Ydzvly5{mt5LJ?HGBt| 
zlahl%nxM+DFxG0Qa+Y{m_iy#Ax(sLMbs_76Nlv!NM=ADkoAwf zPNy?Lg`}{=dC9~D{$(Q(v}BAQ?|Oz%x+qqi!~%fED-JCIv)}<#12tXB)RxYdg@=|0 zT9-eHf=$3KggVG!gjgA)?dW5tMfyr@a=US9#MaZJJzkd`xp%;qb3=_i~N7YfZhyygUhvUXQ}+_SMSApbz2b!!G; z+9N9WC~K@pfzi?x^9tg-54Ms-asdgMF9LKIn#He49m3El~T#_(}Jm9QvhSxTkNEjwG zy}vQs2+1E~KzWnD?)(>lGlj+9rGJiJZ{lYaDDviW)wHn*2*@t z*ph$KLA34fzMfiH4h$=%?E(49W6fFHhq;WW`OMBwzT^tngz{)W+DXq&e}x%M^ug3j z54R!#m{T&}rO)>;^;JMY{q}k|G~Olknv5&Q3b~gvq?Zo4t8aCT1BsTe%xoB0H3!*y zE6$8NKu-)t%Z-Jt-1V>+u@aqLz@VyQYiVjxV~fhaK=SM011O0_T@C9E?Q14vLv_by zLdh#}&Y1@j{0kO$AJKoHJN?HSvB`aW>*DK20>VO*Y!&sp{JUOlk)ql!Gtn)L=z@Fb zggA3ACUapc{_whsTZuF;)~aWyzw4C`{iD?*?5J1U#zR-0*-u%2@X~+wsS5i*E`?^! zj=@zG<=rb{A~@eb5Rq?Rz}F-s&XFK3p1#Gb!Wgikx zWxL8bk1cyWFvr*MQbMbPmgI;&zy*2$3gt}WBOI}dR1mXnK zUuw%PVmRfSafEpnv2QHzN5FF9T{G_xOcQl*tZL{m!W=kfRIo23>KPz^<4O+Xr+-TIl5liAJ_;OB2i9MvI z2hQ0JA!3n(m{s$YfSy2or({}W=z<66G8hNwtVjVSDqVhgX69(2DVrPQ)o6&tpFvt6 zzt)5!efud|N`J{wAFQ1A00wUn_5<=GjFp>(Nf*D+g=qn$cZSFh>1K&b3&Vkif|@U| zLp=zj19kgMWt1>#*~~Z}yE4+F>h4yvt$cqWoLdOM@CusC=C0tLDWsD~ z9QN>fBN-Y_*(<23sW=YSC4b5kuPWro)ABA6i4>+05B@>l9?!?%`c?{$Cy=K&UYN-c zEd?}c%9+foXE*<%d|r_;o}JsSQQMH)i>QY)V@MH2K#|_e4tK$@I}^oJo?AXu2`|79 zHF9x632@d0r}}`=Y5~W(rU9pHIY0tTH&$tr>3>hVO~H6nz<3V@oHA**1rMB)gq0Bh zrvRW*1g(n(Fd}L%>xO6&GZEa(?K(}IPqnD0{}bZnPkI5Vnf75y?xapR;Q#qag8EP6 zM4L+e)m3q6DdfMyRG+B&u8iuseO(6g14>7F{xSzVGXFZ94(S_$P}+Q;eb zB2z-I1tke1X1g?XOJl0Z{$zfjnnJUXV!AiIw^_e0cr~58N7(?ebUS z5ppGdShPPQky+2QUPj5RETZftk#h&5q!CGTYI|=~VJ|fP%dc$rL;mG^hDs7obq_H( zTB%0O9`gRzW)%td?^~(ipF?DS5GP)W~ z7lTqlYXxLa(d;-Ass;Z46FmvFP(Hm5@%5$B)}r}@OT1JPR6bH^TY$uTLh}Rg9>6fc z<3XK1qJ+d89KEnJ!RrAU1}r@hNSBeAt+tt1d85Aqe!)i06&_Pc8*4D57E;Kp9%F+<*4?zM#SycG~UrS!^&0T4;in_vCJHvhbK`zr{~y;(GOrw~epyuu}i|G2*#6 zZ|i-_`@vXs`KQFsOh9vTDmE~wjiY#E4os&8K?FnC5(~=0+=fU`_<&7(XeWgKX&ux z0Y$Il@pvvlNk}L?dRqe;L_GV!SoivtCerG@>91~zZ!uo( znA@hyv_HJmWaQdkV)o9VK(31VkHQTt#eL^dQ@U>_+x%Js$@Xh()9g&Exzwc!8&%3m+~42l3dS>|yV1E+^gYXwlM7v2XgXxItB zXrZXO>^RpHfa%8Evn8~U# zv(^H?>*9q|)(j&oyNKYFZ9pKf9Fhb=L#&Ia=Mam{&?Vv_oeL$@fm6(LDd_Kk=kjM_ ze5&xQ#`^DsI#0d?-&|EhG-Ee*%Svt*{n?JN_%{y54{Q`PQX2m@&9(@;%+JLlh?BSW zk>eI`1(SaK1N9#co=J>kOE=zRQExl13CRs~dR#Ccc&D0|YVhI~OAF}820%)MU!F>T z>FT?|BGIQbzX6IYZ$_O2Es#QM#JYx}UE+VEe#rrV!d(s0*V7|13c6;;8A7{44O>Ct z2+*S<_B|_Z7NIlH!4RTV}+wz70oCn*Zqn!w2c%={z!HNy^T^77_RwB z>Xo_imYX$oe%Ln}S=A+{bx#MS4vmC>D#IxQSQA;*Yb$$s^jmxv3Vdy-N z-6*I)e$`3jW(`jG+fb*CW4bD1KC}4Y+5G*PSnkuL4%~v$u}fAOAcL`;2t<|}0t{Ex zGnKc!whFoxMIED>Ps5I9T9ebK86El@+2l`I7oDdowl}_>j6G-CSORC1{b)#ZR?` zF1B@v@ZX?q#Y(!>3b@*(qPWnTy5JU|*b&7WqQpzPpk^U&VbF3ExMsEwvr;by)WV^P z&D0&mP;T~Ad<{#xT~;K3C|D#xhpz$uP%sJw@=PELe53q0Asme&;DiW;(ufy)$uDJt zMO5$s!C&8Qzf4s9_Jq_m>cR;j3S1i%!FW&d4d}}eUcd)!pR$ttKZmVUHSYnc2!Gxi z(MtWU16O=DVPWf;$H`hekBdj%5sNXQLTJ^p)bEbs=}Wi#LNto+iWpHN9+*y|9DHk~ zGBh6V%FdL*Q9QGFMYFvKwNY-#Mt!<+QR?>}B*vQa5h4 zd+!`STFG^mcD98BaXb4z-h)Q50)<;$MVL~bd|xX=h^FqiMgepP&&V^d`z^c7A`V`dd671f}qug0=&oXrr`(T!AhqW-drj>fdEJ@$UmvU{Eeez z07zf9u;lPq&39Q}YCs#&0r;eM?6MBg%Ol_%sV)<{lyX>5(@iLuhLDXC%LF{8jOXA$ zeXsdOARHxc_GM~YC)?ixsZEbG1Q)ULo|WRwxxW8MTCUJUvBAha8O}<2WO`l9X zDQU=&ei>OX#O$`%>t#YWv*`H~Rt17pgJ+czk4W^uCBU|kzk6g5wGc|=g#w@_*xqx^ z?uj#43i6fQQ^E_C?HO4e;|wrL9Qhl1jFg09;oG%h;zU10vq-ickM@vIC!rynhdeaW z(poG?r$Fq_57=rCZbVjtzz0ZD8w0P*pmr zSroU9ZDXgw41j;l2K*H=m;&iW#xLSXuEJ!IM)SxUD z2%gZ47|y>o7CfD8h7n!eS~Ydsh-y5>HPzg)_H)HN%2lE-Xk5#sqg*|so)K*?{R6pf z?7pWdYheBd3g>#BEWLUqrU1V4{56kA<+}2d6=LefycJ7}av5DvRIAbeh z?S3ur`>d!vV7PVtDEOi%(K~_0@oTKT_+jb9xansN!LREFHAf8ljGqtGe~mf585!R} zDNAS@guD^$a=#!P+S!89g1^SXB;HU5I7qpm0FZHzOb*zhltW`R#h0?fmt>X0*CBh7 
zTp&}WO`?xy1i}}z*+E{Ex{#Km5XL$*oiN}s0Gde{&}_Tp+i{vrM@O7K&&KsQN@@SUoH)5ycT1aMTW_CeOT@X{ThnNS%))8_~U)K$7w0OU{Uk=&B zi(JnI{f?w#2coBz<+plI^3j0_I(< z8RxR&N9uO-B!>J6==Cc;p)aytu39s&fk(sjabx*@ zL6Y4b~C81@JkVlM3-ex?Mj zgvpnHUR(chz`&@e8Mw zK7p4w9;Fc9%<#iM5cX}JsB|3G7~G)M`cTo@OYzsNUR;UOqx5X#@K&j@>bSM-2+@(Z z168C!1k?FD&*u+!v21#fulZG(TaIr=$5gprwGFsHp@c*Dl~T#ygv<6^FNnubqAOV@ zU`%_ll7h5Qz4x``v{ZKZQZ9FaLxIgmLtJ_&3DPb&rpcvVfy48X8u8S@tN^x13S5(c zmttCuEoa1&JQTstga(Yl_RYF}u-_RaD+K?W^bBP+U>UdLTcdyj^Eyb>RH6X69#9@(Nqf5Mgv7BSjG) zqd4$e?Rlnp5Ts#_j&xml<4eHaYfKf6MR9n>s;Ixq{_#pZG7`OjHlHMMp}ahhFr-ly zbve4vRQap%abGtr|0a?*X=rCWWw4n5#sR8riD9>KchV)Ok!)lI)D?}ZM0704ME0DQ zDtUvfxXe+GcY_@_1);+MEm9g5V7PPATIWq6doFfqFN3M*%dOJ>ipf-us-Bn2 zEt(r1n4`L(e+E6@KI9(?#`FpeI2Tpmf+wbf9WexIl+Ew6BYnt=` zf7e{ZvWL&FH*DwkS&yn}hzuxvsY3nf03pE8-|twwg!1c|`nC3Ev1Q|Jtv5%xPH9sd z_>+h|RFyugP(?KU_>-#5MS@q6|DH=n*& zh-$^|(Q(TQ5_gwj>S~t@!cmZ$3w=Cwz&1kkh1-SN`-)=>+GcZfAb<*B&{6@8ZwrGi zlvRz97K$`Fi^i%xp@CZDjOcPr@6p8FT|T>V*U&c=Q-0gC;Zd^R-}sCXJl0&9jfcL? zX7um?j@m)|){RbzHjGX3)J;1kry2d`y01K{7@C%!@tnQKC2F$BnV&(C zu#9$Wg9r^~jYcxUpoOm%HK^vi^eEgLqd!cXqF2@G$FAwk^a!TP1JUk7lps#i5>9gE zxO8GcW2-})ffQ5?ff6}Xd#RY16d7H5>O;ocW~Q)N9#3|hF2dGoKF%iGbW}$tUcsMe z)i?{BxI_T`qU}`6dkz5xyUg}|k!fi*uZO^D;yYS%TP%N+bLpyL!bVLYuD+OAxM;r7 z>B3YMprqk%w2J}qslAqm@$8;FOn9@{=~;3yv)GH}U-^@@#@&W3B@N$jgkaTqHe!`y zpU0_AdXi&|U%vPF#<**erXiGgo3%8yXL@@gaoY511W}B7b^Cu6U1?m>+4mL@H?%Sn zL#DKF8AHO+gse1ep#{yd{-#4B);C%pa7;}HDKRxoDVNfeN}H_omnA`o8Y(VRkWyM^ zP>NP)N(|wW=GOeb^9En=0zc1v&OOh0&bjvjW9vO@;?KtII~@fc{f1T9yM}wndlx<5c8mNB76FdqY1x!%l8GTbT2c>)-UNTfpsm?OXSq zMXlJ>fBAXrp$;-NfaUPJW6lB|`>|gC*zbA~T6mc|?0(I-ZYJGLd|rO(vFqc!71H5} zUj*M}hf;M~2TWn!A(?JSXO1Z>i>>E%cR0uS?WSEx9rd_=Z*!K5x?_Ks*FirL_H72( zp7dMkCi(3(e`amMQ#C$&=I60bD{$*Rf0^j0Sf>fV&q2N=J@g3;+1xJJJsQ6mtDN8R zpe?AXquE$v_;c;%QpVUB6j^b1LVL;Kd2{>N`m;`yoSwZOCx(ABdrGt`6e5Yx=xM zY4?$XMdoABT^ZW&`#p=#rlQ~%DZglEg+AOo{OTeZj9^|MA}otm@Mg2Ss9xJhtzp#D zU5o48`5kr0AwotW)eCJ1OBdzP#1x1dYsS648g4jN3spCy*1+AvylhK6-{pBAZEBy5 znEjUBg`i9GQbBvK7cP#7^n?ST&5KA>rmCwg{lOYxmpk{Ba^jrDxPho#A7q?;_2cmv zII~D``)sb5k(~D{wu$TB_bL8e#HVP?gyTG+W^tAFUHas3!|rvjn%5=Vu z>C(*#&TfxFdt+u}+W{`+vT?`SclF1UCtZJDxLml|?|G6#?i)!P&t-ON(gl9kovL5I{(UasUlXq}>dN61?70)IC&Ruv{XM%!3=FUbkO$p&+yu6hl-M+6BCyecV)b0r1{gI_{5agn~-fXbuuA0DE4_`VxT#Bfgki1;~{9luX%k!6Kg`uTmq4@0FgB3>> zc$dpN1Sg_D<{ZmuKmFuh!y8avrxYxNmgy!?($K8lvcC#B+ORUwtk*U z3C+Tk_OhGvj|`8l6kHs$kxcy|(S(B6LWTAiAN!dTK6^Ae*lU>6hZd)~uStns^0k_l zI{)0E^T#EuW2uNy5k9ag_R#%HpDUW4Z#e*7DG;%vyZ&5e-6Gq2HufHCMG8aAZHqOz z2Mz-Jey|P>KFJt_9^TrTTO8}(u5v2hM&n*oMI0!Sak7OXsC?i^K>H-rOQBT`T3fUla@@@GD&=&N zDTg=sp#Ka;WuaPr^W$h=7Vjt%|L3oB7g_w6rXlgp`@=7IlS{cKcc_EvqSwDB@@XT~ za%yG+2A6u(v5S2=%A|X9V1MacYIVEUvfhwRGwJelp{-{T0~TrLt8U=&0cI+;#WP)K zpT{rp!U7neNp?_Y>1p5Kdc|_|8QH_<@7}@n+~`+#P@LPHqFqzYlbb>_>l4K<)Avud zd`(C}wd|6Z|2pUNaAbF=$vZHE+aczE*;Xvw_xV-JhWq1VOcOSp?;l1fBae6xaphuKOUyE z`Q|modzP&V-O1Js4Fv{a?|qBZ9P_xb_Q=0Q06yqx=-7QW(;q`-vVbc^z9n|Ky?r^_gx=9f4ek8BotzjVINjAJ=6Y#gSTPQj@2s% zLuuqnTfLbF&1B2wIL*$=UmMfZ)U8p+SNqm(1s6+e8iTbo|L`r~PB4)HC*<@rLQ9y4 zN6htNmIM+VcNZ32Zd2Mu-)c&RL$yqYfbu%LN>ON0f) zR4sJg68sZbO9mfg*v3p}+5n&-EqhR+-;*vwc9KSE1b7UGb!@d4E~7}M*^Pe=VY>zi zedN)%qRiwzEuIC*&dc8JR3uoJ?Om5TjnyOVQT(JB=6PGgu}5gx5@kt#Ds_r(q@(2| zcFkczh@z2~H4UO>nV8$!4|&RyUKI)tFf(IR)>E(nI9N46&mfYp29!6{XclfHEsFpm zy$EL92Px7P> z2EjFXFbV7H&8F>w*V0>6j%_$)PwFl&Nt*EcYL{yi$q0R%G%Bi;Tc`oLkGsfGT^s4!bIq?UK=vMFIk-art9j2jfwSk3wy$ElPDDy_q zY}z=Zd_}43!;hk|Jcs(4RbuLvQb2f@Q44mx>>{^3W|$u;0%_=)_g`MbJFkxPSByb^ zcg=5V0fENQnN})_0wsWIET;PSpR#FTON|UD;V^OB*`eO)FoCR-RWNZwC?PgJZQuw| zhcv>4&4;NLdPEYT9l^<9=t!sX(Mkv^YmasrKVK9bi)~%BU^B9({8(LN1#&FaH@_sm 
zA<>lWBHA>!bWay48RMJr)an&YiW1h^Qq?feYtfaqnVp)Q?d0k}W3Q@KZVQ_CQr+#!g-gHNYZWSjt+g4y3rq;@W zCCA~-`^{dcK`IuE74Xiwg=*0XLDX)C(^ME;h^lq~?F5JYR2UbO(WPyGq;Anf;qQx{ z3GJs{GnGe?of_^Ibv2wG>I}NZP3@;L1HYT6PEKZ|x@}&kS zJ1*+4=vYZNL_lZq-yM!{u&+BqeU@yK+K{i`k^n8}R8e5NGLaYq}4iI8bk%$$kKc}ENtFltwMu{OvzDQ(cB#O5#;#3GTqWf z<8J8Y^4);}9hdGj*px`GUd8Fr--+$F2#mioM0W!Vp9PmClE%ppw`i}mon>1xxMWT_ z80?(V1;{yXm8DNa2o38yoA%aGcIG?*@!fUy%0?Xn#NVN`Qw0JmqHa-pnuGs%zRd)-~Q4zwQwVB4LcqQ;DAgfNhWnk}H>}5xnLbUHv?dKfZ03jM^J_MKAN=@cFdl3o&L|u)fFYLAh z*u!9+dJvB}j$}Y0YgY1p6v(!L;@L`LncT-$n>?7$>QsALf%3>2POSGVf`d`)O)wMN zN(_Y!Q;x+X?%I?g#7#Q0we(hT4D%*ay|OnA zXF%EN(NGmrIo}%C01d7+8_UBcl3LY*J_{A4?^SHA8G986B_d{S4@dqqW8$V^1n^o@ zArIz$p>NMFmU$h#>0_Z!2ZF}p#evFgmmb|Ba6SNrg7Tp3LE4^aSUln=`(B_BGm|go z?&?b0MBVkM1P`}uE?Hqv-~0%mo+3O@B#!BWf-a|MDT;trJq)sB(UJvgk}Rk|&e?L# zw0Dx+CP3f2Od0>JYO9DSWEQ7xNbuD<^l4}-zMPJ(+@yJ?+smhkBNi#n1r8*V4KPte z%Cn|&tPcW^nh071Kg<*$kF|uuIdq}A8UcRPtO{4ghzUys3Os})v;%C4jBshe>=HhY z?Iu)v@}dl++4LefO9|-6uwD5bu?jW1CKmfDj2zI;>SCLOQ7!v!*luUj1`ZRiXU;Bh zlWQ!tf{{CU~IN%w06vg$8M znm55=jdMt5>k#uM7y&pvvQ=f-l+MIuP6UD|L;C?fk1}IdWnm0r*HMUjH$KU$PSgq{v(MCamFPOK^{=(o5D|>i8+9bv%j$rk1(?CSSDDoEs`F+mE6u*-!X!Zj zq2MyXjKp}dj95T`ic>6_KyDURVc$~RMy2ossTXv#n;0A?}G%}5?;?bj=kWHue9!boY0Rp*qA8ZBHp?P@^# z&?v`24r&5NlMpfNfC_exc?{b>ZJ>+aB*V74+=LEni7NLgY8Uydm~gJV-#a;NmY350 z)U8Em=UT9^WgrahuEG^eEsoy%KTQZt6k%yVPWygG(84ykk*9rVyocbg-@0Cl zU<*mJ`O=aC8DCpW$(Ke##o%(?s9l27-ZjQGi;zY5s&!MZ>U{3BU21p#u$K6UA1&~B- zv7-muyk${z|CIsX-LKuMWsI1;MG5Uo+ql_@Qx<2Myy$I!bP}S**D>GepbemipkYBl zmM+QwCcv-Q&IBc`mI1AIY}GuI1Y-M&c=Qoy<2d<;*<8Vh*}#!x;c0O+2|95k&Yd=4 zr<{OjIQc>rNp>o~0Rc=v*e=1iJh=N6(rm#$=?HEJS2lF~oes`6ZE6BlNJy$6-iZJM zn!`-{?>ekaXG5l~qIPO_-rO~;w+#WF08+#D@P9JLNk5UuE_*S7^fQQ!g8lv@qd}$2 z;IUIOp0+%!O+wH>HzP^^DZ*?C4#0gLz|171!6i8~yWV7$UpU~(FQQ?-!E!AZkVVh0~ zgN@*dPr4RX2}Fx4>yM>a^?%#p@-@a|!|~g4VE|$JNO8TVhN<6vDobNg*7>T(V&HHj zF4C?7&c3T{*v84=QC<$R*Ji1gNuW<{@?O(IeJl9M0MiqUyo58C2;XPJxiE`rY8NfOJ5v_!>Un+BfUSqIl-xU%d$a`Uz}Dz@&TIy$Nw%3VG!a7^ZuFvjm`nX; z7RG^sLRz?@;GZLq<~XWsqpC(%(WHY_-qCtPUsmzHNMX1R z7BkmMjn*T_4CL?7emzoo<`KF%&{s>Ov;LPh%jX#!#-h>2698cf+)!JYO3fceV8@2?zyH4P>dwjYgdJg*BQOhE~x zIh3%e0y*ALc=Y(?zRPQ5z0=Y{b?IEesP*!jcSnK~6|r2$H>!Is#`lUt_wBds04BPT zW4b>k68}cLV$<#4ZlI5b-g@!%=hV`I`xd%8{I@wuK{PDwYI6{g`(i`=+K2qtz0X~A zImkwnvw$YH+g{7zVktx0$E~>O3B;gC_Bs2^IWG?=4Sw*BrS4lEu*QN zS*(MxU7EAlC%vpDbmV6r#Suv5IcG^v5tNNC7)MVL1z;PZrW_Q6FRoD!6OdCbH|AjW z4Zc$_W-m0(br4sevYPT$p?zX@e~pU(tu6z--%ODNA#5M5qiP#0*ekXUBLI=vKzpu7 zX_tNTt+ma!(2a^DtJ-Db$8>0j+KB=?S9No*eUnDp5F;XXnGW?4nMEjnxxJpOX)UY5 zSK8~}V3IH~Y?E-git=hCq`^d>Z=F`u9P7f(@z{U1;OCYq&h5o=)4<|%$g5i3Z`;() z_RsRof9f8#IncKRG$YK|reM!!X*b&EsN>wyu}6X*ODv|m?>Bc9UusH!0ywr|Yw2?a pa^S(Ya07d;T!oUBXtdHJHMdN_ePy^)-FR_TZvWDzTTcI~{~thZMP&d0 literal 0 HcmV?d00001 diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/requirements.txt b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/run.bat b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/run.bat new file mode 100644 index 000000000..ee7db9867 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/run.bat @@ -0,0 +1,29 @@ +rem echo %CM_PYTHON_BIN% +rem echo %CM_DATASET_PATH% +rem echo %CM_DATASET_AUX_PATH% +rem echo %CM_ML_MODEL_FILE_WITH_PATH% + +rem connect CM intelligent components with CK env +set CK_ENV_ONNX_MODEL_ONNX_FILEPATH=%CM_ML_MODEL_FILE_WITH_PATH% +set CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME=input_tensor:0 +set 
CK_ENV_ONNX_MODEL_OUTPUT_LAYER_NAME=softmax_tensor:0 +set CK_ENV_DATASET_IMAGENET_VAL=%CM_DATASET_PATH% +set CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT=%CM_DATASET_AUX_PATH%\synset_words.txt +set ML_MODEL_DATA_LAYOUT=NCHW +set CK_BATCH_SIZE=%CM_BATCH_SIZE% +set CK_BATCH_COUNT=%CM_BATCH_COUNT% + +IF NOT DEFINED CM_TMP_CURRENT_SCRIPT_PATH SET CM_TMP_CURRENT_SCRIPT_PATH=%CD% + +IF DEFINED CM_INPUT SET CM_IMAGE=%CM_INPUT% + +echo. +%CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo. +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\onnx_classify.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +rem Just a demo to pass environment variables from native scripts back to CM workflows +echo CM_APP_IMAGE_CLASSIFICATION_ONNX_PY=sucess > tmp-run-env.out diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/run.sh new file mode 100644 index 000000000..62b07e1f1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/run.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +if [[ ${CM_RUN_DOCKER_CONTAINER} == "yes" ]]; then + exit 0 +fi + +#echo ${CM_PYTHON_BIN} +#echo ${CM_DATASET_PATH} +#echo ${CM_DATASET_AUX_PATH} +#echo ${CM_ML_MODEL_FILE_WITH_PATH} +CM_PYTHON_BIN=${CM_PYTHON_BIN_WITH_PATH:-python3} +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} + +# connect CM intelligent components with CK env +export CK_ENV_ONNX_MODEL_ONNX_FILEPATH=${CM_ML_MODEL_FILE_WITH_PATH} +export CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME="input_tensor:0" +export CK_ENV_ONNX_MODEL_OUTPUT_LAYER_NAME="softmax_tensor:0" +export CK_ENV_DATASET_IMAGENET_VAL=${CM_DATASET_PATH} +export CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT=${CM_DATASET_AUX_PATH}/synset_words.txt +export ML_MODEL_DATA_LAYOUT="NCHW" +export CK_BATCH_SIZE=${CM_BATCH_SIZE} +export CK_BATCH_COUNT=${CM_BATCH_COUNT} + +if [[ "${CM_INPUT}" != "" ]]; then export CM_IMAGE=${CM_INPUT}; fi + +PIP_EXTRA=`${CM_PYTHON_BIN} -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"` + +echo "" +${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${PIP_EXTRA} +test $? -eq 0 || exit 1 + +echo "" +${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/onnx_classify.py +test $? 
-eq 0 || exit 1
+
+# Just a demo to pass environment variables from native scripts back to CM workflows
+echo "CM_APP_IMAGE_CLASSIFICATION_ONNX_PY=success" > tmp-run-env.out
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/src/onnx_classify.py b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/src/onnx_classify.py
new file mode 100644
index 000000000..c2c5a6ceb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/src/onnx_classify.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python3
+
+# Extended by Grigori Fursin to support the MLCommons CM workflow automation
+# language
+
+import os
+import onnxruntime as rt
+import numpy as np
+import time
+import json
+
+from PIL import Image
+
+model_path = os.environ['CK_ENV_ONNX_MODEL_ONNX_FILEPATH']
+input_layer_name = os.environ['CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME']
+output_layer_name = os.environ['CK_ENV_ONNX_MODEL_OUTPUT_LAYER_NAME']
+normalize_data_bool = os.getenv(
+    'CK_ENV_ONNX_MODEL_NORMALIZE_DATA', '0') in (
+        'YES', 'yes', 'ON', 'on', '1')
+subtract_mean_bool = os.getenv(
+    'CK_ENV_ONNX_MODEL_SUBTRACT_MEAN', '0') in (
+        'YES', 'yes', 'ON', 'on', '1')
+given_channel_means = os.getenv('ML_MODEL_GIVEN_CHANNEL_MEANS', '')
+if given_channel_means:
+    given_channel_means = np.array(
+        given_channel_means.split(' '),
+        dtype=np.float32)
+
+imagenet_path = os.environ['CK_ENV_DATASET_IMAGENET_VAL']
+labels_path = os.environ['CK_CAFFE_IMAGENET_SYNSET_WORDS_TXT']
+data_layout = os.environ['ML_MODEL_DATA_LAYOUT']
+batch_size = int(os.environ['CK_BATCH_SIZE'])
+batch_count = int(os.environ['CK_BATCH_COUNT'])
+CPU_THREADS = int(os.getenv('CK_HOST_CPU_NUMBER_OF_PROCESSORS', 0))
+
+
+def load_labels(labels_filepath):
+    my_labels = []
+    with open(labels_filepath, 'r') as input_file:
+        for l in input_file:
+            my_labels.append(l.strip())
+    return my_labels
+
+
+def load_and_resize_image(image_filepath, height, width):
+    # sic!
The order of dimensions in resize is (W,H) + pillow_img = Image.open(image_filepath).resize((width, height)) + + # Grigori fixed below + # input_data = np.float32(pillow_img) + input_data = np.asarray(pillow_img) + input_data = np.asarray(input_data, np.float32) + + # Normalize + if normalize_data_bool: + input_data = input_data / 127.5 - 1.0 + + # Subtract mean value + if subtract_mean_bool: + if len(given_channel_means): + input_data -= given_channel_means + else: + input_data -= np.mean(input_data) + +# print(np.array(pillow_img).shape) + nhwc_data = np.expand_dims(input_data, axis=0) + + if data_layout == 'NHWC': + # print(nhwc_data.shape) + return nhwc_data + else: + nchw_data = nhwc_data.transpose(0, 3, 1, 2) + # print(nchw_data.shape) + return nchw_data + + +def load_a_batch(batch_filenames): + unconcatenated_batch_data = [] + for image_filename in batch_filenames: + image_filepath = image_filename + nchw_data = load_and_resize_image(image_filepath, height, width) + unconcatenated_batch_data.append(nchw_data) + batch_data = np.concatenate(unconcatenated_batch_data, axis=0) + + return batch_data + + +# print("Device: " + rt.get_device()) +sess_options = rt.SessionOptions() + +if CPU_THREADS > 0: + sess_options.enable_sequential_execution = False + sess_options.session_thread_pool_size = CPU_THREADS + +if len(rt.get_all_providers()) > 1 and os.environ.get( + "USE_CUDA", "yes").lower() not in ["0", "false", "off", "no"]: + # Currently considering only CUDAExecutionProvider + sess = rt.InferenceSession( + model_path, + sess_options, + providers=['CUDAExecutionProvider']) +else: + sess = rt.InferenceSession( + model_path, + sess_options, + providers=["CPUExecutionProvider"]) + +# FIXME: check that input_layer_name belongs to this list +input_layer_names = [x.name for x in sess.get_inputs()] +input_layer_name = input_layer_name or input_layer_names[0] + +# FIXME: check that output_layer_name belongs to this list +output_layer_names = [x.name for x in sess.get_outputs()] +output_layer_name = output_layer_name or output_layer_names[0] + +model_input_shape = sess.get_inputs()[0].shape +model_classes = sess.get_outputs()[1].shape[1] +labels = load_labels(labels_path) +# 1 means the labels represent classes 1..1000 and the background class 0 +# has to be skipped +bg_class_offset = model_classes - len(labels) + +if data_layout == 'NHWC': + (samples, height, width, channels) = model_input_shape +else: + (samples, channels, height, width) = model_input_shape + +print("") +print("Data layout: {}".format(data_layout)) +print("Input layers: {}".format([str(x) for x in sess.get_inputs()])) +print("Output layers: {}".format([str(x) for x in sess.get_outputs()])) +print("Input layer name: " + input_layer_name) +print("Expected input shape: {}".format(model_input_shape)) +print("Output layer name: " + output_layer_name) +print("Data normalization: {}".format(normalize_data_bool)) +print("Subtract mean: {}".format(subtract_mean_bool)) +print('Per-channel means to subtract: {}'.format(given_channel_means)) +print("Background/unlabelled classes to skip: {}".format(bg_class_offset)) +print("") + +starting_index = 1 + +start_time = time.time() + +for batch_idx in range(batch_count): + print('') + print("Batch {}/{}:".format(batch_idx + 1, batch_count)) + + batch_filenames = [ + imagenet_path + + '/' + + "ILSVRC2012_val_00000{:03d}.JPEG".format( + starting_index + + batch_idx * + batch_size + + i) for i in range(batch_size)] + + # Grigori: trick to test models: + if os.environ.get('CM_IMAGE', '') != '': + 
batch_filenames = [os.environ['CM_IMAGE']]
+
+    batch_data = load_a_batch(batch_filenames)
+    # print(batch_data.shape)
+
+    batch_predictions = sess.run(
+        [output_layer_name], {
+            input_layer_name: batch_data})[0]
+
+    cm_status = {'classifications': []}
+
+    print('')
+    top_classification = ''
+    for in_batch_idx in range(batch_size):
+        # skipping the background class on the left (if present)
+        softmax_vector = batch_predictions[in_batch_idx][bg_class_offset:]
+        top5_indices = list(reversed(softmax_vector.argsort()))[:5]
+
+        print(' * ' + batch_filenames[in_batch_idx] + ' :')
+
+        for class_idx in top5_indices:
+            if top_classification == '':
+                top_classification = labels[class_idx]
+
+            print(
+                "\t{}\t{}\t{}".format(
+                    class_idx,
+                    softmax_vector[class_idx],
+                    labels[class_idx]))
+
+            cm_status['classifications'].append({'class_idx': int(class_idx),
+                                                 'softmax': float(softmax_vector[class_idx]),
+                                                 'label': labels[class_idx]})
+
+    print('')
+    print('Top classification: {}'.format(top_classification))
+    cm_status['top_classification'] = top_classification
+
+avg_time = (time.time() - start_time) / batch_count
+cm_status['avg_time'] = avg_time
+
+# Record cm_status to embed it into CM workflows
+with open('tmp-run-state.json', 'w') as cm_file:
+    cm_file.write(json.dumps(
+        {'cm_app_image_classification_onnx_py': cm_status}, sort_keys=True, indent=2))
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/tests/README.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/tests/README.md
new file mode 100644
index 000000000..899509cb7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-onnx-py/tests/README.md
@@ -0,0 +1,14 @@
+```bash
+docker system prune -a -f
+
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse.jpg --verify=no --env.CM_DOWNLOAD_CHECKSUM=45ae5c940233892c2f860efdf0b66e7e
+
+cm docker script "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --env.CM_IMAGE=computer_mouse.jpg
+cm docker script "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg
+
+cmrd "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg -j --docker_it
+
+cmrd "python app image-classification onnx" --docker_cm_repo=ctuning@mlcommons-ck --input=computer_mouse.jpg --output=.
+
+
+```
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
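The ONNX classifier above hands its results back to the CM workflow through `tmp-run-state.json`. The following is a minimal sketch (not part of the patch) of how a downstream step might consume that file, assuming the script has already run in the current directory; the key names follow the `json.dumps` call above:

```python
# Minimal sketch; assumes onnx_classify.py already ran in this directory.
import json

with open('tmp-run-state.json') as f:
    state = json.load(f)

status = state['cm_app_image_classification_onnx_py']
print('Top classification:', status['top_classification'])
print('Average time per batch: {:.3f} s'.format(status['avg_time']))

# Each classification entry carries class_idx, softmax and label.
for c in status['classifications'][:5]:
    print('{:>6}  {:.4f}  {}'.format(c['class_idx'], c['softmax'], c['label']))
```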
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/README-extra.md
new file mode 100644
index 000000000..5e59c8fed
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/README-extra.md
@@ -0,0 +1,3 @@
+# Image Classification App in C++ for ResNet50 model
+
+* In development stage, not complete
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/README.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/README.md
new file mode 100644
index 000000000..7f5dcacbd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tf-onnx-cpp](https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tf-onnx-cpp) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/_cm.yaml
new file mode 100644
index 000000000..c7ee8b560
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/_cm.yaml
@@ -0,0 +1,27 @@
+alias: app-image-classification-tf-onnx-cpp
+automation_alias: script
+automation_uid: 5b4e0237da074764
+category: Modular AI/ML application pipeline
+default_env:
+  CM_BATCH_COUNT: '1'
+  CM_BATCH_SIZE: '1'
+deps:
+- tags: detect,os
+- tags: get,sys-utils-cm
+- tags: get,gcc
+- tags: get,dataset,image-classification,original
+- tags: get,dataset-aux,image-classification
+- tags: get,ml-model,raw,image-classification,resnet50,_onnx,_opset-11
+- tags: tensorflow,from-src
+  version: v2.0.0
+tags:
+- app
+- image-classification
+- tf
+- tensorflow
+- tf-onnx
+- tensorflow-onnx
+- onnx
+- cpp
+tags_help: app image-classification cpp tensorflow onnx
+uid: 879ed32e47074033
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/include/benchmark.h b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/include/benchmark.h
new file mode 100644
index 000000000..42b0418fc
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/include/benchmark.h
@@ -0,0 +1,511 @@
+/*
+ * Copyright (c) 2018 cTuning foundation.
+ * See CK COPYRIGHT.txt for copyright details.
+ *
+ * See CK LICENSE for licensing details.
+ * See CK COPYRIGHT for copyright details.
+ */
+
+#pragma once
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <chrono>
+#include <dirent.h>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <sstream>
+#include <string.h>
+#include <thread>
+#include <vector>
+
+//#include <xopenme.h>
+
+#define DEBUG(msg) std::cout << "DEBUG: " << msg << std::endl;
+
+namespace CK {
+
+enum _TIMERS {
+  X_TIMER_SETUP,
+  X_TIMER_TEST,
+
+  X_TIMER_COUNT
+};
+
+enum _VARS {
+  X_VAR_TIME_SETUP,
+  X_VAR_TIME_TEST,
+  X_VAR_TIME_IMG_LOAD_TOTAL,
+  X_VAR_TIME_IMG_LOAD_AVG,
+  X_VAR_TIME_CLASSIFY_TOTAL,
+  X_VAR_TIME_CLASSIFY_AVG,
+
+  X_VAR_COUNT
+};
+
+enum MODEL_TYPE {
+  LITE,
+  TF_FROZEN
+};
+
+/// Store named value into xopenme variable.
+inline void store_value_f(int index, const char* name, float value) {
+  char* json_name = new char[strlen(name) + 6];
+  sprintf(json_name, "\"%s\":%%f", name);
+  //xopenme_add_var_f(index, json_name, value);
+  delete[] json_name;
+}
+
+/// Load mandatory string value from the environment.
+inline std::string getenv_s(const std::string& name) {
+  const char *value = getenv(name.c_str());
+  if (!value)
+    throw "Required environment variable " + name + " is not set";
+  return std::string(value);
+}
+
+/// Load mandatory integer value from the environment.
+inline int getenv_i(const std::string& name) {
+  const char *value = getenv(name.c_str());
+  if (!value)
+    throw "Required environment variable " + name + " is not set";
+  return atoi(value);
+}
+
+/// Load mandatory float value from the environment.
+inline float getenv_f(const std::string& name) {
+  const char *value = getenv(name.c_str());
+  if (!value)
+    throw "Required environment variable " + name + " is not set";
+  return atof(value);
+}
+
+/// Dummy `sprintf` like formatting function using std::string.
+/// It uses a buffer of fixed length so it can't be used in all cases;
+/// generally use it for short messages with numeric arguments.
+template <typename ...Args>
+inline std::string format(const char* str, Args ...args) {
+  char buf[1024];
+  sprintf(buf, str, args...);
+  return std::string(buf);
+}
+
+//----------------------------------------------------------------------
+
+class Accumulator {
+public:
+  void reset() { _total = 0, _count = 0; }
+  void add(float value) { _total += value, _count++; }
+  float total() const { return _total; }
+  float avg() const { return _total / static_cast<float>(_count); }
+private:
+  float _total = 0;
+  int _count = 0;
+};
+
+//----------------------------------------------------------------------
+
+class BenchmarkSettings {
+public:
+  const std::string images_dir = getenv_s("CK_ENV_DATASET_IMAGENET_PREPROCESSED_DIR");
+  const std::string images_file = getenv_s("CK_ENV_DATASET_IMAGENET_PREPROCESSED_SUBSET_FOF");
+  const bool skip_internal_preprocessing = getenv("CK_ENV_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE")
+                                           && ( getenv_s("CK_ENV_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE") == "float32" );
+
+  const std::string result_dir = getenv_s("CK_RESULTS_DIR");
+  const std::string input_layer_name = getenv_s("CK_ENV_TENSORFLOW_MODEL_INPUT_LAYER_NAME");
+  const std::string output_layer_name = getenv_s("CK_ENV_TENSORFLOW_MODEL_OUTPUT_LAYER_NAME");
+  const int batch_count = getenv_i("CK_BATCH_COUNT");
+  const int batch_size = getenv_i("CK_BATCH_SIZE");
+  const int image_size = getenv_i("CK_ENV_DATASET_IMAGENET_PREPROCESSED_INPUT_SQUARE_SIDE");
+  const int num_channels = 3;
+  const int num_classes = 1000;
+  const bool normalize_img = getenv_s("CK_ENV_TENSORFLOW_MODEL_NORMALIZE_DATA") == "YES";
+  const bool subtract_mean = getenv_s("CK_ENV_TENSORFLOW_MODEL_SUBTRACT_MEAN") == "YES";
+  const char *given_channel_means_str = getenv("CM_ML_MODEL_GIVEN_CHANNEL_MEANS");
+
+  const bool full_report = getenv_i("CK_SILENT_MODE") == 0;
+
+  BenchmarkSettings(enum MODEL_TYPE mode = MODEL_TYPE::LITE) {
+
+    if(given_channel_means_str) {
+      std::stringstream ss(given_channel_means_str);
+      for(int i=0;i<3;i++){
+        ss >> given_channel_means[i];
+      }
+    }
+
+    switch (mode)
+    {
+    case MODEL_TYPE::LITE:
+      _graph_file = getenv_s("CK_ENV_TENSORFLOW_MODEL_TFLITE_FILEPATH");
+      break;
+
+    case MODEL_TYPE::TF_FROZEN:
+      _graph_file = getenv_s("CK_ENV_TENSORFLOW_MODEL_TF_FROZEN_FILEPATH");
+      break;
+
+    default:
+      std::cout << "Unsupported MODEL_TYPE" << std::endl;
+      exit(1);
+      break;
+    };
+    _number_of_threads = std::thread::hardware_concurrency();
+    _number_of_threads = _number_of_threads < 1 ? 1 : _number_of_threads;
+    _number_of_threads = !getenv("CK_HOST_CPU_NUMBER_OF_PROCESSORS")
+                         ? _number_of_threads
+                         : getenv_i("CK_HOST_CPU_NUMBER_OF_PROCESSORS");
+
+    // Print settings
+    std::cout << "Graph file: " << _graph_file << std::endl;
+    std::cout << "Image dir: " << images_dir << std::endl;
+    std::cout << "Image list: " << images_file << std::endl;
+    std::cout << "Image size: " << image_size << std::endl;
+    std::cout << "Image channels: " << num_channels << std::endl;
+    std::cout << "Prediction classes: " << num_classes << std::endl;
+    std::cout << "Result dir: " << result_dir << std::endl;
+    std::cout << "Batch count: " << batch_count << std::endl;
+    std::cout << "Batch size: " << batch_size << std::endl;
+    std::cout << "Normalize: " << normalize_img << std::endl;
+    std::cout << "Subtract mean: " << subtract_mean << std::endl;
+    if(subtract_mean && given_channel_means_str)
+      std::cout << "Per-channel means to subtract: " << given_channel_means[0]
+                << ", " << given_channel_means[1]
+                << ", " << given_channel_means[2] << std::endl;
+
+    // Create results dir if none
+    auto dir = opendir(result_dir.c_str());
+    if (dir)
+      closedir(dir);
+    else
+      system(("mkdir " + result_dir).c_str());
+
+    // Load list of images to be processed
+    std::ifstream file(images_file);
+    if (!file)
+      throw "Unable to open image list file " + images_file;
+    for (std::string s; !getline(file, s).fail();)
+      _image_list.emplace_back(s);
+    std::cout << "Image count in file: " << _image_list.size() << std::endl;
+  }
+
+  const std::vector<std::string>& image_list() const { return _image_list; }
+
+  std::vector<std::string> _image_list;
+
+  int number_of_threads() { return _number_of_threads; }
+
+  std::string graph_file() { return _graph_file; }
+
+  float given_channel_means[3];
+private:
+  int _number_of_threads;
+  std::string _graph_file;
+};
+
+//----------------------------------------------------------------------
+
+class BenchmarkSession {
+public:
+  BenchmarkSession(const BenchmarkSettings* settings): _settings(settings) {
+  }
+
+  virtual ~BenchmarkSession() {}
+
+  float total_load_images_time() const { return _loading_time.total(); }
+  float total_prediction_time() const { return _total_prediction_time; }
+  float avg_load_images_time() const { return _loading_time.avg(); }
+  float avg_prediction_time() const { return _prediction_time.avg(); }
+
+  bool get_next_batch() {
+    if (_batch_index+1 == _settings->batch_count)
+      return false;
+    _batch_index++;
+    int batch_number = _batch_index+1;
+    if (_settings->full_report || batch_number%10 == 0)
+      std::cout << "\nBatch " << batch_number << " of " << _settings->batch_count << std::endl;
+    int begin = _batch_index * _settings->batch_size;
+    int end = (_batch_index + 1) * _settings->batch_size;
+    int images_count = _settings->image_list().size();
+    if (begin >= images_count || end > images_count)
+      throw format("Not enough images to populate batch %d", _batch_index);
+    _batch_files.clear();
+    for (int i = begin; i < end; i++)
+      _batch_files.emplace_back(_settings->image_list()[i]);
+    return true;
+  }
+
+  /// Begin measuring of new benchmark stage.
+  /// Only one stage can be measured at a time.
+  void measure_begin() {
+    _start_time = std::chrono::high_resolution_clock::now();
+  }
+
+  /// Finish measuring of batch loading stage
+  float measure_end_load_images() {
+    float duration = measure_end();
+    if (_settings->full_report)
+      std::cout << "Batch loaded in " << duration << " s" << std::endl;
+    _loading_time.add(duration);
+    return duration;
+  }
+
+  /// Finish measuring of batch prediction stage
+  float measure_end_prediction() {
+    float duration = measure_end();
+    _total_prediction_time += duration;
+    if (_settings->full_report)
+      std::cout << "Batch classified in " << duration << " s" << std::endl;
+    // Skip first batch in order to account for warming up the system
+    if (_batch_index > 0 || _settings->batch_count == 1)
+      _prediction_time.add(duration);
+    return duration;
+  }
+
+  int batch_index() const { return _batch_index; }
+  const std::vector<std::string>& batch_files() const { return _batch_files; }
+
+private:
+  int _batch_index = -1;
+  Accumulator _loading_time;
+  Accumulator _prediction_time;
+  const BenchmarkSettings* _settings;
+  float _total_prediction_time = 0;
+  std::vector<std::string> _batch_files;
+  std::chrono::time_point<std::chrono::high_resolution_clock> _start_time;
+
+  float measure_end() const {
+    auto finish_time = std::chrono::high_resolution_clock::now();
+    std::chrono::duration<double> elapsed = finish_time - _start_time;
+    return static_cast<float>(elapsed.count());
+  }
+};
+
+//----------------------------------------------------------------------
+
+inline void init_benchmark() {
+  //xopenme_init(X_TIMER_COUNT, X_VAR_COUNT);
+}
+
+inline void finish_benchmark(const BenchmarkSession& s) {
+  // Store metrics
+  /* store_value_f(X_VAR_TIME_SETUP, "setup_time_s", xopenme_get_timer(X_TIMER_SETUP));
+  store_value_f(X_VAR_TIME_TEST, "test_time_s", xopenme_get_timer(X_TIMER_TEST));
+  store_value_f(X_VAR_TIME_IMG_LOAD_TOTAL, "images_load_time_total_s", s.total_load_images_time());
+  store_value_f(X_VAR_TIME_IMG_LOAD_AVG, "images_load_time_avg_s", s.avg_load_images_time());
+  store_value_f(X_VAR_TIME_CLASSIFY_TOTAL, "prediction_time_total_s", s.total_prediction_time());
+  store_value_f(X_VAR_TIME_CLASSIFY_AVG, "prediction_time_avg_s", s.avg_prediction_time());
+
+  // Finish xopenme
+  xopenme_dump_state();
+  xopenme_finish();*/
+}
+
+template <typename L>
+void measure_setup(L &&lambda_function) {
+  //xopenme_clock_start(X_TIMER_SETUP);
+  lambda_function();
+  //xopenme_clock_end(X_TIMER_SETUP);
+}
+
+template <typename L>
+void measure_prediction(L &&lambda_function) {
+  //xopenme_clock_start(X_TIMER_TEST);
+  lambda_function();
+  //xopenme_clock_end(X_TIMER_TEST);
+}
+
+//----------------------------------------------------------------------
+
+template <typename TData>
+class StaticBuffer {
+public:
+  StaticBuffer(int size, const std::string& dir): _size(size), _dir(dir) {
+    _buffer = new TData[size];
+  }
+
+  virtual ~StaticBuffer() {
+    delete[] _buffer;
+  }
+
+  TData* data() const { return _buffer; }
+  int size() const { return _size; }
+
+protected:
+  const int _size;
+  const std::string _dir;
+  TData* _buffer;
+};
+
+//----------------------------------------------------------------------
+
+class ImageData : public StaticBuffer<uint8_t> {
+public:
+  ImageData(const BenchmarkSettings* s): StaticBuffer<uint8_t>(
+    s->image_size * s->image_size * s->num_channels * (s->skip_internal_preprocessing ? sizeof(float) : sizeof(uint8_t)),
+    s->images_dir) {}
+
+  void load(const std::string& filename) {
+    auto path = _dir + '/' + filename;
+    std::ifstream file(path, std::ios::in | std::ios::binary);
+    if (!file) throw "Failed to open image data " + path;
+    file.read(reinterpret_cast<char*>(_buffer), _size);
+  }
+};
+
+//----------------------------------------------------------------------
+
+class ResultData : public StaticBuffer<float> {
+public:
+  ResultData(const BenchmarkSettings* s): StaticBuffer<float>(
+    s->num_classes, s->result_dir) {}
+
+  void save(const std::string& filename) {
+    auto path = _dir + '/' + filename + ".txt";
+    std::ofstream file(path);
+    if (!file) throw "Unable to create result file " + path;
+    for (int i = 0; i < _size; i++)
+      file << _buffer[i] << std::endl;
+  }
+};
+
+//----------------------------------------------------------------------
+
+class IBenchmark {
+public:
+  bool has_background_class = false;
+
+  virtual ~IBenchmark() {}
+  virtual void load_images(const std::vector<std::string>& batch_images) = 0;
+  virtual void save_results(const std::vector<std::string>& batch_images) = 0;
+};
+
+
+template <typename TData, typename TInConverter, typename TOutConverter>
+class Benchmark : public IBenchmark {
+public:
+  Benchmark(const BenchmarkSettings* settings, TData *in_ptr, TData *out_ptr) {
+    _in_ptr = in_ptr;
+    _out_ptr = out_ptr;
+    _in_data.reset(new ImageData(settings));
+    _out_data.reset(new ResultData(settings));
+    _in_converter.reset(new TInConverter(settings));
+    _out_converter.reset(new TOutConverter(settings));
+  }
+
+  void load_images(const std::vector<std::string>& batch_images) override {
+    int image_offset = 0;
+    for (auto image_file : batch_images) {
+      _in_data->load(image_file);
+      _in_converter->convert(_in_data.get(), _in_ptr + image_offset);
+      image_offset += _in_data->size();
+    }
+  }
+
+  void save_results(const std::vector<std::string>& batch_images) override {
+    int image_offset = 0;
+    int probe_offset = has_background_class ? 1 : 0;
+    for (auto image_file : batch_images) {
+      _out_converter->convert(_out_ptr + image_offset + probe_offset, _out_data.get());
+      _out_data->save(image_file);
+      image_offset += _out_data->size() + probe_offset;
+    }
+  }
+
+private:
+  TData* _in_ptr;
+  TData* _out_ptr;
+  std::unique_ptr<ImageData> _in_data;
+  std::unique_ptr<ResultData> _out_data;
+  std::unique_ptr<TInConverter> _in_converter;
+  std::unique_ptr<TOutConverter> _out_converter;
+};
+
+//----------------------------------------------------------------------
+
+class IinputConverter {
+public:
+  virtual ~IinputConverter() {}
+  virtual void convert(const ImageData* source, void* target) = 0;
+};
+
+//----------------------------------------------------------------------
+
+class InCopy : public IinputConverter {
+public:
+  InCopy(const BenchmarkSettings* s) {}
+
+  void convert(const ImageData* source, void* target) {
+    uint8_t *uint8_target = static_cast<uint8_t*>(target);
+    std::copy(source->data(), source->data() + source->size(), uint8_target);
+  }
+};
+
+//----------------------------------------------------------------------
+
+class InNormalize : public IinputConverter {
+public:
+  InNormalize(const BenchmarkSettings* s):
+    _normalize_img(s->normalize_img),
+    _subtract_mean(s->subtract_mean),
+    _given_channel_means(s->given_channel_means),
+    _num_channels(s->num_channels) {
+  }
+
+  void convert(const ImageData* source, void* target) {
+    // Copy image data to target
+    float *float_target = static_cast<float*>(target);
+    float sum = 0;
+    for (int i = 0; i < source->size(); i++) {
+      float px = source->data()[i];
+      if (_normalize_img)
+        px = (px / 255.0 - 0.5) * 2.0;
+      sum += px;
+      float_target[i] = px;
+    }
+    // Subtract mean value if required
+    if (_subtract_mean) {
+      if(_given_channel_means) {
+        for (int i = 0; i < source->size(); i++)
+          float_target[i] -= _given_channel_means[i % _num_channels]; // assuming NHWC order!
+      } else {
+        float mean = sum / static_cast<float>(source->size());
+        for (int i = 0; i < source->size(); i++)
+          float_target[i] -= mean;
+      }
+    }
+  }
+
+private:
+  const bool _normalize_img;
+  const bool _subtract_mean;
+  const float *_given_channel_means;
+  const int _num_channels;
+};
+
+//----------------------------------------------------------------------
+
+class OutCopy {
+public:
+  OutCopy(const BenchmarkSettings* s) {}
+
+  void convert(const float* source, ResultData* target) const {
+    std::copy(source, source + target->size(), target->data());
+  }
+};
+
+//----------------------------------------------------------------------
+
+class OutDequantize {
+public:
+  OutDequantize(const BenchmarkSettings* s) {}
+
+  void convert(const uint8_t* source, ResultData* target) const {
+    for (int i = 0; i < target->size(); i++)
+      target->data()[i] = source[i] / 255.0;
+  }
+};
+
+} // namespace CK
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/run.sh
new file mode 100644
index 000000000..b4a46853b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/run.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+${CM_CXX_COMPILER_WITH_PATH} -O3 ${CM_TMP_CURRENT_SCRIPT_PATH}/src/classification.cpp -o classification.exe -ltensorflow
+
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/src/classification.cpp b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/src/classification.cpp
new file mode 100644
index 000000000..a9ee5ee50
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tf-onnx-cpp/src/classification.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2018 cTuning foundation.
+ * See CK COPYRIGHT.txt for copyright details.
+ *
+ * See CK LICENSE for licensing details.
+ * See CK COPYRIGHT for copyright details.
+ */
+
+// TODO: this header should be moved to a common location (where?)
+#include "../include/benchmark.h"
+
+#include "tensorflow/core/public/session.h"
+#include "tensorflow/cc/framework/scope.h"
+
+using namespace std;
+using namespace CK;
+using namespace tensorflow;
+
+int main(int argc, char* argv[]) {
+  try {
+    init_benchmark();
+
+    BenchmarkSettings settings(MODEL_TYPE::TF_FROZEN);
+    BenchmarkSession session(&settings);
+    ImageData input_data(&settings);
+    ResultData result_data(&settings);
+    unique_ptr<IinputConverter> input_converter;
+    OutCopy result_converter(&settings);
+    unique_ptr<Session> tf_session;
+    GraphDef graph_def;
+
+    if (settings.skip_internal_preprocessing)
+      input_converter.reset(new InCopy(&settings));
+    else
+      input_converter.reset(new InNormalize(&settings));
+
+    // TODO: this option is for TF mobilenets, but generally should be evaluated
+    // from weights package somehow (supported number of classes in meta?)
+    // TODO: this problem is related to the absence of a knowledge about
+    // required image size for particular image recognition network package.
+    // TODO: We have to provide common set of parameters for all image-recognition packages.
+    const bool has_background_class = true;
+
+    cout << "\nLoading graph..." << endl;
+    measure_setup([&]{
+      Status status = ReadBinaryProto(Env::Default(), settings.graph_file(), &graph_def);
+      if (!status.ok())
+        throw "Failed to load graph: " + status.ToString();
+
+      tf_session.reset(NewSession(SessionOptions()));
+
+      status = tf_session->Create(graph_def);
+      if (!status.ok())
+        throw "Failed to create new session: " + status.ToString();
+    });
+
+    cout << "\nProcessing batches..." << endl;
+    measure_prediction([&]{
+      Tensor input(DT_FLOAT, TensorShape({settings.batch_size,
+                                          settings.image_size,
+                                          settings.image_size,
+                                          settings.num_channels}));
+      float* input_ptr = input.flat<float>().data();
+      vector<Tensor> outputs;
+
+      while (session.get_next_batch()) {
+        // Load batch
+        session.measure_begin();
+        int image_offset = 0;
+        for (auto image_file : session.batch_files()) {
+          input_data.load(image_file);
+          input_converter->convert(&input_data, input_ptr + image_offset);
+          image_offset += input_data.size();
+        }
+        session.measure_end_load_images();
+
+        // Classify current batch
+        session.measure_begin();
+        Status status = tf_session->Run(
+          {{settings.input_layer_name, input}}, {settings.output_layer_name}, {}, &outputs);
+        if (!status.ok())
+          throw "Running model failed: " + status.ToString();
+        session.measure_end_prediction();
+
+        // Process output tensor
+        auto output_flat = outputs[0].flat<float>();
+        if (output_flat.size() != settings.batch_size * (settings.num_classes + 1))
+          throw format("Output tensor has size of %d, but expected size is %d",
+                       output_flat.size(), settings.batch_size * (settings.num_classes + 1));
+        image_offset = 0;
+        int probe_offset = has_background_class ?
1 : 0; + for (auto image_file : session.batch_files()) { + result_converter.convert(output_flat.data() + image_offset + probe_offset, &result_data); + result_data.save(image_file); + image_offset += result_data.size() + probe_offset; + } + } + }); + + finish_benchmark(session); + } + catch (const string& error_message) { + cerr << "ERROR: " << error_message << endl; + return -1; + } + return 0; +} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/README-extra.md new file mode 100644 index 000000000..662888506 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/README-extra.md @@ -0,0 +1,16 @@ +# CPU + +## 20240129; Windows 11 + +```bash +cmr "get generic-python-lib _package.torch" --version=2.1.1 +cmr "get generic-python-lib _package.torchvision" --version=0.16.2 +``` + +# CUDA + +```bash +cm run script "install python-venv" --name=test +cm run script "python app image-classification pytorch _cuda" --adr.python.name=test +cm run script "python app image-classification pytorch _cuda" --adr.python.name=test --input=src/computer_mouse.jpg +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/README.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/README.md new file mode 100644 index 000000000..2d1b951fb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-torch-py](https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-torch-py) for the documentation of this CM script. 
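Both classifiers above handle the same quirk: some ResNet50 exports emit 1001 outputs, where class 0 is an extra background class. onnx_classify.py derives `bg_class_offset` from the model and label count, and the C++ apps apply the equivalent shift via `has_background_class`/`probe_offset`. Below is a toy illustration of that offset; it is not part of the patch, and the label list and sizes are hypothetical:

```python
# Toy illustration of the background-class offset; sizes are illustrative only.
import numpy as np

labels = ['label_%d' % i for i in range(1000)]   # hypothetical 1000-entry label list
model_classes = 1001                             # model also emits a background class 0
bg_class_offset = model_classes - len(labels)    # -> 1, i.e. skip one leading column

scores = np.random.rand(model_classes).astype(np.float32)
softmax_vector = scores[bg_class_offset:]        # now aligned with labels[0..999]
top5_indices = list(reversed(softmax_vector.argsort()))[:5]
for class_idx in top5_indices:
    print(class_idx, softmax_vector[class_idx], labels[class_idx])
```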
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/_cm.yaml
new file mode 100644
index 000000000..6684bb737
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/_cm.yaml
@@ -0,0 +1,46 @@
+alias: app-image-classification-torch-py
+automation_alias: script
+automation_uid: 5b4e0237da074764
+category: Modular AI/ML application pipeline
+default_env:
+  CM_BATCH_COUNT: '1'
+  CM_BATCH_SIZE: '1'
+deps:
+- tags: detect,os
+- names:
+  - python
+  - python3
+  tags: get,python3
+- tags: get,dataset,imagenet,image-classification,preprocessed
+- tags: get,dataset-aux,imagenet-aux,image-classification
+- tags: get,imagenet-helper
+- tags: get,ml-model,image-classification,resnet50,_pytorch,_fp32
+- skip_if_env:
+    USE_CUDA:
+    - 'yes'
+  tags: get,generic-python-lib,_torch
+- enable_if_env:
+    USE_CUDA:
+    - 'yes'
+  tags: get,generic-python-lib,_torch_cuda
+- skip_if_env:
+    USE_CUDA:
+    - 'yes'
+  tags: get,generic-python-lib,_torchvision
+- enable_if_env:
+    USE_CUDA:
+    - 'yes'
+  tags: get,generic-python-lib,_torchvision_cuda
+tags:
+- app
+- image-classification
+- torch
+- python
+tags_help: app image-classification python torch
+uid: e3986ae887b84ca8
+variations:
+  cuda:
+    deps:
+    - tags: get,cuda
+    env:
+      USE_CUDA: 'yes'
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/img/computer_mouse.jpg b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/img/computer_mouse.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e7f8abb6fe93d18af393ea036b24b907cc48e786
[GIT binary patch data for computer_mouse.jpg omitted]
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/requirements.txt b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/requirements.txt
new file mode 100644
index 000000000..d1c427e4a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/requirements.txt
@@ -0,0 +1,4 @@
+Pillow
+requests
+numpy
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/run.bat b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/run.bat
new file mode 100644
index 000000000..1415d4265
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/run.bat
@@ -0,0 +1,20 @@
+rem connect CM portable scripts with CK env
+
+set CM_ML_TORCH_MODEL_NAME=resnet50
+set CM_ML_MODEL_INPUT_DATA_TYPE=float32
+set
CM_ML_MODEL_IMAGE_HEIGHT=224 +set CM_ML_MODEL_IMAGE_WIDTH=224 + +rem set CM_DATASET_IMAGENET_PREPROCESSED_DIR=%CM_DATASET_PREPROCESSED_PATH% + +set CM_DATASET_IMAGENET_PREPROCESSED_DIR=%CM_DATASET_PREPROCESSED_FULL_PATH% +set CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT=%CM_DATASET_AUX_PATH%\synset_words.txt +set CM_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE=float32 +set CM_RESULTS_DIR=%CM_TMP_CURRENT_SCRIPT_PATH%\results +set ML_MODEL_DATA_LAYOUT=NCHW + +%CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\pytorch_classify_preprocessed.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/run.sh new file mode 100644 index 000000000..b50b79eb4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/run.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} + +# connect CM intelligent components with CK env +export CM_ML_TORCH_MODEL_NAME=resnet50 +export CM_ML_MODEL_INPUT_DATA_TYPE=float32 +export CM_ML_MODEL_IMAGE_HEIGHT=224 +export CM_ML_MODEL_IMAGE_WIDTH=224 +export CM_DATASET_IMAGENET_PREPROCESSED_DIR=${CM_DATASET_PREPROCESSED_FULL_PATH} +export CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT=${CM_DATASET_AUX_PATH}/synset_words.txt +export CM_DATASET_IMAGENET_PREPROCESSED_DATA_TYPE=float32 +export CM_RESULTS_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}/results +export ML_MODEL_DATA_LAYOUT=NCHW + +${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt +test $? -eq 0 || exit 1 + +${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/pytorch_classify_preprocessed.py +test $? 
-eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py new file mode 100644 index 000000000..863b3a651 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py @@ -0,0 +1,222 @@ +#!/usr/bin/env python3 + +import json +import time +import os +import shutil +import numpy as np + + +import torch +import torchvision.models as models + +import imagenet_helper +from imagenet_helper import ( + load_preprocessed_batch, + image_list, + class_labels, + BATCH_SIZE) + +# Writing the results out: +# +RESULTS_DIR = os.getenv('CM_RESULTS_DIR') +FULL_REPORT = os.getenv( + 'CM_SILENT_MODE', + '0') in ( + 'NO', + 'no', + 'OFF', + 'off', + '0') + +# Processing by batches: +# +BATCH_COUNT = int(os.getenv('CM_BATCH_COUNT', 1)) + +# Enabling GPU if available and not disabled: +# +USE_CUDA = (os.getenv('USE_CUDA', '').strip() == 'yes') + + +labels_path = os.environ['CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT'] + + +def load_labels(labels_filepath): + my_labels = [] + input_file = open(labels_filepath, 'r') + for l in input_file: + my_labels.append(l.strip()) + return my_labels + + +labels = load_labels(labels_path) + + +data_layout = os.environ['ML_MODEL_DATA_LAYOUT'] + + +def main(): + global BATCH_SIZE + global BATCH_COUNT + + setup_time_begin = time.time() + + bg_class_offset = 0 + + # Cleanup results directory + if os.path.isdir(RESULTS_DIR): + shutil.rmtree(RESULTS_DIR) + os.mkdir(RESULTS_DIR) + + # Load the [cached] Torch model + path_to_model_pth = os.environ['CM_ML_MODEL_FILE_WITH_PATH'] + + model = models.resnet50(pretrained=False) + model.load_state_dict(torch.load(path_to_model_pth)) + + model.eval() + + # move the model to GPU for speed if available + if USE_CUDA: + model.to('cuda') + + setup_time = time.time() - setup_time_begin + + # Run batched mode + test_time_begin = time.time() + image_index = 0 + total_load_time = 0 + total_classification_time = 0 + first_classification_time = 0 + images_loaded = 0 + + image_path = os.environ.get('CM_INPUT', '') + if image_path != '': + + normalize_data_bool = True + subtract_mean_bool = False + + from PIL import Image + + def load_and_resize_image(image_filepath, height, width): + pillow_img = Image.open(image_filepath).resize( + (width, height)) # sic! 
The order of dimensions in resize is (W,H) + + input_data = np.float32(pillow_img) + + # Normalize + if normalize_data_bool: + input_data = input_data / 127.5 - 1.0 + + # Subtract mean value + if subtract_mean_bool: + if len(given_channel_means): + input_data -= given_channel_means + else: + input_data -= np.mean(input_data) + + # print(np.array(pillow_img).shape) + nhwc_data = np.expand_dims(input_data, axis=0) + + if data_layout == 'NHWC': + # print(nhwc_data.shape) + return nhwc_data + else: + nchw_data = nhwc_data.transpose(0, 3, 1, 2) + # print(nchw_data.shape) + return nchw_data + + BATCH_COUNT = 1 + + for batch_index in range(BATCH_COUNT): + batch_number = batch_index + 1 + if FULL_REPORT or (batch_number % 10 == 0): + print("\nBatch {} of {}".format(batch_number, BATCH_COUNT)) + + begin_time = time.time() + + if image_path == '': + batch_data, image_index = load_preprocessed_batch( + image_list, image_index) + else: + batch_data = load_and_resize_image(image_path, 224, 224) + image_index = 1 + + torch_batch = torch.from_numpy(batch_data) + + load_time = time.time() - begin_time + total_load_time += load_time + images_loaded += BATCH_SIZE + if FULL_REPORT: + print("Batch loaded in %fs" % (load_time)) + + # Classify one batch + begin_time = time.time() + + # move the input to GPU for speed if available + if USE_CUDA: + torch_batch = torch_batch.to('cuda') + + with torch.no_grad(): + batch_results = model(torch_batch) + + classification_time = time.time() - begin_time + if FULL_REPORT: + print("Batch classified in %fs" % (classification_time)) + + total_classification_time += classification_time + # Remember first batch prediction time + if batch_index == 0: + first_classification_time = classification_time + + # Process results + for index_in_batch in range(BATCH_SIZE): + # skipping the background class on the left (if present) + softmax_vector = batch_results[index_in_batch][bg_class_offset:] + global_index = batch_index * BATCH_SIZE + index_in_batch + + res_file = os.path.join(RESULTS_DIR, image_list[global_index]) + + with open(res_file + '.txt', 'w') as f: + for prob in softmax_vector: + f.write('{}\n'.format(prob)) + + top5_indices = list(reversed(softmax_vector.argsort()))[:5] + for class_idx in top5_indices: + print( + "\t{}\t{}\t{}".format( + class_idx, + softmax_vector[class_idx], + labels[class_idx])) + print("") + + test_time = time.time() - test_time_begin + + if BATCH_COUNT > 1: + avg_classification_time = ( + total_classification_time - first_classification_time) / (images_loaded - BATCH_SIZE) + else: + avg_classification_time = total_classification_time / images_loaded + + avg_load_time = total_load_time / images_loaded + + # Store benchmarking results: + output_dict = { + 'setup_time_s': setup_time, + 'test_time_s': test_time, + 'images_load_time_total_s': total_load_time, + 'images_load_time_avg_s': avg_load_time, + 'prediction_time_total_s': total_classification_time, + 'prediction_time_avg_s': avg_classification_time, + + 'avg_time_ms': avg_classification_time * 1000, + 'avg_fps': 1.0 / avg_classification_time, + 'batch_time_ms': avg_classification_time * 1000 * BATCH_SIZE, + 'batch_size': BATCH_SIZE, + } + with open('tmp-ck-timer.json', 'w') as out_file: + json.dump(output_dict, out_file, indent=4, sort_keys=True) + + +if __name__ == '__main__': + main() diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/COPYRIGHT.md new file mode 100644 index 
000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/README-extra.md
new file mode 100644
index 000000000..c24e073a9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/README-extra.md
@@ -0,0 +1,16 @@
+Example 1:
+
+```bash
+cm run script "get llvm" --version=14.0.0
+cm run script "get tvm _llvm" --version=0.10.0
+cm run script "python app image-classification tvm-onnx"
+```
+
+Example 2:
+
+```bash
+cm run script "install python-venv" --name=test --version=3.10.7
+cm run script "get generic-python-lib _apache-tvm"
+cm run script "python app image-classification tvm-onnx _tvm-pip-install"
+cm run script "python app image-classification tvm-onnx _tvm-pip-install" --input=`cm find script --tags=python,app,image-classification,tvm-onnx`/img/computer_mouse.jpg
+```
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/README.md b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/README.md
new file mode 100644
index 000000000..317018ce0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tvm-onnx-py](https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tvm-onnx-py) for the documentation of this CM script.
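The CLI examples in the README-extra.md above can also be driven from Python through the `cmind` package that ships with CM. Below is a minimal sketch, not part of the patch itself: it assumes the tag set matches the `tags_help` entry in the `_cm.yaml` that follows, and it uses the `{'return': 0}` result convention seen throughout the customize.py scripts in this patch.

```python
import cmind

# Equivalent of: cm run script "python app image-classification tvm-onnx"
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'python,app,image-classification,tvm-onnx'})

# CM calls return a dict with a 'return' code (0 means success);
# cmind.error prints the embedded error message on failure.
if r['return'] > 0:
    cmind.error(r)
```

The `--input=...` flag from the second CLI example should map to an `'input'` key in the same dictionary, though the exact key mapping is an assumption here.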
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/_cm.yaml new file mode 100644 index 000000000..2b5cc9cca --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/_cm.yaml @@ -0,0 +1,48 @@ +alias: app-image-classification-tvm-onnx-py +uid: 63080407db4d4ac4 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: Modular AI/ML application pipeline + +default_env: + CM_BATCH_COUNT: '1' + CM_BATCH_SIZE: '1' + +deps: +- tags: detect,os +- tags: detect,cpu +- names: + - python + - python3 + tags: get,python3 +- tags: get,dataset,image-classification,original +- tags: get,dataset-aux,image-classification +- tags: get,raw,ml-model,image-classification,resnet50,_onnx +- tags: get,generic-python-lib,_onnxruntime +- names: + - tvm + tags: get,tvm + +tags: +- app +- image-classification +- tvm-onnx +- python + +tags_help: app image-classification python tvm-onnx + +variations: + cuda: + add_deps_recursive: + tvm: + tags: _cuda + deps: + - tags: get,cuda + env: + USE_CUDA: 'yes' + llvm: + add_deps_recursive: + tvm: + tags: _llvm diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/img/computer_mouse.jpg b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/img/computer_mouse.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7f8abb6fe93d18af393ea036b24b907cc48e786 GIT binary patch literal 41154 zcmce-XIxWVvp2kxkWfM|0YZy3LkT^U2pC$ZB3%dqL0V`cO(~Zcs&oNCy3#vH6%|mr zbOX{vR8&MjL_x%^&-S{n`<(l{&-wCvc{jhE_0Q~;*|TTO%$l`k^LzRC8o+5rG9>{J z2m~+z|A5~w#H)-Ve7ym{)KnH=0sw#o;DA8?M%Tb^oFHZf+w0)T4S@j=u>EtQA@IN3 zBoGVzgEK*l{ihEU#LD1r*}=9BJXJu<3%0kwlLz|We0(o6{LN>P-p zIsf7x0baT$oS|G_mN&iMy7ffy+JM}B1`B}mzSYZqGfFYN-LW?}!v;Df^d!P+1l zTJ^8~AQu4k9~k}*MuT+3f6y^iP(To*_5Zmqc>H;e2m}Cdfh`Mo{%<4lztI#xIXuDs zh2Z(8bpU1g)BZXB@cB2r3cSG|x{|7z=KuWs)APR%17#Hp3p0IXO@|Yhe_;b9W#bbd zD@IVp|JDx_9O*wY0~iRv`fm(=3jXpBUjVSt z(ge|e-|bH~(0?HR+!NFpSY8utY{qP0X_alNe_dh( zGr`2v9v1I+{t90AoR_vqm6H_913`^k&lRYbf)vYWY7o{of<+}F&T~uHGtR4b%=GG6 zD=CSl?pRM+y`#Fxq{3Vx1v`nD>R$YD`XYu6QOEd9sO0Q}+ZODIHBlZe~}g)_AAruSinUbMs_m^JnF1RBZH2 zkbGIT!yXp(PF8nJZAxAdyv+LTs>)AtwP$%9F;fvxKgl)NH!O72gjpqMg86EU^<2+> zybMGdoj`NqO~58~uqf zg5>5NTkUXGf}kCns@j*!GOETYpqwsTrQ9qNC^-q^DQgD7`~mr8@o2{NZRH)wyLtS_67W0(z~q4oR58kIDq1 z%6oCXaGF&j+e5w~maZW!rj_n`ldZ(ts|?6?DOTL=2?p6%L#(8U zIB$v#1~Hdl0E!2||1#rC=K>A9l}`?FYS{N0+chOwaP)cuufeZDcfj#e8Q|(og~{{C zdtEj`IYFmd720YXr8yG?$k2*+LSAp6FO4LJK>gNQhe8L5#mS7W!QGq+edWxzJCW>r z`7Ub$Ce@C_+v%FI?y${w$4oo>s#XGdA4$(07Wxw3VUqPFxn$v(WOK7@tRQ!lK9{T^ zgiD-$YsKLihABn=$XKpiFOZij8%0tRonup-C)!_Qy*a8fF6%Iq%kWfNPvuKy_8i@k zR$6Y--k;qYNRJ1Fj}5^r8?+CorNWpHqTrCacFK| z4e(l%j$+82-q|yalEvl4 zC@C5b&m_ujL=-;2GIh)VHyG!4MJxa;SWblU*b0q9jAHX=kxz zdZRD*VU}S(jc&8QYkqRi+@AUs zONE0M=%U%Ept!OQ$NM-(g0Xbg zEhJjR1V-xi$(M_we?+Fg+=9b1Xu{(~r+6D1(Rl7U;e)I|@ds2BFV>p1_Y6T3p@q_( z=N1r*+;0}{{JJXE^YxPN9M7UJF$tbnwlRIjk9ie$@+$E=&O!JjAY!<<7&(A^Jn8C z4$X7Ve0iul?D3H$)0e-e{pZGJ#ZTZH*;#kG?MLA7c+y~8r=A?C@bw8oN91-4H0n3N z?y05{(2&968z?uG@i64C!kMS80z-6ODytT<9>Bx~8a#bZvY^Y3N^ zlZ&LWve9q>jj@t$eTF)8R98)Pj?d-_ za?eR|iT{aXMmUu}jkUSOBj&0}2Ay&kl4@^yE=O*mJaOor1ed+9O$}bPtrcp0$}h}U zhuk7l!DcZP0KC_>&D0!hiB3Cx6E}a(@0-0{kjqU<;5X*8nyrZnd#B6n7 zhVH(y7(-_)3NVq8kPK#eR@Hv7`Y`{Q26s!tqUTdbKigH2S54WT`Z6k9ed=>M4}!n{ zCCPl(=&8hRvC&g6hrT(Swe-N8oV6$VufaZH6w56|&K2ye{!TPRH+)8w@O3mec}mXB z-r?}nhnspIP}gg>Y&A3ky4iAV{B^P4Ca(QJj{S9VOH-F^h!XsGbI`-J2&Z?>G1@Tx 
[... 41154 bytes of base85-encoded image data omitted ...]

literal 0
HcmV?d00001

diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/requirements.txt b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/requirements.txt
new file mode 100644
index 000000000..ae4aff7ea
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/requirements.txt
@@ -0,0 +1,7 @@
+matplotlib
+opencv-python
+scipy
+onnx
+decorator
+attrs
+psutil
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/run.sh
new file mode 100644
index 000000000..8eb066077
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/run.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD}
+
+#if [[
${CM_HOST_PLATFORM_FLAVOR} == "arm64" ]]; then +# ${CM_PYTHON_BIN} -m pip install -i https://test.pypi.org/simple/ onnxruntime==1.9.0.dev174552 +#fi + +export USE_TVM=yes + + +wget -nc https://raw.githubusercontent.com/mlcommons/ck-mlops/main/program/ml-task-image-classification-tvm-onnx-cpu/synset.txt +test $? -eq 0 || exit 1 + +${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt +test $? -eq 0 || exit 1 + +if [[ "${CM_INPUT}" != "" ]]; then + export CM_IMAGE=${CM_INPUT} +else + export CM_IMAGE=${CM_DATASET_PATH}/ILSVRC2012_val_00000001.JPEG +fi + + +${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/classify.py --image ${CM_IMAGE} +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/src/classify.py b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/src/classify.py new file mode 100644 index 000000000..20c164288 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-classification-tvm-onnx-py/src/classify.py @@ -0,0 +1,302 @@ +""" +Developers: + - grigori@octoml.ai +""" + +import time +import os +import argparse +import json + +from PIL import Image +import cv2 + +import numpy as np + +import onnxruntime as rt + + +# Image conversion from MLPerf(tm) vision +def center_crop(img, out_height, out_width): + height, width, _ = img.shape + left = int((width - out_width) / 2) + right = int((width + out_width) / 2) + top = int((height - out_height) / 2) + bottom = int((height + out_height) / 2) + img = img[top:bottom, left:right] + return img + + +def resize_with_aspectratio( + img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR): + height, width, _ = img.shape + new_height = int(100. * out_height / scale) + new_width = int(100. * out_width / scale) + if height > width: + w = new_width + h = int(new_height * height / width) + else: + h = new_height + w = int(new_width * width / height) + img = cv2.resize(img, (w, h), interpolation=inter_pol) + return img + + +# returns list of pairs (prob, class_index) +def get_top5(all_probs): + probs_with_classes = [] + + for class_index in range(len(all_probs)): + prob = all_probs[class_index] + probs_with_classes.append((prob, class_index)) + + sorted_probs = sorted( + probs_with_classes, + key=lambda pair: pair[0], + reverse=True) + return sorted_probs[0:5] + + +def run_case(dtype, image, target): + # Check image + import os + import json + import sys + + STAT_REPEAT = os.environ.get('STAT_REPEAT', '') + if STAT_REPEAT == '' or STAT_REPEAT is None: + STAT_REPEAT = 10 + STAT_REPEAT = int(STAT_REPEAT) + + # FGG: set model files via CM env + CATEG_FILE = 'synset.txt' + synset = eval(open(os.path.join(CATEG_FILE)).read()) + + files = [] + val = {} + + # FGG: set timers + import time + timers = {} + + img_orig = cv2.imread(image) + + img = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB) + + output_height, output_width, _ = 224, 224, 3 + img = resize_with_aspectratio( + img, + output_height, + output_width, + inter_pol=cv2.INTER_AREA) + img = center_crop(img, output_height, output_width) + img = np.asarray(img, dtype='float32') + + # normalize image + means = np.array([123.68, 116.78, 103.94], dtype=np.float32) + img -= means + + # transpose if needed + img = img.transpose([2, 0, 1]) + + import matplotlib.pyplot as plt + img1 = img.transpose([1, 2, 0]) + # you can give axis attribute if you wanna squeeze in specific dimension + arr_ = np.squeeze(img1) + plt.imshow(arr_) +# plt.show() + plt.savefig('pre-processed-image.png') + # Load model + 
model_path = os.environ.get('CM_ML_MODEL_FILE_WITH_PATH', '') + if model_path == '': + print('Error: environment variable CM_ML_MODEL_FILE_WITH_PATH is not defined') + exit(1) + + opt = rt.SessionOptions() + + if len(rt.get_all_providers()) > 1 and os.environ.get( + "USE_CUDA", "yes").lower() not in ["0", "false", "off", "no"]: + # Currently considering only CUDAExecutionProvider + sess = rt.InferenceSession( + model_path, opt, providers=['CUDAExecutionProvider']) + else: + sess = rt.InferenceSession( + model_path, opt, providers=["CPUExecutionProvider"]) + + inputs = [meta.name for meta in sess.get_inputs()] + outputs = [meta.name for meta in sess.get_outputs()] + + print(inputs) + print(outputs) + + if os.environ.get('USE_TVM', '') == 'yes': + import tvm + from tvm import relay + import onnx + + del sess + + # Load model via ONNX to be used with TVM + print('') + print('ONNX: load model ...') + print('') + + onnx_model = onnx.load(model_path) + + # Init TVM + # TBD: add tvm platform selector + if os.environ.get('USE_CUDA', '') == 'yes': + # TVM package must be built with CUDA enabled + ctx = tvm.cuda(0) + else: + ctx = tvm.cpu(0) + tvm_ctx = ctx + + build_conf = {'relay.backend.use_auto_scheduler': False} + opt_lvl = int(os.environ.get('TVM_OPT_LEVEL', 3)) + host = os.environ.get('CM_HOST_PLATFORM_FLAVOR') + if host == 'x86_64' and 'AMD' in os.environ.get( + 'CM_HOST_CPU_VENDOR_ID', ''): + target = os.environ.get('TVM_TARGET', 'llvm -mcpu=znver2') + else: + target = os.environ.get('TVM_TARGET', 'llvm') + + target_host = None + params = {} + + # New target API + tvm_target = tvm.target.Target(target, host=target_host) + + input_shape = (1, 3, 224, 224) + shape_dict = {inputs[0]: input_shape} + + print('') + print('TVM: import model ...') + print('') + # Extra param: opset=12 + mod, params = relay.frontend.from_onnx( + onnx_model, shape_dict, freeze_params=True) + + print('') + print('TVM: transform to static ...') + print('') + mod = relay.transform.DynamicToStatic()(mod) + + print('') + print('TVM: apply extra optimizations ...') + print('') + # Padding optimization + # Adds extra optimizations + mod = relay.transform.FoldExplicitPadding()(mod) + + print('') + print('TVM: build model ...') + print('') + + executor = os.environ.get('MLPERF_TVM_EXECUTOR', 'graph') + + if executor == "graph" or executor == "debug": + from tvm.contrib import graph_executor + + # Without history + with tvm.transform.PassContext(opt_level=opt_lvl, config=build_conf): + graph_module = relay.build(mod, + target=tvm_target, + params=params) + lib = graph_module + + print('') + print('TVM: init graph engine ...') + print('') + + sess = graph_executor.GraphModule(lib['default'](ctx)) + + elif executor == "vm": + from tvm.runtime.vm import VirtualMachine + + # Without history + with tvm.transform.PassContext(opt_level=opt_lvl, config=build_conf): + vm_exec = relay.vm.compile( + mod, target=tvm_target, params=params) + + r_exec = vm_exec + + print('') + print('TVM: init VM ...') + print('') + + sess = VirtualMachine(r_exec, ctx) + + # For now only graph + sess.set_input(inputs[0], tvm.nd.array([img])) + + # Run TVM inference + sess.run() + + # Process TVM outputs + output = [] + + for i in range(sess.get_num_outputs()): + # Take only the output of batch size for dynamic batches + if len(output) < (i + 1): + output.append([]) + output[i].append(sess.get_output(i).asnumpy()[0]) + + else: + inp = {inputs[0]: np.array([img], dtype=np.float32)} + output = sess.run(outputs, inp) + + top1 = np.argmax(output[1]) - 1 # 
.asnumpy())
+    # the model emits 1001 classes (index 0 is background), hence the -1
+    # shift when indexing the 1000-entry synset list
+
+    top5 = []
+    atop5 = get_top5(output[1][0])  # .asnumpy())
+
+    print('')
+    print('Prediction Top1:', top1, synset[top1])
+
+    print('')
+    print('Prediction Top5:')
+    for p in atop5:
+        out = p[1] - 1
+        name = synset[out]
+        print(' * {} {}'.format(out, name))
+
+    ck_results = {
+        'prediction': synset[top1]
+    }
+
+    with open('tmp-ck-timer.json', 'w') as ck_results_file:
+        json.dump(ck_results, ck_results_file, indent=2, sort_keys=True)
+
+    return
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '--image',
+        type=str,
+        help="Path to JPEG image.",
+        default=None,
+        required=True)
+    parser.add_argument('--target', type=str, help="Target", default=None)
+    args = parser.parse_args()
+
+    if args.image.strip().lower() == '':
+        print('Please specify path to an image using CM_IMAGE environment variable!')
+        exit(1)
+
+    # set parameters
+    batch_size = 1
+    num_classes = 1000
+    image_shape = (3, 224, 224)
+
+    # model shapes
+    data_shape = (batch_size,) + image_shape
+    out_shape = (batch_size, num_classes)
+
+    dtype = 'float32'
+    if os.environ.get('CM_TVM_DTYPE', '') != '':
+        dtype = os.environ['CM_TVM_DTYPE']
+
+    run_case(dtype, args.image, args.target)
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/README-extra.md
new file mode 100644
index 000000000..19fe90edb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/README-extra.md
@@ -0,0 +1,32 @@
+# Examples
+
+First download images:
+
+```bash
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/data.pgm --ssl-verify=no --md5sum=0af279e557a8de252d7ff0751a999379
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse.jpg --ssl-verify=no --md5sum=45ae5c940233892c2f860efdf0b66e7e
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse2.jpg --ssl-verify=no --md5sum=e7e2050b41e0b85cedca3ca87ab55390
+cmr "download file _wget" --url=https://cKnowledge.org/ai/data/computer_mouse2.pgm --ssl-verify=no --md5sum=a4e48556d3eb09402bfc98e375b41311
+```
+
+Then run the app:
+
+```bash
+cm run script "app image corner-detection"
+cm run script "app image corner-detection" --add_deps_recursive.compiler.tags=llvm
+cm run script "app image corner-detection" --add_deps_recursive.compiler.tags=gcc
+cm run script "app image corner-detection" --add_deps_recursive.compiler.tags=llvm --add_deps_recursive.compiler.version_min=11.0.0 --add_deps_recursive.compiler.version_max=13.0.0
+```
+
+## Reproducibility matrix
+
+* Ubuntu 22.04; x64; LLVM 17.0.6
+* Windows 11; x64; LLVM 17.0.6
+
+## Debugging scripts without CM
+
+```bash
+cmr "app image corner-detection" --debug_script_tags=compile,cpp-program
+cmr "app image corner-detection" --debug_script_tags=benchmark,program
+```
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/README.md b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/README.md
new file mode 100644
index 000000000..2697a585c
--- 
/dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-application-pipeline/app-image-corner-detection](https://docs.mlcommons.org/cm4mlops/scripts/Modular-application-pipeline/app-image-corner-detection) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/_cm.yaml new file mode 100644 index 000000000..1fd27d9b6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/_cm.yaml @@ -0,0 +1,32 @@ +alias: app-image-corner-detection +uid: 998ffee0bc534d0a + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: Modular application pipeline + +deps: +- tags: detect,os + +- tags: detect,cpu + +- tags: download,file,_url.https://cKnowledge.org/ai/data/data.pgm + md5sum: 0af279e557a8de252d7ff0751a999379 + force_cache: false + + +posthook_deps: +- skip_if_env: + CM_SKIP_COMPILE: + - 'on' + tags: compile,cpp-program +- skip_if_env: + CM_SKIP_RUN: + - 'on' + tags: benchmark-program + +tags: +- app +- image +- corner-detection diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/customize.py new file mode 100644 index 000000000..962f0de43 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/customize.py @@ -0,0 +1,54 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + os_info = i['os_info'] + + env = i['env'] + script_path = i['run_script_input']['path'] + + env["CM_SOURCE_FOLDER_PATH"] = script_path + env['CM_C_SOURCE_FILES'] = "susan.c" + + if 'CM_INPUT' not in env: + env['CM_INPUT'] = os.path.join(script_path, 'data.pgm') + + if 'CM_OUTPUT' not in env: + env['CM_OUTPUT'] = 'output_image_with_corners.pgm' + + if 'CM_RUN_DIR' not in env: + output_path = os.path.join(script_path, "output") + if output_path != '' and not os.path.isdir(output_path): + os.makedirs(output_path) + + env['CM_RUN_DIR'] = output_path + + env['CM_RUN_SUFFIX'] = env['CM_INPUT'] + ' ' + env['CM_OUTPUT'] + ' -c' + + if os_info['platform'] == 'windows': + env['CM_BIN_NAME'] = 'image-corner.exe' + else: + env['CM_BIN_NAME'] = 'image-corner' + env['+ LDCFLAGS'] = ["-lm"] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + print(env['CM_OUTPUT'] + " generated in " + env['CM_RUN_DIR']) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/run.sh new file mode 100644 index 000000000..30cfbdd00 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +CUR=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} +mkdir -p $CUR"/output" + +test $? 
-eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/susan.c b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/susan.c new file mode 100644 index 000000000..8a41d9a22 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-image-corner-detection/susan.c @@ -0,0 +1,2161 @@ +/* {{{ Copyright etc. */ + +/**********************************************************************\ + + SUSAN Version 2l by Stephen Smith + Oxford Centre for Functional Magnetic Resonance Imaging of the Brain, + Department of Clinical Neurology, Oxford University, Oxford, UK + (Previously in Computer Vision and Image Processing Group - now + Computer Vision and Electro Optics Group - DERA Chertsey, UK) + Email: steve@fmrib.ox.ac.uk + WWW: http://www.fmrib.ox.ac.uk/~steve + + (C) Crown Copyright (1995-1999), Defence Evaluation and Research Agency, + Farnborough, Hampshire, GU14 6TD, UK + DERA WWW site: + http://www.dera.gov.uk/ + DERA Computer Vision and Electro Optics Group WWW site: + http://www.dera.gov.uk/imageprocessing/dera/group_home.html + DERA Computer Vision and Electro Optics Group point of contact: + Dr. John Savage, jtsavage@dera.gov.uk, +44 1344 633203 + + A UK patent has been granted: "Method for digitally processing + images to determine the position of edges and/or corners therein for + guidance of unmanned vehicle", UK Patent 2272285. Proprietor: + Secretary of State for Defence, UK. 15 January 1997 + + This code is issued for research purposes only and remains the + property of the UK Secretary of State for Defence. This code must + not be passed on without this header information being kept + intact. This code must not be sold. + +\**********************************************************************/ + +/* }}} */ +/* {{{ Readme First */ + +/**********************************************************************\ + + SUSAN Version 2l + SUSAN = Smallest Univalue Segment Assimilating Nucleus + + Email: steve@fmrib.ox.ac.uk + WWW: http://www.fmrib.ox.ac.uk/~steve + + Related paper: + @article{Smith97, + author = "Smith, S.M. and Brady, J.M.", + title = "{SUSAN} - A New Approach to Low Level Image Processing", + journal = "Int. Journal of Computer Vision", + pages = "45--78", + volume = "23", + number = "1", + month = "May", + year = 1997} + + To be registered for automatic (bug) updates of SUSAN, send an email. + + Compile with: + gcc -O4 -o susan susan2l.c -lm + + See following section for different machine information. Please + report any bugs (and fixes). There are a few optional changes that + can be made in the "defines" section which follows shortly. + + Usage: type "susan" to get usage. Only PGM format files can be input + and output. Utilities such as the netpbm package and XV can be used + to convert to and from other formats. Any size of image can be + processed. + + This code is written using an emacs folding mode, making moving + around the different sections very easy. This is why there are + various marks within comments and why comments are indented. + + + SUSAN QUICK: + + This version of the SUSAN corner finder does not do all the + false-corner suppression and thus is faster and produced some false + positives, particularly on strong edges. However, because there are + less stages involving thresholds etc., the corners that are + correctly reported are usually more stable than those reported with + the full algorithm. Thus I recommend at least TRYING this algorithm + for applications where stability is important, e.g., tracking. 
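+
+  EXAMPLE USAGE:
+
+  With illustrative file names, "susan input.pgm output.pgm -c -t 20"
+  reads input.pgm, finds corners with the brightness threshold at its
+  default of 20 and writes the image with the corners marked to
+  output.pgm.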
+ + THRESHOLDS: + + There are two thresholds which can be set at run-time. These are the + brightness threshold (t) and the distance threshold (d). + + SPATIAL CONTROL: d + + In SUSAN smoothing d controls the size of the Gaussian mask; its + default is 4.0. Increasing d gives more smoothing. In edge finding, + a fixed flat mask is used, either 37 pixels arranged in a "circle" + (default), or a 3 by 3 mask which gives finer detail. In corner + finding, only the larger 37 pixel mask is used; d is not + variable. In smoothing, the flat 3 by 3 mask can be used instead of + a larger Gaussian mask; this gives low smoothing and fast operation. + + BRIGHTNESS CONTROL: t + + In all three algorithms, t can be varied (default=20); this is the + main threshold to be varied. It determines the maximum difference in + greylevels between two pixels which allows them to be considered + part of the same "region" in the image. Thus it can be reduced to + give more edges or corners, i.e. to be more sensitive, and vice + versa. In smoothing, reducing t gives less smoothing, and vice + versa. Set t=10 for the test image available from the SUSAN web + page. + + ITERATIONS: + + With SUSAN smoothing, more smoothing can also be obtained by + iterating the algorithm several times. This has a different effect + from varying d or t. + + FIXED MASKS: + + 37 pixel mask: ooo 3 by 3 mask: ooo + ooooo ooo + ooooooo ooo + ooooooo + ooooooo + ooooo + ooo + + CORNER ATTRIBUTES dx, dy and I + (Only read this if you are interested in the C implementation or in + using corner attributes, e.g., for corner matching) + + Corners reported in the corner list have attributes associated with + them as well as positions. This is useful, for example, when + attempting to match corners from one image to another, as these + attributes can often be fairly unchanged between images. The + attributes are dx, dy and I. I is the value of image brightness at + the position of the corner. In the case of susan_corners_quick, dx + and dy are the first order derivatives (differentials) of the image + brightness in the x and y directions respectively, at the position + of the corner. In the case of normal susan corner finding, dx and dy + are scaled versions of the position of the centre of gravity of the + USAN with respect to the centre pixel (nucleus). + + BRIGHTNESS FUNCTION LUT IMPLEMENTATION: + (Only read this if you are interested in the C implementation) + + The SUSAN brightness function is implemented as a LUT + (Look-Up-Table) for speed. The resulting pointer-based code is a + little hard to follow, so here is a brief explanation. In + setup_brightness_lut() the LUT is setup. This mallocs enough space + for *bp and then repositions the pointer to the centre of the + malloced space. The SUSAN function e^-(x^6) or e^-(x^2) is + calculated and converted to a uchar in the range 0-100, for all + possible image brightness differences (including negative + ones). Thus bp[23] is the output for a brightness difference of 23 + greylevels. In the SUSAN algorithms this LUT is used as follows: + + p=in + (i-3)*x_size + j - 1; + p points to the first image pixel in the circular mask surrounding + point (x,y). + + cp=bp + in[i*x_size+j]; + cp points to a position in the LUT corresponding to the brightness + of the centre pixel (x,y). 
+ + now for every pixel within the mask surrounding (x,y), + n+=*(cp-*p++); + the brightness difference function is found by moving the cp pointer + down by an amount equal to the value of the pixel pointed to by p, + thus subtracting the two brightness values and performing the + exponential function. This value is added to n, the running USAN + area. + + in SUSAN smoothing, the variable height mask is implemented by + multiplying the above by the moving mask pointer, reset for each new + centre pixel. + tmp = *dpt++ * *(cp-brightness); + +\**********************************************************************/ + +/* }}} */ +/* {{{ Machine Information */ + +/**********************************************************************\ + + Success has been reported with the following: + + MACHINE OS COMPILER + + Sun 4.1.4 bundled C, gcc + + Next + + SGI IRIX SGI cc + + DEC Unix V3.2+ + + IBM RISC AIX gcc + + PC Borland 5.0 + + PC Linux gcc-2.6.3 + + PC Win32 Visual C++ 4.0 (Console Application) + + PC Win95 Visual C++ 5.0 (Console Application) + Thanks to Niu Yongsheng : + Use the FOPENB option below + + PC DOS djgpp gnu C + Thanks to Mark Pettovello : + Use the FOPENB option below + + HP HP-UX bundled cc + Thanks to Brian Dixon : + in ksh: + export CCOPTS="-Aa -D_HPUX_SOURCE | -lM" + cc -O3 -o susan susan2l.c + +\**********************************************************************/ + +/* }}} */ +/* {{{ History */ + +/**********************************************************************\ + + SUSAN Version 2l, 12/2/99 + Changed GNUDOS option to FOPENB. + (Thanks to Niu Yongsheng .) + Took out redundant "sq=sq/2;". + + SUSAN Version 2k, 19/8/98: + In corner finding: + Changed if(yyx_size) etc. tests in smoothing. + Added a couple of free() calls for cgx and cgy. + (Thanks to geoffb@ucs.ed.ac.uk - Geoff Browitt.) + + SUSAN Version 2i, 21/7/97: + Added information about corner attributes. + + SUSAN Version 2h, 16/12/96: + Added principle (initial enhancement) option. + + SUSAN Version 2g, 2/7/96: + Minor superficial changes to code. + + SUSAN Version 2f, 16/1/96: + Added GNUDOS option (now called FOPENB; see options below). + + SUSAN Version 2e, 9/1/96: + Added -b option. + Fixed 1 pixel horizontal offset error for drawing edges. + + SUSAN Version 2d, 27/11/95: + Fixed loading of certain PGM files in get_image (again!) + + SUSAN Version 2c, 22/11/95: + Fixed loading of certain PGM files in get_image. + (Thanks to qu@San-Jose.ate.slb.com - Gongyuan Qu.) + + SUSAN Version 2b, 9/11/95: + removed "z==" error in edges routines. + + SUSAN Version 2a, 6/11/95: + Removed a few unnecessary variable declarations. + Added different machine information. + Changed "header" in get_image to char. + + SUSAN Version 2, 1/11/95: first combined version able to take any + image sizes. + + SUSAN "Versions 1", circa 1992: the various SUSAN algorithms were + developed during my doctorate within different programs and for + fixed image sizes. The algorithms themselves are virtually unaltered + between "versions 1" and the combined program, version 2. 
+
+\**********************************************************************/
+
+/* }}} */
+/* {{{ defines, includes and typedefs */
+
+/* ********** Optional settings */
+
+#ifndef PPC
+typedef int TOTAL_TYPE; /* this is faster for "int" but should be "float" for large d masks */
+#else
+typedef float TOTAL_TYPE; /* for my PowerPC accelerator only */
+#endif
+
+/*#define FOPENB*/ /* uncomment if using djgpp gnu C for DOS or certain Win95 compilers */
+#define SEVEN_SUPP /* size for non-max corner suppression; SEVEN_SUPP or FIVE_SUPP */
+#define MAX_CORNERS 15000 /* max corners per frame */
+
+/* ********** Leave the rest - but you may need to remove one or both of sys/file.h and malloc.h lines */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#define exit_error(IFB,IFC) { fprintf(stderr,IFB,IFC); exit(0); }
+#define FTOI(a) ( (a) < 0 ? ((int)(a-0.5)) : ((int)(a+0.5)) )
+typedef unsigned char uchar;
+typedef struct {int x,y,info, dx, dy, I;} CORNER_LIST[MAX_CORNERS];
+
+/* }}} */
+/* {{{ usage() */
+
+#ifdef OPENME
+#include <openme.h>
+#endif
+#ifdef XOPENME
+#include <xopenme.h>
+#endif
+
+void usage(void)
+{
+  printf("Usage: susan <in.pgm> <out.pgm> [options]\n\n");
+
+  printf("-s : Smoothing mode (default)\n");
+  printf("-e : Edges mode\n");
+  printf("-c : Corners mode\n\n");
+
+  printf("See source code for more information about setting the thresholds\n");
+  printf("-t <thresh> : Brightness threshold, all modes (default=20)\n");
+  printf("-d <thresh> : Distance threshold, smoothing mode, (default=4) (use next option instead for flat 3x3 mask)\n");
+  printf("-3 : Use flat 3x3 mask, edges or smoothing mode\n");
+  printf("-n : No post-processing on the binary edge map (runs much faster); edges mode\n");
+  printf("-q : Use faster (and usually stabler) corner mode; edge-like corner suppression not carried out; corners mode\n");
+  printf("-b : Mark corners/edges with single black points instead of black with white border; corners or edges mode\n");
+  printf("-p : Output initial enhancement image only; corners or edges mode (default is edges mode)\n");
+
+  printf("\nSUSAN Version 2l (C) 1995-1997 Stephen Smith, DRA UK. 
steve@fmrib.ox.ac.uk\n"); + + exit(0); +} + +/* }}} */ +/* {{{ get_image(filename,in,x_size,y_size) */ + +/* {{{ int getint(fp) derived from XV */ + +int getint(FILE* fd) +{ + int c, i; + char dummy[10000]; + + c = getc(fd); + while (1) /* find next integer */ + { + if (c=='#') /* if we're at a comment, read to end of line */ + fgets(dummy,9000,fd); + if (c==EOF) + exit_error("Image %s not binary PGM.\n","is"); + if (c>='0' && c<='9') + break; /* found what we were looking for */ + c = getc(fd); + } + + /* we're at the start of a number, continue until we hit a non-number */ + i = 0; + while (1) { + i = (i*10) + (c - '0'); + c = getc(fd); + if (c==EOF) return (i); + if (c<'0' || c>'9') break; + } + + return (i); +} + +/* }}} */ + +void get_image(char filename[200], unsigned char** in, int* x_size, int* y_size) +{ +FILE *fd; +char header [100]; +int tmp; + +#ifdef FOPENB + if ((fd=fopen(filename,"rb")) == NULL) +#else + if ((fd=fopen(filename,"r")) == NULL) +#endif + exit_error("Can't input image %s.\n",filename); + + /* {{{ read header */ + + header[0]=fgetc(fd); + header[1]=fgetc(fd); + if(!(header[0]=='P' && header[1]=='5')) + exit_error("Image %s does not have binary PGM header.\n",filename); + + *x_size = getint(fd); + *y_size = getint(fd); + tmp = getint(fd); + +/* }}} */ + + *in = (uchar *) malloc(*x_size * *y_size); + + if (fread(*in,1,*x_size * *y_size,fd) == 0) + exit_error("Image %s is wrong size.\n",filename); + + fclose(fd); +} + +/* }}} */ +/* {{{ put_image(filename,in,x_size,y_size) */ + +void put_image(char filename[100], char* in, int x_size, int y_size) +{ +FILE *fd; + +#ifdef FOPENB + if ((fd=fopen(filename,"wb")) == NULL) +#else + if ((fd=fopen(filename,"w")) == NULL) +#endif + exit_error("Can't output image%s.\n",filename); + + fprintf(fd,"P5\n"); + fprintf(fd,"%d %d\n",x_size,y_size); + fprintf(fd,"255\n"); + + if (fwrite(in,x_size*y_size,1,fd) != 1) + exit_error("Can't write image %s.\n",filename); + + fclose(fd); +} + +/* }}} */ +/* {{{ int_to_uchar(r,in,size) */ + +void int_to_uchar(int* r, uchar* in, int size) +{ +int i, + max_r=r[0], + min_r=r[0]; + + for (i=0; i max_r ) + max_r=r[i]; + if ( r[i] < min_r ) + min_r=r[i]; + } + + /*printf("min=%d max=%d\n",min_r,max_r);*/ + + max_r-=min_r; + + for (i=0; ip[l+1]) + { + tmp=p[l]; p[l]=p[l+1]; p[l+1]=tmp; + } + + return( (p[3]+p[4]) / 2 ); +} + +/* }}} */ +/* {{{ enlarge(in,tmp_image,x_size,y_size,border) */ + +/* this enlarges "in" so that borders can be dealt with easily */ + +void enlarge(uchar** in, uchar* tmp_image, int* x_size, int* y_size, int border) +{ +int i, j; + + for(i=0; i<*y_size; i++) /* copy *in into tmp_image */ + memcpy(tmp_image+(i+border)*(*x_size+2*border)+border, *in+i* *x_size, *x_size); + + for(i=0; i15) && (total==0) ) + { + printf("Distance_thresh (%f) too big for integer arithmetic.\n",dt); + printf("Either reduce it to <=15 or recompile with variable \"total\"\n"); + printf("as a float: see top \"defines\" section.\n"); + exit(0); + } + + if ( (2*mask_size+1>x_size) || (2*mask_size+1>y_size) ) + { + printf("Mask size (1.5*distance_thresh+1=%d) too big for image (%dx%d).\n",mask_size,x_size,y_size); + exit(0); + } + + tmp_image = (uchar *) malloc( (x_size+mask_size*2) * (y_size+mask_size*2) ); + enlarge(&in,tmp_image,&x_size,&y_size,mask_size); + +/* }}} */ + + if (three_by_three==0) + { /* large Gaussian masks */ + /* {{{ setup distance lut */ + + n_max = (mask_size*2) + 1; + + increment = x_size - n_max; + + dp = (unsigned char *)malloc(n_max*n_max); + dpt = dp; + temp = -(dt*dt); + + 
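/* each LUT entry stores 100*exp(-(i*i+j*j)/(dt*dt)) quantized to an
+     unsigned char, i.e. the Gaussian weight of offset (i,j) in the mask */
+  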
for(i=-mask_size; i<=mask_size; i++) + for(j=-mask_size; j<=mask_size; j++) + { + x = (int) (100.0 * exp( ((float)((i*i)+(j*j))) / temp )); + *dpt++ = (unsigned char)x; + } + +/* }}} */ + /* {{{ main section */ + + for (i=mask_size;im) { m=l[y+y+y+x]; a=y; b=x; } + + if (m>0) + { + if (mid[i*x_size+j]<4) + mid[(i+a-1)*x_size+j+b-1] = 4; + else + mid[(i+a-1)*x_size+j+b-1] = mid[i*x_size+j]+1; + if ( (a+a+b) < 3 ) /* need to jump back in image */ + { + i+=a-1; + j+=b-2; + if (i<4) i=4; + if (j<4) j=4; + } + } + } + +/* }}} */ + /* {{{ n==2 */ + + if (n==2) + { + /* put in a bit here to straighten edges */ + b00 = mid[(i-1)*x_size+j-1]<8; /* corners of 3x3 */ + b02 = mid[(i-1)*x_size+j+1]<8; + b20 = mid[(i+1)*x_size+j-1]<8; + b22 = mid[(i+1)*x_size+j+1]<8; + if ( ((b00+b02+b20+b22)==2) && ((b00|b22)&(b02|b20))) + { /* case: move a point back into line. + e.g. X O X CAN become X X X + O X O O O O + O O O O O O */ + if (b00) + { + if (b02) { x=0; y=-1; } + else { x=-1; y=0; } + } + else + { + if (b02) { x=1; y=0; } + else { x=0; y=1; } + } + if (((float)r[(i+y)*x_size+j+x]/(float)centre) > 0.7) + { + if ( ( (x==0) && (mid[(i+(2*y))*x_size+j]>7) && (mid[(i+(2*y))*x_size+j-1]>7) && (mid[(i+(2*y))*x_size+j+1]>7) ) || + ( (y==0) && (mid[(i)*x_size+j+(2*x)]>7) && (mid[(i+1)*x_size+j+(2*x)]>7) && (mid[(i-1)*x_size+j+(2*x)]>7) ) ) + { + mid[(i)*x_size+j]=100; + mid[(i+y)*x_size+j+x]=3; /* no jumping needed */ + } + } + } + else + { + b01 = mid[(i-1)*x_size+j ]<8; + b12 = mid[(i )*x_size+j+1]<8; + b21 = mid[(i+1)*x_size+j ]<8; + b10 = mid[(i )*x_size+j-1]<8; + /* {{{ right angle ends - not currently used */ + +#ifdef IGNORETHIS + if ( (b00&b01)|(b00&b10)|(b02&b01)|(b02&b12)|(b20&b10)|(b20&b21)|(b22&b21)|(b22&b12) ) + { /* case; right angle ends. clean up. + e.g.; X X O CAN become X X O + O X O O O O + O O O O O O */ + if ( ((b01)&(mid[(i-2)*x_size+j-1]>7)&(mid[(i-2)*x_size+j]>7)&(mid[(i-2)*x_size+j+1]>7)& + ((b00&((2*r[(i-1)*x_size+j+1])>centre))|(b02&((2*r[(i-1)*x_size+j-1])>centre)))) | + ((b10)&(mid[(i-1)*x_size+j-2]>7)&(mid[(i)*x_size+j-2]>7)&(mid[(i+1)*x_size+j-2]>7)& + ((b00&((2*r[(i+1)*x_size+j-1])>centre))|(b20&((2*r[(i-1)*x_size+j-1])>centre)))) | + ((b12)&(mid[(i-1)*x_size+j+2]>7)&(mid[(i)*x_size+j+2]>7)&(mid[(i+1)*x_size+j+2]>7)& + ((b02&((2*r[(i+1)*x_size+j+1])>centre))|(b22&((2*r[(i-1)*x_size+j+1])>centre)))) | + ((b21)&(mid[(i+2)*x_size+j-1]>7)&(mid[(i+2)*x_size+j]>7)&(mid[(i+2)*x_size+j+1]>7)& + ((b20&((2*r[(i+1)*x_size+j+1])>centre))|(b22&((2*r[(i+1)*x_size+j-1])>centre)))) ) + { + mid[(i)*x_size+j]=100; + if (b10&b20) j-=2; + if (b00|b01|b02) { i--; j-=2; } + } + } +#endif + +/* }}} */ + if ( ((b01+b12+b21+b10)==2) && ((b10|b12)&(b01|b21)) && + ((b01&((mid[(i-2)*x_size+j-1]<8)|(mid[(i-2)*x_size+j+1]<8)))|(b10&((mid[(i-1)*x_size+j-2]<8)|(mid[(i+1)*x_size+j-2]<8)))| + (b12&((mid[(i-1)*x_size+j+2]<8)|(mid[(i+1)*x_size+j+2]<8)))|(b21&((mid[(i+2)*x_size+j-1]<8)|(mid[(i+2)*x_size+j+1]<8)))) ) + { /* case; clears odd right angles. 
+ e.g.; O O O becomes O O O + X X O X O O + O X O O X O */ + mid[(i)*x_size+j]=100; + i--; /* jump back */ + j-=2; + if (i<4) i=4; + if (j<4) j=4; + } + } + } + +/* }}} */ + /* {{{ n>2 the thinning is done here without breaking connectivity */ + + if (n>2) + { + b01 = mid[(i-1)*x_size+j ]<8; + b12 = mid[(i )*x_size+j+1]<8; + b21 = mid[(i+1)*x_size+j ]<8; + b10 = mid[(i )*x_size+j-1]<8; + if((b01+b12+b21+b10)>1) + { + b00 = mid[(i-1)*x_size+j-1]<8; + b02 = mid[(i-1)*x_size+j+1]<8; + b20 = mid[(i+1)*x_size+j-1]<8; + b22 = mid[(i+1)*x_size+j+1]<8; + p1 = b00 | b01; + p2 = b02 | b12; + p3 = b22 | b21; + p4 = b20 | b10; + + if( ((p1 + p2 + p3 + p4) - ((b01 & p2)+(b12 & p3)+(b21 & p4)+(b10 & p1))) < 2) + { + mid[(i)*x_size+j]=100; + i--; + j-=2; + if (i<4) i=4; + if (j<4) j=4; + } + } + } + +/* }}} */ + } +} + +/* }}} */ +/* {{{ susan_edges(in,r,sf,max_no,out) */ + +void susan_edges(uchar* in, int* r, uchar* mid, uchar* bp, + int max_no, int x_size, int y_size) +{ +float z; +int do_symmetry, i, j, m, n, a, b, x, y, w; +uchar c,*p,*cp; + + memset (r,0,x_size * y_size * sizeof(int)); + + for (i=3;i0) + { + m=r[i*x_size+j]; + n=max_no - m; + cp=bp + in[i*x_size+j]; + + if (n>600) + { + p=in + (i-3)*x_size + j - 1; + x=0;y=0; + + c=*(cp-*p++);x-=c;y-=3*c; + c=*(cp-*p++);y-=3*c; + c=*(cp-*p);x+=c;y-=3*c; + p+=x_size-3; + + c=*(cp-*p++);x-=2*c;y-=2*c; + c=*(cp-*p++);x-=c;y-=2*c; + c=*(cp-*p++);y-=2*c; + c=*(cp-*p++);x+=c;y-=2*c; + c=*(cp-*p);x+=2*c;y-=2*c; + p+=x_size-5; + + c=*(cp-*p++);x-=3*c;y-=c; + c=*(cp-*p++);x-=2*c;y-=c; + c=*(cp-*p++);x-=c;y-=c; + c=*(cp-*p++);y-=c; + c=*(cp-*p++);x+=c;y-=c; + c=*(cp-*p++);x+=2*c;y-=c; + c=*(cp-*p);x+=3*c;y-=c; + p+=x_size-6; + + c=*(cp-*p++);x-=3*c; + c=*(cp-*p++);x-=2*c; + c=*(cp-*p);x-=c; + p+=2; + c=*(cp-*p++);x+=c; + c=*(cp-*p++);x+=2*c; + c=*(cp-*p);x+=3*c; + p+=x_size-6; + + c=*(cp-*p++);x-=3*c;y+=c; + c=*(cp-*p++);x-=2*c;y+=c; + c=*(cp-*p++);x-=c;y+=c; + c=*(cp-*p++);y+=c; + c=*(cp-*p++);x+=c;y+=c; + c=*(cp-*p++);x+=2*c;y+=c; + c=*(cp-*p);x+=3*c;y+=c; + p+=x_size-5; + + c=*(cp-*p++);x-=2*c;y+=2*c; + c=*(cp-*p++);x-=c;y+=2*c; + c=*(cp-*p++);y+=2*c; + c=*(cp-*p++);x+=c;y+=2*c; + c=*(cp-*p);x+=2*c;y+=2*c; + p+=x_size-3; + + c=*(cp-*p++);x-=c;y+=3*c; + c=*(cp-*p++);y+=3*c; + c=*(cp-*p);x+=c;y+=3*c; + + z = sqrt((float)((x*x) + (y*y))); + if (z > (0.9*(float)n)) /* 0.5 */ + { + do_symmetry=0; + if (x==0) + z=1000000.0; + else + z=((float)y) / ((float)x); + if (z < 0) { z=-z; w=-1; } + else w=1; + if (z < 0.5) { /* vert_edge */ a=0; b=1; } + else { if (z > 2.0) { /* hor_edge */ a=1; b=0; } + else { /* diag_edge */ if (w>0) { a=1; b=1; } + else { a=-1; b=1; }}} + if ( (m > r[(i+a)*x_size+j+b]) && (m >= r[(i-a)*x_size+j-b]) && + (m > r[(i+(2*a))*x_size+j+(2*b)]) && (m >= r[(i-(2*a))*x_size+j-(2*b)]) ) + mid[i*x_size+j] = 1; + } + else + do_symmetry=1; + } + else + do_symmetry=1; + + if (do_symmetry==1) + { + p=in + (i-3)*x_size + j - 1; + x=0; y=0; w=0; + + /* | \ + y -x- w + | \ */ + + c=*(cp-*p++);x+=c;y+=9*c;w+=3*c; + c=*(cp-*p++);y+=9*c; + c=*(cp-*p);x+=c;y+=9*c;w-=3*c; + p+=x_size-3; + + c=*(cp-*p++);x+=4*c;y+=4*c;w+=4*c; + c=*(cp-*p++);x+=c;y+=4*c;w+=2*c; + c=*(cp-*p++);y+=4*c; + c=*(cp-*p++);x+=c;y+=4*c;w-=2*c; + c=*(cp-*p);x+=4*c;y+=4*c;w-=4*c; + p+=x_size-5; + + c=*(cp-*p++);x+=9*c;y+=c;w+=3*c; + c=*(cp-*p++);x+=4*c;y+=c;w+=2*c; + c=*(cp-*p++);x+=c;y+=c;w+=c; + c=*(cp-*p++);y+=c; + c=*(cp-*p++);x+=c;y+=c;w-=c; + c=*(cp-*p++);x+=4*c;y+=c;w-=2*c; + c=*(cp-*p);x+=9*c;y+=c;w-=3*c; + p+=x_size-6; + + c=*(cp-*p++);x+=9*c; + c=*(cp-*p++);x+=4*c; + 
c=*(cp-*p);x+=c; + p+=2; + c=*(cp-*p++);x+=c; + c=*(cp-*p++);x+=4*c; + c=*(cp-*p);x+=9*c; + p+=x_size-6; + + c=*(cp-*p++);x+=9*c;y+=c;w-=3*c; + c=*(cp-*p++);x+=4*c;y+=c;w-=2*c; + c=*(cp-*p++);x+=c;y+=c;w-=c; + c=*(cp-*p++);y+=c; + c=*(cp-*p++);x+=c;y+=c;w+=c; + c=*(cp-*p++);x+=4*c;y+=c;w+=2*c; + c=*(cp-*p);x+=9*c;y+=c;w+=3*c; + p+=x_size-5; + + c=*(cp-*p++);x+=4*c;y+=4*c;w-=4*c; + c=*(cp-*p++);x+=c;y+=4*c;w-=2*c; + c=*(cp-*p++);y+=4*c; + c=*(cp-*p++);x+=c;y+=4*c;w+=2*c; + c=*(cp-*p);x+=4*c;y+=4*c;w+=4*c; + p+=x_size-3; + + c=*(cp-*p++);x+=c;y+=9*c;w-=3*c; + c=*(cp-*p++);y+=9*c; + c=*(cp-*p);x+=c;y+=9*c;w+=3*c; + + if (y==0) + z = 1000000.0; + else + z = ((float)x) / ((float)y); + if (z < 0.5) { /* vertical */ a=0; b=1; } + else { if (z > 2.0) { /* horizontal */ a=1; b=0; } + else { /* diagonal */ if (w>0) { a=-1; b=1; } + else { a=1; b=1; }}} + if ( (m > r[(i+a)*x_size+j+b]) && (m >= r[(i-a)*x_size+j-b]) && + (m > r[(i+(2*a))*x_size+j+(2*b)]) && (m >= r[(i-(2*a))*x_size+j-(2*b)]) ) + mid[i*x_size+j] = 2; + } + } + } +} + +/* }}} */ +/* {{{ susan_edges_small(in,r,sf,max_no,out) */ + +void susan_edges_small(uchar* in, int* r, uchar* mid, uchar* bp, + int max_no, int x_size, int y_size) +{ +float z; +int do_symmetry, i, j, m, n, a, b, x, y, w; +uchar c,*p,*cp; + + memset (r,0,x_size * y_size * sizeof(int)); + + max_no = 730; /* ho hum ;) */ + + for (i=1;i0) + { + m=r[i*x_size+j]; + n=max_no - m; + cp=bp + in[i*x_size+j]; + + if (n>250) + { + p=in + (i-1)*x_size + j - 1; + x=0;y=0; + + c=*(cp-*p++);x-=c;y-=c; + c=*(cp-*p++);y-=c; + c=*(cp-*p);x+=c;y-=c; + p+=x_size-2; + + c=*(cp-*p);x-=c; + p+=2; + c=*(cp-*p);x+=c; + p+=x_size-2; + + c=*(cp-*p++);x-=c;y+=c; + c=*(cp-*p++);y+=c; + c=*(cp-*p);x+=c;y+=c; + + z = sqrt((float)((x*x) + (y*y))); + if (z > (0.4*(float)n)) /* 0.6 */ + { + do_symmetry=0; + if (x==0) + z=1000000.0; + else + z=((float)y) / ((float)x); + if (z < 0) { z=-z; w=-1; } + else w=1; + if (z < 0.5) { /* vert_edge */ a=0; b=1; } + else { if (z > 2.0) { /* hor_edge */ a=1; b=0; } + else { /* diag_edge */ if (w>0) { a=1; b=1; } + else { a=-1; b=1; }}} + if ( (m > r[(i+a)*x_size+j+b]) && (m >= r[(i-a)*x_size+j-b]) ) + mid[i*x_size+j] = 1; + } + else + do_symmetry=1; + } + else + do_symmetry=1; + + if (do_symmetry==1) + { + p=in + (i-1)*x_size + j - 1; + x=0; y=0; w=0; + + /* | \ + y -x- w + | \ */ + + c=*(cp-*p++);x+=c;y+=c;w+=c; + c=*(cp-*p++);y+=c; + c=*(cp-*p);x+=c;y+=c;w-=c; + p+=x_size-2; + + c=*(cp-*p);x+=c; + p+=2; + c=*(cp-*p);x+=c; + p+=x_size-2; + + c=*(cp-*p++);x+=c;y+=c;w-=c; + c=*(cp-*p++);y+=c; + c=*(cp-*p);x+=c;y+=c;w+=c; + + if (y==0) + z = 1000000.0; + else + z = ((float)x) / ((float)y); + if (z < 0.5) { /* vertical */ a=0; b=1; } + else { if (z > 2.0) { /* horizontal */ a=1; b=0; } + else { /* diagonal */ if (w>0) { a=-1; b=1; } + else { a=1; b=1; }}} + if ( (m > r[(i+a)*x_size+j+b]) && (m >= r[(i-a)*x_size+j-b]) ) + mid[i*x_size+j] = 2; + } + } + } +} + +/* }}} */ + +/* }}} */ +/* {{{ corners */ + +/* {{{ corner_draw(in,corner_list,drawing_mode) */ + +void corner_draw(uchar* in, CORNER_LIST corner_list, + int x_size, int drawing_mode) +{ +uchar *p; +int n=0; + + while(corner_list[n].info != 7) + { + if (drawing_mode==0) + { + p = in + (corner_list[n].y-1)*x_size + corner_list[n].x - 1; + *p++=255; *p++=255; *p=255; p+=x_size-2; + *p++=255; *p++=0; *p=255; p+=x_size-2; + *p++=255; *p++=255; *p=255; + n++; + } + else + { + p = in + corner_list[n].y*x_size + corner_list[n].x; + *p=0; + n++; + } + } +} + +/* }}} */ +/* {{{ susan(in,r,sf,max_no,corner_list) */ + +void 
susan_corners(uchar* in, int* r, uchar* bp, + int max_no, CORNER_LIST corner_list, + int x_size, int y_size) +{ +int n,x,y,sq,xx,yy, + i,j,*cgx,*cgy; +float divide; +uchar c,*p,*cp; + + memset (r,0,x_size * y_size * sizeof(int)); + + cgx=(int *)malloc(x_size*y_size*sizeof(int)); + cgy=(int *)malloc(x_size*y_size*sizeof(int)); + + for (i=5;i ((n*n)/2) ) + { + if(yy290){ + r[i*x_size+j] = max_no-n; + cgx[i*x_size+j] = (51*x)/n; + cgy[i*x_size+j] = (51*y)/n;} + } + } +}}}}}}}}}}}}}}}}}}} + + /* to locate the local maxima */ + n=0; + for (i=5;i0) { + /* 5x5 mask */ +#ifdef FIVE_SUPP + if ( + (x>r[(i-1)*x_size+j+2]) && + (x>r[(i )*x_size+j+1]) && + (x>r[(i )*x_size+j+2]) && + (x>r[(i+1)*x_size+j-1]) && + (x>r[(i+1)*x_size+j ]) && + (x>r[(i+1)*x_size+j+1]) && + (x>r[(i+1)*x_size+j+2]) && + (x>r[(i+2)*x_size+j-2]) && + (x>r[(i+2)*x_size+j-1]) && + (x>r[(i+2)*x_size+j ]) && + (x>r[(i+2)*x_size+j+1]) && + (x>r[(i+2)*x_size+j+2]) && + (x>=r[(i-2)*x_size+j-2]) && + (x>=r[(i-2)*x_size+j-1]) && + (x>=r[(i-2)*x_size+j ]) && + (x>=r[(i-2)*x_size+j+1]) && + (x>=r[(i-2)*x_size+j+2]) && + (x>=r[(i-1)*x_size+j-2]) && + (x>=r[(i-1)*x_size+j-1]) && + (x>=r[(i-1)*x_size+j ]) && + (x>=r[(i-1)*x_size+j+1]) && + (x>=r[(i )*x_size+j-2]) && + (x>=r[(i )*x_size+j-1]) && + (x>=r[(i+1)*x_size+j-2]) ) +#endif +#ifdef SEVEN_SUPP + if ( + (x>r[(i-3)*x_size+j-3]) && + (x>r[(i-3)*x_size+j-2]) && + (x>r[(i-3)*x_size+j-1]) && + (x>r[(i-3)*x_size+j ]) && + (x>r[(i-3)*x_size+j+1]) && + (x>r[(i-3)*x_size+j+2]) && + (x>r[(i-3)*x_size+j+3]) && + + (x>r[(i-2)*x_size+j-3]) && + (x>r[(i-2)*x_size+j-2]) && + (x>r[(i-2)*x_size+j-1]) && + (x>r[(i-2)*x_size+j ]) && + (x>r[(i-2)*x_size+j+1]) && + (x>r[(i-2)*x_size+j+2]) && + (x>r[(i-2)*x_size+j+3]) && + + (x>r[(i-1)*x_size+j-3]) && + (x>r[(i-1)*x_size+j-2]) && + (x>r[(i-1)*x_size+j-1]) && + (x>r[(i-1)*x_size+j ]) && + (x>r[(i-1)*x_size+j+1]) && + (x>r[(i-1)*x_size+j+2]) && + (x>r[(i-1)*x_size+j+3]) && + + (x>r[(i)*x_size+j-3]) && + (x>r[(i)*x_size+j-2]) && + (x>r[(i)*x_size+j-1]) && + (x>=r[(i)*x_size+j+1]) && + (x>=r[(i)*x_size+j+2]) && + (x>=r[(i)*x_size+j+3]) && + + (x>=r[(i+1)*x_size+j-3]) && + (x>=r[(i+1)*x_size+j-2]) && + (x>=r[(i+1)*x_size+j-1]) && + (x>=r[(i+1)*x_size+j ]) && + (x>=r[(i+1)*x_size+j+1]) && + (x>=r[(i+1)*x_size+j+2]) && + (x>=r[(i+1)*x_size+j+3]) && + + (x>=r[(i+2)*x_size+j-3]) && + (x>=r[(i+2)*x_size+j-2]) && + (x>=r[(i+2)*x_size+j-1]) && + (x>=r[(i+2)*x_size+j ]) && + (x>=r[(i+2)*x_size+j+1]) && + (x>=r[(i+2)*x_size+j+2]) && + (x>=r[(i+2)*x_size+j+3]) && + + (x>=r[(i+3)*x_size+j-3]) && + (x>=r[(i+3)*x_size+j-2]) && + (x>=r[(i+3)*x_size+j-1]) && + (x>=r[(i+3)*x_size+j ]) && + (x>=r[(i+3)*x_size+j+1]) && + (x>=r[(i+3)*x_size+j+2]) && + (x>=r[(i+3)*x_size+j+3]) ) +#endif +{ +corner_list[n].info=0; +corner_list[n].x=j; +corner_list[n].y=i; +corner_list[n].dx=cgx[i*x_size+j]; +corner_list[n].dy=cgy[i*x_size+j]; +corner_list[n].I=in[i*x_size+j]; +n++; +if(n==MAX_CORNERS){ + fprintf(stderr,"Too many corners.\n"); + exit(1); + }}}} +corner_list[n].info=7; + +free(cgx); +free(cgy); + +} + +/* }}} */ +/* {{{ susan_quick(in,r,sf,max_no,corner_list) */ + +void susan_corners_quick(uchar* in, int* r, uchar* bp, + int max_no, CORNER_LIST corner_list, + int x_size, int y_size) +{ +int n,x,y,i,j; +uchar *p,*cp; + + memset (r,0,x_size * y_size * sizeof(int)); + + for (i=7;i0) { + /* 5x5 mask */ +#ifdef FIVE_SUPP + if ( + (x>r[(i-1)*x_size+j+2]) && + (x>r[(i )*x_size+j+1]) && + (x>r[(i )*x_size+j+2]) && + (x>r[(i+1)*x_size+j-1]) && + (x>r[(i+1)*x_size+j ]) && + 
(x>r[(i+1)*x_size+j+1]) && + (x>r[(i+1)*x_size+j+2]) && + (x>r[(i+2)*x_size+j-2]) && + (x>r[(i+2)*x_size+j-1]) && + (x>r[(i+2)*x_size+j ]) && + (x>r[(i+2)*x_size+j+1]) && + (x>r[(i+2)*x_size+j+2]) && + (x>=r[(i-2)*x_size+j-2]) && + (x>=r[(i-2)*x_size+j-1]) && + (x>=r[(i-2)*x_size+j ]) && + (x>=r[(i-2)*x_size+j+1]) && + (x>=r[(i-2)*x_size+j+2]) && + (x>=r[(i-1)*x_size+j-2]) && + (x>=r[(i-1)*x_size+j-1]) && + (x>=r[(i-1)*x_size+j ]) && + (x>=r[(i-1)*x_size+j+1]) && + (x>=r[(i )*x_size+j-2]) && + (x>=r[(i )*x_size+j-1]) && + (x>=r[(i+1)*x_size+j-2]) ) +#endif +#ifdef SEVEN_SUPP + if ( + (x>r[(i-3)*x_size+j-3]) && + (x>r[(i-3)*x_size+j-2]) && + (x>r[(i-3)*x_size+j-1]) && + (x>r[(i-3)*x_size+j ]) && + (x>r[(i-3)*x_size+j+1]) && + (x>r[(i-3)*x_size+j+2]) && + (x>r[(i-3)*x_size+j+3]) && + + (x>r[(i-2)*x_size+j-3]) && + (x>r[(i-2)*x_size+j-2]) && + (x>r[(i-2)*x_size+j-1]) && + (x>r[(i-2)*x_size+j ]) && + (x>r[(i-2)*x_size+j+1]) && + (x>r[(i-2)*x_size+j+2]) && + (x>r[(i-2)*x_size+j+3]) && + + (x>r[(i-1)*x_size+j-3]) && + (x>r[(i-1)*x_size+j-2]) && + (x>r[(i-1)*x_size+j-1]) && + (x>r[(i-1)*x_size+j ]) && + (x>r[(i-1)*x_size+j+1]) && + (x>r[(i-1)*x_size+j+2]) && + (x>r[(i-1)*x_size+j+3]) && + + (x>r[(i)*x_size+j-3]) && + (x>r[(i)*x_size+j-2]) && + (x>r[(i)*x_size+j-1]) && + (x>=r[(i)*x_size+j+1]) && + (x>=r[(i)*x_size+j+2]) && + (x>=r[(i)*x_size+j+3]) && + + (x>=r[(i+1)*x_size+j-3]) && + (x>=r[(i+1)*x_size+j-2]) && + (x>=r[(i+1)*x_size+j-1]) && + (x>=r[(i+1)*x_size+j ]) && + (x>=r[(i+1)*x_size+j+1]) && + (x>=r[(i+1)*x_size+j+2]) && + (x>=r[(i+1)*x_size+j+3]) && + + (x>=r[(i+2)*x_size+j-3]) && + (x>=r[(i+2)*x_size+j-2]) && + (x>=r[(i+2)*x_size+j-1]) && + (x>=r[(i+2)*x_size+j ]) && + (x>=r[(i+2)*x_size+j+1]) && + (x>=r[(i+2)*x_size+j+2]) && + (x>=r[(i+2)*x_size+j+3]) && + + (x>=r[(i+3)*x_size+j-3]) && + (x>=r[(i+3)*x_size+j-2]) && + (x>=r[(i+3)*x_size+j-1]) && + (x>=r[(i+3)*x_size+j ]) && + (x>=r[(i+3)*x_size+j+1]) && + (x>=r[(i+3)*x_size+j+2]) && + (x>=r[(i+3)*x_size+j+3]) ) +#endif +{ +corner_list[n].info=0; +corner_list[n].x=j; +corner_list[n].y=i; +x = in[(i-2)*x_size+j-2] + in[(i-2)*x_size+j-1] + in[(i-2)*x_size+j] + in[(i-2)*x_size+j+1] + in[(i-2)*x_size+j+2] + + in[(i-1)*x_size+j-2] + in[(i-1)*x_size+j-1] + in[(i-1)*x_size+j] + in[(i-1)*x_size+j+1] + in[(i-1)*x_size+j+2] + + in[(i )*x_size+j-2] + in[(i )*x_size+j-1] + in[(i )*x_size+j] + in[(i )*x_size+j+1] + in[(i )*x_size+j+2] + + in[(i+1)*x_size+j-2] + in[(i+1)*x_size+j-1] + in[(i+1)*x_size+j] + in[(i+1)*x_size+j+1] + in[(i+1)*x_size+j+2] + + in[(i+2)*x_size+j-2] + in[(i+2)*x_size+j-1] + in[(i+2)*x_size+j] + in[(i+2)*x_size+j+1] + in[(i+2)*x_size+j+2]; + +corner_list[n].I=x/25; +/*corner_list[n].I=in[i*x_size+j];*/ +x = in[(i-2)*x_size+j+2] + in[(i-1)*x_size+j+2] + in[(i)*x_size+j+2] + in[(i+1)*x_size+j+2] + in[(i+2)*x_size+j+2] - + (in[(i-2)*x_size+j-2] + in[(i-1)*x_size+j-2] + in[(i)*x_size+j-2] + in[(i+1)*x_size+j-2] + in[(i+2)*x_size+j-2]); +x += x + in[(i-2)*x_size+j+1] + in[(i-1)*x_size+j+1] + in[(i)*x_size+j+1] + in[(i+1)*x_size+j+1] + in[(i+2)*x_size+j+1] - + (in[(i-2)*x_size+j-1] + in[(i-1)*x_size+j-1] + in[(i)*x_size+j-1] + in[(i+1)*x_size+j-1] + in[(i+2)*x_size+j-1]); + +y = in[(i+2)*x_size+j-2] + in[(i+2)*x_size+j-1] + in[(i+2)*x_size+j] + in[(i+2)*x_size+j+1] + in[(i+2)*x_size+j+2] - + (in[(i-2)*x_size+j-2] + in[(i-2)*x_size+j-1] + in[(i-2)*x_size+j] + in[(i-2)*x_size+j+1] + in[(i-2)*x_size+j+2]); +y += y + in[(i+1)*x_size+j-2] + in[(i+1)*x_size+j-1] + in[(i+1)*x_size+j] + in[(i+1)*x_size+j+1] + in[(i+1)*x_size+j+2] - + 
(in[(i-1)*x_size+j-2] + in[(i-1)*x_size+j-1] + in[(i-1)*x_size+j] + in[(i-1)*x_size+j+1] + in[(i-1)*x_size+j+2]); +corner_list[n].dx=x/15; +corner_list[n].dy=y/15; +n++; +if(n==MAX_CORNERS){ + fprintf(stderr,"Too many corners.\n"); + exit(1); + }}}} +corner_list[n].info=7; +} + +/* }}} */ + +/* }}} */ +/* {{{ main(argc, argv) */ + +int main(int argc, char* argv[]) +{ +/* {{{ vars */ + +char *tcp; +uchar *in, *bp, *mid; +float dt=4.0; +int *r, + argindex=3, + bt=20, + principle=0, + thin_post_proc=1, + three_by_three=0, + drawing_mode=0, + susan_quick=0, + max_no_corners=1850, + max_no_edges=2650, + mode = 0, + x_size, y_size; +CORNER_LIST corner_list; + +/* }}} */ + + long ct_repeat=0; + long ct_repeat_max=1; + int ct_return=0; + +#ifdef OPENME + openme_init(NULL,NULL,NULL,0); + openme_callback("PROGRAM_START", NULL); +#endif +#ifdef XOPENME + xopenme_init(1,2); +#endif + + if (getenv("CT_REPEAT_MAIN")!=NULL) ct_repeat_max=atol(getenv("CT_REPEAT_MAIN")); + + if (argc<3) + usage(); + + get_image(argv[1],&in,&x_size,&y_size); + +#ifdef XOPENME + xopenme_add_var_i(0, " \"image_size_x\":%u", x_size); + xopenme_add_var_i(1, " \"image_size_y\":%u", y_size); +#endif + +// printf("Size X=%u Size Y=%u\n", x_size, y_size); + /* FGG - changing dataset size */ +// x_size=8; +// y_size=8; +// printf("Size X=%u Size Y=%u\n", x_size, y_size); + + /* {{{ look at options */ + + while (argindex < argc) + { + tcp = argv[argindex]; + if (*tcp == '-') + switch (*++tcp) + { + case 's': /* smoothing */ + mode=0; + break; + case 'e': /* edges */ + mode=1; + break; + case 'c': /* corners */ + mode=2; + break; + case 'p': /* principle */ + principle=1; + break; + case 'n': /* thinning post processing */ + thin_post_proc=0; + break; + case 'b': /* simple drawing mode */ + drawing_mode=1; + break; + case '3': /* 3x3 flat mask */ + three_by_three=1; + break; + case 'q': /* quick susan mask */ + susan_quick=1; + break; + case 'd': /* distance threshold */ + if (++argindex >= argc){ + printf ("No argument following -d\n"); + exit(0);} + dt=atof(argv[argindex]); + if (dt<0) three_by_three=1; + break; + case 't': /* brightness threshold */ + if (++argindex >= argc){ + printf ("No argument following -t\n"); + exit(0);} + bt=atoi(argv[argindex]); + break; + } + else + usage(); + argindex++; + } + + if ( (principle==1) && (mode==0) ) + mode=1; + +/* }}} */ + /* {{{ main processing */ + +#ifdef OPENME + openme_callback("KERNEL_START", NULL); +#endif +#ifdef XOPENME + xopenme_clock_start(0); +#endif + + for (ct_repeat=0; ct_repeat +Click if you want to use Python virtual environment + +We suggest you to install a python virtual environment via CM though it's not strictly necessary +(CM can automatically detect and reuse your Python installation and environments): +```bash +cm run script "install python-venv" --name=loadgen +``` + +You can also install a specific version of Python on your system via: +```bash +cm run script "install python-venv" --name=loadgen --version=3.10.7 +``` + +By default, CM will be asking users to select one from all detected and installed Python versions +including the above one, any time a script with python dependency is run. To avoid that, you +can set up the following environment variable with the name of the current virtual environment: + +```bash +export CM_SCRIPT_EXTRA_CMD="--adr.python.name=loadgen" +``` + +The `--adr` flag stands for "Add to all Dependencies Recursively" and will find all sub-dependencies on other CM scripts + + + + +### Install dependencies via CM (optional) + +
+
Click if you want to install specific versions of dependencies
+
+You can skip this sub-section if you want CM to automatically detect an already installed
+ONNX runtime on your system. Otherwise, follow the next steps to install the latest or a
+specific version of ONNX runtime.
+
+
+### Download LoadGen sources from MLPerf inference benchmark
+
+```bash
+cm run script "get mlperf inference src" --version=r3.1
+```
+
+### Install MLPerf LoadGen
+We can now install LoadGen via CM:
+
+```bash
+cm run script "get mlperf loadgen"
+```
+
+### ONNX, CPU
+
+```bash
+cm run script "get generic-python-lib _onnxruntime"
+```
+
+or
+
+```bash
+cm run script "get generic-python-lib _onnxruntime" --version=1.13.1
+```
+
+or
+
+```bash
+cm run script "get generic-python-lib _onnxruntime" --version_min=1.10.0
+```
+
+
+### Benchmark standard MLPerf model
+
+You can use CM variations prefixed by `_` to benchmark an official MLPerf model
+(_resnet50 or _retinanet):
+
+```bash
+cm run script "python app loadgen-generic _onnxruntime _retinanet" --samples=5
+cmr "python app loadgen-generic _onnxruntime _resnet50"
+```
+
+Normally, you should see the following performance report from the loadgen:
+
+
+
    +Click to open + +```bash + +2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Model: /home/gfursin/CM/repos/local/cache/9c825a0a06fb48e2/resnet50_v1.onnx +2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Runner: inline, Concurrency: 4 +2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Results: results/resnet50_v1.onnx/inline +2022-12-06 16:51:39,279 INFO MainThread - __main__ main: Test Started +2022-12-06 16:51:39,399 INFO MainThread - loadgen.harness load_query_samples: Loaded 100 samples +2022-12-06 16:51:55,723 INFO MainThread - loadgen.harness issue_query: Queries issued 550 +2022-12-06 16:51:55,725 INFO MainThread - loadgen.harness flush_queries: Queries flushed +2022-12-06 16:51:55,731 INFO MainThread - loadgen.harness unload_query_samples: Unloaded samples +================================================ +MLPerf Results Summary +================================================ +SUT name : PySUT +Scenario : Offline +Mode : PerformanceOnly +Samples per second: 33.6903 +Result is : VALID + Min duration satisfied : Yes + Min queries satisfied : Yes + Early stopping satisfied: Yes + +================================================ +Additional Stats +================================================ +Min latency (ns) : 16325180169 +Max latency (ns) : 16325180169 +Mean latency (ns) : 16325180169 +50.00 percentile latency (ns) : 16325180169 +90.00 percentile latency (ns) : 16325180169 +95.00 percentile latency (ns) : 16325180169 +97.00 percentile latency (ns) : 16325180169 +99.00 percentile latency (ns) : 16325180169 +99.90 percentile latency (ns) : 16325180169 + +================================================ +Test Parameters Used +================================================ +samples_per_query : 550 +target_qps : 50 +target_latency (ns): 0 +max_async_queries : 1 +min_duration (ms): 10000 +max_duration (ms): 0 +min_query_count : 1 +max_query_count : 0 +qsl_rng_seed : 0 +sample_index_rng_seed : 0 +schedule_rng_seed : 0 +accuracy_log_rng_seed : 0 +accuracy_log_probability : 0 +accuracy_log_sampling_target : 0 +print_timestamps : 0 +performance_issue_unique : 0 +performance_issue_same : 0 +performance_issue_same_index : 0 +performance_sample_count : 100 + +No warnings encountered during test. + +No errors encountered during test. +2022-12-06 16:51:55,753 INFO MainThread - __main__ main: Observed QPS: 33.6903 +2022-12-06 16:51:55,753 INFO MainThread - __main__ main: Result: VALID +2022-12-06 16:51:55,753 INFO MainThread - __main__ main: Test Completed + + - Running postprocess ... + - running time of script "app,loadgen,generic,loadgen-generic,python": 370.87 sec. + +``` + +
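+The key figures in this report are `Samples per second` and the `Result is : VALID`
+line, which confirms that the minimum duration and query count were satisfied.
+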
+
+
+### Benchmark custom model
+
+You can also specify any custom onnx model file as follows:
+
+```bash
+cm run script "python app loadgen-generic _onnxruntime" --modelpath={PATH TO ONNX MODEL}
+```
+
+### Benchmark Hugging Face model
+
+```bash
+cmr "python app loadgen-generic _onnxruntime _custom _huggingface _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --adr.hf-downloader.model_filename=model.onnx
+```
+
+*See more examples of downloading Hugging Face models via CM [here](../get-ml-model-huggingface-zoo/README-extra.md).*
+
+### Benchmark using ONNX CUDA
+
+```bash
+cm rm cache -f
+cmr "python app loadgen-generic _onnxruntime _cuda _retinanet" --quiet
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --adr.hf-downloader.model_filename=model.onnx
+```
+
+These cases worked on Windows and Linux but may require a GPU with more than 8 GB of memory:
+```bash
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.steerapi/Llama-2-7b-chat-hf-onnx-awq-w8" --adr.hf-downloader.model_filename=onnx/decoder_model_merged_quantized.onnx,onnx/decoder_model_merged_quantized.onnx_data --samples=2
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.alpindale/Llama-2-13b-ONNX" --adr.hf-downloader.model_filename=FP32/LlamaV2_13B_float32.onnx --adr.hf-downloader.full_subfolder=FP32 --samples=2
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.Intel/gpt-j-6B-int8-static" --adr.hf-downloader.model_filename=model.onnx --adr.hf-downloader.full_subfolder=. --samples=2
+```
+
+TBD: some cases that are not yet fully supported (data types, input mismatch, etc):
+```bash
+cmr "python app loadgen-generic _onnxruntime _custom _huggingface _model-stub.runwayml/stable-diffusion-v1-5" --adr.hf-downloader.revision=onnx --adr.hf-downloader.model_filename=unet/model.onnx,unet/weights.pb --samples=2
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.microsoft/Mistral-7B-v0.1-onnx" --adr.hf-downloader.model_filename=Mistral-7B-v0.1.onnx,Mistral-7B-v0.1.onnx.data --samples=2
+cmr "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.alpindale/Llama-2-7b-ONNX" --adr.hf-downloader.model_filename=FP16/LlamaV2_7B_float16.onnx --adr.hf-downloader.full_subfolder=FP16 --samples=2
+```
+
+### Other variations and flags
+
+You can obtain help about flags and variations from the command line:
+
+```bash
+cm run script "python app loadgen-generic" --help
+
+Available variations:
+
+  _cpu
+  _cuda
+  _custom
+  _custom,huggingface
+  _huggingface
+  _model-stub.#
+  _onnxruntime
+  _pytorch
+  _resnet50
+  _retinanet
+
+Available flags mapped to environment variables:
+
+  --concurrency  ->  --env.CM_MLPERF_CONCURRENCY
+  --ep  ->  --env.CM_MLPERF_EXECUTION_PROVIDER
+  --execmode  ->  --env.CM_MLPERF_EXEC_MODE
+  --interop  ->  --env.CM_MLPERF_INTEROP
+  --intraop  ->  --env.CM_MLPERF_INTRAOP
+  --modelpath  ->  --env.CM_ML_MODEL_FILE_WITH_PATH
+  --output_dir  ->  --env.CM_MLPERF_OUTPUT_DIR
+  --runner  ->  --env.CM_MLPERF_RUNNER
+  --samples  ->  --env.CM_MLPERF_LOADGEN_SAMPLES
+  --scenario  ->  --env.CM_MLPERF_LOADGEN_SCENARIO
+
+```
+
+## Running this app via Docker
+
+```bash
+cm docker script "python app loadgen-generic _onnxruntime _custom _huggingface _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --adr.hf-downloader.model_filename=model.onnx --samples=2 --output_dir=new_results --docker_cm_repo=ctuning@mlcommons-ck
+```
+
+## Tuning 
CPU performance via CM experiment + +```bash +cm run experiment --tags=loadgen,python,llama2 -- cmr script "python app loadgen-generic _onnxruntime _cuda _custom _huggingface _model-stub.steerapi/Llama-2-7b-chat-hf-onnx-awq-w8" --adr.hf-downloader.model_filename=onnx/decoder_model_merged_quantized.onnx,onnx/decoder_model_merged_quantized.onnx_data --samples=2 --intraop={{CM_OPT_INTRAOP{[1,2,4]}}} --interop={{CM_OPT_INTEROP{[1,2,4]}}} --quiet +cm run experiment --tags=loadgen,python,llama2 -- cmr "python app loadgen-generic _onnxruntime" --modelpath={PATH TO ONNX MODEL} --samples=2 --intraop={{CM_OPT_INTRAOP{[1,2,4]}}} --interop={{CM_OPT_INTEROP{[1,2,4]}}} --quiet +``` + + +## Developers + +* [Gaz Iqbal](https://www.linkedin.com/in/gaziqbal) +* [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh) +* [Grigori Fursin](https://cKnowledge.org/gfursin) + +## Get in touch + +* [MLCommons Task Force on Automation and Reproducibility](../../../docs/taskforce.md) +* [Public Discord server](https://discord.gg/JjWNWXKxwT) diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/README.md b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/README.md new file mode 100644 index 000000000..1e00049c6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-loadgen-generic-python](https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-loadgen-generic-python) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/_cm.yaml new file mode 100644 index 000000000..3e5fe56e1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/_cm.yaml @@ -0,0 +1,326 @@ +# Identification of this CM script +alias: app-loadgen-generic-python +uid: d3d949cc361747a6 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf inference benchmark pipeline" + +developers: "[Gaz Iqbal](https://www.linkedin.com/in/gaziqbal), [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)" + + +# User-friendly tags to find this CM script +tags: + - app + - loadgen + - generic + - loadgen-generic + - python + +tags_help: "python app generic loadgen" + + +# Default environment +default_env: + CM_MLPERF_EXECUTION_MODE: parallel + CM_MLPERF_BACKEND: onnxruntime + +# Map script inputs to environment variables +input_mapping: + modelpath: CM_ML_MODEL_FILE_WITH_PATH + modelcodepath: CM_ML_MODEL_CODE_WITH_PATH + modelcfgpath: CM_ML_MODEL_CFG_WITH_PATH + modelcfg: CM_ML_MODEL_CFG + modelsamplepath: CM_ML_MODEL_SAMPLE_WITH_PATH + output_dir: CM_MLPERF_OUTPUT_DIR + scenario: CM_MLPERF_LOADGEN_SCENARIO + runner: CM_MLPERF_RUNNER + concurrency: CM_MLPERF_CONCURRENCY + ep: CM_MLPERF_EXECUTION_PROVIDER + intraop: CM_MLPERF_INTRAOP + interop: CM_MLPERF_INTEROP + execmode: CM_MLPERF_EXEC_MODE + samples: CM_MLPERF_LOADGEN_SAMPLES + loadgen_expected_qps: CM_MLPERF_LOADGEN_EXPECTED_QPS + loadgen_duration_sec: CM_MLPERF_LOADGEN_DURATION_SEC + +# New env keys exported from this script +new_env_keys: + - CM_MLPERF_* + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Get Python + - tags: get,python3 + names: + - python + - 
python3
+
+  # Extra package
+  - tags: get,generic-python-lib,_psutil
+  - tags: get,generic-python-lib,_package.numpy
+    version_max: "1.99.99"
+
+  # Detect CUDA if required
+  - tags: get,cuda
+    enable_if_env:
+      CM_MLPERF_DEVICE:
+      - gpu
+
+  # Install loadgen
+  - tags: get,loadgen
+    names:
+    - loadgen
+
+  ########################################################################
+  # Install ML engines via CM
+  # ONNX
+  - enable_if_env:
+      CM_MLPERF_BACKEND:
+      - onnxruntime
+      CM_MLPERF_DEVICE:
+      - cpu
+    tags: get,generic-python-lib,_onnxruntime
+    names:
+    - onnxruntime
+
+  - enable_if_env:
+      CM_MLPERF_BACKEND:
+      - onnxruntime
+      CM_MLPERF_DEVICE:
+      - gpu
+    tags: get,generic-python-lib,_onnxruntime_gpu
+    names:
+    - onnxruntime
+
+  - enable_if_env:
+      CM_MLPERF_BACKEND:
+      - onnxruntime
+    tags: get,generic-python-lib,_onnx
+    names:
+    - onnx
+
+  ########################################################################
+  # Install ML engines via CM
+  # PyTorch
+
+  # CPU
+
+  - enable_if_env:
+      CM_MLPERF_BACKEND:
+      - pytorch
+      CM_MLPERF_DEVICE:
+      - cpu
+    tags: get,generic-python-lib,_torch
+    names:
+    - torch
+
+  - enable_if_env:
+      CM_MLPERF_BACKEND:
+      - pytorch
+      CM_MLPERF_DEVICE:
+      - cpu
+    tags: get,generic-python-lib,_torchvision
+    names:
+    - torchvision
+
+  # CUDA/GPU
+
+  - enable_if_env:
+      CM_MLPERF_BACKEND:
+      - pytorch
+      CM_MLPERF_DEVICE:
+      - gpu
+    tags: get,generic-python-lib,_torch_cuda
+    names:
+    - torch
+
+  - enable_if_env:
+      CM_MLPERF_BACKEND:
+      - pytorch
+      CM_MLPERF_DEVICE:
+      - gpu
+    tags: get,generic-python-lib,_torchvision_cuda
+    names:
+    - torchvision
+
+
+
+  ########################################################################
+  # Install MLPerf models
+  - enable_if_env:
+      CM_MODEL:
+      - resnet50
+    tags: get,ml-model,resnet50,_onnx
+
+  - enable_if_env:
+      CM_MODEL:
+      - retinanet
+    tags: get,ml-model,retinanet,_onnx,_fp32
+
+
+
+
+# Customize this CM script
+variations:
+
+  pytorch:
+    group: backend
+    env:
+      CM_MLPERF_BACKEND:
+        pytorch
+
+  onnxruntime:
+    group: backend
+    default: true
+    env:
+      CM_MLPERF_BACKEND:
+        onnxruntime
+
+
+
+  cpu:
+    group:
+      device
+    default:
+      true
+    env:
+      CM_MLPERF_DEVICE:
+        cpu
+      CM_MLPERF_EXECUTION_PROVIDER:
+        CPUExecutionProvider
+
+  cuda:
+    docker:
+      all_gpus: 'yes'
+      base_image: nvcr.io/nvidia/pytorch:24.03-py3
+    group:
+      device
+    env:
+      CM_MLPERF_DEVICE:
+        gpu
+      CM_MLPERF_EXECUTION_PROVIDER:
+        CUDAExecutionProvider
+
+
+
+  retinanet:
+    group:
+      models
+    env:
+      CM_MODEL: retinanet
+
+  resnet50:
+    group:
+      models
+    env:
+      CM_MODEL: resnet50
+
+  custom:
+    group:
+      models
+    env:
+      CM_MODEL: custom
+
+
+
+  huggingface:
+    env:
+      CM_CUSTOM_MODEL_SOURCE: huggingface
+
+  custom,huggingface:
+    deps:
+    - tags: get,ml-model,huggingface
+      names:
+      - hf-downloader
+      update_tags_from_env_with_prefix:
+        "_model-stub.":
+        - CM_ML_MODEL_STUB
+
+  model-stub.#:
+    env:
+      CM_ML_MODEL_STUB: "#"
+
+
+  cmc:
+    env:
+      CM_CUSTOM_MODEL_CMC: yes
+
+
+  custom,cmc:
+    deps:
+    - tags: get,ml-model,cmc
+      names:
+      - cmc-model
+
+
+input_description:
+  modelpath:
+    desc: Full path to file with model weights
+  modelcodepath:
+    desc: (for PyTorch models) Full path to file with model code and cmc.py
+  modelcfgpath:
+    desc: (for PyTorch models) Full path to JSON file with model cfg
+  modelsamplepath:
+    desc: (for PyTorch models) Full path to file with model sample in pickle format
+  ep:
+    desc: ONNX Execution provider
+  scenario:
+    desc: MLPerf LoadGen scenario
+  samples:
+    desc: Number of samples
+    default: 
2 + runner: + desc: MLPerf runner + execmode: + desc: MLPerf exec mode + output_dir: + desc: MLPerf output directory + concurrency: + desc: MLPerf concurrency + intraop: + desc: MLPerf intra op threads + interop: + desc: MLPerf inter op threads + + +docker: + skip_run_cmd: 'no' + input_paths: + - modelpath + - modelsamplepath + - env.CM_ML_MODEL_FILE_WITH_PATH + - env.CM_ML_MODEL_CODE_WITH_PATH + - output_dir + - repro_dir + skip_input_for_fake_run: + - modelpath + - modelsamplepath + - env.CM_ML_MODEL_FILE_WITH_PATH + - env.CM_ML_MODEL_CODE_WITH_PATH + - output_dir + - scenario + - runner + - repro_dir + - concurrency + - intraop + - interop + - execmode + - samples + - modelcfg.num_classes + - modelcfg.config + - repro diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/customize.py new file mode 100644 index 000000000..34720c052 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/customize.py @@ -0,0 +1,117 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +# Developer: Grigori Fursin + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if 'CM_ML_MODEL_FILE_WITH_PATH' not in env: + return { + 'return': 1, 'error': 'Please select a variation specifying the model to run'} + + run_opts = env.get('CM_RUN_OPTS', '') + + if env.get('CM_MLPERF_BACKEND', '') != '': + run_opts += " -b " + env['CM_MLPERF_BACKEND'] + + if env.get('CM_MLPERF_RUNNER', '') != '': + run_opts += " -r " + env['CM_MLPERF_RUNNER'] + + if env.get('CM_MLPERF_CONCURRENCY', '') != '': + run_opts += " --concurrency " + env['CM_MLPERF_CONCURRENCY'] + + if env.get('CM_MLPERF_EXECUTION_PROVIDER', '') != '': + run_opts += " --ep " + env['CM_MLPERF_EXECUTION_PROVIDER'] + + if env.get('CM_MLPERF_INTRAOP', '') != '': + run_opts += " --intraop " + env['CM_MLPERF_INTRAOP'] + + if env.get('CM_MLPERF_INTEROP', '') != '': + run_opts += " --interop " + env['CM_MLPERF_INTEROP'] + + if env.get('CM_MLPERF_EXECMODE', '') != '': + run_opts += " --execmode " + env['CM_MLPERF_EXECUTION_MODE'] + + if env.get('CM_MLPERF_LOADGEN_SAMPLES', '') != '': + run_opts += " --samples " + env['CM_MLPERF_LOADGEN_SAMPLES'] + + if env.get('CM_MLPERF_LOADGEN_EXPECTED_QPS', '') != '': + run_opts += " --loadgen_expected_qps " + \ + env['CM_MLPERF_LOADGEN_EXPECTED_QPS'] + + if env.get('CM_MLPERF_LOADGEN_DURATION_SEC', '') != '': + run_opts += " --loadgen_duration_sec " + \ + env['CM_MLPERF_LOADGEN_DURATION_SEC'] + + if env.get('CM_MLPERF_OUTPUT_DIR', '') != '': + run_opts += " --output " + env['CM_MLPERF_OUTPUT_DIR'] + + if env.get('CM_ML_MODEL_CODE_WITH_PATH', '') != '': + run_opts += " --model_code " + env['CM_ML_MODEL_CODE_WITH_PATH'] + + if env.get('CM_ML_MODEL_CFG_WITH_PATH', '') != '': + run_opts += " --model_cfg " + env['CM_ML_MODEL_CFG_WITH_PATH'] + else: + # Check cfg from command line + cfg = env.get('CM_ML_MODEL_CFG', {}) + if len(cfg) > 0: + del (env['CM_ML_MODEL_CFG']) + + import json + import tempfile + tfile = tempfile.NamedTemporaryFile(mode="w+", 
suffix='.json') + + fd, tfile = tempfile.mkstemp(suffix='.json', prefix='cm-cfg-') + os.close(fd) + + with open(tfile, 'w') as fd: + json.dump(cfg, fd) + + env['CM_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE'] = tfile + + run_opts += " --model_cfg " + tfile + + if env.get('CM_ML_MODEL_SAMPLE_WITH_PATH', '') != '': + run_opts += " --model_sample_pickle " + \ + env['CM_ML_MODEL_SAMPLE_WITH_PATH'] + + # Add path to file model weights at the end of command line + + run_opts += ' ' + env['CM_ML_MODEL_FILE_WITH_PATH'] + + env['CM_RUN_OPTS'] = run_opts + + print('') + print('Assembled flags: {}'.format(run_opts)) + print('') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + tfile = env.get('CM_APP_LOADGEN_GENERIC_PYTHON_TMP_CFG_FILE', '') + + if tfile != '' and os.path.isfile(tfile): + os.remove(tfile) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/run.bat b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/run.bat new file mode 100644 index 000000000..3d4b5d58b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/run.bat @@ -0,0 +1,4 @@ +rem native script + +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\main.py %CM_RUN_OPTS% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/run.sh new file mode 100644 index 000000000..2a13312f0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/src/main.py ${CM_RUN_OPTS} +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/backend_onnxruntime.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/backend_onnxruntime.py new file mode 100644 index 000000000..371f44ffb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/backend_onnxruntime.py @@ -0,0 +1,92 @@ +import typing + +import numpy as np +import onnx +import onnxruntime as ort + +from loadgen.model import Model, ModelFactory, ModelInput, ModelInputSampler + +xinput = input + +ONNX_TO_NP_TYPE_MAP = { + "tensor(bool)": bool, + "tensor(int)": np.int32, + "tensor(int32)": np.int32, + "tensor(int8)": np.int8, + "tensor(uint8)": np.uint8, + "tensor(int16)": np.int16, + "tensor(uint16)": np.uint16, + "tensor(uint64)": np.uint64, + "tensor(int64)": np.int64, + "tensor(float16)": np.float16, + "tensor(float)": np.float32, + "tensor(double)": np.float64, + "tensor(string)": np.string_, +} + + +class XModel(Model): + def __init__(self, session: ort.InferenceSession): + assert session is not None + self.session = session + + def predict(self, input: ModelInput): + output = self.session.run(None, input) + return output + + +class XModelFactory(ModelFactory): + def __init__( + self, + model_path: str, + execution_provider="CPUExecutionProvider", + execution_mode="", + intra_op_threads=0, + inter_op_threads=0, + model_code='', # Not used here + model_cfg={}, # Not used here + model_sample_pickle='' # Not used here + ): + self.model_path = model_path + self.execution_provider = execution_provider + self.session_options = ort.SessionOptions() + if execution_mode.lower() == "sequential": + self.session_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL + elif execution_mode.lower() == "parallel": + self.session_options.execution_mode = 
ort.ExecutionMode.ORT_PARALLEL + self.session_options.intra_op_num_threads = intra_op_threads + self.session_options.inter_op_num_threads = inter_op_threads + + def create(self) -> Model: + print('Loading model: {}'.format(self.model_path)) +# model = onnx.load(self.model_path) + session_eps = [self.execution_provider] + session = ort.InferenceSession( + # model.SerializeToString(), self.session_options, + # providers=session_eps + self.model_path, self.session_options, providers=session_eps + ) + return XModel(session) + + +class XModelInputSampler(ModelInputSampler): + def __init__(self, model_factory: XModelFactory): + model = model_factory.create() + input_defs = model.session.get_inputs() + self.inputs: typing.Dict[str, + typing.Tuple[np.dtype, + typing.List[int]]] = dict() + for input in input_defs: + input_name = input.name + input_type = ONNX_TO_NP_TYPE_MAP[input.type] + input_dim = [ + 1 if (x is None or (isinstance(x, str))) else x for x in input.shape + ] + self.inputs[input_name] = (input_type, input_dim) + + def sample(self, id_: int) -> ModelInput: + input = dict() + for name, spec in self.inputs.items(): + val = np.random.random_sample(spec[1]).astype(spec[0]) + input[name] = val + return input diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/backend_pytorch.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/backend_pytorch.py new file mode 100644 index 000000000..6fb716028 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/backend_pytorch.py @@ -0,0 +1,132 @@ +# Developer: Grigori Fursin + +import typing +import importlib +import os +import psutil + +import utils + +import numpy as np + +import torch + +from loadgen.model import Model, ModelFactory, ModelInput, ModelInputSampler + + +xinput = input + + +class XModel(Model): + def __init__(self, session): + assert session is not None + self.session = session + + def predict(self, input: ModelInput): + + print('') + utils.print_host_memory_use('Host memory used') + + print('Running inference ...') + with torch.no_grad(): + output = self.session(input) + + utils.print_host_memory_use('Host memory used') + + return output + + +class XModelFactory(ModelFactory): + def __init__( + self, + model_path: str, + execution_provider="CPUExecutionProvider", + execution_mode="", + intra_op_threads=0, + inter_op_threads=0, + model_code='', + model_cfg={}, + model_sample_pickle='' + ): + + self.model_path = model_path + self.model_code = model_code + self.model_cfg = model_cfg + self.model_sample_pickle = model_sample_pickle + self.execution_provider = execution_provider + + def create(self) -> Model: + print('') + print('Loading model: {}'.format(self.model_path)) + + if self.execution_provider == 'CPUExecutionProvider': + torch_provider = 'cpu' + elif self.execution_provider == 'CUDAExecutionProvider': + torch_provider = 'cuda' + if not torch.cuda.is_available(): + raise Exception( + 'Error: CUDA is forced but not available or installed in PyTorch!') + else: + raise Exception( + 'Error: execution provider is unknown ({})!'.format( + self.execution_provider)) + + checkpoint = torch.load(self.model_path, + map_location=torch.device(torch_provider)) + + if self.model_code == '': + raise Exception('Error: path to model code was not provided!') + + if self.model_sample_pickle == '': + raise Exception( + 'Error: path to model sample pickle was not provided!') + + # Load sample + import pickle + with open(self.model_sample_pickle, 'rb') as handle: + self.input_sample = 
pickle.load(handle)
+
+        # Check if has CM connector
+        cm_model_module = os.path.join(self.model_code, 'cmc.py')
+        if not os.path.isfile(cm_model_module):
+            raise Exception(
+                'cmc.py interface for a PyTorch model was not found in {}'.format(
+                    self.model_code))
+
+        print('')
+        print('Collective Mind Connector for the model found: {}'.format(
+            cm_model_module))
+
+        # Load CM interface for the model
+        import sys
+        sys.path.insert(0, self.model_code)
+        model_module = importlib.import_module('cmc')
+        del (sys.path[0])
+
+        # Init model
+        if len(self.model_cfg) > 0:
+            print('Model cfg: {}'.format(self.model_cfg))
+
+        r = model_module.model_init(checkpoint, self.model_cfg)
+        if r['return'] > 0:
+            raise Exception('Error: {}'.format(r['error']))
+
+        model = r['model']
+
+        if torch_provider == 'cuda':
+            model.cuda()
+
+        model.eval()
+
+        return XModel(model)
+
+
+class XModelInputSampler(ModelInputSampler):
+    def __init__(self, model_factory: XModelFactory):
+        model = model_factory.create()
+        self.input_sample = model_factory.input_sample
+        return
+
+    def sample(self, id_: int) -> ModelInput:
+        input = self.input_sample
+        return input
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/harness.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/harness.py
new file mode 100644
index 000000000..a8fdd4e86
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/harness.py
@@ -0,0 +1,77 @@
+import abc
+import contextlib
+import logging
+import typing
+
+import mlperf_loadgen
+
+from loadgen.model import ModelInput, ModelInputSampler
+
+logger = logging.getLogger(__name__)
+
+
+QueryInput = typing.Dict[int, ModelInput]
+QueryResult = typing.Dict[int, typing.Any]
+
+
+class ModelRunner(contextlib.AbstractContextManager):
+    @abc.abstractmethod
+    def issue_query(self, query: QueryInput) -> typing.Optional[QueryResult]:
+        pass
+
+    # Optional method to flush pending queries
+    def flush_queries(self) -> typing.Optional[QueryResult]:
+        pass
+
+    def __exit__(self, _exc_type, _exc_value, _traceback):
+        logger.info(f"{self} : Exited")
+        return None
+
+
+class Harness:
+    def __init__(self, sampler: ModelInputSampler, runner: ModelRunner):
+        self.sampler = sampler
+        self.runner = runner
+        self.samples = None
+
+    def load_query_samples(self, query_samples):
+        assert self.samples is None
+        self.samples = dict()
+        for query_id in query_samples:
+            self.samples[query_id] = self.sampler.sample(query_id)
+        logger.info(f"Loaded {len(self.samples)} samples")
+
+    def unload_query_samples(self, _query_samples):
+        assert self.samples is not None
+        logger.info("Unloaded samples")
+        self.samples = None
+
+    def issue_query(self, query_samples):
+        query_input = dict()
+        for q in query_samples:
+            # logger.info(f"Query Id: {q.id}, SampleIndex: {q.index}")
+            input = self.samples[q.index]
+            query_input[q.id] = input
+        result = self.runner.issue_query(query_input)
+        logger.info(f"Queries issued {len(query_input)}")
+        if result is not None:
+            self._complete_query(result)
+
+    # Called after the last call to issue queries in a series is made.
+    # Client can use this to flush any deferred queries rather than waiting
+    # for a timeout.
+    def flush_queries(self):
+        result = self.runner.flush_queries()
+        logger.info("Queries flushed")
+        if result is not None:
+            self._complete_query(result)
+
+    def _complete_query(self, result: QueryResult):
+        responses = []
+        for query_id, _query_result in result.items():
+            response_data, response_size = 0, 0
+            response = mlperf_loadgen.QuerySampleResponse(
+                query_id, response_data, response_size
+            )
+            responses.append(response)
+        mlperf_loadgen.QuerySamplesComplete(responses)
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/model.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/model.py
new file mode 100644
index 000000000..8bb7dbf04
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/model.py
@@ -0,0 +1,24 @@
+import abc
+import typing
+
+import numpy as np
+
+ModelInput = typing.Dict[str, np.ndarray]
+
+
+class Model(abc.ABC):
+    @abc.abstractmethod
+    def predict(self, input: ModelInput) -> typing.Any:
+        pass
+
+
+class ModelFactory(abc.ABC):
+    @abc.abstractmethod
+    def create(self) -> Model:
+        pass
+
+
+class ModelInputSampler(abc.ABC):
+    @abc.abstractmethod
+    def sample(self, id: int) -> ModelInput:
+        pass
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/runners.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/runners.py
new file mode 100644
index 000000000..9c813a027
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/loadgen/runners.py
@@ -0,0 +1,188 @@
+import abc
+import concurrent.futures
+import logging
+import multiprocessing
+import threading
+import typing
+
+from loadgen.harness import ModelRunner, QueryInput, QueryResult
+from loadgen.model import Model, ModelFactory, ModelInput
+
+logger = logging.getLogger(__name__)
+
+# Runner implementations
+
+
+class ModelRunnerInline(ModelRunner):
+    def __init__(self, model_factory: ModelFactory):
+        self.model = model_factory.create()
+
+    def issue_query(self, queries: QueryInput) -> typing.Optional[QueryResult]:
+        result = dict()
+        for query_id, model_input in queries.items():
+            output = self.model.predict(model_input)
+            result[query_id] = output
+        return result
+
+
+class ModelRunnerPoolExecutor(ModelRunner):
+    def __init__(self):
+        self.executor: concurrent.futures.Executor = None
+        self.futures = None
+
+    def __exit__(self, _exc_type, _exc_value, _traceback):
+        if self.executor:
+            self.executor.shutdown(True)
+        return super().__exit__(_exc_type, _exc_value, _traceback)
+
+    def issue_query(self, queries: QueryInput) -> typing.Optional[QueryResult]:
+        self.futures = dict()
+        predictor_fn = self.get_predictor()
+        for query_id, model_input in queries.items():
+            f = self.executor.submit(predictor_fn, model_input)
+            self.futures[f] = query_id
+        return None
+
+    def flush_queries(self) -> typing.Optional[QueryResult]:
+        result = dict()
+        for future in concurrent.futures.as_completed(self.futures.keys()):
+            query_id = self.futures[future]
+            query_result = future.result()
+            result[query_id] = query_result
+        return result
+
+    @abc.abstractmethod
+    def get_predictor(self) -> typing.Callable[[ModelInput], typing.Any]:
+        pass
+
+
+class ModelRunnerThreadPoolExecutor(ModelRunnerPoolExecutor):
+    def __init__(self, model_factory: ModelFactory, max_concurrency: int):
+        super().__init__()
+        self.model = model_factory.create()
+        self.max_concurrency = max_concurrency
+
+    def __enter__(self):
+        self.executor = concurrent.futures.ThreadPoolExecutor(
+            max_workers=self.max_concurrency, thread_name_prefix="LoadGen"
+        )
+        return self
+
+    def get_predictor(self) -> typing.Callable[[ModelInput], typing.Any]:
+        return self.model.predict
+
+
+class ModelRunnerThreadPoolExecutorWithTLS(ModelRunnerPoolExecutor):
+    tls: threading.local
+
+    def __init__(self, model_factory: ModelFactory, max_concurrency: int):
+        super().__init__()
+        self.model_factory = model_factory
+        self.max_concurrency = max_concurrency
+
+    def __enter__(self):
+        self.executor = concurrent.futures.ThreadPoolExecutor(
+            max_workers=self.max_concurrency,
+            thread_name_prefix="LoadGen",
+            initializer=ModelRunnerThreadPoolExecutorWithTLS._tls_init,
+            initargs=(self.model_factory,),
+        )
+        return self
+
+    def get_predictor(self) -> typing.Callable[[ModelInput], typing.Any]:
+        return ModelRunnerThreadPoolExecutorWithTLS._tls_predict
+
+    @staticmethod
+    def _tls_init(model_factory: ModelFactory):
+        ModelRunnerThreadPoolExecutorWithTLS.tls = threading.local()
+        ModelRunnerThreadPoolExecutorWithTLS.tls.model = model_factory.create()
+
+    @staticmethod
+    def _tls_predict(input: ModelInput):
+        return ModelRunnerThreadPoolExecutorWithTLS.tls.model.predict(input)
+
+
+class ModelRunnerProcessPoolExecutor(ModelRunnerPoolExecutor):
+    _model: Model
+
+    def __init__(self, model_factory: ModelFactory, max_concurrency: int):
+        super().__init__()
+        self.max_concurrency = max_concurrency
+        ModelRunnerProcessPoolExecutor._model = model_factory.create()
+
+    def __enter__(self):
+        self.executor = concurrent.futures.ProcessPoolExecutor(
+            max_workers=self.max_concurrency
+        )
+        return self
+
+    def get_predictor(self) -> typing.Callable[[ModelInput], typing.Any]:
+        return ModelRunnerProcessPoolExecutor._predict
+
+    @staticmethod
+    def _predict(input: ModelInput):
+        result = ModelRunnerProcessPoolExecutor._model.predict(input)
+        return result
+
+
+class ModelRunnerMultiProcessingPool(ModelRunner):
+    _model: Model
+
+    def __init__(
+        self,
+        model_factory: ModelFactory,
+        max_concurrency: int,
+    ):
+        self.max_concurrency = max_concurrency
+        self.task = None  # async result from Pool.starmap_async
+        ModelRunnerMultiProcessingPool._model = model_factory.create()
+
+    def __enter__(self):
+        self.pool = multiprocessing.Pool(self.max_concurrency)
+        return self
+
+    def __exit__(self, _exc_type, _exc_value, _traceback):
+        if self.pool:
+            self.pool.terminate()
+        return super().__exit__(_exc_type, _exc_value, _traceback)
+
+    def issue_query(self, queries: QueryInput) -> typing.Optional[QueryResult]:
+        if hasattr(self, "tasks"):
+            assert len(self.tasks) == 0
+            for query_id, model_input in queries.items():
+                task = self.pool.apply_async(
+                    ModelRunnerMultiProcessingPool._predict, (model_input,)
+                )
+                self.tasks[task] = query_id
+        else:
+            assert self.task is None
+            inputs = [
+                [query_id, model_input] for query_id, model_input in queries.items()
+            ]
+            self.task = self.pool.starmap_async(
+                ModelRunnerMultiProcessingPool._predict_with_id, inputs
+            )
+        return None
+
+    def flush_queries(self) -> typing.Optional[QueryResult]:
+        if hasattr(self, "tasks"):
+            result = dict()
+            for task, query_id in self.tasks.items():
+                task_result = task.get()
+                result[query_id] = task_result
+            return result
+        else:
+            task_result = self.task.get()
+            result = {
+                query_id: query_result for query_id,
+                query_result in task_result}
+            return result
+
+    @staticmethod
+    def _predict(input: ModelInput):
+        result = ModelRunnerMultiProcessingPool._model.predict(input)
+        return result
+
+    @staticmethod
+    def _predict_with_id(query_id: int, input: ModelInput):
+        result = ModelRunnerMultiProcessingPool._model.predict(input)
+        return (query_id, result)
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/main.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/main.py
new file mode 100644
index 000000000..58f929132
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/main.py
@@ -0,0 +1,274 @@
+import argparse
+import contextlib
+import logging
+import os
+import re
+import typing
+
+import mlperf_loadgen
+import psutil
+
+from loadgen.harness import Harness, ModelRunner
+from loadgen.runners import (
+    ModelRunnerInline,
+    ModelRunnerMultiProcessingPool,
+    ModelRunnerProcessPoolExecutor,
+    ModelRunnerThreadPoolExecutor,
+    ModelRunnerThreadPoolExecutorWithTLS,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def main(
+    backend: str,
+    model_path: str,
+    model_code: str,
+    model_cfg: str,
+    model_sample_pickle: str,
+    output_path: typing.Optional[str],
+    runner_name: str,
+    runner_concurrency: int,
+    execution_provider: str,
+    execution_mode: str,
+    intraop_threads: int,
+    interop_threads: int,
+    samples: int,
+    loadgen_expected_qps: float,
+    loadgen_duration_sec: float
+):
+
+    print('=====================================================================')
+
+    if backend == 'onnxruntime':
+        from backend_onnxruntime import XModelFactory
+        from backend_onnxruntime import XModelInputSampler
+    elif backend == 'pytorch':
+        from backend_pytorch import XModelFactory
+        from backend_pytorch import XModelInputSampler
+    else:
+        raise Exception("Error: backend is not recognized.")
+
+    # Load model cfg
+    model_cfg_dict = {}
+    if model_cfg != '':
+        import json
+
+        with open(model_cfg) as mc:
+            model_cfg_dict = json.load(mc)
+
+    model_factory = XModelFactory(
+        model_path,
+        execution_provider,
+        execution_mode,
+        intraop_threads,
+        interop_threads,
+        model_code,
+        model_cfg_dict,
+        model_sample_pickle
+    )
+
+    model_dataset = XModelInputSampler(model_factory)
+
+    runner: ModelRunner = None
+    if runner_name == "inline":
+        runner = ModelRunnerInline(model_factory)
+    elif runner_name == "threadpool":
+        runner = ModelRunnerThreadPoolExecutor(
+            model_factory, max_concurrency=runner_concurrency
+        )
+    elif runner_name == "threadpool+replication":
+        runner = ModelRunnerThreadPoolExecutorWithTLS(
+            model_factory, max_concurrency=runner_concurrency
+        )
+    elif runner_name == "processpool":
+        runner = ModelRunnerProcessPoolExecutor(
+            model_factory, max_concurrency=runner_concurrency
+        )
+    elif runner_name == "processpool+mp":
+        runner = ModelRunnerMultiProcessingPool(
+            model_factory, max_concurrency=runner_concurrency
+        )
+    else:
+        raise ValueError(f"Invalid runner {runner_name}")
+
+    settings = mlperf_loadgen.TestSettings()
+
+    settings.scenario = mlperf_loadgen.TestScenario.Offline
+    settings.mode = mlperf_loadgen.TestMode.PerformanceOnly
+    settings.offline_expected_qps = loadgen_expected_qps
+    settings.min_query_count = samples
+    settings.max_query_count = samples
+    settings.min_duration_ms = loadgen_duration_sec * 1000
+    # Duration isn't enforced in offline mode.
+    # Instead, it is used to determine the total sample count via
+    # target_sample_count = Slack (1.1) * TargetQPS * TargetDuration
+    # samples_per_query = Max(min_query_count, target_sample_count)
+
+    output_path = "results" if not output_path else output_path
+    output_path = os.path.join(
+        output_path,
+        os.path.basename(model_path),
+        runner_name)
+    os.makedirs(output_path, exist_ok=True)
+
+    output_settings = mlperf_loadgen.LogOutputSettings()
+    output_settings.outdir = output_path
+    output_settings.copy_summary_to_stdout = True
+
+    log_settings = mlperf_loadgen.LogSettings()
+    log_settings.log_output = output_settings
+    log_settings.enable_trace = False
+
+    logger.info(f"Model: {model_path}")
+    logger.info(f"Runner: {runner_name}, Concurrency: {runner_concurrency}")
+    logger.info(f"Results: {output_path}")
+
+    with contextlib.ExitStack() as stack:
+        stack.enter_context(runner)
+        harness = Harness(model_dataset, runner)
+
+        query_sample_library = mlperf_loadgen.ConstructQSL(
+            samples,  # Total sample count
+            samples,  # Num to load in RAM at a time
+            harness.load_query_samples,
+            harness.unload_query_samples,
+        )
+        system_under_test = mlperf_loadgen.ConstructSUT(
+            harness.issue_query, harness.flush_queries
+        )
+
+        print('=====================================================================')
+        logger.info("Test Started")
+
+        mlperf_loadgen.StartTestWithLogSettings(
+            system_under_test, query_sample_library, settings, log_settings
+        )
+
+        logger.info("Test Finished")
+        print('=====================================================================')
+
+        # Parse output file
+        output_summary = {}
+        output_summary_path = os.path.join(
+            output_path, "mlperf_log_summary.txt")
+        with open(output_summary_path, "r") as output_summary_file:
+            for line in output_summary_file:
+                m = re.match(
+                    r"^\s*([\w\s.\(\)\/]+)\s*\:\s*([\w\+\.]+).*", line)
+                if m:
+                    output_summary[m.group(1).strip()] = m.group(2).strip()
+        logger.info(
+            "Observed QPS: " +
+            output_summary.get("Samples per second"))
+        logger.info("Result: " + output_summary.get("Result is"))
+
+        mlperf_loadgen.DestroySUT(system_under_test)
+        mlperf_loadgen.DestroyQSL(query_sample_library)
+        logger.info("Test Completed")
+        print('=====================================================================')
+
+
+if __name__ == "__main__":
+    print('')
+
+    logging.basicConfig(
+        level=logging.DEBUG,
+        format="%(asctime)s %(levelname)s %(threadName)s - %(name)s %(funcName)s: %(message)s",
+    )
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "model_path", help="path to input model", default="models/yolov5s.onnx"
+    )
+    parser.add_argument(
+        "-b",
+        "--backend",
+        help="backend",
+        default="onnxruntime")
+    parser.add_argument("-o", "--output", help="path to store loadgen results")
+    parser.add_argument(
+        "-r",
+        "--runner",
+        help="model runner",
+        choices=[
+            "inline",
+            "threadpool",
+            "threadpool+replication",
+            "processpool",
+            "processpool+mp",
+        ],
+        default="inline",
+    )
+    parser.add_argument(
+        "--concurrency",
+        help="concurrency count for runner",
+        default=psutil.cpu_count(False),
+        type=int,
+    )
+    parser.add_argument(
+        "--ep", help="Execution Provider", default="CPUExecutionProvider"
+    )
+    parser.add_argument(
+        "--intraop",
+        help="IntraOp threads",
+        default=0,
+        type=int)
+    parser.add_argument(
+        "--interop",
+        help="InterOp threads",
+        default=0,
+        type=int)
+    parser.add_argument(
+        "--execmode",
+        help="Execution Mode",
+        choices=["sequential", "parallel"],
+        default="sequential",
+    )
+    parser.add_argument(
+        "--samples",
+        help="number of samples",
+        default=100,
+        type=int,
+    )
+    parser.add_argument(
+        "--loadgen_expected_qps",
+        help="Expected QPS",
+        default=1,
+        type=float)
+    parser.add_argument(
+        "--loadgen_duration_sec",
+        help="Expected duration in sec.",
+        default=1,
+        type=float)
+    parser.add_argument(
+        "--model_code",
+        help="(for PyTorch models) path to model code with cmc.py",
+        default="")
+    parser.add_argument(
+        "--model_cfg",
+        help="(for PyTorch 
models) path to model's configuration in JSON file", + default="") + parser.add_argument( + "--model_sample_pickle", + help="(for PyTorch models) path to a model sample in pickle format", + default="") + + args = parser.parse_args() + main( + args.backend, + args.model_path, + args.model_code, + args.model_cfg, + args.model_sample_pickle, + args.output, + args.runner, + args.concurrency, + args.ep, + args.execmode, + args.intraop, + args.interop, + args.samples, + args.loadgen_expected_qps, + args.loadgen_duration_sec + ) diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/utils.py b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/utils.py new file mode 100644 index 000000000..f7b0bfd7d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/src/utils.py @@ -0,0 +1,18 @@ +# Developer: Grigori Fursin + +import os +import psutil + + +def print_host_memory_use(text=''): + + pid = os.getpid() + python_process = psutil.Process(pid) + memoryUse = python_process.memory_info()[0] + + if text == '': + text = 'host memory use' + + print('{}: {} MB'.format(text, int(memoryUse / 1000000))) + + return diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat new file mode 100644 index 000000000..c7154832f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.bat @@ -0,0 +1,7 @@ +rem set CM_CACHE=--no-cache + +set CM_DOCKER_ORG=modularcm +set CM_DOCKER_NAME=loadgen-generic-python +set CM_OS_NAME=ubuntu +set CM_HW_TARGET=cpu +set CM_OS_VERSION=22.04 diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh new file mode 100644 index 000000000..5f49d3be9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/_common.sh @@ -0,0 +1,10 @@ +#! /bin/bash + +#export CM_CACHE="--no-cache" + +export CM_DOCKER_ORG=modularcm +export CM_DOCKER_NAME="loadgen-generic-python" +export CM_OS_NAME="ubuntu" +export CM_HW_TARGET="cpu" +export CM_OS_VERSION="22.04" + diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat new file mode 100644 index 000000000..f51ea46b6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/build.bat @@ -0,0 +1,16 @@ +call _common.bat + +docker build -f %CM_DOCKER_NAME%--%CM_OS_NAME%-%CM_HW_TARGET%.Dockerfile ^ + -t %CM_DOCKER_ORG%/%CM_DOCKER_NAME%-%CM_HW_TARGET%:%CM_OS_NAME%-%CM_OS_VERSION% ^ + --build-arg cm_os_name=%CM_OS_NAME% ^ + --build-arg cm_hw_target=%CM_HW_TARGET% ^ + --build-arg cm_os_version=%CM_OS_VERSION% ^ + --build-arg cm_version="" ^ + --build-arg cm_automation_repo="ctuning@mlcommons-ck" ^ + --build-arg cm_automation_checkout="" ^ + --build-arg cm_python_version="3.10.8" ^ + --build-arg cm_mlperf_inference_loadgen_version="" ^ + --build-arg cm_mlperf_inference_src_tags="" ^ + --build-arg cm_mlperf_inference_src_version="" ^ + --build-arg CM_ONNXRUNTIME_VERSION="1.13.1" ^ + %CM_CACHE% . 
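For orientation: the flags assembled by customize.py above map one-to-one onto the argparse options defined in src/main.py. Below is a minimal sketch of a standalone invocation outside of the CM wrapper, mirroring what run.sh executes once CM_RUN_OPTS is assembled; the model path and flag values are hypothetical placeholders, while the flag names come from the argparse spec above.

    # Minimal sketch: invoke the generic loadgen app directly, mirroring run.sh.
    # The flag names come from the argparse spec in src/main.py; the model path
    # and values below are hypothetical placeholders.
    import os
    import subprocess

    cmd = [
        os.environ.get("CM_PYTHON_BIN_WITH_PATH", "python3"),
        "src/main.py",
        "-b", "onnxruntime",             # backend
        "-r", "inline",                  # runner
        "--ep", "CPUExecutionProvider",  # ONNX execution provider
        "--samples", "10",               # loadgen sample count
        "--output", "results",           # loadgen output directory
        "models/yolov5s.onnx",           # positional model_path (hypothetical)
    ]
    subprocess.run(cmd, check=True)
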
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh new file mode 100644 index 000000000..186a0eae9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/build.sh @@ -0,0 +1,18 @@ +#! /bin/bash + +. ./_common.sh + +time docker build -f ${CM_DOCKER_NAME}--${CM_OS_NAME}-${CM_HW_TARGET}.Dockerfile \ + -t ${CM_DOCKER_ORG}/${CM_DOCKER_NAME}-${CM_HW_TARGET}:${CM_OS_NAME}-${CM_OS_VERSION} \ + --build-arg cm_os_name=${CM_OS_NAME} \ + --build-arg cm_hw_target=${CM_HW_TARGET} \ + --build-arg cm_os_version=${CM_OS_VERSION} \ + --build-arg cm_version="" \ + --build-arg cm_automation_repo="ctuning@mlcommons-ck" \ + --build-arg cm_automation_checkout="" \ + --build-arg cm_python_version="3.10.8" \ + --build-arg cm_mlperf_inference_loadgen_version="" \ + --build-arg cm_mlperf_inference_src_tags="" \ + --build-arg cm_mlperf_inference_src_version="" \ + --build-arg CM_ONNXRUNTIME_VERSION="1.13.1" \ + ${CM_CACHE} . diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile new file mode 100644 index 000000000..c82296c66 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python--ubuntu-cpu.Dockerfile @@ -0,0 +1,96 @@ +# Modular MLPerf container with the MLCommons CM automation meta-framework + +# Preparing OS +ARG cm_os_name="ubuntu" +ARG cm_os_version="22.04" + +FROM ${cm_os_name}:${cm_os_version} + +# Maintained by the MLCommons taskforce on automation and reproducibility and OctoML +LABEL github="https://github.com/mlcommons/ck" +LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" + +# Customization +ARG CM_GH_TOKEN + +# Prepare shell and entry point +SHELL ["/bin/bash", "-c"] +ENTRYPOINT ["/bin/bash", "-c"] + +# Install system dependencies +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +RUN apt-get update -y +RUN apt-get install -y lsb-release +RUN apt-get install -y python3 python3-pip git wget sudo + +# Extra python deps +RUN python3 -m pip install requests + +# CM version +ARG cm_version="" +ENV CM_VERSION="${cm_version}" +RUN if [ "${CM_VERSION}" != "" ] ; then \ + python3 -m pip install cmind==${CM_VERSION} ; \ + else \ + python3 -m pip install cmind ; \ + fi + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +# See example in https://github.com/mlcommons/GaNDLF/blob/master/Dockerfile-CPU +RUN groupadd --gid 10001 cm +RUN useradd --uid 10000 -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + +USER cmuser:cm +WORKDIR /home/cmuser + +# Check CM installation +RUN lsb_release -a > sys-version-os.log +RUN uname -a > sys-version-kernel.log +RUN python3 --version > sys-version-python3.log +RUN cm version > sys-version-cm.log + +################################################################################ +# Get CM automation repository +ARG cm_automation_repo="mlcommons@ck" +ARG cm_automation_repo_checkout="" +ENV CM_AUTOMATION_REPO=${cm_automation_repo} +ENV 
CM_AUTOMATION_REPO_CHECKOUT=${cm_automation_repo_checkout} +RUN echo ${CM_AUTOMATION_REPO} +RUN cm pull repo ${CM_AUTOMATION_REPO} --checkout=${CM_AUTOMATION_REPO_CHECKOUT} + +################################################################################ +# Install CM system dependencies +RUN cm run script "get sys-utils-cm" --quiet + +# Detect/install python +ARG cm_python_version="" +RUN cm run script "get python3" --version=${cm_python_version} + +################################################################################ +# Build MLPerf loadgen +ARG cm_mlperf_inference_loadgen_version="" +RUN cm run script "get mlperf loadgen" --adr.compiler.tags=gcc --version=${cm_mlperf_inference_loadgen_version} --adr.inference-src-loadgen.version=${cm_mlperf_inference_loadgen_version} -v + +################################################################################ +# Install ONNX runtime +ARG CM_ONNXRUNTIME_VERSION="" +RUN cm run script "get generic-python-lib _onnxruntime" --version=${CM_ONNXRUNTIME_VERSION} + +ARG CM_MLPERF_CHOICE_BACKEND="onnxruntime" +ARG CM_MLPERF_CHOICE_DEVICE="cpu" + +RUN cm run script --tags=python,app,loadgen-generic,_onnxruntime,_resnet50 \ + --adr.compiler.tags=gcc \ + --adr.python.version_min=3.8 \ + --quiet \ + --fake_run + +################################################################################ +# CMD entry point +CMD /bin/bash diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile new file mode 100644 index 000000000..195acdec6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/loadgen-generic-python-auto.Dockerfile @@ -0,0 +1,33 @@ +FROM ubuntu:20.04 +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +ENV PATH=${PATH}:$HOME/.local/bin +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo ctuning@mlcommons-ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +RUN cm run script --quiet --tags=python,app,loadgen-generic,_onnxruntime,_resnet50 --fake_run diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat new file mode 100644 index 000000000..171aeecab --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/run.bat @@ -0,0 +1,3 @@ +call _common.bat + +docker run -it %CM_DOCKER_ORG%/%CM_DOCKER_NAME%-%CM_HW_TARGET%:%CM_OS_NAME%-%CM_OS_VERSION% diff --git a/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh new 
file mode 100644
index 000000000..c82d4b7b1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-loadgen-generic-python/tests/modular-cm-containers/run.sh
@@ -0,0 +1,3 @@
+. ./_common.sh
+
+docker run -it ${CM_DOCKER_ORG}/${CM_DOCKER_NAME}-${CM_HW_TARGET}:${CM_OS_NAME}-${CM_OS_VERSION}
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/_cm.yaml
new file mode 100644
index 000000000..305578a17
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/_cm.yaml
@@ -0,0 +1,341 @@
+# Identification of this CM script
+alias: app-mlperf-inference-amd
+uid: 467cdb20aabc4394
+cache: false
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: "Modular MLPerf benchmarks"
+
+
+# User-friendly tags to find this CM script
+tags:
+  - reproduce
+  - mlcommons
+  - mlperf
+  - inference
+  - harness
+  - amd-harness
+  - amd
+
+# Default environment
+default_env:
+  CM_MLPERF_LOADGEN_SCENARIO: Offline
+  CM_MLPERF_LOADGEN_MODE: performance
+  CM_SKIP_PREPROCESS_DATASET: 'no'
+  CM_SKIP_MODEL_DOWNLOAD: 'no'
+  CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: redhat_harness
+  CM_MLPERF_SKIP_RUN: 'no'
+
+env:
+  CM_CALL_MLPERF_RUNNER: 'no'
+
+# Map script inputs to environment variables
+input_mapping:
+  count: CM_MLPERF_LOADGEN_QUERY_COUNT
+  max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
+  mlperf_conf: CM_MLPERF_CONF
+  mode: CM_MLPERF_LOADGEN_MODE
+  output_dir: CM_MLPERF_OUTPUT_DIR
+  performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+  scenario: CM_MLPERF_LOADGEN_SCENARIO
+  user_conf: CM_MLPERF_USER_CONF
+  skip_preprocess: CM_SKIP_PREPROCESS_DATASET
+  skip_preprocessing: CM_SKIP_PREPROCESS_DATASET
+  target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
+  offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+  server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
+  singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+  multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+  rerun: CM_RERUN
+  results_repo: CM_MLPERF_INFERENCE_RESULTS_REPO
+
+new_state_keys:
+  - mlperf-inference-implementation
+  - CM_SUT_*
+
+# Env keys which are exposed to higher level scripts
+new_env_keys:
+  - CM_MLPERF_*
+  - CM_DATASET_*
+  - CM_HW_NAME
+  - CM_ML_MODEL_*
+  - CM_MAX_EXAMPLES
+  - CM_IMAGENET_ACCURACY_DTYPE
+  - CM_SQUAD_ACCURACY_DTYPE
+
+
+# Dependencies on other CM scripts
+
+deps:
+
+  # Detect host OS features
+  - tags: detect,os
+
+  # Detect host CPU features
+  - tags: detect,cpu
+
+  # Install system dependencies on a given host
+  - tags: get,sys-utils-cm
+
+
+  ########################################################################
+  # Install MLPerf inference dependencies
+
+  # Download MLPerf inference source
+  - tags: get,mlcommons,inference,src
+    names:
+    - inference-src
+
+  # Download MLPerf inference loadgen
+  - tags: get,mlcommons,inference,loadgen
+    names:
+    - inference-loadgen
+
+  # Creates user conf for given SUT
+  - tags: generate,user-conf,mlperf,inference
+    names:
+    - user-conf-generator
+
+  # Get MLPerf logging library
+  - tags: get,generic-python-lib,_mlperf_logging
+    names:
+    - mlperf-logging
+
+  - tags: get,git,repo
+    names:
+    - inference-results
+    - inference-code
+    update_tags_from_env_with_prefix:
+      _repo.:
+      - CM_MLPERF_INFERENCE_RESULTS_REPO
+    env:
+      CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO
+    extra_cache_tags: results,repo,mlperf
+
+# Post dependencies to run this app including for power measurement
+post_deps:
+
+  - names:
+    - runner
+    - mlperf-runner
+    skip_if_env:
+      CM_MLPERF_SKIP_RUN:
+      - 'yes'
+      - yes
+    tags: benchmark-mlperf
+
+  - tags: save,mlperf,inference,state
+    names:
+    - save-mlperf-inference-state
+
+# Variations to customize dependencies
+variations:
+  # Target devices
+  cpu:
+    group: device
+    default: true
+    env:
+      CM_MLPERF_DEVICE: cpu
+  cuda:
+    group: device
+    env:
+      CM_MLPERF_DEVICE: gpu
+      CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart
+
+  rocm:
+    group: device
+    env:
+      CM_MLPERF_DEVICE: rocm
+      CM_MLPERF_DEVICE_LIB_NAMESPEC: rocm
+
+  openshift:
+    group: backend
+    default: true
+    env:
+      CM_MLPERF_BACKEND: openshift
+
+  pytorch:
+    group: backend
+    env:
+      CM_MLPERF_BACKEND: pytorch
+
+  pytorch,cuda:
+    deps:
+    - tags: get,generic-python-lib,_torch_cuda
+
+  pytorch,rocm:
+    deps:
+    - tags: get,generic-python-lib,_torch,_rocm
+
+  pytorch,cpu:
+    deps:
+    - tags: get,generic-python-lib,_torch
+
+  bs.#:
+    group: batch-size
+
+
+  # Reference MLPerf models
+  resnet50:
+    group: model
+    default: true
+    env:
+      CM_MODEL: resnet50
+
+  retinanet:
+    group: model
+    base:
+    - bs.1
+    env:
+      CM_MODEL: retinanet
+
+  bert_:
+    {}
+
+  bert-99:
+    group: model
+    base:
+    - bert_
+    env:
+      CM_MODEL: bert-99
+      CM_SQUAD_ACCURACY_DTYPE: float32
+
+  bert-99.9:
+    group: model
+    base:
+    - bert_
+    env:
+      CM_MODEL: bert-99.9
+
+  gptj_:
+    deps:
+    - tags: get,ml-model,gptj
+      names:
+      - gptj-model
+    - tags: get,dataset,cnndm,_validation
+
+  gptj-99:
+    group: model
+    base:
+    - gptj_
+    env:
+      CM_MODEL: gptj-99
+      CM_SQUAD_ACCURACY_DTYPE: float32
+
+  gptj-99.9:
+    group: model
+    base:
+    - gptj_
+    env:
+      CM_MODEL: gptj-99.9
+
+  llama2-70b_:
+    deps:
+    - tags: get,generic-python-lib,_package.compressed_tensors
+      names:
+      - compressed_tensors
+    - tags: get,preprocessed,dataset,openorca,_mlc,_validation
+    - tags: get,ml-model,llama2,_amd,_pytorch
+      skip_if_env:
+        CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
+        - 'yes'
+        CM_RUN_STATE_DOCKER:
+        - 'yes'
+    - tags: download,file,_url.https://github.com/vllm-project/vllm/blob/38c4b7e863570a045308af814c72f4504297222e/tests/fp8_kv/llama2-70b-fp8-kv/kv_cache_scales.json
+      extra_cache_tags: llama2-scales,kv-cache
+      force_cache: true
+      env:
+        CM_DOWNLOAD_FINAL_ENV_NAME: QUANTIZATION_PARAM_PATH
+    - tags: get,generic-python-lib,_package.vllm
+      names:
+      - vllm
+    - tags: get,git,repo,_repo.https://github.com/mlcommons/inference_results_v4.1,_branch.cm-code-only
+      extra_cache_tags: inference,results
+      env:
+        CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_RESULTS_PATH
+
+  llama2-70b-99:
+    group: model
+    base:
+    - llama2-70b_
+    env:
+      CM_MODEL: llama2-70b-99
+
+  llama2-70b-99.9:
+    group: model
+    base:
+    - llama2-70b_
+    env:
+      CM_MODEL: llama2-70b-99.9
+
+  singlestream:
+    group: 
loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: SingleStream + + singlestream,resnet50: + default_variations: + batch-size: bs.1 + + singlestream,retinanet: + default_variations: + batch-size: bs.1 + + multistream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: MultiStream + + offline: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Offline + + server: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Server + + uint8: + group: precision + fp16: + group: precision + fp32: + group: precision + + r4.1-dev_default: + group: version + default: true + env: + CM_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.0 + + r4.1_default: + group: version + env: + CM_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.1 + +docker: + real_run: False diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/customize.py new file mode 100644 index 000000000..7c6b91e58 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/customize.py @@ -0,0 +1,59 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + env = i['env'] + + if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + return {'return': 0} + + env['CM_MLPERF_AMD_SCRIPT_PATH'] = env['CM_TMP_CURRENT_SCRIPT_PATH'] + env['CM_MLPERF_AMD_CODE_PATH'] = os.path.join( + env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "closed", "AMD") + + if 'CM_MODEL' not in env: + return { + 'return': 1, 'error': 'Please select a variation specifying the model to run'} + if 'CM_MLPERF_BACKEND' not in env: + return {'return': 1, + 'error': 'Please select a variation specifying the backend'} + if 'CM_MLPERF_DEVICE' not in env: + return { + 'return': 1, 'error': 'Please select a variation specifying the device to run on'} + + if "llama2" in env['CM_MODEL']: + env['CM_RUN_DIR'] = i['run_script_input']['path'] + env['CM_MLPERF_AMD_LLAMA2_CODE_PATH'] = os.path.join( + env['CM_MLPERF_AMD_CODE_PATH'], "llama2-70b-99.9/VllmFp8") + env['CM_RUN_CMD'] = "bash run-llama2.sh " + else: + return {'return': 1, 'error': 'Model {} not supported'.format( + env['CM_MODEL'])} + + return {'return': 0} + # return {'return':1, 'error': 'Run command needs to be tested'} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/run-llama2.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/run-llama2.sh new file mode 100644 index 000000000..10f36f8ca --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/run-llama2.sh @@ -0,0 +1,76 @@ +#!/bin/bash + +set -xeu + +N_SAMPLES=${N_SAMPLES:-24576} #24576 #3072 #2457 #6 +TP=1 +DP=${DP:-8} + +export HIP_FORCE_DEV_KERNARG=1 +export VLLM_USE_TRITON_FLASH_ATTN=0 +export VLLM_FP8_PADDING=1 +export VLLM_FP8_ACT_PADDING=1 
+export VLLM_FP8_WEIGHT_PADDING=1 +export VLLM_FP8_REDUCE_CONV=1 + +export HARNESS_DISABLE_VLLM_LOGS=1 +export VLLM_LOGGING_LEVEL=ERROR + +MODEL_PATH=${LLAMA2_CHECKPOINT_PATH:-/data/llm/llama2-70b-chat/} +DATASET_PATH=${CM_DATASET_OPENORCA_PREPROCESSED_PATH:-/data/open_orca/open_orca_gpt4_tokenized_llama.sampled_24576.pkl.gz} +QUANTIZED_WEIGHTS_PATH=${CM_LLAMA2_FINAL_SAFE_TENSORS_PATH:-quantized/quark_share/modelzoo/llama2_70b_wfp8_afp8_ofp8_nomerge/json-safetensors/llama.safetensors} +QUANTIZATION_PARAM_PATH=${QUANTIZATION_PARAM_PATH:-/app/kv_cache_scales.json} + +MLPERF_CONF="${CM_MLPERF_CONF:-/app/mlperf_inference/mlperf.conf}" +USER_CONF="${CM_MLPERF_USER_CONF:-/lab-mlperf-inference/code/llama2-70b-99.9/mlperf_config_VllmFp8/user.conf}" + +SUBMISSION=${SUBMISSION:-0} + +LOG_DIR=${CM_MLPERF_OUTPUT_DIR} + +cp $USER_CONF ${LOG_DIR}/user.conf + +COMMON_CMD_OPTIONS="\ + --scenario ${CM_MLPERF_LOADGEN_SCENARIO} \ + --output-log-dir ${LOG_DIR} \ + --model-path $MODEL_PATH \ + --mlperf-conf $MLPERF_CONF \ + --user-conf $USER_CONF \ + --total-sample-count $N_SAMPLES \ + --dataset-path $DATASET_PATH \ + --dtype float16 \ + --backend vllm \ + --device cuda:0 \ + --kv-cache-dtype fp8 \ + -tp ${TP} \ + -dp ${DP} \ + --quantization fp8 \ + --quantized-weights-path ${QUANTIZED_WEIGHTS_PATH} \ + --quantization-param-path ${QUANTIZATION_PARAM_PATH}" + +if [ "${CM_MLPERF_LOADGEN_MODE}" == "accuracy" ]; then + COMMON_CMD_OPTIONS+=" --accuracy" +fi + +if [ "${CM_MLPERF_LOADGEN_SCENARIO}" == "Offline" ]; then + WD=${WD:-0} + SORTING=${SORTING:-descending} #ascending #descending #lexicographic #skip + export VLLM_SCHED_PREFILL_KVC_FREEPCT=31.0 + # generate run command + cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_AMD_LLAMA2_CODE_PATH}/mainVllmFp8_Offline.py \ + ${COMMON_CMD_OPTIONS} \ + --warmup-duration ${WD} \ + --sorting ${SORTING} \ + --enforce-eager True \ + --gpu-memory-utilization 0.99" +else + # generate run command + cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_AMD_LLAMA2_CODE_PATH}/mainVllmFp8_SyncServer.py \ + ${COMMON_CMD_OPTIONS} \ + --enable-warm-up \ + --enable-batcher" +fi + +echo "${cmd}" +# uncomment the below lines for testing +#eval "${cmd}" diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/run.sh new file mode 100644 index 000000000..ddcd0b550 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-amd/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then + cd ${CM_RUN_DIR} + cmd=${CM_RUN_CMD} + echo "${cmd}" + eval "${cmd}" + test $? -eq 0 || exit $? +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
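A note on the contract between _cm.yaml, customize.py and run.sh above: preprocess() validates the selected variations, fills env['CM_RUN_CMD'], and run.sh simply evals it. Below is a minimal sketch of exercising that contract directly; all paths are hypothetical placeholders, and it assumes the script directory is on sys.path so that customize.py is importable.

    # Minimal sketch of the preprocess() contract used by the AMD harness above:
    # it returns {'return': 0} and fills env['CM_RUN_CMD'] on success, or
    # {'return': 1, 'error': ...} when a required variation is missing.
    # All paths below are hypothetical placeholders.
    from customize import preprocess

    i = {
        'os_info': {'platform': 'linux'},
        'run_script_input': {'path': '/path/to/app-mlperf-inference-amd'},
        'env': {
            'CM_MODEL': 'llama2-70b-99.9',
            'CM_MLPERF_BACKEND': 'pytorch',
            'CM_MLPERF_DEVICE': 'rocm',
            'CM_TMP_CURRENT_SCRIPT_PATH': '/path/to/app-mlperf-inference-amd',
            'CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO': '/path/to/inference_results_v4.1',
        },
    }
    r = preprocess(i)
    assert r['return'] == 0
    assert i['env']['CM_RUN_CMD'].startswith('bash run-llama2.sh')
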
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/README.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/README.md new file mode 100644 index 000000000..f9f7ce6c6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-ctuning-cpp-tflite](https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-ctuning-cpp-tflite) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.yaml new file mode 100644 index 000000000..e66ae2bac --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.yaml @@ -0,0 +1,282 @@ +alias: app-mlperf-inference-ctuning-cpp-tflite +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Modular MLPerf inference benchmark pipeline +default_env: + CM_DATASET_COMPRESSED: 'off' + CM_DATASET_INPUT_SQUARE_SIDE: '224' + CM_FAST_COMPILATION: 'yes' + CM_LOADGEN_BUFFER_SIZE: '1024' + CM_MLPERF_LOADGEN_MODE: accuracy + CM_MLPERF_LOADGEN_SCENARIO: SingleStream + CM_MLPERF_LOADGEN_TRIGGER_COLD_RUN: '0' + CM_MLPERF_OUTPUT_DIR: . + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: tflite_cpp + CM_MLPERF_TFLITE_USE_NEON: '0' + CM_MLPERF_TFLITE_USE_OPENCL: '0' + CM_ML_MODEL_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94 + CM_ML_MODEL_NORMALIZE_DATA: '0' + CM_ML_MODEL_SUBTRACT_MEANS: '1' + CM_VERBOSE: '0' +deps: +- tags: detect,os +- tags: detect,cpu +- tags: get,sys-utils-cm +- enable_if_env: + CM_MLPERF_DEVICE: + - gpu + tags: get,cuda +- names: + - loadgen + tags: get,loadgen +- names: + - inference-src + tags: get,mlcommons,inference,src +- enable_if_env: + CM_MLPERF_BACKEND: + - tflite + - armnn_tflite + CM_MODEL: + - mobilenet + names: + - ml-model + - tflite-model + - mobilenet-model + tags: get,ml-model,mobilenet,raw,_tflite +- enable_if_env: + CM_MLPERF_BACKEND: + - tflite + - armnn_tflite + CM_MODEL: + - resnet50 + names: + - ml-model + - tflite-model + - resnet50-model + tags: get,ml-model,resnet50,raw,_tflite,_no-argmax +- enable_if_env: + CM_MLPERF_BACKEND: + - tf + CM_MODEL: + - resnet50 + names: + - ml-model + - tflite-model + - resnet50-model + tags: get,ml-model,resnet50,raw,_tf +- enable_if_env: + CM_MLPERF_BACKEND: + - tflite + - armnn_tflite + CM_MODEL: + - efficientnet + names: + - ml-model + - tflite-model + - efficientnet-model + tags: get,ml-model,efficientnet,raw,_tflite +- names: + - tensorflow + - tflite + tags: get,tensorflow,lib,_tflite +- enable_if_env: + CM_MLPERF_TFLITE_USE_ARMNN: + - 'yes' + names: + - armnn + - lib-armnn + tags: get,lib,armnn +input_mapping: + compressed_dataset: CM_DATASET_COMPRESSED + count: CM_MLPERF_LOADGEN_QUERY_COUNT + mlperf_conf: CM_MLPERF_CONF + mode: CM_MLPERF_LOADGEN_MODE + output_dir: CM_MLPERF_OUTPUT_DIR + performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + scenario: CM_MLPERF_LOADGEN_SCENARIO + user_conf: CM_MLPERF_USER_CONF + verbose: CM_VERBOSE +new_env_keys: +- CM_MLPERF_* +- CM_ML_MODEL_* +- CM_HW_NAME +new_state_keys: +- CM_SUT_* +post_deps: +- names: + - compiler-program + skip_if_env: + CM_MLPERF_SKIP_RUN: + - 'yes' + tags: compile,program +- names: + - mlperf-runner + skip_if_env: + CM_MLPERF_SKIP_RUN: + - 'yes' + 
tags: benchmark-mlperf +- names: + - save-mlperf-inference-state + tags: save,mlperf,inference,state +prehook_deps: +- names: + - user-conf-generator + tags: generate,user-conf,mlperf,inference +- enable_if_env: + CM_MLPERF_SKIP_RUN: + - 'no' + CM_MODEL: + - resnet50 + names: + - imagenet-preprocessed + - preprocessed-dataset + skip_if_env: + CM_DATASET_COMPRESSED: + - 'on' + tags: get,dataset,preprocessed,imagenet,_for.resnet50,_rgb32,_NHWC + update_tags_from_env: + - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS +- enable_if_env: + CM_MLPERF_SKIP_RUN: + - 'no' + CM_MODEL: + - mobilenet + - efficientnet + names: + - imagenet-preprocessed + - preprocessed-dataset + skip_if_env: + CM_DATASET_COMPRESSED: + - 'on' + tags: get,dataset,preprocessed,imagenet,_for.mobilenet,_rgb32,_NHWC + update_tags_from_env: + - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS +- enable_if_env: + CM_DATASET_COMPRESSED: + - 'on' + CM_MLPERF_SKIP_RUN: + - 'no' + CM_MODEL: + - mobilenet + - efficientnet + names: + - imagenet-preprocessed + - preprocessed-dataset + tags: get,dataset,preprocessed,imagenet,_for.mobilenet,_rgb8,_NHWC + update_tags_from_env: + - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS +- enable_if_env: + CM_DATASET_COMPRESSED: + - 'on' + CM_MLPERF_SKIP_RUN: + - 'no' + CM_MODEL: + - resnet50 + names: + - imagenet-preprocessed + - preprocessed-dataset + tags: get,dataset,preprocessed,imagenet,_for.resnet50,_rgb8,_NHWC + update_tags_from_env: + - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS +tags: +- app +- mlcommons +- mlperf +- inference +- tflite-cpp +tags_help: app mlperf inference tflite-cpp +uid: 415904407cca404a +variations: + armnn: + default_variations: + optimization-target: use-neon + env: + CM_MLPERF_TFLITE_USE_ARMNN: 'yes' + CM_TMP_LINK_LIBS: tensorflowlite,armnn + armnn,tflite: + env: + CM_MLPERF_BACKEND: armnn_tflite + CM_MLPERF_BACKEND_VERSION: <<>> + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: tflite_armnn_cpp + CM_TMP_LINK_LIBS: tensorflowlite,armnn,armnnTfLiteParser + CM_TMP_SRC_FOLDER: armnn + cpu: + default: true + env: + CM_MLPERF_DEVICE: cpu + group: device + efficientnet: + env: + CM_MODEL: efficientnet + group: model + fp32: + adr: + ml-model: + tags: _fp32 + preprocessed-dataset: + tags: _float32 + default: true + env: + CM_MLPERF_MODEL_PRECISION: float32 + group: precision + gpu: + env: + CM_MLPERF_DEVICE: gpu + CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + group: device + int8: + adr: + ml-model: + tags: _int8 + preprocessed-dataset: + tags: _int8 + env: + CM_DATASET_COMPRESSED: 'on' + CM_MLPERF_MODEL_PRECISION: int8 + group: precision + mobilenet: + env: + CM_MODEL: mobilenet + group: model + resnet50: + default: true + env: + CM_MODEL: resnet50 + group: model + singlestream: + default: true + env: + CM_MLPERF_LOADGEN_SCENARIO: SingleStream + group: loadgen-scenario + tf: + env: + CM_MLPERF_BACKEND: tf + group: backend + tflite: + default: true + env: + CM_MLPERF_BACKEND: tflite + CM_MLPERF_BACKEND_VERSION: master + CM_TMP_LINK_LIBS: tensorflowlite + CM_TMP_SRC_FOLDER: src + group: backend + uint8: + adr: + ml-model: + tags: _uint8 + preprocessed-dataset: + tags: _int8 + env: + CM_DATASET_COMPRESSED: 'on' + CM_MLPERF_MODEL_PRECISION: uint8 + group: precision + use-neon: + env: + CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: using_neon + CM_MLPERF_TFLITE_USE_NEON: '1' + group: optimization-target + use-opencl: + env: + CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: using_opencl + CM_MLPERF_TFLITE_USE_OPENCL: '1' + group: optimization-target diff --git 
a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp
new file mode 100644
index 000000000..c641e9d1e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/armnn/classification.cpp
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2018 cTuning foundation.
+ * See CK COPYRIGHT.txt for copyright details.
+ *
+ * See CK LICENSE for licensing details.
+ * See CK COPYRIGHT for copyright details.
+ */
+
+#include
+#include
+#include
+
+#include "armnn/ArmNN.hpp"
+#include "armnn/Exceptions.hpp"
+#include "armnn/Tensor.hpp"
+#include "armnn/INetwork.hpp"
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+
+#include "loadgen.h"
+#include "query_sample_library.h"
+#include "system_under_test.h"
+#include "test_settings.h"
+
+
+#include "benchmark.h"
+
+#include "tensorflow/lite/kernels/register.h"
+#include "tensorflow/lite/model.h"
+
+using namespace std;
+using namespace CK;
+
+
+template <typename TData, typename TInConverter, typename TOutConverter>
+
+class ArmNNBenchmark : public Benchmark<TData, TInConverter, TOutConverter> {
+public:
+    ArmNNBenchmark(const BenchmarkSettings* settings, TData *in_ptr, TData *out_ptr)
+        : Benchmark<TData, TInConverter, TOutConverter>(settings, in_ptr, out_ptr) {
+    }
+};
+
+armnn::InputTensors MakeInputTensors(const std::pair<armnn::LayerBindingId, armnn::TensorInfo>& input, const void* inputTensorData)
+{
+    return { {input.first, armnn::ConstTensor(input.second, inputTensorData) } };
+}
+
+armnn::OutputTensors MakeOutputTensors(const std::pair<armnn::LayerBindingId, armnn::TensorInfo>& output, void* outputTensorData)
+{
+    return { {output.first, armnn::Tensor(output.second, outputTensorData) } };
+}
+
+class Program {
+public:
+  Program () : runtime( armnn::IRuntime::Create(options) ) {
+
+    bool use_neon = getenv_b("CM_MLPERF_TFLITE_USE_NEON");
+    bool use_opencl = getenv_b("CM_MLPERF_TFLITE_USE_OPENCL");
+    string input_layer_name = getenv_s("CM_ML_MODEL_INPUT_LAYER_NAME");
+    string output_layer_name = getenv_s("CM_ML_MODEL_OUTPUT_LAYER_NAME");
+
+    settings = new BenchmarkSettings(MODEL_TYPE::LITE);
+
+    session = new BenchmarkSession(settings);
+
+    armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();
+
+    // Optimize the network for a specific runtime compute device, e.g. CpuAcc, GpuAcc
+    //std::vector<armnn::BackendId> optOptions = {armnn::Compute::CpuAcc, armnn::Compute::GpuAcc};
+    std::vector<armnn::BackendId> optOptions = {armnn::Compute::CpuRef};
+    if( use_neon && use_opencl) {
+        optOptions = {armnn::Compute::CpuAcc, armnn::Compute::GpuAcc};
+    } else if( use_neon ) {
+        optOptions = {armnn::Compute::CpuAcc};
+    } else if( use_opencl ) {
+        optOptions = {armnn::Compute::GpuAcc};
+    }
+
+    cout << "\nLoading graph..." << endl;
+
+    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(settings->graph_file().c_str());
+    if (!network)
+      throw "Failed to load graph from file";
+
+    armnnTfLiteParser::BindingPointInfo inputBindingInfo = parser->GetNetworkInputBindingInfo(0, input_layer_name);
+    armnnTfLiteParser::BindingPointInfo outputBindingInfo = parser->GetNetworkOutputBindingInfo(0, output_layer_name);
+
+    armnn::TensorShape inShape = inputBindingInfo.second.GetShape();
+    armnn::TensorShape outShape = outputBindingInfo.second.GetShape();
+    std::size_t inSize = inShape[0] * inShape[1] * inShape[2] * inShape[3];
+    std::size_t outSize = outShape[0] * outShape[1];
+
+    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*network, optOptions, runtime->GetDeviceSpec());
+
+    runtime->LoadNetwork(networkIdentifier, std::move(optNet));
+
+    armnn::DataType input_type = inputBindingInfo.second.GetDataType();
+    armnn::DataType output_type = outputBindingInfo.second.GetDataType();
+    if (input_type != output_type)
+      throw format("Type of graph's input (%d) does not match type of its output (%d).", int(input_type), int(output_type));
+
+    void* input = input_type == armnn::DataType::Float32 ? (void*)new float[inSize] : (void*)new uint8_t[inSize];
+    void* output = output_type == armnn::DataType::Float32 ? (void*)new float[outSize] : (void*)new uint8_t[outSize];
+
+    inputTensor = MakeInputTensors(inputBindingInfo, input);
+    outputTensor = MakeOutputTensors(outputBindingInfo, output);
+
+    switch (input_type) {
+      case armnn::DataType::Float32:
+        if (settings->skip_internal_preprocessing) {
+          cout << "************* Type 1" << endl;
+          benchmark.reset(new ArmNNBenchmark<float, InCopy, OutCopy>(settings, (float*)input, (float*)output));
+        } else {
+          cout << "************* Type 2" << endl;
+          benchmark.reset(new ArmNNBenchmark<float, InNormalize, OutCopy>(settings, (float*)input, (float*)output));
+        }
+        break;
+
+      case armnn::DataType::QAsymmU8:
+        benchmark.reset(new ArmNNBenchmark<uint8_t, InCopy, OutDequantize>(settings, (uint8_t*)input, (uint8_t*)output));
+        break;
+
+      default:
+        throw format("Unsupported type of graph's input: %d. "
+                     "Supported types are: Float32 (%d), UInt8 (%d)",
+                     int(input_type), int(armnn::DataType::Float32), int(armnn::DataType::QAsymmU8));
+    }
+
+    int out_num = outShape[0];
+    int out_classes = outShape[1];
+    cout << format("Output tensor dimensions: %d*%d", out_num, out_classes) << endl;
+    if (out_classes != settings->num_classes && out_classes != settings->num_classes+1)
+      throw format("Unsupported number of classes in graph's output tensor. Supported numbers are %d and %d",
+                   settings->num_classes, settings->num_classes+1);
+    benchmark->has_background_class = out_classes == settings->num_classes+1;
+  }
+
+  ~Program() {
+  }
+
+  //bool is_available_batch() {return session? session->get_next_batch(): false; }
+
+  void LoadNextBatch(const std::vector<mlperf::QuerySampleIndex>& img_indices) {
+    auto vl = settings->verbosity_level;
+
+    if( vl > 1 ) {
+      cout << "LoadNextBatch([";
+      for( auto idx : img_indices) {
+        cout << idx << ' ';
+      }
+      cout << "])" << endl;
+    } else if( vl ) {
+      cout << 'B' << flush;
+    }
+    session->load_filenames(img_indices);
+    benchmark->load_images( session );
+
+    if( vl ) {
+      cout << endl;
+    }
+  }
+
+  void ColdRun() {
+    auto vl = settings->verbosity_level;
+
+    if( vl > 1 ) {
+      cout << "Triggering a Cold Run..."
<< endl; + } else if( vl ) { + cout << 'C' << flush; + } + + if (runtime->EnqueueWorkload(networkIdentifier, inputTensor, outputTensor) != armnn::Status::Success) + throw "Failed to invoke the classifier"; + } + + int InferenceOnce(int img_idx) { + benchmark->get_random_image( img_idx ); + + if (runtime->EnqueueWorkload(networkIdentifier, inputTensor, outputTensor) != armnn::Status::Success) + throw "Failed to invoke the classifier"; + + return benchmark->get_next_result(); + } + + void UnloadBatch(const std::vector& img_indices) { + auto b_size = img_indices.size(); + + auto vl = settings->verbosity_level; + + if( vl > 1 ) { + cout << "Unloading a batch[" << b_size << "]" << endl; + } else if( vl ) { + cout << 'U' << flush; + } + + benchmark->unload_images(b_size); + //benchmark->save_results( ); + } + + const int available_images_max() { return settings->list_of_available_imagefiles().size(); } + const int images_in_memory_max() { return settings->images_in_memory_max; } + + BenchmarkSettings *settings; +private: + BenchmarkSession *session; + unique_ptr benchmark; + armnn::NetworkId networkIdentifier; + armnn::OutputTensors outputTensor; + armnn::InputTensors inputTensor; + armnn::IRuntime::CreationOptions options; + armnn::IRuntimePtr runtime; +}; + +class SystemUnderTestSingleStream : public mlperf::SystemUnderTest { +public: + SystemUnderTestSingleStream(Program *_prg) : mlperf::SystemUnderTest() { + prg = _prg; + query_counter = 0; + }; + + ~SystemUnderTestSingleStream() override = default; + + const std::string& Name() { return name_; } + + void IssueQuery(const std::vector& samples) override { + + ++query_counter; + auto vl = prg->settings->verbosity_level; + if( vl > 1 ) { + cout << query_counter << ") IssueQuery([" << samples.size() << "]," << samples[0].id << "," << samples[0].index << ")" << endl; + } else if ( vl ) { + cout << 'Q' << flush; + } + + std::vector responses; + responses.reserve(samples.size()); + float encoding_buffer[samples.size()]; + int i=0; + for (auto s : samples) { + int predicted_class = prg->InferenceOnce(s.index); + + if( vl > 1 ) { + cout << "Query image index: " << s.index << " -> Predicted class: " << predicted_class << endl << endl; + } else if ( vl ) { + cout << 'p' << flush; + } + + /* This would be the correct way to pass in one integer index: + */ +// int single_value_buffer[] = { (int)predicted_class }; + + /* This conversion is subtly but terribly wrong + yet we use it here in order to use Guenther's parsing script: + */ + encoding_buffer[i] = (float)predicted_class; + responses.push_back({s.id, uintptr_t(&encoding_buffer[i]), sizeof(encoding_buffer[i])}); + ++i; + } + mlperf::QuerySamplesComplete(responses.data(), responses.size()); + } + + void FlushQueries() override { + auto vl = prg->settings->verbosity_level; + if ( vl ) { + cout << endl; + } + } + + void ReportLatencyResults(const std::vector& latencies_ns) { + + size_t size = latencies_ns.size(); + uint64_t avg = accumulate(latencies_ns.begin(), latencies_ns.end(), uint64_t(0) )/size; + + std::vector sorted_lat(latencies_ns.begin(), latencies_ns.end()); + sort(sorted_lat.begin(), sorted_lat.end()); + + cout << endl << "------------------------------------------------------------"; + cout << endl << "| LATENCIES (in nanoseconds and fps) |"; + cout << endl << "------------------------------------------------------------"; + size_t p50 = size * 0.5; + size_t p90 = size * 0.9; + cout << endl << "Number of queries run: " << size; + cout << endl << "Min latency: " << sorted_lat[0] << "ns (" 
<< 1e9/sorted_lat[0] << " fps)"; + cout << endl << "Median latency: " << sorted_lat[p50] << "ns (" << 1e9/sorted_lat[p50] << " fps)"; + cout << endl << "Average latency: " << avg << "ns (" << 1e9/avg << " fps)"; + cout << endl << "90 percentile latency: " << sorted_lat[p90] << "ns (" << 1e9/sorted_lat[p90] << " fps)"; + + if(!prg->settings->trigger_cold_run) { + cout << endl << "First query (cold model) latency: " << latencies_ns[0] << "ns (" << 1e9/latencies_ns[0] << " fps)"; + } + cout << endl << "Max latency: " << sorted_lat[size-1] << "ns (" << 1e9/sorted_lat[size-1] << " fps)"; + cout << endl << "------------------------------------------------------------ " << endl; + } + +private: + std::string name_{"TFLite_SUT"}; + Program *prg; + long query_counter; +}; + +class QuerySampleLibrarySingleStream : public mlperf::QuerySampleLibrary { +public: + QuerySampleLibrarySingleStream(Program *_prg) : mlperf::QuerySampleLibrary() { + prg = _prg; + }; + + ~QuerySampleLibrarySingleStream() = default; + + const std::string& Name() override { return name_; } + + size_t TotalSampleCount() override { return prg->available_images_max(); } + + size_t PerformanceSampleCount() override { return prg->images_in_memory_max(); } + + void LoadSamplesToRam( const std::vector& samples) override { + prg->LoadNextBatch(samples); + return; + } + + void UnloadSamplesFromRam( const std::vector& samples) override { + prg->UnloadBatch(samples); + return; + } + +private: + std::string name_{"TFLite_QSL"}; + Program *prg; +}; + +void TestSingleStream(Program *prg) { + SystemUnderTestSingleStream sut(prg); + QuerySampleLibrarySingleStream qsl(prg); + + const std::string mlperf_conf_path = getenv_s("CM_MLPERF_CONF"); + const std::string user_conf_path = getenv_s("CM_MLPERF_USER_CONF"); + + std::string model_name = getenv_opt_s("CM_MODEL", "unknown_model"); + std::string logs_dir = getenv_opt_s("CM_MLPERF_LOADGEN_LOGS_DIR", ""); + + const std::string scenario_string = getenv_s("CM_MLPERF_LOADGEN_SCENARIO"); + const std::string mode_string = getenv_s("CM_MLPERF_LOADGEN_MODE"); + + std::cout << "Path to mlperf.conf : " << mlperf_conf_path << std::endl; + std::cout << "Path to user.conf : " << user_conf_path << std::endl; + std::cout << "Model Name: " << model_name << std::endl; + std::cout << "LoadGen Scenario: " << scenario_string << std::endl; + std::cout << "LoadGen Mode: " << ( mode_string != "" ? mode_string : "(empty string)" ) << std::endl; + + mlperf::TestSettings ts; + + // This should have been done automatically inside ts.FromConfig() ! + ts.scenario = ( scenario_string == "SingleStream") ? mlperf::TestScenario::SingleStream + : ( scenario_string == "MultiStream") ? mlperf::TestScenario::MultiStream + : ( scenario_string == "Server") ? mlperf::TestScenario::Server + : ( scenario_string == "Offline") ? mlperf::TestScenario::Offline : mlperf::TestScenario::SingleStream; + + if( mode_string != "") + ts.mode = ( mode_string == "SubmissionRun") ? mlperf::TestMode::SubmissionRun + : ( mode_string == "accuracy") ? mlperf::TestMode::AccuracyOnly + : ( mode_string == "performance") ? mlperf::TestMode::PerformanceOnly + : ( mode_string == "findpeakperformance") ? 
mlperf::TestMode::FindPeakPerformance : mlperf::TestMode::SubmissionRun; + + if (ts.FromConfig(mlperf_conf_path, model_name, scenario_string)) { + std::cout << "Issue with mlperf.conf file at " << mlperf_conf_path << std::endl; + exit(1); + } + + if (ts.FromConfig(user_conf_path, model_name, scenario_string)) { + std::cout << "Issue with user.conf file at " << user_conf_path << std::endl; + exit(1); + } + + mlperf::LogSettings log_settings; + log_settings.log_output.outdir = logs_dir; + log_settings.log_output.prefix_with_datetime = false; + log_settings.enable_trace = false; + + + if (prg->settings->trigger_cold_run) { + prg->ColdRun(); + } + + mlperf::StartTest(&sut, &qsl, ts, log_settings); +} + +int main(int argc, char* argv[]) { + try { + Program *prg = new Program(); + TestSingleStream(prg); + delete prg; + } + catch (const string& error_message) { + cerr << "ERROR: " << error_message << endl; + return -1; + } + return 0; +} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py new file mode 100644 index 000000000..ec75f7e84 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/customize.py @@ -0,0 +1,120 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + env = i['env'] + + if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + return {'return': 0} + + if 'CM_MODEL' not in env: + return { + 'return': 1, 'error': 'Please select a variation specifying the model to run'} + if 'CM_MLPERF_BACKEND' not in env: + return {'return': 1, + 'error': 'Please select a variation specifying the backend'} + if 'CM_MLPERF_DEVICE' not in env: + return { + 'return': 1, 'error': 'Please select a variation specifying the device to run on'} + + source_files = [] + script_path = i['run_script_input']['path'] + + env['CM_SOURCE_FOLDER_PATH'] = os.path.join( + script_path, env['CM_TMP_SRC_FOLDER']) + + for file in os.listdir(env['CM_SOURCE_FOLDER_PATH']): + if file.endswith(".c") or file.endswith(".cpp"): + source_files.append(file) + + env['CM_CXX_SOURCE_FILES'] = ";".join(source_files) + + if '+CPLUS_INCLUDE_PATH' not in env: + env['+CPLUS_INCLUDE_PATH'] = [] + + env['+CPLUS_INCLUDE_PATH'].append(os.path.join(script_path, "inc")) + env['+C_INCLUDE_PATH'].append(os.path.join(script_path, "inc")) + + # TODO: get cuda path ugly fix + if env['CM_MLPERF_DEVICE'] == 'gpu': + env['+C_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) + env['+CPLUS_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) + env['+LD_LIBRARY_PATH'].append(env['CM_CUDA_PATH_LIB']) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) + + if '+ CXXFLAGS' not in env: + env['+ CXXFLAGS'] = [] + env['+ CXXFLAGS'].append("-std=c++17") + + # add preprocessor flag like "#define CM_MODEL_RESNET50" + env['+ CXXFLAGS'].append('-DCM_MODEL_' + 
env['CM_MODEL'].upper()) + # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME" + env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' + + env['CM_MLPERF_BACKEND'].upper()) + # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU" + env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' + + env['CM_MLPERF_DEVICE'].upper()) + + if '+ LDCXXFLAGS' not in env: + env['+ LDCXXFLAGS'] = [] + + env['+ LDCXXFLAGS'] += [ + "-lmlperf_loadgen", + "-lpthread" + ] + # e.g. -lonnxruntime + if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env: + env['+ LDCXXFLAGS'].append('-l' + + env['CM_MLPERF_BACKEND_LIB_NAMESPEC']) + # e.g. -lcudart + if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env: + env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC']) + + if env.get('CM_TMP_LINK_LIBS', []): + libs = env['CM_TMP_LINK_LIBS'].split(",") + for lib in libs: + env['+ LDCXXFLAGS'].append(' -l' + lib) + + env['CM_LINKER_LANG'] = 'CXX' + env['CM_RUN_DIR'] = os.getcwd() + + if 'CM_MLPERF_CONF' not in env: + env['CM_MLPERF_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + if 'CM_MLPERF_USER_CONF' not in env: + env['CM_MLPERF_USER_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") + + if env.get('CM_DATASET_COMPRESSED', "no").lower() in [ + "yes", "on", "true"] and "float" in env.get('CM_MLPERF_MODEL_PRECISION', ''): + # Use all cores for input preprocessing + env['CM_HOST_USE_ALL_CORES'] = "yes" + env['CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2'] = "with_live_preprocessing" + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h new file mode 100644 index 000000000..76f1209a8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/inc/benchmark.h @@ -0,0 +1,488 @@ +/* + * Copyright (c) 2018 cTuning foundation. + * See CK COPYRIGHT.txt for copyright details. + * + * See CK LICENSE for licensing details. + * See CK COPYRIGHT for copyright details. + */ + +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG(msg) std::cout << "DEBUG: " << msg << std::endl; + +namespace CK { + +enum MODEL_TYPE { + LITE, + TF_FROZEN +}; + + +/// Load mandatory string value from the environment. +inline std::string getenv_s(const std::string& name) { + const char *value = getenv(name.c_str()); + if (!value) + throw "Required environment variable " + name + " is not set"; + return std::string(value); +} + +inline std::string getenv_opt_s(const std::string& name, const std::string default_value) { + const char *value = getenv(name.c_str()); + if (!value) + return default_value; + else + return std::string(value); +} + + +/// Load mandatory integer value from the environment. +inline int getenv_i(const std::string& name) { + const char *value = getenv(name.c_str()); + if (!value) + throw "Required environment variable " + name + " is not set"; + return atoi(value); +} + +/// Load mandatory float value from the environment. +inline float getenv_f(const std::string& name) { + const char *value = getenv(name.c_str()); + if (!value) + throw "Required environment variable " + name + " is not set"; + return atof(value); +} + +/// Load an optional boolean value from the environment. 
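+/// Unlike the mandatory getenv_s/getenv_i/getenv_f helpers above, an unset
+/// variable is treated here as false rather than as a fatal error.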
+inline bool getenv_b(const char *name) {
+  const char *value = getenv(name);
+  if (!value) return false;  // optional variable: unset means "no"
+  std::string s(value);
+
+  return (s == "YES" || s == "yes" || s == "ON" || s == "on" || s == "1");
+}
+
+/// Dummy `sprintf`-like formatting function using std::string.
+/// It uses a buffer of fixed length, so it can't be used in all cases;
+/// generally use it for short messages with numeric arguments.
+template <typename ...Args>
+inline std::string format(const char* str, Args ...args) {
+  char buf[1024];
+  sprintf(buf, str, args...);
+  return std::string(buf);
+}
+
+//----------------------------------------------------------------------
+
+class Accumulator {
+public:
+  void reset() { _total = 0, _count = 0; }
+  void add(float value) { _total += value, _count++; }
+  float total() const { return _total; }
+  float avg() const { return _total / static_cast<float>(_count); }
+private:
+  float _total = 0;
+  int _count = 0;
+};
+
+//----------------------------------------------------------------------
+
+class BenchmarkSettings {
+public:
+  const std::string images_dir = getenv_s("CM_DATASET_PREPROCESSED_PATH");
+  const std::string available_images_file = getenv_s("CM_DATASET_PREPROCESSED_IMAGES_LIST");
+  const bool skip_internal_preprocessing = (getenv_opt_s("CM_DATASET_COMPRESSED", "off") == "off");
+  const std::string result_dir = getenv_s("CM_MLPERF_OUTPUT_DIR");
+  const std::string input_layer_name = getenv_s("CM_ML_MODEL_INPUT_LAYER_NAME");
+  const std::string output_layer_name = getenv_s("CM_ML_MODEL_OUTPUT_LAYER_NAME");
+  const int images_in_memory_max = getenv_i("CM_LOADGEN_BUFFER_SIZE");
+  const int image_size = getenv_i("CM_DATASET_INPUT_SQUARE_SIDE");
+  const int batch_size = 1;
+  const int num_channels = 3;
+  const int num_classes = 1000;
+  const bool normalize_img = getenv_b("CM_ML_MODEL_NORMALIZE_DATA");
+
+  const bool subtract_mean = getenv_b("CM_ML_MODEL_SUBTRACT_MEANS");
+  const char *given_channel_means_str = getenv("CM_ML_MODEL_GIVEN_CHANNEL_MEANS");
+
+  const bool trigger_cold_run = getenv_b("CM_MLPERF_LOADGEN_TRIGGER_COLD_RUN");
+
+  const int verbosity_level = getenv_i("CM_VERBOSE");
+
+  BenchmarkSettings(enum MODEL_TYPE mode = MODEL_TYPE::LITE) {
+
+    if (given_channel_means_str) {
+      std::stringstream ss(given_channel_means_str);
+      for(int i=0;i<3;i++){
+        ss >> given_channel_means[i];
+      }
+    }
+
+    switch (mode)
+    {
+      case MODEL_TYPE::LITE:
+        _graph_file = getenv_s("CM_ML_MODEL_FILE_WITH_PATH");
+        break;
+
+      case MODEL_TYPE::TF_FROZEN:
+        _graph_file = getenv_s("CM_ML_MODEL_FILE_WITH_PATH");
+        break;
+
+      default:
+        std::cout << "Unsupported MODEL_TYPE" << std::endl;
+        exit(-1);
+        break;
+    };
+    _number_of_threads = std::thread::hardware_concurrency();
+
+    if (getenv_opt_s("CM_HOST_USE_ALL_CORES", "no") != "yes") {
+      _number_of_threads = _number_of_threads < 1 ? 1 : _number_of_threads;
+      _number_of_threads = !getenv("CM_HOST_CPU_TOTAL_CORES")
+                           ?
_number_of_threads + : getenv_i("CM_HOST_CPU_TOTAL_CORES"); + if (getenv_i("CM_HOST_CPU_TOTAL_CORES") && getenv_i("CM_HOST_CPU_THREADS_PER_CORE")) { + _number_of_threads = getenv_i("CM_HOST_CPU_TOTAL_CORES") / getenv_i("CM_HOST_CPU_THREADS_PER_CORE"); + } + } + // Print settings + std::cout << "Graph file: " << _graph_file << std::endl; + std::cout << "Image dir: " << images_dir << std::endl; + std::cout << "Image list: " << available_images_file << std::endl; + std::cout << "Image size: " << image_size << std::endl; + std::cout << "Image channels: " << num_channels << std::endl; + std::cout << "Prediction classes: " << num_classes << std::endl; + std::cout << "Result dir: " << result_dir << std::endl; + std::cout << "How many images fit in memory buffer: " << images_in_memory_max << std::endl; + std::cout << "Normalize: " << normalize_img << std::endl; + std::cout << "Subtract mean: " << subtract_mean << std::endl; + std::cout << "Run time preprocessing: " << !skip_internal_preprocessing << std::endl; + std::cout << "Number of Threads: " << _number_of_threads << std::endl; + if(subtract_mean && given_channel_means_str) + std::cout << "Per-channel means to subtract: " << given_channel_means[0] + << ", " << given_channel_means[1] + << ", " << given_channel_means[2] << std::endl; + + // Create results dir if none + auto dir = opendir(result_dir.c_str()); + if (dir) + closedir(dir); + else + system(("mkdir " + result_dir).c_str()); + + // Load list of images to be processed + std::ifstream file(available_images_file); + if (!file) + throw "Unable to open the available image list file " + available_images_file; + for (std::string s; !getline(file, s).fail();) + _available_image_list.emplace_back(s); + std::cout << "Number of available imagefiles: " << _available_image_list.size() << std::endl; + } + + const std::vector& list_of_available_imagefiles() const { return _available_image_list; } + + std::vector _available_image_list; + + int number_of_threads() { return _number_of_threads; } + + std::string graph_file() { return _graph_file; } + + float given_channel_means[3]; +private: + int _number_of_threads; + std::string _graph_file; +}; + +//---------------------------------------------------------------------- + +class BenchmarkSession { +public: + BenchmarkSession(const BenchmarkSettings* settings): _settings(settings) { + } + + virtual ~BenchmarkSession() {} + + const std::vector& load_filenames(std::vector img_indices) { + _filenames_buffer.clear(); + _filenames_buffer.reserve( img_indices.size() ); + idx2loc.clear(); + + auto list_of_available_imagefiles = _settings->list_of_available_imagefiles(); + auto count_available_imagefiles = list_of_available_imagefiles.size(); + + int loc=0; + for (auto idx : img_indices) { + if(idx& current_filenames() const { return _filenames_buffer; } + + std::map idx2loc; + +private: + const BenchmarkSettings* _settings; + std::vector _filenames_buffer; +}; + +//---------------------------------------------------------------------- + +template +class StaticBuffer { +public: + StaticBuffer(int size, const std::string& dir): _size(size), _dir(dir) { + _buffer = new TData[size]; + } + + virtual ~StaticBuffer() { + delete[] _buffer; + } + + TData* data() const { return _buffer; } + int size() const { return _size; } + +protected: + const int _size; + const std::string _dir; + TData* _buffer; +}; + +//---------------------------------------------------------------------- + +class ImageData : public StaticBuffer { +public: + ImageData(const BenchmarkSettings* s): 
StaticBuffer(
+      s->image_size * s->image_size * s->num_channels * (s->skip_internal_preprocessing ? sizeof(float) : sizeof(uint8_t)),
+      s->images_dir) {}
+
+  void load(const std::string& filepath, int vl) {
+    //auto path = _dir + '/' + filename;
+    auto path = filepath;
+    std::ifstream file(path, std::ios::in | std::ios::binary);
+    if (!file) throw "Failed to open image data " + path;
+    file.read(reinterpret_cast<char*>(_buffer), _size);
+    if( vl > 1) {
+      std::cout << "Loaded file: " << path << std::endl;
+    } else if ( vl ) {
+      std::cout << 'l' << std::flush;
+    }
+  }
+};
+
+//----------------------------------------------------------------------
+
+class ResultData : public StaticBuffer<float> {
+public:
+  ResultData(const BenchmarkSettings* s): StaticBuffer(
+    s->num_classes, s->result_dir) {}
+
+  void save(const std::string& filename) {
+    auto path = _dir + '/' + filename + ".txt";
+    std::ofstream file(path);
+    if (!file) throw "Unable to create result file " + path;
+    for (int i = 0; i < _size; i++)
+      file << _buffer[i] << std::endl;
+  }
+
+  int argmax() {
+    int arg_index = 0;
+    float max_value = _buffer[0];
+
+    for (int i = 1; i < _size; i++) {
+      if (_buffer[i] > max_value) {
+        arg_index = i;
+        max_value = _buffer[i];
+      }
+    }
+
+    return arg_index;
+  }
+};
+
+//----------------------------------------------------------------------
+
+class IBenchmark {
+public:
+  bool has_background_class = false;
+
+  virtual ~IBenchmark() {}
+  virtual void load_images(BenchmarkSession *session) = 0;
+  virtual void unload_images(size_t num_examples) = 0;
+  virtual void save_results() = 0;
+  virtual int get_next_result() = 0;
+  virtual void get_random_image(int img_idx) = 0;
+};
+
+
+template <typename TData, typename TInConverter, typename TOutConverter>
+class Benchmark : public IBenchmark {
+public:
+  Benchmark(const BenchmarkSettings* settings, TData *in_ptr, TData *out_ptr): _settings(settings) {
+    _in_ptr = in_ptr;
+    _out_ptr = out_ptr;
+    _in_converter.reset(new TInConverter(settings));
+    _out_converter.reset(new TOutConverter(settings));
+  }
+
+  void load_images(BenchmarkSession *_session) override {
+    session = _session;
+    auto vl = _settings->verbosity_level;
+
+    const std::vector<std::string>& image_filenames = session->current_filenames();
+
+    int length = image_filenames.size();
+    _current_buffer_size = length;
+    _in_batch = new std::unique_ptr<ImageData>[length];
+    _out_batch = new std::unique_ptr<ResultData>[length];
+    int i = 0;
+    for (auto image_file : image_filenames) {
+      _in_batch[i].reset(new ImageData(_settings));
+      _out_batch[i].reset(new ResultData(_settings));
+      _in_batch[i]->load(image_file, vl);
+      i++;
+    }
+  }
+
+  void unload_images(size_t num_examples) override {
+    for(size_t i=0;i<num_examples;i++) {
+      _in_batch[i].reset();
+      _out_batch[i].reset();
+    }
+  }
+
+  void get_random_image(int img_idx) override {
+    _in_converter->convert(_in_batch[ session->idx2loc[img_idx] ].get(), _in_ptr);
+  }
+
+  int get_next_result() override {
+    int probe_offset = has_background_class ?
1 : 0; + ResultData *next_result_ptr = _out_batch[_out_buffer_index++].get(); + _out_converter->convert(_out_ptr + probe_offset, next_result_ptr); + _out_buffer_index %= _current_buffer_size; + return next_result_ptr->argmax(); + } + + void save_results() override { + const std::vector& image_filenames = session->current_filenames(); + int i = 0; + for (auto image_file : image_filenames) { + _out_batch[i++]->save(image_file); + } + } + +private: + const BenchmarkSettings* _settings; + BenchmarkSession* session; + int _out_buffer_index = 0; + int _current_buffer_size = 0; + TData* _in_ptr; + TData* _out_ptr; + std::unique_ptr *_in_batch; + std::unique_ptr *_out_batch; + std::unique_ptr _in_converter; + std::unique_ptr _out_converter; +}; + +//---------------------------------------------------------------------- + +class IinputConverter { +public: + virtual ~IinputConverter() {} + virtual void convert(const ImageData* source, void* target) = 0; +}; + +//---------------------------------------------------------------------- + +class InCopy : public IinputConverter { +public: + InCopy(const BenchmarkSettings* s) {} + + void convert(const ImageData* source, void* target) { + uint8_t *uint8_target = static_cast(target); + std::copy(source->data(), source->data() + source->size(), uint8_target); + } +}; + +//---------------------------------------------------------------------- + +class InNormalize : public IinputConverter { +public: + InNormalize(const BenchmarkSettings* s): + _normalize_img(s->normalize_img), + _subtract_mean(s->subtract_mean), + _given_channel_means(s->given_channel_means), + _num_channels(s->num_channels) { + } + + void convert(const ImageData* source, void* target) { + // Copy image data to target + float *float_target = static_cast(target); + float sum = 0; + for (int i = 0; i < source->size(); i++) { + float px = source->data()[i]; + if (_normalize_img) + px = (px / 255.0 - 0.5) * 2.0; + sum += px; + float_target[i] = px; + } + // Subtract mean value if required + if (_subtract_mean) { + if(_given_channel_means) { + for (int i = 0; i < source->size(); i++){ + float_target[i] -= _given_channel_means[i % _num_channels]; // assuming NHWC order! + } + } else { + float mean = sum / static_cast(source->size()); + for (int i = 0; i < source->size(); i++) + float_target[i] -= mean; + } + } + } + +private: + const bool _normalize_img; + const bool _subtract_mean; + const float *_given_channel_means; + const int _num_channels; +}; + +//---------------------------------------------------------------------- + +class OutCopy { +public: + OutCopy(const BenchmarkSettings* s) {} + + void convert(const float* source, ResultData* target) const { + std::copy(source, source + target->size(), target->data()); + } +}; + +//---------------------------------------------------------------------- + +class OutDequantize { +public: + OutDequantize(const BenchmarkSettings* s) {} + + void convert(const uint8_t* source, ResultData* target) const { + for (int i = 0; i < target->size(); i++) + target->data()[i] = source[i] / 255.0; + } +}; + +} // namespace CK diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp new file mode 100644 index 000000000..9493f5430 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-ctuning-cpp-tflite/src/classification.cpp @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2018 cTuning foundation. 
+ * See CK COPYRIGHT.txt for copyright details. + * + * See CK LICENSE for licensing details. + * See CK COPYRIGHT for copyright details. + */ + +#include +#include +#include + +#include "loadgen.h" +#include "query_sample_library.h" +#include "system_under_test.h" +#include "test_settings.h" + + +#include "benchmark.h" + +#include "tensorflow/lite/kernels/register.h" +#include "tensorflow/lite/model.h" + +using namespace std; +using namespace CK; + + +template +class TFLiteBenchmark : public Benchmark { +public: + TFLiteBenchmark(const BenchmarkSettings* settings, tflite::Interpreter* interpreter, int input_index) + : Benchmark( + settings, interpreter->typed_tensor(input_index), interpreter->typed_output_tensor(0)) { + } +}; + +class Program { +public: + Program () { + settings = new BenchmarkSettings(MODEL_TYPE::LITE); + + session = new BenchmarkSession(settings); + + cout << "\nLoading graph..." << endl; + + model = tflite::FlatBufferModel::BuildFromFile(settings->graph_file().c_str()); + if (!model) + throw "Failed to load graph from file " + settings->graph_file(); + + tflite::ops::builtin::BuiltinOpResolver resolver; + tflite::InterpreterBuilder(*model, resolver)(&interpreter); + if (!interpreter) + throw string("Failed to construct interpreter"); + if (interpreter->AllocateTensors() != kTfLiteOk) + throw string("Failed to allocate tensors"); + + interpreter->SetNumThreads(settings->number_of_threads()); + + int input_index = interpreter->inputs()[0]; + int output_index = interpreter->outputs()[0]; + auto input_type = interpreter->tensor(input_index)->type; + auto output_type = interpreter->tensor(output_index)->type; + if (input_type != output_type) + throw format("Type of graph's input (%d) does not match type of its output (%d).", + int(input_type), int(output_type)); + + switch (input_type) { + case kTfLiteFloat32: + if (settings->skip_internal_preprocessing) + benchmark.reset(new TFLiteBenchmark(settings, interpreter.get(), input_index)); + else + benchmark.reset(new TFLiteBenchmark(settings, interpreter.get(), input_index)); + break; + + case kTfLiteUInt8: + benchmark.reset(new TFLiteBenchmark(settings, interpreter.get(), input_index)); + break; + + default: + throw format("Unsupported type of graph's input: %d. " + "Supported types are: Float32 (%d), UInt8 (%d)", + int(input_type), int(kTfLiteFloat32), int(kTfLiteUInt8)); + } + + TfLiteIntArray* in_dims = interpreter->tensor(input_index)->dims; + int in_num = in_dims->data[0]; + int in_height = in_dims->data[1]; + int in_width = in_dims->data[2]; + int in_channels = in_dims->data[3]; + cout << format("Input tensor dimensions (NHWC): %d*%d*%d*%d", in_num, in_height, in_width, in_channels) << endl; + if (in_height != settings->image_size || + in_width != settings->image_size || + in_channels != settings->num_channels) + throw format("Dimensions of graph's input do not correspond to dimensions of input image (%d*%d*%d*%d)", + settings->batch_size, settings->image_size, settings->image_size, settings->num_channels); + + TfLiteIntArray* out_dims = interpreter->tensor(output_index)->dims; + int out_num = out_dims->data[0]; + int out_classes = out_dims->data[1]; + cout << format("Output tensor dimensions: %d*%d", out_num, out_classes) << endl; + if (out_classes != settings->num_classes && out_classes != settings->num_classes+1) + throw format("Unsupported number of classes in graph's output tensor. 
Supported numbers are %d and %d", + settings->num_classes, settings->num_classes+1); + benchmark->has_background_class = out_classes == settings->num_classes+1; + } + + ~Program() { + } + + //bool is_available_batch() {return session? session->get_next_batch(): false; } + + void LoadNextBatch(const std::vector& img_indices) { + auto vl = settings->verbosity_level; + + if( vl > 1 ) { + cout << "LoadNextBatch(["; + for( auto idx : img_indices) { + cout << idx << ' '; + } + cout << "])" << endl; + } else if( vl ) { + cout << 'B' << flush; + } + session->load_filenames(img_indices); + benchmark->load_images( session ); + if( vl ) { + cout << endl; + } + } + + void ColdRun() { + auto vl = settings->verbosity_level; + + if( vl > 1 ) { + cout << "Triggering a Cold Run..." << endl; + } else if( vl ) { + cout << 'C' << flush; + } + + if (interpreter->Invoke() != kTfLiteOk) + throw "Failed to invoke tflite"; + } + + int InferenceOnce(int img_idx) { + benchmark->get_random_image( img_idx ); + if (interpreter->Invoke() != kTfLiteOk) + throw "Failed to invoke tflite"; + return benchmark->get_next_result(); + } + + void UnloadBatch(const std::vector& img_indices) { + auto b_size = img_indices.size(); + + auto vl = settings->verbosity_level; + + if( vl > 1 ) { + cout << "Unloading a batch[" << b_size << "]" << endl; + } else if( vl ) { + cout << 'U' << flush; + } + + benchmark->unload_images(b_size); + //benchmark->save_results( ); + } + + const int available_images_max() { return settings->list_of_available_imagefiles().size(); } + const int images_in_memory_max() { return settings->images_in_memory_max; } + + BenchmarkSettings *settings; +private: + BenchmarkSession *session; + unique_ptr benchmark; + unique_ptr interpreter; + unique_ptr model; +}; + + +class SystemUnderTestSingleStream : public mlperf::SystemUnderTest { +public: + SystemUnderTestSingleStream(Program *_prg) : mlperf::SystemUnderTest() { + prg = _prg; + query_counter = 0; + }; + + ~SystemUnderTestSingleStream() override = default; + + const std::string& Name() { return name_; } + + void IssueQuery(const std::vector& samples) override { + + ++query_counter; + auto vl = prg->settings->verbosity_level; + if( vl > 1 ) { + cout << query_counter << ") IssueQuery([" << samples.size() << "]," << samples[0].id << "," << samples[0].index << ")" << endl; + } else if ( vl ) { + cout << 'Q' << flush; + } + + std::vector responses; + responses.reserve(samples.size()); + float encoding_buffer[samples.size()]; + int i=0; + for (auto s : samples) { + int predicted_class = prg->InferenceOnce(s.index); + + if( vl > 1 ) { + cout << "Query image index: " << s.index << " -> Predicted class: " << predicted_class << endl << endl; + } else if ( vl ) { + cout << 'p' << flush; + } + + /* This would be the correct way to pass in one integer index: + */ +// int single_value_buffer[] = { (int)predicted_class }; + + /* This conversion is subtly but terribly wrong + yet we use it here in order to use Guenther's parsing script: + */ + encoding_buffer[i] = (float)predicted_class; + responses.push_back({s.id, uintptr_t(&encoding_buffer[i]), sizeof(encoding_buffer[i])}); + ++i; + } + mlperf::QuerySamplesComplete(responses.data(), responses.size()); + } + + void FlushQueries() override { + auto vl = prg->settings->verbosity_level; + if ( vl ) { + cout << endl; + } + } + + void ReportLatencyResults(const std::vector& latencies_ns) { + + size_t size = latencies_ns.size(); + uint64_t avg = accumulate(latencies_ns.begin(), latencies_ns.end(), uint64_t(0) )/size; + + 
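+    // Note: the p50/p90 ranks computed below truncate size*0.5 and size*0.9
+    // toward zero, so for small query counts they report a slightly lower
+    // rank; LoadGen's own log summary remains the authoritative result.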
std::vector sorted_lat(latencies_ns.begin(), latencies_ns.end()); + sort(sorted_lat.begin(), sorted_lat.end()); + + cout << endl << "------------------------------------------------------------"; + cout << endl << "| LATENCIES (in nanoseconds and fps) |"; + cout << endl << "------------------------------------------------------------"; + size_t p50 = size * 0.5; + size_t p90 = size * 0.9; + cout << endl << "Number of queries run: " << size; + cout << endl << "Min latency: " << sorted_lat[0] << "ns (" << 1e9/sorted_lat[0] << " fps)"; + cout << endl << "Median latency: " << sorted_lat[p50] << "ns (" << 1e9/sorted_lat[p50] << " fps)"; + cout << endl << "Average latency: " << avg << "ns (" << 1e9/avg << " fps)"; + cout << endl << "90 percentile latency: " << sorted_lat[p90] << "ns (" << 1e9/sorted_lat[p90] << " fps)"; + + if(!prg->settings->trigger_cold_run) { + cout << endl << "First query (cold model) latency: " << latencies_ns[0] << "ns (" << 1e9/latencies_ns[0] << " fps)"; + } + cout << endl << "Max latency: " << sorted_lat[size-1] << "ns (" << 1e9/sorted_lat[size-1] << " fps)"; + cout << endl << "------------------------------------------------------------ " << endl; + } + +private: + std::string name_{"TFLite_SUT"}; + Program *prg; + long query_counter; +}; + +class QuerySampleLibrarySingleStream : public mlperf::QuerySampleLibrary { +public: + QuerySampleLibrarySingleStream(Program *_prg) : mlperf::QuerySampleLibrary() { + prg = _prg; + }; + + ~QuerySampleLibrarySingleStream() = default; + + const std::string& Name() override { return name_; } + + size_t TotalSampleCount() override { return prg->available_images_max(); } + + size_t PerformanceSampleCount() override { return prg->images_in_memory_max(); } + + void LoadSamplesToRam( const std::vector& samples) override { + prg->LoadNextBatch(samples); + return; + } + + void UnloadSamplesFromRam( const std::vector& samples) override { + prg->UnloadBatch(samples); + return; + } + +private: + std::string name_{"TFLite_QSL"}; + Program *prg; +}; + +void TestSingleStream(Program *prg) { + SystemUnderTestSingleStream sut(prg); + QuerySampleLibrarySingleStream qsl(prg); + + const std::string mlperf_conf_path = getenv_s("CM_MLPERF_CONF"); + const std::string user_conf_path = getenv_s("CM_MLPERF_USER_CONF"); + const std::string audit_conf_path = getenv_opt_s("CM_MLPERF_INFERENCE_AUDIT_PATH",""); + + std::string model_name = getenv_opt_s("CM_MODEL", "unknown_model"); + std::string logs_dir = getenv_opt_s("CM_MLPERF_LOADGEN_LOGS_DIR", ""); + + const std::string scenario_string = getenv_s("CM_MLPERF_LOADGEN_SCENARIO"); + const std::string mode_string = getenv_s("CM_MLPERF_LOADGEN_MODE"); + + std::cout << "Path to mlperf.conf : " << mlperf_conf_path << std::endl; + std::cout << "Path to user.conf : " << user_conf_path << std::endl; + std::cout << "Model Name: " << model_name << std::endl; + std::cout << "LoadGen Scenario: " << scenario_string << std::endl; + std::cout << "LoadGen Mode: " << ( mode_string != "" ? mode_string : "(empty string)" ) << std::endl; + + mlperf::TestSettings ts; + + // This should have been done automatically inside ts.FromConfig() ! + ts.scenario = ( scenario_string == "SingleStream") ? mlperf::TestScenario::SingleStream + : ( scenario_string == "MultiStream") ? mlperf::TestScenario::MultiStream + : ( scenario_string == "Server") ? mlperf::TestScenario::Server + : ( scenario_string == "Offline") ? 
mlperf::TestScenario::Offline : mlperf::TestScenario::SingleStream; + + if( mode_string != "") + ts.mode = ( mode_string == "SubmissionRun") ? mlperf::TestMode::SubmissionRun + : ( mode_string == "accuracy") ? mlperf::TestMode::AccuracyOnly + : ( mode_string == "performance") ? mlperf::TestMode::PerformanceOnly + : ( mode_string == "findpeakperformance") ? mlperf::TestMode::FindPeakPerformance : mlperf::TestMode::SubmissionRun; + + if (ts.FromConfig(mlperf_conf_path, model_name, scenario_string)) { + std::cout << "Issue with mlperf.conf file at " << mlperf_conf_path << std::endl; + exit(1); + } + + if (ts.FromConfig(user_conf_path, model_name, scenario_string)) { + std::cout << "Issue with user.conf file at " << user_conf_path << std::endl; + exit(1); + } + + mlperf::LogSettings log_settings; + log_settings.log_output.outdir = logs_dir; + log_settings.log_output.prefix_with_datetime = false; + log_settings.enable_trace = false; + + + if (prg->settings->trigger_cold_run) { + prg->ColdRun(); + } + + mlperf::StartTest(&sut, &qsl, ts, log_settings, audit_conf_path); +} + +int main(int argc, char* argv[]) { + try { + Program *prg = new Program(); + TestSingleStream(prg); + delete prg; + } + catch (const string& error_message) { + cerr << "ERROR: " << error_message << endl; + return -1; + } + return 0; +} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
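The dummy-harness `_cm.yaml` that follows relies on the same `input_mapping` mechanism as the tflite script above: command-line inputs such as `scenario` or `count` are copied into `CM_*` environment variables before any native code runs. The minimal Python sketch below illustrates the intended behavior; `apply_input_mapping` is a hypothetical helper written for illustration, not the actual `cmind` implementation.

    def apply_input_mapping(inputs, input_mapping, env):
        """Copy recognized script inputs into CM_* environment variables (sketch)."""
        for key, env_key in input_mapping.items():
            if key in inputs:
                env[env_key] = str(inputs[key])  # CM env values are strings
        return env

    env = apply_input_mapping(
        {'scenario': 'Offline', 'mode': 'accuracy', 'count': 500},
        {'scenario': 'CM_MLPERF_LOADGEN_SCENARIO',
         'mode': 'CM_MLPERF_LOADGEN_MODE',
         'count': 'CM_MLPERF_LOADGEN_QUERY_COUNT'},
        {})
    assert env['CM_MLPERF_LOADGEN_QUERY_COUNT'] == '500'
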
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/_cm.yaml
new file mode 100644
index 000000000..a1f311cc7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/_cm.yaml
@@ -0,0 +1,291 @@
+# Identification of this CM script
+alias: app-mlperf-inference-dummy
+uid: 5b71627383a94576
+cache: false
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: "Modular MLPerf benchmarks"
+
+
+# User-friendly tags to find this CM script
+tags:
+  - reproduce
+  - mlcommons
+  - mlperf
+  - inference
+  - harness
+  - dummy-harness
+  - dummy
+
+# Default environment
+default_env:
+  CM_MLPERF_LOADGEN_SCENARIO: Offline
+  CM_MLPERF_LOADGEN_MODE: performance
+  CM_SKIP_PREPROCESS_DATASET: 'no'
+  CM_SKIP_MODEL_DOWNLOAD: 'no'
+  CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: dummy_harness
+  CM_MLPERF_SKIP_RUN: 'no'
+
+env:
+  CM_CALL_MLPERF_RUNNER: 'no'
+
+# Map script inputs to environment variables
+input_mapping:
+  count: CM_MLPERF_LOADGEN_QUERY_COUNT
+  max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE
+  mlperf_conf: CM_MLPERF_CONF
+  mode: CM_MLPERF_LOADGEN_MODE
+  output_dir: CM_MLPERF_OUTPUT_DIR
+  scenario: CM_MLPERF_LOADGEN_SCENARIO
+  user_conf: CM_MLPERF_USER_CONF
+  skip_preprocess: CM_SKIP_PREPROCESS_DATASET
+  skip_preprocessing: CM_SKIP_PREPROCESS_DATASET
+  target_qps: CM_MLPERF_LOADGEN_TARGET_QPS
+  offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS
+  server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS
+  target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY
+  singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY
+  multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY
+  performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT
+  rerun: CM_RERUN
+  results_repo: CM_MLPERF_INFERENCE_RESULTS_REPO
+
+new_state_keys:
+  - mlperf-inference-implementation
+  - CM_SUT_*
+
+# Env keys which are exposed to higher level scripts
+new_env_keys:
+  - CM_MLPERF_*
+  - CM_DATASET_*
+  - CM_HW_NAME
+  - CM_ML_MODEL_*
+  - CM_MAX_EXAMPLES
+  - CM_IMAGENET_ACCURACY_DTYPE
+  - CM_SQUAD_ACCURACY_DTYPE
+
+
+# Dependencies on other CM scripts
+
+deps:
+
+  # Detect host OS features
+  - tags: detect,os
+
+  # Detect host CPU features
+  - tags: detect,cpu
+
+  # Install system dependencies on a given host
+  - tags: get,sys-utils-cm
+
+
+  ########################################################################
+  # Install MLPerf inference dependencies
+
+  # Download MLPerf inference source
+  - tags: get,mlcommons,inference,src
+    names:
+    - inference-src
+
+  # Download MLPerf inference loadgen
+  - tags: get,mlcommons,inference,loadgen
+    names:
+    - inference-loadgen
+
+  # Creates user conf for given SUT
+  - tags: generate,user-conf,mlperf,inference
+    names:
+    - user-conf-generator
+
+  # Get MLPerf logging library
+  - tags: get,generic-python-lib,_mlperf_logging
+    names:
+    - mlperf-logging
+
+  - tags: get,git,repo
+    names:
+    - inference-results
+    - inference-code
+    update_tags_from_env_with_prefix:
+      _repo.: CM_MLPERF_INFERENCE_RESULTS_REPO
+    env:
+      CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO
+    extra_cache_tags: inference-implementation,mlperf
+
+# Post dependencies to run this app including for power measurement
+post_deps:
+
+  - names:
+    - runner
+    - mlperf-runner
+    skip_if_env:
+      CM_MLPERF_SKIP_RUN:
+      - 'yes'
+      - yes
+    tags: benchmark-mlperf
+
+  - tags: save,mlperf,inference,state
+    names:
+    - save-mlperf-inference-state
+
+# Variations to customize dependencies
+variations:
+  # Target devices
+  cpu:
+    group: device
+    default: true
+    env:
+      CM_MLPERF_DEVICE: cpu
+  cuda:
+    group: device
+    env:
+      CM_MLPERF_DEVICE: gpu
+      CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart
+
+  pytorch:
+    group: backend
+    default: true
+    env:
+      CM_MLPERF_BACKEND: pytorch
+
+  pytorch,cuda:
+    deps:
+    - tags: get,generic-python-lib,_torch_cuda
+
+  pytorch,cpu:
+    deps:
+    - tags: get,generic-python-lib,_torch
+
+  bs.#:
+    group: batch-size
+
+  # Reference MLPerf models
+  resnet50:
+    group: model
+    default: true
+    env:
+      CM_MODEL: resnet50
+
+  retinanet:
+    group: model
+    base:
+    - bs.1
+    env:
+      CM_MODEL: retinanet
+
+  bert_: {}
+
+  bert-99:
+    group: model
+    base:
+    - bert_
+    env:
+      CM_MODEL: bert-99
+      CM_SQUAD_ACCURACY_DTYPE: float32
+
+  bert-99.9:
+    group: model
+    base:
+    - bert_
+    env:
+      CM_MODEL: bert-99.9
+
+  gptj_:
+    deps:
+    - tags: get,ml-model,gptj
+      names:
+      - gptj-model
+    - tags: get,dataset,cnndm,_validation
+
+  gptj-99:
+    group: model
+    base:
+    - gptj_
+    env:
+      CM_MODEL: gptj-99
+      CM_SQUAD_ACCURACY_DTYPE: float32
+
+  gptj-99.9:
+    group: model
+    base:
+    - gptj_
+    env:
+      CM_MODEL: gptj-99.9
+
+  llama2-70b_: {}
+
+  llama2-70b-99:
+    group: model
+    base:
+    - llama2-70b_
+    env:
+      CM_MODEL: llama2-70b-99
+
+  llama2-70b-99.9:
+    group: model
+    base:
+    - llama2-70b_
+    env:
+      CM_MODEL: llama2-70b-99.9
+
+  singlestream:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+
+  singlestream,resnet50:
+    default_variations:
+      batch-size: bs.1
+
+  singlestream,retinanet:
+    default_variations:
+      batch-size: bs.1
+
+  multistream:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+
+  offline:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: Offline
+
+  server:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: Server
+
+  uint8:
+    group: precision
+  fp16:
+    group: precision
+  fp32:
+    group: precision
+
+docker:
+  real_run: False
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/customize.py
new file mode 100644
index 000000000..fa6a73b93
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/customize.py
@@ -0,0 +1,82 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+import shutil
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    if os_info['platform'] == 'windows':
+        return {'return': 1, 'error': 'Windows is not supported in this script yet'}
+    env = i['env']
+
+    if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+        return {'return': 0}
+
+    if 'CM_MODEL' not in env:
+        return {
+            'return': 1, 'error': 'Please select a variation specifying the model to run'}
+    if 'CM_MLPERF_BACKEND' not in env:
+        return {'return': 1,
+                'error': 'Please select a variation specifying the backend'}
+    if
'CM_MLPERF_DEVICE' not in env: + return { + 'return': 1, 'error': 'Please select a variation specifying the device to run on'} + + r = get_run_cmd(env['CM_MODEL'], i) + if r['return'] > 0: + return r + run_cmd = r['run_cmd'] + run_dir = r['run_dir'] + print(run_cmd) + print(run_dir) + return {'return': 1, 'error': 'Run command needs to be tested!'} + + +def get_run_cmd(model, i): + env = i['env'] + if "gptj" in model: + scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] + device = env['CM_MLPERF_DEVICE'] + mode = env['CM_MLPERF_LOADGEN_MODE'] + outdir = env['CM_MLPERF_OUTPUT_DIR'] + mlperf_conf_path = env['CM_MLPERF_CONF'] + user_conf_path = env['CM_MLPERF_USER_CONF'] + api_server = env.get('CM_MLPERF_INFERENCE_API_SERVER', 'localhost') + model_path = env['GPTJ_CHECKPOINT_PATH'] + dataset_path = env['CM_DATASET_CNNDM_EVAL_PATH'] + precision = env['CM_MLPERF_MODEL_PRECISION'] + if mode == "accuracy": + accuracy_string = " --accuracy " + else: + accuracy_string = "" + + run_cmd = f"python3 -u main.py --scenario {scenario} --model-path {model_path} --api-server {api_server} --api-model-name gpt-j-cnn --mlperf-conf {mlperf_conf_path} {accuracy_string} --vllm --user-conf {user_conf_path} --dataset-path {dataset_path} --output-log-dir {outdir} --dtype float32 --device {device} " + submitter = "CTuning" + run_dir = os.path.join( + env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], + "open", + submitter, + "code", + "gptj-99") + + return {'return': 0, 'run_cmd': run_cmd, 'run_dir': run_dir} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/run.sh new file mode 100644 index 000000000..ddcd0b550 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-dummy/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then + cd ${CM_RUN_DIR} + cmd=${CM_RUN_CMD} + echo "${cmd}" + eval "${cmd}" + test $? -eq 0 || exit $? +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/README.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/README.md new file mode 100644 index 000000000..fd90176c3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-intel](https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-intel) for the documentation of this CM script. 
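Both the dummy harness above and the Intel harness below gate most of their dependencies with `enable_if_env` / `skip_if_env` conditions. A rough Python model of that matching logic, assuming simplified semantics (every listed variable must hold one of the allowed values), is sketched here; `dep_is_active` is an illustrative name, not a real `cmind` function.

    def dep_is_active(dep, env):
        """Decide whether a dependency should run for the current env (sketch)."""
        def matches(cond):
            # every listed variable must hold one of the allowed values
            return all(str(env.get(var, '')) in [str(v) for v in allowed]
                       for var, allowed in cond.items())
        if 'enable_if_env' in dep and not matches(dep['enable_if_env']):
            return False
        if 'skip_if_env' in dep and matches(dep['skip_if_env']):
            return False
        return True

    runner = {'tags': 'benchmark-mlperf',
              'skip_if_env': {'CM_MLPERF_SKIP_RUN': ['yes']}}
    assert dep_is_active(runner, {'CM_MLPERF_SKIP_RUN': 'no'})
    assert not dep_is_active(runner, {'CM_MLPERF_SKIP_RUN': 'yes'})
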
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/_cm.yaml new file mode 100644 index 000000000..0975f0b0b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/_cm.yaml @@ -0,0 +1,1216 @@ +# Identification of this CM script +alias: app-mlperf-inference-intel +uid: c05a90433bb04cc1 +cache: false +can_force_cache: true + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf benchmarks" + + +# User-friendly tags to find this CM script +tags: + - reproduce + - mlcommons + - mlperf + - inference + - harness + - intel-harness + - intel + - intel-harness + - intel + +# Default environment +default_env: + CM_BATCH_COUNT: '1' + CM_BATCH_SIZE: '1' + CM_FAST_COMPILATION: 'yes' + CM_MLPERF_LOADGEN_SCENARIO: Offline + CM_MLPERF_LOADGEN_MODE: performance + CM_SKIP_PREPROCESS_DATASET: 'no' + CM_SKIP_MODEL_DOWNLOAD: 'no' + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: intel + CM_MLPERF_SKIP_RUN: 'no' + verbosity: 1 + loadgen_trigger_cold_run: 0 + +env: + CM_CALL_MLPERF_RUNNER: 'no' + CUDA_VISIBLE_DEVICES: '' + USE_CUDA: '0' + +# Map script inputs to environment variables +input_mapping: + count: CM_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: CM_MLPERF_CONF + mode: CM_MLPERF_LOADGEN_MODE + output_dir: CM_MLPERF_OUTPUT_DIR + performance_sample_count: CM_MLPERF_PERFORMANCE_SAMPLE_COUNT + scenario: CM_MLPERF_LOADGEN_SCENARIO + user_conf: CM_MLPERF_USER_CONF + skip_preprocess: CM_SKIP_PREPROCESS_DATASET + skip_preprocessing: CM_SKIP_PREPROCESS_DATASET + target_qps: CM_MLPERF_LOADGEN_TARGET_QPS + offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS + target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY + singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + rerun: CM_RERUN + +new_state_keys: + - mlperf-inference-implementation + - CM_SUT_* + + + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + + # Get MLPerf logging library + - tags: get,generic-python-lib,_mlperf_logging + names: + - mlperf-logging + + + ######################################################################## + # Install ResNet50 model (ONNX) and ImageNet + + - enable_if_env: + CM_MODEL: + - resnet50 + names: + - resnet50-model + - ml-model + tags: get,ml-model,resnet50,_fp32,_pytorch + + - enable_if_env: + CM_MODEL: + - resnet50 + names: + - imagenet-original + - dataset-original + tags: get,dataset,imagenet,original,_full + + + + ######################################################################## + # Install OpenImages + + + - enable_if_env: + CM_MODEL: + - retinanet + names: + - openimages-original + - dataset-original + tags: get,dataset,original,openimages,_validation,_custom-annotations,_full + + - enable_if_env: + CM_MODEL: + - retinanet + names: + - openimages-calibration + - dataset-calibration + tags: get,dataset,original,openimages,_calibration + + + + +# Post dependencies to run this app including for power measurement +post_deps: + + - names: + - runner + - mlperf-runner + skip_if_env: + CM_MLPERF_SKIP_RUN: + - 'yes' + - yes + 
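+    # Note: this dependency is skipped while CM_MLPERF_SKIP_RUN is 'yes'
+    # (skip_if_env above) and only becomes active once the Intel-specific
+    # run mode reaches run_harness (enable_if_env below).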
enable_if_env: + CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: + - run_harness + tags: benchmark-mlperf + + - tags: save,mlperf,inference,state + names: + - save-mlperf-inference-state + +# Variations to customize dependencies +variations: + # version + v4.0: + group: version + default: true + env: + CM_MLPERF_INFERENCE_CODE_VERSION: "v4.0" + deps: + - tags: get,mlperf,inference,results,_go + names: + inference-results + version: v4.0 + v4.0,gptj_: + adr: + pytorch: + tags: _for-intel-mlperf-inference-v4.0 + v4.0,bert_: + adr: + pytorch: + tags: _for-intel-mlperf-inference-v4.0 + v3.1: + group: version + env: + CM_MLPERF_INFERENCE_CODE_VERSION: "v3.1" + deps: + - tags: get,mlperf,inference,results,_ctuning + names: + inference-results + version: v3.1 + + v3.1,gptj_: + adr: + pytorch: + tags: _for-intel-mlperf-inference-v3.1 + v3.1,dlrm-v2_: + adr: + pytorch: + tags: _for-intel-mlperf-inference-v3.1 + v3.1,bert_: + adr: + pytorch: + tags: _for-intel-mlperf-inference-v3.1 + + # Target devices + cpu: + group: device + default: true + env: + CM_MLPERF_DEVICE: cpu + + # ML engine + pytorch: + group: framework + default: true + env: + CM_MLPERF_BACKEND: pytorch + CM_MLPERF_BACKEND_LIB_NAMESPEC: pytorch + + bs.#: + env: + ML_MLPERF_MODEL_BATCH_SIZE: "#" + + # Reference MLPerf models + resnet50: + group: model + default: true + env: + CM_MODEL: resnet50 + CM_BENCHMARK: STANDALONE_CLASSIFICATION + + resnet50,int8: + env: + CM_IMAGENET_ACCURACY_DTYPE: int8 + + bert-99: + deps: + - tags: compile,intel,model,_bert-99 + names: + - bert-99-compiler + env: + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + + retinanet: + group: model + env: + CM_MODEL: retinanet + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth" + CM_BENCHMARK: STANDALONE_OBJECT_DETECTION + + deps: + - tags: get,generic-python-lib,_numpy + names: + - pip-package + - numpy + version: "1.23.5" + + 3d-unet-99: + group: model + base: + - 3d-unet_ + env: + CM_MODEL: 3d-unet-99 + + 3d-unet-99.9: + group: model + base: + - 3d-unet_ + env: + CM_MODEL: 3d-unet-99.9 + + 3d-unet_: + env: + CM_BENCHMARK: MEDICAL_IMAGING + deps: + - tags: get,dataset,kits19,preprocessed + - tags: get,ml-model,medical-imaging,3d-unet,_pytorch,_weights + + bert_: + env: + CM_BENCHMARK: STANDALONE_BERT + + bert_,pytorch: + deps: + - tags: get,conda,_name.bert-pt + - tags: install,llvm,src,_tag.llvmorg-15.0.7,_runtimes.libcxx:libcxxabi:openmp,_clang,_release,_for-intel-mlperf-inference-v3.1-bert + names: + - llvm-from-src + - tags: get,generic-sys-util,_libffi7 + - tags: get,generic,conda-package,_package.python + names: + - conda-package + - python + version: "3.8" + - names: + - conda-package + - ncurses + tags: get,generic,conda-package,_package.ncurses,_source.conda-forge + + - tags: get,generic-sys-util,_numactl + - tags: get,generic,conda-package,_package.jemalloc,_source.conda-forge + names: + - conda-package + - jemalloc + - tags: get,pytorch,from.src,_for-intel-mlperf-inference-v3.1-bert + names: + - pytorch-from-src + - tags: install,onednn,from.src,_for-intel-mlperf-inference-v3.1-bert + names: + - onednn-from-src + - tags: install,transformers,from.src,_for-intel-mlperf-inference-v3.1-bert + names: + - transformers-from-src + + gptj_: + env: + CM_BENCHMARK: STANDALONE_GPTJ + + int4,gptj_,build-harness: + deps: + - tags: reproduce,mlperf,inference,intel,harness,_calibration + inherit_variation_tags: true + names: + - calibration + skip_inherit_variation_groups: + - run-mode + - 
device-info + - sut + - loadgen-batchsize + enable_if_env: + CM_MLPERF_INFERENCE_CODE_VERSION: + - v3.1 + force_cache: true + - tags: get,generic-python-lib,_package.optimum + names: + - pip-package + - optimum + + sdxl: + group: model + env: + CM_BENCHMARK: STANDALONE_SDXL + CM_MODEL: stable-diffusion-xl + + sdxl,pytorch: + adr: + conda-package: + tags: _name.sdxl-pt + deps: + - tags: get,conda,_name.sdxl-pt + - tags: get,python,_conda.sdxl-pt + adr: + conda-python: + version: "3.9" + - names: + - conda-package + - mkl + tags: get,generic,conda-package,_package.mkl,_source.conda-forge + - names: + - conda-package + - mkl-include + tags: get,generic,conda-package,_package.mkl-include,_source.intel + - names: + - conda-package + - llvm-openmp + tags: get,generic,conda-package,_package.llvm-openmp,_source.conda-forge + - names: + - conda-package + - ncurses + tags: get,generic,conda-package,_package.ncurses,_source.conda-forge + - tags: get,generic-sys-util,_numactl + - tags: get,generic,conda-package,_package.jemalloc,_source.conda-forge + names: + - conda-package + - jemalloc + - tags: get,generic-python-lib,_package.torch,_path.https://download.pytorch.org/whl/nightly/cpu/torch-2.3.0.dev20231214%2Bcpu-cp39-cp39-linux_x86_64.whl + names: + - pip-package + - pip-torch + - tags: get,generic-python-lib,_package.torchvision,_path.https://download.pytorch.org/whl/nightly/cpu/torchvision-0.18.0.dev20231214%2Bcpu-cp39-cp39-linux_x86_64.whl + names: + - pip-package + - pip-torchvision + - tags: get,generic-python-lib,_torch + names: + - pip-package + - torch + - tags: install,diffusers,from.src,_for-intel-mlperf-inference-v4.0-sdxl + names: + - diffusers-from-src + - tags: install,ipex,from.src,_for-intel-mlperf-inference-v4.0-sdxl + names: + - ipex-from-src + - tags: get,generic,conda-package,_package.ninja + names: + - conda-package + - ninja + - tags: get,mlcommons,inference,src + names: + - inference-src + - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build + names: + - inference-loadgen + + sdxl,build-harness: + deps: + - tags: get,generic-python-lib,_package.pybind11[global] + names: + - pip-package + - pybind11 + + sdxl,run-harness: + deps: + - tags: get,ml-model,sdxl,_fp32,_pytorch + - tags: get,dataset,coco2014,original,_validation + - tags: get,generic-python-lib,_package.opencv-python + names: + - pip-package + - opencv + - tags: get,generic-python-lib,_package.transformers + names: + - pip-package + - transformers + - tags: get,generic-python-lib,_package.accelerate + names: + - pip-package + - accelerate + - tags: get,generic-python-lib,_package.open-clip-torch + names: + - pip-package + - open-clip-torch + - tags: get,generic-python-lib,_package.pycocotools + names: + - pip-package + - pycocotools + - tags: get,generic-python-lib,_package.torchmetrics[image] + names: + - pip-package + - torchmetrics + - tags: get,generic-python-lib,_torchvision + version: "0.17.1" + names: + - pip-package + - torchvision + - tags: get,generic-python-lib,_package.py-libnuma + names: + - pip-package + - libnuma + + + + + + resnet50,pytorch: + adr: + conda-package: + tags: _name.resnet50-pt + deps: + - tags: get,conda,_name.resnet50-pt + - tags: get,python,_conda.resnet50-pt + adr: + conda-python: + version: "3.9" + - names: + - conda-package + - mkl + tags: get,generic,conda-package,_package.mkl,_source.conda-forge + - names: + - conda-package + - mkl-include + tags: get,generic,conda-package,_package.mkl-include,_source.intel + - names: + - conda-package + - llvm-openmp + tags: 
get,generic,conda-package,_package.llvm-openmp,_source.conda-forge + - names: + - conda-package + - ncurses + tags: get,generic,conda-package,_package.ncurses,_source.conda-forge + - tags: get,generic-sys-util,_numactl + - tags: get,generic,conda-package,_package.jemalloc,_source.conda-forge + names: + - conda-package + - jemalloc + - tags: get,generic-python-lib,_package.torchvision,_no-deps + names: + - pip-package + - torchvision + version: "0.13.0" + - tags: get,pytorch,from.src,_for-intel-mlperf-inference-resnet50 + - tags: install,opencv,from.src,_branch.4.x + names: + - opencv-from-src + - tags: get,git,repo,_repo.https://github.com/Tencent/rapidjson.git,_sha.e4bde977 + names: + - rapidjson-src + env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_RAPIDJSON_SRC_REPO_PATH + - tags: install,gflags,from.src + names: + - gflags-from-src + - tags: install,onednn,from.src,_branch.rls-v2.6 + names: + - onednn-from-src + - tags: get,generic-python-lib,_package.scikit-learn + names: + - pip-package + - scikit-learn + - tags: install,ipex,from.src,_for-intel-mlperf-inference-v3.1-resnet50 + names: + - ipex-from-src + - tags: get,generic,conda-package,_package.ninja + names: + - conda-package + - ninja + - tags: get,mlcommons,inference,src + names: + - inference-src + - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build + names: + - inference-loadgen + + + resnet50,build-harness: + deps: + - tags: reproduce,mlperf,inference,intel,_compile-model + inherit_variation_tags: true + force_cache: true + skip_inherit_variation_groups: + - run-mode + - loadgen-scenario + + resnet50,compile-model: + deps: + - tags: reproduce,mlperf,inference,intel,_calibration + inherit_variation_tags: true + force_cache: true + skip_inherit_variation_groups: + - run-mode + - loadgen-scenario + + retinanet,pytorch: + adr: + conda-package: + tags: _name.retinanet-pt + compiler: + tags: gcc + conda-python: + version: "3.9" + deps: + - tags: get,conda,_name.retinanet-pt + - tags: get,python,_conda.retinanet-pt + - names: + - conda-package + - mkl + tags: get,generic,conda-package,_package.mkl,_source.conda-forge + - names: + - conda-package + - libstdcxx-ng + tags: get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge + + - names: + - conda-package + - mkl + tags: get,generic,conda-package,_package.mkl,_source.intel + - names: + - conda-package + - mkl-include + tags: get,generic,conda-package,_package.mkl-include,_source.intel + - names: + - conda-package + - intel-openmp + tags: get,generic,conda-package,_package.intel-openmp,_source.intel + - names: + - conda-package + - llvm-openmp + tags: get,generic,conda-package,_package.llvm-openmp,_source.conda-forge + - names: + - conda-package + - ncurses + tags: get,generic,conda-package,_package.ncurses,_source.conda-forge + - tags: get,generic-sys-util,_numactl + - tags: get,generic,conda-package,_package.cmake,_source.conda-forge + names: + - conda-package + - cmake + - tags: get,generic,conda-package,_package.jemalloc,_source.conda-forge + names: + - conda-package + - jemalloc + - tags: get,pytorch,from.src,_for-intel-mlperf-inference-retinanet + names: + - pytorch + - tags: get,torchvision,from.src,_sha.8e078971b8aebdeb1746fea58851e3754f103053 + update_tags_from_env_with_prefix: + "_python.": + - CM_PYTHON_BIN_WITH_PATH + names: + - torchvision + - tags: install,opencv,from.src,_branch.4.x + names: + - opencv-from-src + - tags: get,git,repo,_repo.https://github.com/Tencent/rapidjson.git,_sha.e4bde977 + names: + - rapidjson-src + env: + 
CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_RAPIDJSON_SRC_REPO_PATH + - tags: install,gflags,from.src + names: + - gflags-from-src + - tags: install,onednn,from.src,_branch.rls-v2.6 + names: + - onednn-from-src + - tags: get,generic-python-lib,_package.scikit-learn + names: + - pip-package + - scikit-learn + - tags: get,generic-python-lib,_package.opencv-python + names: + - pip-package + - opencv-python + - tags: get,generic-python-lib,_package.pycocotools + names: + - pip-package + - pycocotools + - tags: install,ipex,from.src,_for-intel-mlperf-inference-v3.1-retinanet + names: + - ipex-from-src + - tags: get,generic,conda-package,_package.ninja + names: + - conda-package + - ninja + - tags: get,mlcommons,inference,src + names: + - inference-src + - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build + names: + - inference-loadgen + + + retinanet,build-harness: + deps: + - tags: reproduce,mlperf,inference,intel,_compile-model + inherit_variation_tags: true + force_cache: true + skip_inherit_variation_groups: + - run-mode + - loadgen-scenario + new_env_keys: + - CM_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH + + retinanet,compile-model: + deps: + - tags: get,ml-model,retinanet,_pytorch,_fp32 + new_env_keys: + - CM_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH + + 3d-unet_,pytorch: + adr: + conda-package: + tags: _name.3d-unet-pt + deps: + - tags: get,generic-sys-util,_libffi7 + - tags: get,conda,_name.3d-unet-pt + - tags: get,python,_conda.3d-unet-pt + adr: + conda-python: + version: "3.8" + - names: + - conda-package + - mkl + tags: get,generic,conda-package,_package.mkl,_source.conda-forge + - names: + - conda-package + - mkl-include + tags: get,generic,conda-package,_package.mkl-include,_source.intel + - names: + - conda-package + - mkl-service + tags: get,generic,conda-package,_package.mkl-service,_source.intel + - names: + - conda-package + - mkl_fft + tags: get,generic,conda-package,_package.mkl_fft,_source.intel + - names: + - conda-package + - mkl_random + tags: get,generic,conda-package,_package.mkl_random,_source.intel + - names: + - conda-package + - ncurses + tags: get,generic,conda-package,_package.ncurses,_source.conda-forge + - tags: get,generic-sys-util,_numactl + - tags: get,generic,conda-package,_package.jemalloc,_source.conda-forge + names: + - conda-package + - jemalloc + - tags: install,ipex,from.src,_for-intel-mlperf-inference-v3.1-3d-unet + names: + - ipex-from-src + - tags: get,generic,conda-package,_package.ninja + names: + - conda-package + - ninja + - tags: get,mlcommons,inference,src + names: + - inference-src + - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build + names: + - inference-loadgen + + + 3d-unet_,build-harness: + deps: + - tags: reproduce,mlperf,inference,intel,_compile-model + inherit_variation_tags: true + force_cache: true + skip_inherit_variation_groups: + - run-mode + - loadgen-scenario + + 3d-unet_,compile-model: + deps: + - tags: reproduce,mlperf,inference,intel,_calibration + inherit_variation_tags: true + force_cache: true + skip_inherit_variation_groups: + - run-mode + - loadgen-scenario + + gptj_,pytorch: + adr: + conda-package: + tags: _name.gptj-pt + deps: + - tags: get,conda,_name.gptj-pt + - tags: get,python,_conda.gptj-pt + adr: + conda-python: + version: "3.9" + - names: + - conda-package + - mkl + tags: get,generic,conda-package,_package.mkl,_source.conda-forge + enable_if_env: + CM_MLPERF_INFERENCE_CODE_VERSION: + - v4.0 + - names: + - conda-package + - mkl-include + tags: 
get,generic,conda-package,_package.mkl-include,_source.conda-forge + enable_if_env: + CM_MLPERF_INFERENCE_CODE_VERSION: + - v4.0 + - names: + - conda-package + - llvm-openmp + tags: get,generic,conda-package,_package.llvm-openmp,_source.conda-forge + enable_if_env: + CM_MLPERF_INFERENCE_CODE_VERSION: + - v4.0 + - names: + - conda-package + - pybind11 + tags: get,generic,conda-package,_package.pybind11,_source.conda-forge + enable_if_env: + CM_MLPERF_INFERENCE_CODE_VERSION: + - v4.0 + - names: + - conda-package + - ncurses + tags: get,generic,conda-package,_package.ncurses,_source.conda-forge + - tags: install,llvm,src,_for-intel-mlperf-inference-v3.1-gptj + names: + - llvm-from-src + enable_if_env: + CM_MLPERF_INFERENCE_CODE_VERSION: + - v3.1 + - tags: get,generic-sys-util,_numactl + - tags: get,generic,conda-package,_package.jemalloc,_source.conda-forge + names: + - conda-package + - jemalloc + - tags: install,ipex,from.src,_for-intel-mlperf-inference-v3.1-gptj + names: + - ipex-from-src + enable_if_env: + CM_MLPERF_INFERENCE_CODE_VERSION: + - v3.1 + - tags: get,generic,conda-package,_package.ninja + names: + - conda-package + - ninja + enable_if_env: + INTEL_GPTJ_INT4: + - 'yes' + - tags: install,tpp-pex,from.src,_for-intel-mlperf-inference-v3.1-gptj + names: + - tpp-pex-from-src + enable_if_env: + INTEL_GPTJ_INT4: + - 'yes' + CM_MLPERF_INFERENCE_CODE_VERSION: + - v3.1 + - tags: get,generic-python-lib,_package.transformers + names: + - pip-package + - transformers + version: "4.28.1" + - tags: get,mlcommons,inference,src + names: + - inference-src + - tags: get,mlcommons,inference,loadgen,_custom-python + names: + - inference-loadgen + - tags: get,ml-model,large-language-model,gptj + names: + - ml-model + - gptj-model + - gpt-j-model + - tags: get,generic-python-lib,_package.datasets + names: + - pip-package + - datasets + - tags: get,generic-python-lib,_package.accelerate + names: + - pip-package + - accelerate + - tags: get,generic-python-lib,_custom-python,_package.torch,_url.git+https://github.com/pytorch/pytorch.git@927dc662386af052018212c7d01309a506fc94cd + enable_if_env: + CM_MLPERF_INFERENCE_CODE_VERSION: + - v3.1 + env: + "+ CXXFLAGS": + - "-Wno-nonnull" + - "-Wno-maybe-uninitialized" + - "-Wno-uninitialized" + - "-Wno-free-nonheap-object" + - tags: get,generic-python-lib,_custom-python,_package.torch + env: + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu + enable_if_env: + CM_MLPERF_INFERENCE_CODE_VERSION: + - 'v4.0' + - tags: install,intel-neural-speed,_for-intel-mlperf-inference-v4.0-gptj,_branch.mlperf-v4-0 + enable_if_env: + CM_MLPERF_INFERENCE_CODE_VERSION: + - 'v4.0' + + + gptj-99: + group: model + base: + - gptj_ + env: + CM_MODEL: gptj-99 + CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + + gptj-99.9: + group: model + base: + - gptj_ + env: + CM_MODEL: gptj-99.9 + CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" + + dlrm-v2_,build-harness: + deps: + - tags: reproduce,mlperf,inference,intel,_calibration + inherit_variation_tags: true + force_cache: true + skip_inherit_variation_groups: + - run-mode + - loadgen-scenario + + dlrm-v2_,pytorch: + adr: + conda-package: + tags: _name.dlrm-v2-pt + deps: + - tags: get,conda,_name.dlrm-v2-pt + - tags:
get,python,_conda.dlrm-v2-pt + adr: + conda-python: + version: "3.9" + - names: + - conda-package + - mkl + tags: get,generic,conda-package,_package.mkl,_source.conda-forge + - names: + - conda-package + - mkl-include + tags: get,generic,conda-package,_package.mkl-include,_source.conda-forge + - names: + - conda-package + - llvm-openmp + tags: get,generic,conda-package,_package.llvm-openmp,_source.conda-forge + enable_if_env: + CM_MLPERF_INFERENCE_CODE_VERSION: + - v4.0 + - names: + - conda-package + - pybind11 + tags: get,generic,conda-package,_package.pybind11,_source.conda-forge + enable_if_env: + CM_MLPERF_INFERENCE_CODE_VERSION: + - v4.0 + - names: + - conda-package + - ncurses + tags: get,generic,conda-package,_package.ncurses,_source.conda-forge + - tags: get,generic-sys-util,_numactl + - tags: get,generic,conda-package,_package.jemalloc,_source.conda-forge + names: + - conda-package + - jemalloc + - tags: install,ipex,from.src,_for-intel-mlperf-inference-v3.1-dlrm-v2 + names: + - ipex-from-src + - tags: get,mlcommons,inference,src + names: + - inference-src + - tags: get,mlcommons,inference,loadgen,_custom-python + names: + - inference-loadgen + - tags: get,ml-model,dlrm,_pytorch + names: + - ml-model + - dlrm-v2-model + - dlrm_v2-model + - tags: get,generic-python-lib,_package.absl-py + names: + - pip-package + - absl-py + - tags: get,generic-python-lib,_package.accelerate + names: + - pip-package + - accelerate + - tags: install,pytorch,from-src,_for-intel-mlperf-inference-v3.1-dlrm-v2 + names: + - pytorch + - torch + dlrm-v2_: + env: {} + + dlrm-v2-99: + group: model + base: + - dlrm-v2_ + env: + CM_MODEL: dlrm-v2-99 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + + dlrm-v2-99.9: + group: model + base: + - dlrm-v2_ + env: + CM_MODEL: dlrm-v2-99.9 + + standalone: + group: network-mode + default: true + env: + CM_MLPERF_NETWORK_RUN_MODE: standalone + + network-server: + group: network-mode + env: + CM_MLPERF_NETWORK_RUN_MODE: network-server + + network-client: + group: network-run-mode + env: + CM_MLPERF_NETWORK_RUN_MODE: network-client + + bert_,network-server: + env: + CM_BENCHMARK: NETWORK_BERT_SERVER + + bert_,network-client: + env: + CM_BENCHMARK: NETWORK_BERT_CLIENT + + bert-99: + group: model + base: + - bert_ + env: + CM_MODEL: bert-99 + CM_SQUAD_ACCURACY_DTYPE: float32 + CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx" + + bert-99.9: + group: model + base: + - bert_ + env: + CM_MODEL: bert-99.9 + CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" + + batch_size.#: + group: loadgen-batchsize + env: + CM_MLPERF_LOADGEN_BATCH_SIZE: "#" + + + build-harness: + docker: + real_run: false + group: run-mode + env: + CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: build_harness + new_env_keys: + - CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH + - CM_ML_MODEL_* + - DATA_PATH + + compile-model: + group: run-mode + env: + CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: compilation + + calibration: + group: run-mode + env: + CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: calibration + new_env_keys: + - CM_ML_MODEL_* + - INT4_CALIBRATION_DIR + + calibration,gptj_: + deps: [] + + build-harness,bert_: + deps: + - tags: get,generic-sys-util,_rsync + - tags: get,dataset,original,squad + names: + - squad-original + - tags: get,ml-model,bert-large,_pytorch,_int8 +
names: + - bert-large + - ml-model + - tags: get,generic-python-lib,_package.tokenization + + + run-harness: + docker: + real_run: false + group: run-mode + default: true + deps: + - tags: reproduce,mlperf,inference,intel,harness,_build-harness + inherit_variation_tags: true + names: + - build-harness + skip_inherit_variation_groups: + - run-mode + - device-info + - sut + - loadgen-batchsize + - loadgen-scenario + force_cache: true + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + + # Creates user conf for given SUT + - tags: generate,user-conf,mlperf,inference + names: + - user-conf-generator + - tags: get,generic-sys-util,_rsync + + env: + CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: run_harness + + # Env keys which are exposed to higher level scripts + new_env_keys: + - CM_MLPERF_* + - CM_DATASET_* + - CM_HW_NAME + - CM_ML_MODEL_* + - CM_MAX_EXAMPLES + - CM_IMAGENET_ACCURACY_DTYPE + - CM_SQUAD_ACCURACY_DTYPE + + + + maxq: + group: power-mode + env: + CM_MLPERF_NVIDIA_HARNESS_MAXQ: yes + + maxn: + group: power-mode + env: + CM_MLPERF_NVIDIA_HARNESS_MAXN: yes + + singlestream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: SingleStream + + multistream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: MultiStream + offline: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Offline + server: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Server + + int4: + group: precision + + uint8: + group: precision + adr: + dataset-preprocessed: + tags: _uint8,_rgb8 + + int8: + alias: uint8 + + int4,gptj_: + env: + INTEL_GPTJ_INT4: 'yes' + + int8,gptj_: + env: + INTEL_GPTJ_INT4: 'no' + + fp32: + group: precision + adr: + dataset-preprocessed: + tags: _float32,_rgb32 + env: + CM_IMAGENET_ACCURACY_DTYPE: float32 + + sapphire-rapids.112c: + group: sut + env: + WARMUP: " --warmup" + + sapphire-rapids.24c: + group: sut + + sapphire-rapids.24c,gptj-99,offline,int8: + env: + KMP_BLOCKTIME: 10 + WORKERS_PER_PROC: 1 + default_env: + CM_MLPERF_LOADGEN_BATCH_SIZE: 8 + + sapphire-rapids.24c,gptj-99,offline,int4: + env: + KMP_BLOCKTIME: 10 + WORKERS_PER_PROC: 1 + default_env: + CM_MLPERF_LOADGEN_BATCH_SIZE: 8 + + sapphire-rapids.112c,gptj-99,offline,int8: + env: + KMP_BLOCKTIME: 1 + WORKERS_PER_PROC: 2 + default_env: + CM_MLPERF_LOADGEN_BATCH_SIZE: 14 + + sapphire-rapids.112c,gptj-99,offline,int4: + env: + NUM_PROC: 4 + KMP_BLOCKTIME: 1 + WORKERS_PER_PROC: 3 + default_env: + CM_MLPERF_LOADGEN_BATCH_SIZE: 8 + + sapphire-rapids.112c,gptj-99,server,int8: + env: + KMP_BLOCKTIME: 1 + WORKERS_PER_PROC: 2 + default_env: + CM_MLPERF_LOADGEN_BATCH_SIZE: 1 + + sapphire-rapids.112c,gptj-99,server,int4: + env: + KMP_BLOCKTIME: 1 + WORKERS_PER_PROC: 4 + default_env: + CM_MLPERF_LOADGEN_BATCH_SIZE: 1 + + sapphire-rapids.24c,bert_: + env: + WORKERS_PER_PROC: 1 + sapphire-rapids.112c,bert_,offline: + env: + WORKERS_PER_PROC: 4 + sapphire-rapids.112c,bert_,server: + env: + WORKERS_PER_PROC: 8 + + +docker: + real_run: False diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_bert_harness.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_bert_harness.sh new file mode 100644 index 000000000..4a2b957a9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_bert_harness.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH +echo $PWD + +if [ ! 
-d harness ]; then + mkdir -p harness +fi + +rm -rf ${CM_CONDA_LIB_PATH}/cmake/mkl/* + +rsync -avz --exclude=".git" ${CM_HARNESS_CODE_ROOT}/ harness/ +pushd harness +rsync -avz --exclude=".git" ${CM_MLPERF_INFERENCE_SOURCE}/ inference/ +test $? -eq 0 || exit $? +pushd mlperf_plugins +rm -rf onednn +rsync -avz --exclude=".git" ${CM_ONEDNN_INSTALLED_PATH}/ onednn/ +test $? -eq 0 || exit $? +popd + +mkdir build +pushd build +cmake -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang -DBUILD_TPPS_INTREE=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="$(dirname $(python3 -c 'import torch; print(torch.__file__)'));../cmake/Modules" -GNinja -DUSERCP=ON .. +test $? -eq 0 || exit $? +ninja +test $? -eq 0 || exit $? +popd +test $? -eq 0 || exit $? + +mkdir -p bert/dataset +cd bert +ln -sf ${CM_DATASET_SQUAD_VAL_PATH} dataset/dev-v1.1.json +test $? -eq 0 || exit $? +if [ ! -d model ]; then + git clone https://huggingface.co/bert-large-uncased model + cd model + rm pytorch_model.bin + ln -sf ${CM_ML_MODEL_FILE_WITH_PATH} pytorch_model.bin + test $? -eq 0 || exit $? + cd .. +fi + +cd .. +pip install boto3 tokenization +test $? -eq 0 || exit $? +bash convert.sh +test $? -eq 0 || exit $? +popd + + diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_gptj_harness.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_gptj_harness.sh new file mode 100644 index 000000000..3c2f26dc4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_gptj_harness.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH +echo $PWD + +if [ ! -d harness ]; then + mkdir -p harness +fi + +echo ${CM_HARNESS_CODE_ROOT} +cd ${CM_HARNESS_CODE_ROOT} +cd utils +python -m pip install . +test $? -eq 0 || exit $? +cd ../ + + +mkdir -p data +export WORKLOAD_DATA=$(pwd)/data +mkdir -p ${WORKLOAD_DATA}/model + +export INT8_MODEL_DIR=${WORKLOAD_DATA}/gpt-j-int8-model +export INT4_MODEL_DIR=${WORKLOAD_DATA}/gpt-j-int4-model + + +python download-dataset.py --split validation --output-dir ${WORKLOAD_DATA}/validation-data +test $? -eq 0 || exit $? +python download-calibration-dataset.py --calibration-list-file calibration-list.txt --output-dir ${WORKLOAD_DATA}/calibration-data +test $? -eq 0 || exit $? + +if [[ -f ${INT8_MODEL_DIR}/best_model.pt ]]; then + exit 0 +fi + +export CALIBRATION_DATA_JSON=${WORKLOAD_DATA}/calibration-data/cnn_dailymail_calibration.json +export VALIDATION_DATA_JSON=${WORKLOAD_DATA}/validation-data/cnn_dailymail_validation.json +#export INT4_CALIBRATION_DIR=${WORKLOAD_DATA}/quantized-int4-model +#sudo -E bash run_quantization.sh +#bash run_quantization.sh + +INSTALLED_NS=$(python -c "import neural_speed; print(neural_speed.__path__[0])") +PATH_CONVERTED=`pwd` + +export INSTALLED_NS=$INSTALLED_NS +echo "INSTALLED_NS=$INSTALLED_NS" +#export PATH_CONVERTED=$PATH_CONVERTED + +echo "${RUN_QUANTIZATION_CMD}" +eval "${RUN_QUANTIZATION_CMD}" +test $? -eq 0 || exit $? 
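build_gptj_harness.sh above assumes that customize.py has already exported its inputs (CM_CONDA_BIN_PATH, CM_HARNESS_CODE_ROOT, RUN_QUANTIZATION_CMD); if one of them is missing, the failure only surfaces deep inside the harness build. A small fail-fast check of that contract as a Python sketch; the helper name and the exact key list are illustrative, not part of this patch:

    import os

    # Keys exported by customize.py before the build step runs; the list is
    # illustrative and trimmed, not the full contract.
    REQUIRED_KEYS = ['CM_CONDA_BIN_PATH',
                     'CM_HARNESS_CODE_ROOT',
                     'RUN_QUANTIZATION_CMD']

    def check_env(keys=REQUIRED_KEYS):
        # Fail fast with one clear message instead of a late harness error.
        missing = [k for k in keys if not os.environ.get(k)]
        if missing:
            raise RuntimeError('missing env variables: ' + ', '.join(missing))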
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_resnet50_harness.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_resnet50_harness.sh new file mode 100644 index 000000000..92ef96243 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_resnet50_harness.sh @@ -0,0 +1,58 @@ +export PATH=${CM_CONDA_BIN_PATH}:$PATH +echo $PWD + + +export DATA_CAL_DIR=calibration_dataset +export CHECKPOINT=${CM_ML_MODEL_FILE_WITH_PATH} + +cd ${CM_HARNESS_CODE_ROOT} + +cd src/ckernels/ && mkdir -p 3rdparty && \ + cd 3rdparty && \ + (test -e onednn || git clone https://github.com/oneapi-src/oneDNN.git onednn) && \ + cd onednn && \ + git checkout v2.6 && cd ../../../../ + + +export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} + +export IPEX_PATH=${CM_IPEX_INSTALLED_PATH} +export TORCH_PATH=`python -c 'import torch;print(torch.utils.cmake_prefix_path)'` + +if [[ -z ${TORCH_PATH} ]]; then + echo "Torch not found" + exit 1 +fi + +export LOADGEN_DIR="${CM_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH}/../" +export OPENCV_DIR=${CM_OPENCV_BUILD_PATH} +export RAPIDJSON_INCLUDE_DIR=${CM_RAPIDJSON_SRC_REPO_PATH}/include +export GFLAGS_DIR=${CM_GFLAGS_BUILD_PATH} +export ONEDNN_DIR=${CM_ONEDNN_INSTALLED_PATH} +export USE_CUDA=0 + +BUILD_DIR=${PWD}/build +rm -rf "$BUILD_DIR" + +SRC_DIR=${PWD}/src + +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${OPENCV_DIR}/lib:${ONEDNN_DIR}/build/src:${CONDA_PREFIX}/lib +export LIBRARY_PATH=${LIBRARY_PATH}:${CONDA_PREFIX}/lib + + +cmd="cmake -DCMAKE_PREFIX_PATH=${TORCH_PATH} \ + -DLOADGEN_DIR=${LOADGEN_DIR} \ + -DOpenCV_DIR=${OPENCV_DIR} \ + -DRapidJSON_INCLUDE_DIR=${RAPIDJSON_INCLUDE_DIR} \ + -Dgflags_DIR=${GFLAGS_DIR} \ + -DINTEL_EXTENSION_FOR_PYTORCH_PATH=${IPEX_PATH} \ + -DONEDNN_DIR=${ONEDNN_DIR} \ + -DCMAKE_BUILD_TYPE=Release \ + -B${BUILD_DIR} \ + -H${SRC_DIR}" +echo "$cmd" +eval "$cmd" +test "$?" -eq 0 || exit "$?" + +cmake --build ${BUILD_DIR} --config Release -j$(nproc) +test "$?" -eq 0 || exit "$?" diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_retinanet_harness.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_retinanet_harness.sh new file mode 100644 index 000000000..0d577b26b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_retinanet_harness.sh @@ -0,0 +1,44 @@ +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +cd ${CM_HARNESS_CODE_ROOT} + + +export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} + +export IPEX_PATH=${CM_IPEX_INSTALLED_PATH} +export TORCH_PATH=`python -c 'import torch;print(torch.utils.cmake_prefix_path)'` + +if [[ -z ${TORCH_PATH} ]]; then + echo "Torch not found" + exit 1 +fi + +export LOADGEN_DIR="${CM_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH}/../" +export OPENCV_DIR=${CM_OPENCV_BUILD_PATH} +export RAPIDJSON_INCLUDE_DIR=${CM_RAPIDJSON_SRC_REPO_PATH}/include +export GFLAGS_DIR=${CM_GFLAGS_BUILD_PATH} +export USE_CUDA=0 + +BUILD_DIR=${PWD}/build +rm -rf "$BUILD_DIR" + +SRC_DIR=${PWD}/src + +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${OPENCV_DIR}/lib:${ONEDNN_DIR}/build/src +export LIBRARY_PATH=${LIBRARY_PATH}:${CONDA_PREFIX}/lib + +cmd="cmake -DCMAKE_PREFIX_PATH=${TORCH_PATH} \ + -DLOADGEN_DIR=${LOADGEN_DIR} \ + -DOpenCV_DIR=${OPENCV_DIR} \ + -DRapidJSON_INCLUDE_DIR=${RAPIDJSON_INCLUDE_DIR} \ + -Dgflags_DIR=${GFLAGS_DIR} \ + -DINTEL_EXTENSION_FOR_PYTORCH_PATH=${IPEX_PATH} \ + -DCMAKE_BUILD_TYPE=Release \ + -B${BUILD_DIR} \ + -H${SRC_DIR}" +echo "$cmd" +eval "$cmd" +test "$?" 
-eq 0 || exit "$?" + +cmake --build ${BUILD_DIR} --config Release -j$(nproc) +test "$?" -eq 0 || exit "$?" diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_sdxl_harness.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_sdxl_harness.sh new file mode 100644 index 000000000..a0817e495 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/build_sdxl_harness.sh @@ -0,0 +1,27 @@ +cd ${CM_HARNESS_CODE_ROOT} + +cd utils +cmd=" python -m pip install ." + +echo "$cmd" +eval "$cmd" +test "$?" -eq 0 || exit "$?" + +cd ../tools +wget https://raw.githubusercontent.com/mlcommons/inference/master/text_to_image/tools/coco.py +test "$?" -eq 0 || exit "$?" +cd .. + +mkdir -p coco2014/captions +wget -P coco2014/captions/ https://raw.githubusercontent.com/mlcommons/inference/master/text_to_image/coco2014/captions/captions_source.tsv +test "$?" -eq 0 || exit "$?" + +mkdir -p coco2014/latents +wget -P coco2014/latents/ https://github.com/mlcommons/inference/raw/master/text_to_image/tools/latents.pt +test "$?" -eq 0 || exit "$?" + +cd tools/ +bash download-coco-2014-calibration.sh --download-path ${PWD}/../coco2014/warmup_dataset --num-workers 1 +test "$?" -eq 0 || exit "$?" +cd .. + diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh new file mode 100644 index 000000000..82aa6906c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +export MODEL_DIR=${CM_ML_MODEL_FILE_WITH_PATH} +export DATA_DIR=/mnt/dlrm_data +echo ${CM_HARNESS_CODE_ROOT} +cd ${CM_HARNESS_CODE_ROOT} +python -m pip install scikit-learn==1.3.0 torchsnapshot torchrec==0.3.2 +test $? -eq 0 || exit $? +python -m pip install fbgemm-gpu==0.3.2 --index-url https://download.pytorch.org/whl/cpu +test $? -eq 0 || exit $? +python python/dump_torch_model.py --model-path=$MODEL_DIR --dataset-path=$DATA_DIR +test $? -eq 0 || exit $? + +python python/calibration.py \ + --max-batchsize=65536 \ + --model-path=${MODEL_DIR}/../dlrm-multihot-pytorch.pt \ + --dataset-path=/mnt/dlrm_data/ \ + --use-int8 --calibration +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh new file mode 100644 index 000000000..75a0774d5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/calibrate_gptj_int4_model.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +cd ${CM_MLPERF_INFERENCE_INTEL_CALIBRATION_PATH} +CUR_DIR=$(pwd) +export WORKLOAD_DATA=${CUR_DIR}/data +mkdir -p ${WORKLOAD_DATA} + +python download-calibration-dataset.py --calibration-list-file calibration-list.txt --output-dir ${WORKLOAD_DATA}/calibration-data +test $? -eq 0 || exit $? 
+ +export CALIBRATION_DATA_JSON=${WORKLOAD_DATA}/calibration-data/cnn_dailymail_calibration.json + +export CHECKPOINT_DIR=${WORKLOAD_DATA}/gpt-j-checkpoint +cmd="ln -s ${GPTJ_CHECKPOINT_PATH} ${CHECKPOINT_DIR}" +echo $cmd +eval $cmd + +export QUANTIZED_MODEL_DIR=${WORKLOAD_DATA}/quantized-int4-model +mkdir -p ${QUANTIZED_MODEL_DIR} + +wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh +CONDA_INSTALL_PATH=`pwd`/miniconda3 +rm -rf ${CONDA_INSTALL_PATH} +bash miniconda.sh -b -p ${CONDA_INSTALL_PATH} +export CONDA_PREFIX=${CONDA_INSTALL_PATH} + +export PATH=${CONDA_INSTALL_PATH}/bin:$PATH +conda install -y python=3.9.0 numpy=1.23.5 +python -m pip install transformers==4.21.2 +python -m pip install texttable +python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu +python -m pip install datasets +bash run_calibration_int4.sh +test $? -eq 0 || exit $? +#exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/compile_resnet50.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/compile_resnet50.sh new file mode 100644 index 000000000..ee81956ec --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/compile_resnet50.sh @@ -0,0 +1,9 @@ +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +export DATA_CAL_DIR=${CM_HARNESS_CODE_ROOT}/calibration_dataset +export CHECKPOINT=${CM_ML_MODEL_FILE_WITH_PATH} + +cd ${CM_HARNESS_CODE_ROOT} + +bash generate_torch_model.sh +test "$?" -eq 0 || exit "$?" diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/compile_retinanet.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/compile_retinanet.sh new file mode 100644 index 000000000..7e23b889a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/compile_retinanet.sh @@ -0,0 +1,11 @@ +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +export CALIBRATION_ANNOTATIONS=${CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH} + +export CALIBRATION_DATA_DIR=${CM_CALIBRATION_DATASET_PATH} +export MODEL_CHECKPOINT=${CM_ML_MODEL_FILE_WITH_PATH} + +cd ${CM_HARNESS_CODE_ROOT} + +bash run_calibration.sh +test "$?" -eq 0 || exit "$?" 
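Several scripts above wire datasets and checkpoints into the harness tree with "ln -sf" or a "test -e" guard, so reruns do not fail on links that already exist. The same idempotent-link pattern as a Python sketch; the example paths are hypothetical:

    import os

    def force_symlink(target, link_path):
        # Equivalent of `ln -sf target link_path`: replace a stale or existing
        # link, but leave a real file or directory in place untouched.
        if os.path.islink(link_path):
            os.remove(link_path)
        if not os.path.exists(link_path):
            os.symlink(target, link_path)

    # force_symlink('/data/imagenet/val', 'ILSVRC2012_img_val')  # paths hypothetical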
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/customize.py new file mode 100644 index 000000000..5a62e19a2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/customize.py @@ -0,0 +1,261 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + env = i['env'] + + if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + return {'return': 0} + + import json + if 'CM_MODEL' not in env: + return { + 'return': 1, 'error': 'Please select a variation specifying the model to run'} + if 'CM_MLPERF_BACKEND' not in env: + return {'return': 1, + 'error': 'Please select a variation specifying the backend'} + if 'CM_MLPERF_DEVICE' not in env: + return { + 'return': 1, 'error': 'Please select a variation specifying the device to run on'} + + ml_model = env['CM_MODEL'] + master_model = ml_model.replace("-99.9", "").replace("-99", "") + master_model = master_model.replace("gptj", "gpt-j") + + backend = env['CM_MLPERF_BACKEND'] + device = env['CM_MLPERF_DEVICE'] + code_base_folder = backend + '-' + device + if env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == 'v4.0': + if 'gptj' in ml_model: + code_base_folder = "ITREX" + if 'dlrm-v2' in ml_model: + code_base_folder = "pytorch-cpu-int8" + + harness_root = os.path.join( + env['CM_MLPERF_INFERENCE_RESULTS_PATH'], + 'closed', + 'Intel', + 'code', + ml_model, + code_base_folder) + + env['CM_HARNESS_CODE_ROOT'] = harness_root + + if env.get('CM_MODEL') == "resnet50": + pass + + elif "bert" in env.get('CM_MODEL'): + pass + elif "retinanet" in env.get('CM_MODEL'): + pass + elif "gptj" in env.get('CM_MODEL'): + env['CHECKPOINT_DIR'] = env['GPTJ_CHECKPOINT_PATH'] + + script_path = i['run_script_input']['path'] + if env['CM_MODEL'] == "retinanet": + env['CM_DATASET_LIST'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + + if 'CM_MLPERF_CONF' not in env: + env['CM_MLPERF_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + if 'CM_MLPERF_USER_CONF' not in env: + env['CM_MLPERF_USER_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") + + loadgen_mode = env['CM_MLPERF_LOADGEN_MODE'] + env['CONDA_PREFIX'] = env['CM_CONDA_PREFIX'] + + if env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "calibration": + if master_model == "resnet50": + i['run_script_input']['script_name'] = "prepare_imagenet_calibration" + elif master_model == "3d-unet": + i['run_script_input']['script_name'] = "prepare_3d-unet_data_model" + elif "dlrm-v2" in master_model: + i['run_script_input']['script_name'] = "calibrate_dlrm_v2_model" + else: + calibration_root = os.path.join( + env['CM_MLPERF_INFERENCE_RESULTS_PATH'], + 'closed', + 'Intel', + 'calibration', + master_model, + backend + "-" + device) + + if "gpt" in env['CM_MODEL']: + i['run_script_input']['script_name'] = "calibrate_gptj_int4_model" 
+ calibration_path = os.path.join(calibration_root, "INT4") + env['CM_MLPERF_INFERENCE_INTEL_CALIBRATION_PATH'] = calibration_path + env['INT4_CALIBRATION_DIR'] = os.path.join( + calibration_path, "data", "quantized-int4-model") + + elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "compilation": + if master_model == "resnet50": + i['run_script_input']['script_name'] = "compile_resnet50" + elif master_model == "retinanet": + i['run_script_input']['script_name'] = "compile_retinanet" + env['CM_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH'] = os.path.join( + os.path.dirname(env['CM_ML_MODEL_FILE_WITH_PATH']), 'retinanet-int8-model.pth') + + elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "build_harness": + print(f"Harness Root: {harness_root}") + if "bert" in env['CM_MODEL']: + i['run_script_input']['script_name'] = "build_bert_harness" + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( + os.getcwd(), "harness", "build", "bert_inference") + env['DATA_PATH'] = os.path.join(os.getcwd(), "harness", "bert") + elif "stable-diffusion" in env['CM_MODEL']: + i['run_script_input']['script_name'] = "build_sdxl_harness" + elif "resnet50" in env['CM_MODEL']: + i['run_script_input']['script_name'] = "build_resnet50_harness" + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( + os.getcwd(), "harness", "build", "resnet50_inference") + env['DATA_PATH'] = os.path.join(os.getcwd(), "harness", "resnet50") + elif "retinanet" in env['CM_MODEL']: + i['run_script_input']['script_name'] = "build_retinanet_harness" + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( + os.getcwd(), "harness", "build", "retinanet_inference") + elif "gpt" in env['CM_MODEL']: + i['run_script_input']['script_name'] = "build_gptj_harness" + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join( + os.getcwd(), "harness", "build", "gptj_inference") + env['DATA_PATH'] = os.path.join(os.getcwd(), "harness", "gptj") + env['MLPERF_INFERENCE_ROOT'] = env['CM_MLPERF_INFERENCE_SOURCE'] + if env.get('INTEL_GPTJ_INT4', '') == 'yes': + model_precision = "int4" + if env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == 'v3.1': + env['RUN_QUANTIZATION_CMD'] = "bash run_quantization_int4.sh" + else: + env['FILE_TAG'] = "final" + env['OUT_DIR'] = os.getcwd() + env['RUN_QUANTIZATION_CMD'] = "bash run_quantization.sh" + else: + model_precision = "int8" + env['RUN_QUANTIZATION_CMD'] = "bash run_quantization.sh" + if env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == "v3.1": + final_model_path = os.path.join( + harness_root, "data", f"gpt-j-{model_precision}-model", "best_model.pt") + else: + final_model_path = os.path.join( + env['OUT_DIR'], "checkpoint-final-final-q4-j-int8-pc.bin") + model_dir_name = f"{model_precision.upper()}_MODEL_DIR" + env[model_dir_name] = os.path.dirname(final_model_path) + if not os.path.exists(env[model_dir_name]): + os.makedirs(env[model_dir_name]) + env['CM_ML_MODEL_PATH'] = env[model_dir_name] + env['CM_ML_MODEL_FILE_WITH_PATH'] = final_model_path + if env.get('CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH', + '') != '' and env.get('INT8_MODEL_DIR', '') != '': + shutil.copy( + env['CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH'], + env[model_dir_name]) + if env.get('CM_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH', + '') != '' and env.get('INT4_MODEL_DIR', '') != '': + shutil.copy( + env['CM_MLPERF_INFERENCE_INTEL_GPTJ_INT4_MODEL_PATH'], + env[model_dir_name]) + + elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "run_harness": + print(f"Harness Root: {harness_root}") + 
if env.get('CM_MLPERF_LOADGEN_MODE', '') == "compliance": + audit_path = env['CM_MLPERF_INFERENCE_AUDIT_PATH'] + shutil.copy(audit_path, env['CM_RUN_DIR']) + + if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy": + env['LOADGEN_MODE'] = 'Accuracy' + else: + env['LOADGEN_MODE'] = 'Performance' + + if 'bert' in env['CM_MODEL']: + env['MODEL_PATH'] = os.path.dirname(os.path.dirname( + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) + env['DATASET_PATH'] = os.path.dirname(os.path.dirname( + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) + env['CM_RUN_DIR'] = i['run_script_input']['path'] + env['CM_RUN_CMD'] = "bash run_bert_harness.sh " + \ + ("--accuracy" if env['CM_MLPERF_LOADGEN_MODE'] + == "accuracy" else "") + + elif 'resnet50' in env['CM_MODEL']: + env['MODEL_PATH'] = os.path.dirname(os.path.dirname( + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) + env['DATASET_PATH'] = os.path.dirname(os.path.dirname( + env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'])) + env['CM_RUN_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] + env['CM_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_resnet50_harness.sh')} " + + elif 'retinanet' in env['CM_MODEL']: + env['MODEL_PATH'] = env['CM_ML_MODEL_RETINANET_INT8_FILE_WITH_PATH'] + env['DATA_DIR'] = env['CM_DATASET_PATH_ROOT'] + env['CM_RUN_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] + env['CM_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_retinanet_harness.sh')} " + + elif '3d-unet' in env['CM_MODEL']: + env['CM_RUN_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] + env['CM_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_3d-unet_harness.sh')} " + + elif 'dlrm' in env['CM_MODEL']: + env['CM_RUN_DIR'] = i['run_script_input']['path'] + env['CM_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_dlrm_v2_harness.sh')} " + + elif 'stable-diffusion' in env['CM_MODEL']: + env['CM_RUN_DIR'] = i['run_script_input']['path'] + env['CM_RUN_CMD'] = "bash run_sdxl_harness.sh " + \ + ("--accuracy" if env['CM_MLPERF_LOADGEN_MODE'] + == "accuracy" else "") + + elif "gptj" in env['CM_MODEL']: + env['CM_RUN_DIR'] = i['run_script_input']['path'] + if env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == "v3.1": + env['CM_RUN_CMD'] = "bash run_gptj_harness_v3_1.sh " + if env.get('INTEL_GPTJ_INT4', '') == 'yes': + model_precision = "int4" + env['INT4_MODEL_DIR'] = env['CM_ML_MODEL_PATH'] + env['QUANTIZED_MODEL'] = os.path.join( + env['INT4_MODEL_DIR'], "best_int4_model.pt") + env['PRECISION'] = "int4_bf16_mixed" + else: + env['INT8_MODEL_DIR'] = env['CM_ML_MODEL_PATH'] + env['QUANTIZED_MODEL'] = os.path.join( + env["INT8_MODEL_DIR"], "best_model.pt") + env['PRECISION'] = "int8" + elif env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == "v4.0": + env['CM_RUN_CMD'] = "bash run_gptj_harness_v4_0.sh " + + if env['CM_MLPERF_RUN_STYLE'] == "test": + env['TOTAL_SAMPLE_COUNT'] = env['CM_TEST_QUERY_COUNT'] + else: + env['TOTAL_SAMPLE_COUNT'] = env.get( + 'CM_MLPERF_MAX_QUERY_COUNT', env['CM_TEST_QUERY_COUNT']) + + if env['CM_MLPERF_LOADGEN_SCENARIO'] == "Offline": + env['WORKERS_PER_PROC'] = 4 + else: + env['WORKERS_PER_PROC'] = 1 + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/prepare_3d-unet_data_model.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/prepare_3d-unet_data_model.sh new file mode 100644 index 000000000..263388147 --- /dev/null +++ 
b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/prepare_3d-unet_data_model.sh @@ -0,0 +1,19 @@ +#!/bin/bash + + +export DOWNLOAD_DATA_DIR=${CM_DATASET_PATH} +cd ${CM_HARNESS_CODE_ROOT} + +mkdir -p build +ln -sf ${CM_DATASET_PREPROCESSED_PATH} build/preprocessed_data +mkdir -p build/model +ln -sf ${CM_ML_MODEL_FILE_WITH_PATH} build/model/3dunet_kits19_pytorch_checkpoint.pth +#make setup +#make duplicate_kits19_case_00185 + +#make preprocess_data +make preprocess_calibration_data +make preprocess_gaussian_patches + +export LD_PRELOAD=${CONDA_PREFIX}/lib/libiomp5.so:$LD_PRELOAD +python trace_model.py diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/prepare_imagenet_calibration.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/prepare_imagenet_calibration.sh new file mode 100644 index 000000000..e8a4fc61f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/prepare_imagenet_calibration.sh @@ -0,0 +1,7 @@ +cd ${CM_HARNESS_CODE_ROOT} +if [ ! -e ILSVRC2012_img_val ]; then + ln -s ${CM_DATASET_IMAGENET_VAL_PATH} ILSVRC2012_img_val +fi + +bash prepare_calibration_dataset.sh +cd - diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_3d-unet_harness.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_3d-unet_harness.sh new file mode 100644 index 000000000..78f44fb2b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_3d-unet_harness.sh @@ -0,0 +1,49 @@ +#!/bin/bash + + +scenario=${CM_MLPERF_LOADGEN_SCENARIO} +OUTDIR="${CM_MLPERF_OUTPUT_DIR}" +#python ../../user_config.py + + +CPUS_PER_INSTANCE=8 + +export DNNL_MAX_CPU_ISA=AVX512_CORE_AMX + +number_threads=`nproc --all` +export number_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` +num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }') +num_instance=$((number_cores/CPUS_PER_INSTANCE)) +export PYTHONPATH=${CM_HARNESS_CODE_ROOT}/common:$PYTHONPATH +cp -r ${CM_HARNESS_CODE_ROOT}/meta $OUTDIR/ +cp ${CM_HARNESS_CODE_ROOT}/unet3d_jit_model.pt $OUTDIR/ +cp ${CM_HARNESS_CODE_ROOT}/calibration_result.json $OUTDIR/ +ln -sf ${CM_HARNESS_CODE_ROOT}/build $OUTDIR/build +#the log path is hardcoded in the intel implementation. This is a hack to get them to where we want +rm -rf $OUTDIR/output_logs +ln -sf $OUTDIR $OUTDIR/output_logs + +PYTHON_VERSION=`python -c 'import sys; print ("{}.{}".format(sys.version_info.major, sys.version_info.minor))'` +SITE_PACKAGES=`python -c 'import site; print (site.getsitepackages()[0])'` +IPEX_VERSION=`conda list |grep torch-ipex | awk '{print $2}' ` +export LD_LIBRARY_PATH=$SITE_PACKAGES/torch_ipex-${IPEX_VERSION}-py$PYTHON_VERSION-linux-x86_64.egg/lib/:$LD_LIBRARY_PATH +export LD_PRELOAD=$CONDA_PREFIX/lib/libjemalloc.so:$LD_PRELOAD +export MALLOC_CONF="oversize_threshold:1,background_thread:true,percpu_arena:percpu,metadata_thp:always,dirty_decay_ms:9000000000,muzzy_decay_ms:9000000000"; + + +#cd ${CM_HARNESS_CODE_ROOT} +cmd="python ${CM_HARNESS_CODE_ROOT}/run.py \ + --mode ${LOADGEN_MODE} \ + --workload-name 3dunet \ + --mlperf-conf ${CM_MLPERF_CONF} \ + --user-conf ${CM_MLPERF_USER_CONF} \ + --workload-config ${CM_HARNESS_CODE_ROOT}/config.json \ + --num-instance $num_instance \ + --cpus-per-instance $CPUS_PER_INSTANCE \ + --scenario $scenario \ + --warmup 1 \ + --precision=int8" + +echo "$cmd" +eval "$cmd" +test "$?" -eq 0 || exit "$?" 
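run_3d-unet_harness.sh above sizes the benchmark by dividing the number of physical cores (counted via lscpu so that hyperthreads are ignored) by CPUS_PER_INSTANCE; the other run_*_harness.sh scripts use the same arithmetic. A Python sketch of that computation, assuming the standard lscpu -b -p=Core,Socket output format:

    import subprocess

    def physical_cores():
        # Count unique (core, socket) pairs, mirroring:
        #   lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l
        out = subprocess.check_output(['lscpu', '-b', '-p=Core,Socket'], text=True)
        return len({ln for ln in out.splitlines() if ln and not ln.startswith('#')})

    def num_instances(cpus_per_instance=8):
        # 3d-unet uses CPUS_PER_INSTANCE=8: one LoadGen instance per core group.
        return max(1, physical_cores() // cpus_per_instance)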
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_bert_harness.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_bert_harness.sh new file mode 100644 index 000000000..b49783c6f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_bert_harness.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +WORKERS_PER_PROC=${WORKERS_PER_PROC:-4} +THREADS_PER_INSTANCE=$((( ${WORKERS_PER_PROC} * ${CM_HOST_CPU_THREADS_PER_CORE}) / ${CM_HOST_CPU_SOCKETS})) + +export LD_PRELOAD=${CONDA_PREFIX}/lib/libjemalloc.so +export MALLOC_CONF="oversize_threshold:1,background_thread:true,percpu_arena:percpu,metadata_thp:always,dirty_decay_ms:9000000000,muzzy_decay_ms:9000000000"; + +accuracy=$1 + +number_threads=`nproc --all` +export number_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` +num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }') +num_instance=$(($number_cores / $THREADS_PER_INSTANCE)) + +sut_dir=${MODEL_PATH} +executable=${CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH} +mode=${CM_MLPERF_LOADGEN_SCENARIO} +OUTDIR="${CM_MLPERF_OUTPUT_DIR}" + +#python ../../user_config.py +USER_CONF="${CM_MLPERF_USER_CONF}" + +CONFIG="-n ${num_numa} -i ${num_instance} -j ${THREADS_PER_INSTANCE} --test_scenario=${mode} --model_file=${sut_dir}/bert.pt --sample_file=${sut_dir}/squad.pt --mlperf_config=${CM_MLPERF_CONF} --user_config=${USER_CONF} -o ${OUTDIR} -w 1300 --warmup ${accuracy}" + +${executable} ${CONFIG} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_dlrm_v2_harness.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_dlrm_v2_harness.sh new file mode 100644 index 000000000..65530c621 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_dlrm_v2_harness.sh @@ -0,0 +1,60 @@ +#!/bin/bash +export MODEL_DIR=${CM_ML_MODEL_FILE_WITH_PATH} +export DATA_DIR=/mnt/dlrm_data + + +NUM_SOCKETS=${CM_HOST_CPU_SOCKETS:-2} +export NUM_SOCKETS=$NUM_SOCKETS +export num_physical_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` +export CPUS_PER_SOCKET=$((num_physical_cores/NUM_SOCKETS)) +echo $CPUS_PER_SOCKET +export CPUS_PER_PROCESS=24 +#${CPUS_PER_SOCKET} +export CPUS_PER_INSTANCE=1 +export CPUS_FOR_LOADGEN=1 +export BATCH_SIZE=100 +export DNNL_MAX_CPU_ISA=AVX512_CORE_AMX + +export LD_PRELOAD=${CM_CONDA_LIB_PATH}/libiomp5.so + +export KMP_BLOCKTIME=1 +export OMP_NUM_THREADS=$CPUS_PER_INSTANCE +export KMP_AFFINITY="granularity=fine,compact,1,0" +export DNNL_PRIMITIVE_CACHE_CAPACITY=20971520 +export DLRM_DIR=$PWD/python/model +#export TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD=30469645312 + +mode="Offline" +extra_option="--samples-per-query-offline=204800" + +int8_cfg="--int8-configure-dir=int8_configure.json" +echo "Running $mode bs=$batch_size $dtype $test_type $DNNL_MAX_CPU_ISA" + +export CUDA_VISIBLE_DEVICES="" +extra_option=" $extra_option --use-int8" +export EXTRA_OPS="$extra_option" + +#export number_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` + +model_path="$MODEL_DIR/dlrm-multihot-pytorch.pt" +profile=dlrm-multihot-pytorch +cd ${CM_HARNESS_CODE_ROOT} +OUTPUT_DIR="${CM_MLPERF_OUTPUT_DIR}" + +if [[ "${CM_MLPERF_LOADGEN_MODE}" == "accuracy" ]]; then + accuracy_opt=" --accuracy" +else + accuracy_opt="" +fi + +USER_CONF="${CM_MLPERF_USER_CONF}" +cmd="python -u python/runner.py --profile $profile $common_opt --model dlrm --model-path $model_path \ +--config ${CM_MLPERF_CONF} --user-config ${CM_MLPERF_USER_CONF} \ +--dataset multihot-criteo 
--dataset-path $DATA_DIR --output $OUTPUT_DIR $EXTRA_OPS \ +--max-ind-range=40000000 --samples-to-aggregate-quantile-file=${PWD}/tools/dist_quantile.txt \ +--max-batchsize=$BATCH_SIZE --scenario=${CM_MLPERF_LOADGEN_SCENARIO} ${accuracy_opt}" + + +echo "$cmd" +#exit 1 +eval "$cmd" diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_gptj_harness_v3_1.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_gptj_harness_v3_1.sh new file mode 100644 index 000000000..74988df28 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_gptj_harness_v3_1.sh @@ -0,0 +1,51 @@ +#!/bin/bash +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +KMP_BLOCKTIME=${KMP_BLOCKTIME:-10} + +export KMP_BLOCKTIME=${KMP_BLOCKTIME} +export KMP_AFFINITY=granularity=fine,compact,1,0 +export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libiomp5.so +export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so + +export num_physical_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` +num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }') + +NUM_PROC=${NUM_PROC:-$num_numa} +CPUS_PER_PROC=$((num_physical_cores/num_numa)) +WORKERS_PER_PROC=${WORKERS_PER_PROC:-1} +TOTAL_SAMPLE_COUNT=13368 +BATCH_SIZE=${CM_MLPERF_LOADGEN_BATCH_SIZE:-8} +TIMESTAMP=$(date +%m-%d-%H-%M) +HOSTNAME=$(hostname) +#OUTPUT_DIR=offline-output-${HOSTNAME}-batch-${BATCH_SIZE}-procs-${NUM_PROC}-ins-per-proc-${WORKERS_PER_PROC}-${TIMESTAMP} + +export WORKLOAD_DATA=${CM_HARNESS_CODE_ROOT}/data +export VALIDATION_DATA_JSON=${WORKLOAD_DATA}/validation-data/cnn_dailymail_validation.json + +cd ${CM_HARNESS_CODE_ROOT} +OUTPUT_DIR="${CM_MLPERF_OUTPUT_DIR}" + +USER_CONF="${CM_MLPERF_USER_CONF}" + + +cmd="python runner.py --workload-name gptj \ + --scenario ${CM_MLPERF_LOADGEN_SCENARIO} \ + --mode ${LOADGEN_MODE} \ + --num-proc ${NUM_PROC} \ + --cpus-per-proc ${CPUS_PER_PROC} \ + --model-checkpoint-path ${CHECKPOINT_DIR} \ + --dataset-path ${VALIDATION_DATA_JSON} \ + --batch-size ${BATCH_SIZE} \ + --mlperf-conf ${CM_MLPERF_CONF} \ + --user-conf ${CM_MLPERF_USER_CONF} \ + --precision ${PRECISION} \ + --pad-inputs \ + --quantized-model ${QUANTIZED_MODEL} \ + --workers-per-proc ${WORKERS_PER_PROC} \ + --total-sample-count ${TOTAL_SAMPLE_COUNT} \ + --output-dir ${OUTPUT_DIR} \ + 2>&1 | tee ${OUTPUT_DIR}.log" + +echo "$cmd" +eval "$cmd" diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_gptj_harness_v4_0.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_gptj_harness_v4_0.sh new file mode 100644 index 000000000..9186f733a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_gptj_harness_v4_0.sh @@ -0,0 +1,75 @@ +#!/bin/bash +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +export KMP_BLOCKTIME=1 +export KMP_AFFINITY=granularity=fine,compact,1,0 +export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libiomp5.so +# export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so +# + +BATCH_SIZE=${CM_MLPERF_LOADGEN_BATCH_SIZE} + +DIR_SCRIPT=$(dirname "${BASH_SOURCE[0]}") +[ -z $DIR_NS ] && DIR_NS="$DIR_SCRIPT/gpt-j-env/neural-speed" +[ -z $VALIDATION_DATA_JSON ] && VALIDATION_DATA_JSON="$DIR_SCRIPT/gpt-j-env/cnn_dailymail_validation.json" +[ -z $CHECKPOINT_DIR ] && CHECKPOINT_DIR="$DIR_SCRIPT/gpt-j-env/finetuned_gptj" + +# num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }') + +export num_physical_cores=$(lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l) +IFS=', ' read -r -a available_nodes_list 
<<<"$(numactl -s | grep nodebind | sed -E 's/^nodebind: (.+)$/\1/')" +declare -p available_nodes_list +num_numa="${#available_nodes_list[@]}" +declare -p num_numa + +find "$DIR_NS" -name CMakeCache.txt -exec rm {} \; +CMAKE_ARGS="-DNS_PROFILING=ON" pip install -e "$DIR_NS" + +[ -z $NUM_PROC ] && NUM_PROC=$num_numa +CPUS_PER_PROC=$((num_physical_cores / num_numa)) +[ -z $WORKERS_PER_PROC ] && WORKERS_PER_PROC=1 +[ -z $CPUS_PER_WORKER ] && CPUS_PER_WORKER= # e.g. 8:8:8:8:8:8:8 +[ -z $BATCH_PROC_ALLOC ] && BATCH_PROC_ALLOC= # e.g. 12:12:12:12:12:12:12 +[ -z $LOGICAL_CORES_START ] && LOGICAL_CORES_START=-1 # set to -1 to disable / or use $num_physical_cores +[ -z $CORES_OFFSET ] && CORES_OFFSET=0 + +[ -z $BATCH_SIZE ] && BATCH_SIZE=12 +[ -z $BEAM_SIZE ] && BEAM_SIZE=4 + +OUTPUT_DIR="${CM_MLPERF_OUTPUT_DIR}" +MODEL_PATH="${CM_ML_MODEL_FILE_WITH_PATH}" +cd ${CM_HARNESS_CODE_ROOT} +export WORKLOAD_DATA=${CM_HARNESS_CODE_ROOT}/data +export VALIDATION_DATA_JSON=${WORKLOAD_DATA}/validation-data/cnn_dailymail_validation.json + + +for i in $(seq 0 $(($NUM_PROC - 1))); do + [[ ! -e "${MODEL_PATH}${i}" ]] && ln -fs "$(basename $MODEL_PATH)" "${MODEL_PATH}${i}" +done + +echo "Start time: $(date)" +cmd="python runner.py --workload-name gptj \ + --scenario ${CM_MLPERF_LOADGEN_SCENARIO} \ + --mode ${LOADGEN_MODE} \ + --num-proc ${NUM_PROC} \ + --cpus-per-proc ${CPUS_PER_PROC} \ + --dataset-path ${VALIDATION_DATA_JSON} \ + --model-path ${MODEL_PATH} \ + --model-checkpoint ${CHECKPOINT_DIR} \ + --batch-size ${BATCH_SIZE} \ + --beam-size ${BEAM_SIZE} \ + --mlperf-conf ${CM_MLPERF_CONF} \ + --user-conf ${CM_MLPERF_USER_CONF} \ + --workers-per-proc ${WORKERS_PER_PROC} \ + --total-sample-count ${TOTAL_SAMPLE_COUNT} \ + --output-dir ${OUTPUT_DIR} \ + --cores-offset ${CORES_OFFSET} \ + --logical-cores-start \"${LOGICAL_CORES_START}\" \ + --cpus-per-worker \"${CPUS_PER_WORKER}\" \ + --batch-proc-alloc \"${BATCH_PROC_ALLOC}\" \ + 2>&1 | tee ${OUTPUT_DIR}.log" +echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit $? 
+echo "End time: $(date)" + diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_resnet50_harness.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_resnet50_harness.sh new file mode 100644 index 000000000..861d891aa --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_resnet50_harness.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +CPUS_PER_INSTANCE=1 +number_threads=`nproc --all` +export number_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` +number_sockets=`grep physical.id /proc/cpuinfo | sort -u | wc -l` +cpu_per_socket=$((number_cores/number_sockets)) + +WORKERS_PER_PROC=${WORKERS_PER_PROC:-4} +THREADS_PER_INSTANCE=$((( ${WORKERS_PER_PROC} * ${CM_HOST_CPU_THREADS_PER_CORE}) / ${CM_HOST_CPU_SOCKETS})) + +export LD_PRELOAD=${CONDA_PREFIX}/lib/libjemalloc.so +export LD_PRELOAD=${CONDA_PREFIX}/lib/libiomp5.so +export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:9000000000,muzzy_decay_ms:9000000000"; + +KMP_SETTING="KMP_AFFINITY=granularity=fine,compact,1,0" +export KMP_BLOCKTIME=1 +export $KMP_SETTING + + +export DATA_DIR=${CM_HARNESS_CODE_ROOT}/ILSVRC2012_img_val +export RN50_START=${CM_HARNESS_CODE_ROOT}/models/resnet50-start-int8-model.pth +export RN50_END=${CM_HARNESS_CODE_ROOT}/models/resnet50-end-int8-model.pth +export RN50_FULL=${CM_HARNESS_CODE_ROOT}/models/resnet50-full.pth + +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CONDA_PREFIX}/lib + +rsync -avz ${CM_HARNESS_CODE_ROOT}/val_data/ ${DATA_DIR}/ +executable="${CM_HARNESS_CODE_ROOT}/build/bin/mlperf_runner" + +number_threads=`nproc --all` +export number_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` +num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }') +num_instance=$(($number_cores / $THREADS_PER_INSTANCE)) + +scenario=${CM_MLPERF_LOADGEN_SCENARIO} +OUTDIR="${CM_MLPERF_OUTPUT_DIR}" +scenario="Offline" +#python ../../user_config.py + +#--warmup_iters 20 \ +CONFIG=" --scenario ${scenario} --mode ${LOADGEN_MODE} --model_name resnet50 \ + --rn50-part1 ${RN50_START} --rn50-part3 ${RN50_END} --rn50-full-model ${RN50_FULL} \ + --data_path ${DATA_DIR} \ + --mlperf_conf ${CM_MLPERF_CONF} --user_conf ${CM_MLPERF_USER_CONF} \ + --cpus_per_instance $CPUS_PER_INSTANCE \ + --num_instance $number_cores \ + --total_sample_count 50000 \ + --batch_size 256 + " + +cmd=" ${executable} ${CONFIG}" +echo "$cmd" +eval "$cmd" +test "$?" -eq 0 || exit "$?" 
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_retinanet_harness.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_retinanet_harness.sh new file mode 100644 index 000000000..98ca3a5b2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_retinanet_harness.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +CPUS_PER_INSTANCE=8 +number_threads=`nproc --all` +export number_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` +number_sockets=`grep physical.id /proc/cpuinfo | sort -u | wc -l` +cpu_per_socket=$((number_cores/number_sockets)) +number_instance=$((number_cores/CPUS_PER_INSTANCE)) + +WORKERS_PER_PROC=${WORKERS_PER_PROC:-4} +THREADS_PER_INSTANCE=$((( ${WORKERS_PER_PROC} * ${CM_HOST_CPU_THREADS_PER_CORE}) / ${CM_HOST_CPU_SOCKETS})) + +export LD_PRELOAD=${CONDA_PREFIX}/lib/libjemalloc.so +export LD_PRELOAD=${CONDA_PREFIX}/lib/libiomp5.so +export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:9000000000,muzzy_decay_ms:9000000000"; + +KMP_SETTING="KMP_AFFINITY=granularity=fine,compact,1,0" +export KMP_BLOCKTIME=1 +export $KMP_SETTING + + +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CONDA_PREFIX}/lib + +executable="${CM_HARNESS_CODE_ROOT}/build/bin/mlperf_runner" + +number_threads=`nproc --all` +export number_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` +num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }') +num_instance=$(($number_cores / $THREADS_PER_INSTANCE)) + +scenario=${CM_MLPERF_LOADGEN_SCENARIO} +OUTDIR="${CM_MLPERF_OUTPUT_DIR}" +scenario="Offline" +#python ../../user_config.py + +#--warmup_iters 20 \ +CONFIG=" --scenario ${scenario} --mode ${LOADGEN_MODE} --model_name retinanet \ + --model_path ${MODEL_PATH} \ + --data_path ${DATA_DIR} \ + --mlperf_conf ${CM_MLPERF_CONF} --user_conf ${CM_MLPERF_USER_CONF} \ + --cpus_per_instance $CPUS_PER_INSTANCE \ + --num_instance $number_instance \ + --total_sample_count 24781 \ + --batch_size 1 + " + +cmd=" ${executable} ${CONFIG}" +echo "$cmd" +eval "$cmd" +test "$?" -eq 0 || exit "$?" 
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_sdxl_harness.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_sdxl_harness.sh
new file mode 100644
index 000000000..3dd71ec83
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-intel/run_sdxl_harness.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+export KMP_BLOCKTIME=1
+export KMP_AFFINITY=granularity=fine,compact,1,0
+export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libiomp5.so
+# export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so
+#
+
+BATCH_SIZE=${CM_MLPERF_LOADGEN_BATCH_SIZE}
+
+export num_physical_cores=$(lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l)
+num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }')
+
+
+
+OUTPUT_DIR="${CM_MLPERF_OUTPUT_DIR}"
+MODEL_PATH="${SDXL_CHECKPOINT_PATH}"
+cd ${CM_HARNESS_CODE_ROOT}
+
+NUM_PROC=1
+CPUS_PER_PROC=16
+WORKERS_PER_PROC=1
+TOTAL_SAMPLE_COUNT=5000
+BATCH_SIZE=${BATCH_SIZE:-8}  # keep CM_MLPERF_LOADGEN_BATCH_SIZE if it was set
+
+FD_MAX=$(ulimit -n -H)
+ulimit -n $((FD_MAX - 1))
+
+echo "Start time: $(date)"
+cmd="python -u main.py \
+    --dtype bfloat16 \
+    --device 'cpu' \
+    --scenario ${CM_MLPERF_LOADGEN_SCENARIO} \
+    --mode ${LOADGEN_MODE} \
+    --num-proc ${NUM_PROC} \
+    --cpus-per-proc ${CPUS_PER_PROC} \
+    --model-path ${MODEL_PATH} \
+    --batch-size ${BATCH_SIZE} \
+    --mlperf-conf ${CM_MLPERF_CONF} \
+    --user-conf ${CM_MLPERF_USER_CONF} \
+    --workers-per-proc ${WORKERS_PER_PROC} \
+    --total-sample-count ${TOTAL_SAMPLE_COUNT} \
+    --log-dir ${OUTPUT_DIR} "
+
+echo "$cmd"
+eval "$cmd"
+test $? -eq 0 || exit $?
+echo "End time: $(date)"
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/CONTRIBUTING.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/CONTRIBUTING.md
new file mode 100644
index 000000000..16850be95
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/CONTRIBUTING.md
@@ -0,0 +1,29 @@
+## Contributing to MLCommons
+
+The best way to contribute to MLCommons is to get involved with one of our many project communities.
+You can find more information about getting involved with MLCommons [here](https://mlcommons.org/en/get-involved/#getting-started).
+
+Generally we encourage people to become an MLCommons member if they wish to contribute to MLCommons projects,
+but outside pull requests are very welcome too.
+
+Regardless of whether you are a member, your organization needs to sign the MLCommons CLA.
+Please fill out this [CLA sign-up form](https://forms.gle/Ew1KkBVpyeJDuRw67) to get started.
+
+MLCommons project work is tracked with issue trackers and pull requests.
+Modify the project in your own fork and open a pull request once you want other developers
+to review and discuss the proposed changes.
+Ensure that cla-bot and other checks pass for your pull requests.
+
+## Contributing to this project
+
+Please join our [Discord server](https://discord.gg/JjWNWXKxwT)
+to learn how to use the CK technology v3 (including the MLCommons CM automation language, the CK playground
+and the Modular Inference Library) or to participate in collaborative developments.
+
+Thank you for your support; we look forward to collaborating with you!
+
+## Core contributors
+
+* [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189)
+* [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh)
+* [Grigori Fursin](https://cKnowledge.org/gfursin)
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/README-extra.md
new file mode 100644
index 000000000..b344ea7ad
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/README-extra.md
@@ -0,0 +1,83 @@
+# About
+
+The MLCommons C++ Modular Inference Library (MIL) is a community project to provide
+a simple and extensible C++ harness that connects diverse ML models, frameworks, data sets and hardware
+backends to the [MLPerf loadgen](https://github.com/mlcommons/inference/tree/master/loadgen),
+and to run the resulting benchmark using the [MLCommons CM automation language](https://github.com/mlcommons/ck/tree/master/cm).
+
+It is intended to help new submitters add new hardware backends to MLPerf,
+optimize their MLPerf results using low-level knobs,
+and automate their submission using the MLCommons CM automation language.
+
+MIL is maintained and extended by the [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+based on user feedback to make it easier to run, optimize and reproduce MLPerf inference benchmarks
+across diverse platforms with continuously changing software and hardware.
+
+MIL was originally developed by [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189).
+
+[![License](https://img.shields.io/badge/License-Apache%202.0-green)](https://github.com/mlcommons/ck/tree/master/cm)
+[![CM repository](https://img.shields.io/badge/Collective%20Mind-compatible-blue)](https://github.com/mlcommons/ck)
+
+© 2021-2023 [MLCommons](https://mlcommons.org)
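+
+## Adding a new backend
+
+A new hardware backend plugs in by deriving from the `Backend` class in
+`inc/backend.h` and implementing `RunInference()` (see the design overview below).
+The following is a minimal, hypothetical sketch mirroring the bundled `DummyBackend`;
+it is not a complete implementation:
+
+```cpp
+#include "backend.h"
+
+// Hypothetical example: completes every query with an empty response.
+// A real backend would run the model on batch_data and return its outputs.
+class MyBackend : public Backend {
+public:
+    MyBackend(std::shared_ptr<Model> &model, std::shared_ptr<Device> &device,
+              size_t performance_sample_count, size_t batch_size)
+        : Backend(model, device, performance_sample_count, batch_size) {}
+
+    void RunInference(size_t concurrency_index,
+                      const std::vector<mlperf::QuerySample> &batch,
+                      std::vector<void *> &batch_data) override {
+        // batch_data[i] points to the i-th model input for this batch,
+        // already gathered contiguously in device memory by the base class.
+        std::vector<mlperf::QuerySampleResponse> responses(batch.size());
+        for (size_t i = 0; i < batch.size(); i++) {
+            responses[i].id = batch[i].id;
+            responses[i].data = 0;  // uintptr_t pointer to this sample's output
+            responses[i].size = 0;  // size of that output in bytes
+        }
+        mlperf::QuerySamplesComplete(responses.data(), responses.size());
+    }
+};
+```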
+
+## Design
+
+This is a modularized C++ implementation of an MLPerf Inference SUT. Each file corresponds to a different class that can be changed independently of the others:
+1. `Backend` runs the actual inference using a framework (ONNX Runtime, TF Lite, etc)
+2. `Device` manages devices and memory (CPU, GPU, etc)
+3. `Model` is a struct representing a model file (ResNet50, etc)
+4. `SampleLibrary` is a dataset loader (ImageNet, COCO, etc)
+5. `System` is the SUT interface to LoadGen which manages how input queries are issued
+
+Data flow:
+* Init
+  1. All classes are initialized, e.g. `Backend` is initialized with the selected `Model` and `Device`
+* Loading samples to memory
+  1. LoadGen calls `SampleLibrary->LoadSamplesToRam()`
+  2. `SampleLibrary` reads a sample (e.g. from a .npy file) and calls `Backend->LoadSampleToRam()`
+  3. `Backend` stores samples contiguously into each device memory, e.g. by `Device->Write()`
+* Running the model
+  1. LoadGen calls `System->IssueQuery()`
+  2. `System` gathers a batch of samples, selects a device concurrency (e.g. the 3rd CPU core) and calls `Backend->IssueBatch()`
+  3. `Backend` retrieves pointers to the input data in device memory, and calls `RunInference()` implemented by a derived class, e.g. `OnnxRuntimeBackend->RunInference()`
+  4. In this example, `OnnxRuntimeBackend->RunInference()` calls the ONNX Runtime API with the retrieved pointers as input, packs the raw ONNX Runtime output into the LoadGen format via `Model->PostProcess()`, and sends the response to LoadGen
+  5. LoadGen records the latency across steps 1 to 4
+
+See the comments in the code for each class for details.
+
+## Examples
+
+### ResNet50, ONNX Runtime, CPU, Accuracy
+```sh
+cm run script "cpp mlperf _resnet50 _onnxruntime _cpu" \
+     --output_dir= \
+     --count=500 \
+     --max_batchsize=32 \
+     --mode=accuracy
+
+python \
+   /PATH/TO/inference/vision/classification_and_detection/tools/accuracy-imagenet.py \
+   --mlperf-accuracy-file=/mlperf_log_accuracy.json \
+   --imagenet-val-file `cm find cache --tags=imagenet-aux`/data/val.txt \
+   --dtype int64
+```
+
+### RetinaNet, ONNX Runtime, GPU, Accuracy
+
+Install the dataset:
+```sh
+cm run script --tags=get,preprocessed,openimages,_500,_NCHW
+```
+
+Run the benchmark:
+```sh
+cm run script "cpp mlperf _retinanet _onnxruntime _cuda" \
+     --output_dir= \
+     --count=500 \
+     --max_batchsize=1 \
+     --mode=accuracy
+
+python /PATH/TO/inference/vision/classification_and_detection/tools/accuracy-openimages.py \
+   --mlperf-accuracy-file /mlperf_log_accuracy.json \
+   --openimages-dir `cm find cache --tags=openimages,original`/install
+```
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/README.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/README.md
new file mode 100644
index 000000000..cd830ece6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-cpp](https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-cpp) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/_cm.yaml new file mode 100644 index 000000000..e13bab985 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/_cm.yaml @@ -0,0 +1,260 @@ +# Identification of this CM script +alias: app-mlperf-inference-mlcommons-cpp +uid: bf62405e6c7a44bf + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf inference benchmark pipeline" + +developers: "[Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)" + +# User-friendly tags to find this CM script +tags: + - app + - mlcommons + - mlperf + - inference + - cpp + + + +# Default environment +default_env: + CM_BATCH_COUNT: '1' + CM_BATCH_SIZE: '1' + CM_FAST_COMPILATION: "yes" + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: cpp + + +# Map script inputs to environment variables +input_mapping: + count: CM_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: CM_MLPERF_CONF + mode: CM_MLPERF_LOADGEN_MODE + output_dir: CM_MLPERF_OUTPUT_DIR + performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + scenario: CM_MLPERF_LOADGEN_SCENARIO + user_conf: CM_MLPERF_USER_CONF + +new_env_keys: + - CM_MLPERF_* + - CM_DATASET_* + - CM_ML_MODEL_* + - CM_HW_NAME + +new_state_keys: + - mlperf-inference-implementation + - CM_SUT_* + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect CUDA if required + - tags: get,cuda,_cudnn + enable_if_env: + CM_MLPERF_DEVICE: + - gpu + + ######################################################################## + # Install MLPerf inference dependencies + + # Install MLPerf loadgen + - tags: get,loadgen + names: + - loadgen + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + + ######################################################################## + # Install ML engines via CM + - enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + CM_MLPERF_DEVICE: + - cpu + tags: get,lib,onnxruntime,lang-cpp,_cpu + + - enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + CM_MLPERF_DEVICE: + - gpu + tags: get,lib,onnxruntime,lang-cpp,_cuda + + + ######################################################################## + # Install ResNet50 model (ONNX) and ImageNet + + - enable_if_env: + CM_MODEL: + - resnet50 + names: + - imagenet-preprocessed + tags: get,dataset,preprocessed,imagenet,_NCHW + + - enable_if_env: + CM_MODEL: + - resnet50 + tags: get,ml-model,raw,resnet50,_onnx + + + ######################################################################## + # Install RetinaNet model (ONNX) and OpenImages + + - enable_if_env: + CM_MODEL: + - retinanet + names: + - openimages-preprocessed + tags: get,dataset,preprocessed,openimages,_validation,_NCHW + + - enable_if_env: + CM_MODEL: + - retinanet + tags: get,ml-model,retinanet,_onnx,_fp32 + + # Creates user conf for given SUT + - tags: generate,user-conf,mlperf,inference + names: + - user-conf-generator + + +# Post dependencies to compile and run this app +post_deps: + + - names: + - compile-program + tags: compile,cpp-program + skip_if_env: + CM_MLPERF_SKIP_RUN: + - "yes" + + - names: + - 
mlperf-runner + tags: benchmark-mlperf + skip_if_env: + CM_MLPERF_SKIP_RUN: + - "yes" + + - tags: save,mlperf,inference,state + names: + - save-mlperf-inference-state + +# Variations to customize dependencies +variations: + # Target devices + cpu: + group: device + default: true + env: + CM_MLPERF_DEVICE: cpu + cuda: + group: device + env: + CM_MLPERF_DEVICE: gpu + CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + + # ML engine + onnxruntime: + group: framework + default: true + env: + CM_MLPERF_BACKEND: onnxruntime + CM_MLPERF_BACKEND_LIB_NAMESPEC: onnxruntime + + pytorch: + group: framework + env: + CM_MLPERF_BACKEND: pytorch + + tf: + group: framework + env: + CM_MLPERF_BACKEND: tf + + tflite: + group: framework + env: + CM_MLPERF_BACKEND: tflite + + tvm-onnx: + group: framework + env: + CM_MLPERF_BACKEND: tvm-onnx + + # Reference MLPerf models + resnet50: + group: model + default: true + env: + CM_MODEL: resnet50 + + retinanet: + group: model + default_env: + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 1 + env: + CM_MODEL: retinanet + + resnet50,offline: + default_env: + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 32 + + resnet50,server: + default_env: + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 32 + + resnet50,multistream: + default_env: + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 8 + + batch-size.#: + group: batch-size + env: + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: "#" + + offline: + group: loadgen-scenario + default: true + env: + CM_MLPERF_LOADGEN_SCENARIO: Offline + multistream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: MultiStream + singlestream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: SingleStream + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 1 + server: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Server + + multistream,resnet50: + default_variations: + batch-size: batch-size.8 + + offline,resnet50: + default_variations: + batch-size: batch-size.32 + + multistream,retinanet: + default_variations: + batch-size: batch-size.1 diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/customize.py new file mode 100644 index 000000000..e76b5f081 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/customize.py @@ -0,0 +1,119 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + automation = i['automation'] + + meta = i['meta'] + + if os_info['platform'] == 'windows': + print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') + print('WARNING: this script was not thoroughly tested on Windows and compilation may fail - please help us test and improve it!') + print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') +# # Currently support only LLVM on Windows +# print ('# Forcing LLVM on Windows') +# r = automation.update_deps({'deps':meta['post_deps'], 'update_deps':{'compile-program': 
{'adr':{'compiler':{'tags':'llvm'}}}}})
+#        if r['return']>0: return r
+
+    env = i['env']
+
+    if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+        return {'return': 0}
+
+    if 'CM_MODEL' not in env:
+        return {
+            'return': 1, 'error': 'Please select a variation specifying the model to run'}
+    if 'CM_MLPERF_BACKEND' not in env:
+        return {'return': 1,
+                'error': 'Please select a variation specifying the backend'}
+    if 'CM_MLPERF_DEVICE' not in env:
+        return {
+            'return': 1, 'error': 'Please select a variation specifying the device to run on'}
+
+    source_files = []
+    script_path = i['run_script_input']['path']
+    if env['CM_MODEL'] == "retinanet":
+        env['CM_DATASET_LIST'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH']
+    env['CM_SOURCE_FOLDER_PATH'] = os.path.join(script_path, "src")
+
+    for file in os.listdir(env['CM_SOURCE_FOLDER_PATH']):
+        if file.endswith(".c") or file.endswith(".cpp"):
+            source_files.append(file)
+
+    env['CM_CXX_SOURCE_FILES'] = ";".join(source_files)
+
+    # initialize both include path lists before appending to them
+    if '+CPLUS_INCLUDE_PATH' not in env:
+        env['+CPLUS_INCLUDE_PATH'] = []
+    if '+C_INCLUDE_PATH' not in env:
+        env['+C_INCLUDE_PATH'] = []
+
+    env['+CPLUS_INCLUDE_PATH'].append(os.path.join(script_path, "inc"))
+    env['+C_INCLUDE_PATH'].append(os.path.join(script_path, "inc"))
+
+    if env['CM_MLPERF_DEVICE'] == 'gpu':
+        env['+C_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
+        env['+CPLUS_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE'])
+        env.setdefault('+LD_LIBRARY_PATH', []).append(env['CM_CUDA_PATH_LIB'])
+        env.setdefault('+DYLD_FALLBACK_LIBRARY_PATH', []).append(env['CM_CUDA_PATH_INCLUDE'])
+
+    if '+ CXXFLAGS' not in env:
+        env['+ CXXFLAGS'] = []
+    env['+ CXXFLAGS'].append("-std=c++14")
+
+    # add preprocessor flag like "#define CM_MODEL_RESNET50"
+    env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper())
+    # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME"
+    env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' +
+                             env['CM_MLPERF_BACKEND'].upper())
+    # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU"
+    env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' +
+                             env['CM_MLPERF_DEVICE'].upper())
+
+    if '+ LDCXXFLAGS' not in env:
+        env['+ LDCXXFLAGS'] = []
+
+    env['+ LDCXXFLAGS'] += [
+        "-lmlperf_loadgen",
+        "-lpthread"
+    ]
+    # e.g. -lonnxruntime
+    if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env:
+        env['+ LDCXXFLAGS'].append('-l' +
+                                   env['CM_MLPERF_BACKEND_LIB_NAMESPEC'])
+    # e.g.
-lcudart + if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env: + env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC']) + + env['CM_LINKER_LANG'] = 'CXX' + env['CM_RUN_DIR'] = os.getcwd() + + if 'CM_MLPERF_CONF' not in env: + env['CM_MLPERF_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + if 'CM_MLPERF_USER_CONF' not in env: + env['CM_MLPERF_USER_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/backend.h b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/backend.h new file mode 100644 index 000000000..ccfdd25ea --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/backend.h @@ -0,0 +1,304 @@ +#ifndef BACKEND_H_ +#define BACKEND_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "loadgen.h" +#include "query_sample.h" + +#include "device.h" +#include "model.h" + +/** + * The Backend base class manages how samples are stored in memory, + * receives queries from SUT and issues them to derived classes via RunInference. + * + * For memory storage, on calls to LoadSampleFromRam() from the QSL, loaded samples are + * stored contiguously into each device memory. The Backend class retains the + * location of every sample in device memory. + * + * When SUT issues a batch to run on a device concurrency, the backend class retrieves + * the location in memory of this batch, and passes this to RunInference implemented by + * derived classes (e.g. OnnxRuntimeBackend). + */ +class Backend { +public: + Backend(std::shared_ptr &model, std::shared_ptr &device, + size_t performance_sample_count, size_t batch_size) + : model(model), device(device) + , performance_sample_count(performance_sample_count), batch_size(batch_size) + , num_memory(device->NumMemory()), num_inputs(model->num_inputs) + , batch_memory_mutex(num_memory) { + // have batch_size padding at the end that cycles back to beginning for contiguity + size_t memory_capacity = performance_sample_count + batch_size; + samples.resize(memory_capacity); + sample_memory.resize(num_inputs); + sample_memory_size.resize(num_inputs, 0); + sample_memory_offset.resize(num_inputs); + batch_memory.resize(num_inputs); + for (size_t i = 0; i < num_inputs; i++) { + sample_memory[i].resize(num_memory); + batch_memory[i].resize(num_memory); + for (size_t j = 0; j < num_memory; j++) { + sample_memory[i][j] = + device->Alloc(j, model->input_sizes[i] * memory_capacity); + // working memory for an incontiguous batch + batch_memory[i][j] = + device->Alloc(j, model->input_sizes[i] * batch_size); + } + sample_memory_offset[i].resize(memory_capacity, 0); + } + + if (performance_sample_count == 0) + std::cerr << "warning: performance sample count = 0" << std::endl; + } + + virtual ~Backend() { + for (size_t i = 0; i < num_inputs; i++) { + for (size_t j = 0; j < num_memory; j++) { + device->Free(j, sample_memory[i][j]); + device->Free(j, batch_memory[i][j]); + } + } + } + + size_t NumConcurrency() const { + return device->NumConcurrency(); + } + + size_t PerformanceSampleCount() const { + return performance_sample_count; + } + + size_t MaxBatchSize() const { + return batch_size; + } + + // load input to device memory + void LoadSampleToRam( + mlperf::QuerySampleIndex sample_index, + const 
std::vector> &input_datas, + const std::vector &input_sizes, + const std::vector> &input_shapes) { + size_t index_in_memory = num_samples_in_memory; + Sample sample; + sample.index = sample_index; + sample.shape = input_shapes; + sample.size = input_sizes; + sample.index_in_memory = index_in_memory; + + samples[index_in_memory] = sample; + sample_map[sample_index] = sample; + + for (size_t input_index = 0; input_index < num_inputs; input_index++) { + const std::vector &input_data = input_datas[input_index]; + size_t input_size = input_sizes[input_index]; + + if (sample_memory_size[input_index] + input_size > + (performance_sample_count + batch_size) * model->input_sizes[input_index]) + std::cerr << "warning: memory exceeded; try increasing model->input_sizes" << std::endl; + + // write to end of memory + sample_memory_offset[input_index][index_in_memory] = sample_memory_size[input_index]; + sample_memory_size[input_index] += input_size; + for (size_t j = 0; j < num_memory; j++) { + void *destination = GetMemoryAddress(input_index, j, index_in_memory); + device->Write(j, destination, input_data); + } + } + + num_samples_in_memory++; + } + + void FinishLoading() { + return; //This probably needs a FinishUnLoading counterpart + // copy first batch to end of memory to form cycle + for (size_t k = 0; k < batch_size - 1; k++) { + size_t index_in_memory = k % performance_sample_count; + std::vector> data(num_inputs); + for (size_t i = 0; i < num_inputs; i++) + device->Read( + 0, data[i], GetMemoryAddress(i, 0, index_in_memory), samples[index_in_memory].size[i]); + // LoadSampleToRam copies samples[index_in_memory] to end of memory + LoadSampleToRam( + samples[index_in_memory].index, data, + samples[index_in_memory].size, samples[index_in_memory].shape); + } + // write substrings of samples vector to contiguity tree + + for (size_t start = 0; start < num_samples_in_memory; start++) { + Trie *node = &batches; + for (size_t end = start; end < std::min(start + batch_size, num_samples_in_memory); end++) { + node = &node->children[samples[end].index]; + node->index_in_memory = samples[start].index_in_memory; + } + } + } + + void UnloadSampleFromRam(mlperf::QuerySampleIndex sample_index) { + for (size_t i = 0; i < num_inputs; i++) + sample_memory_size[i] -= GetSampleSize(sample_index, i); + batches.children.erase(sample_index); + num_samples_in_memory--; + } + + void IssueBatch( + size_t concurrency_index, + const std::vector &batch) { + // determine contiguity + bool contiguous = true; + Trie *node = &batches; + for (const mlperf::QuerySample &sample : batch) { + auto next = node->children.find(sample.index); + if (next != node->children.end()) { + node = &next->second; + } else { + contiguous = false; + break; + } + } + // std::cerr << "node " << concurrency_index + // << " running batch #" << batch.front().index << "-#" << batch.back().index + // << " (" << (contiguous ? 
"contiguous" : "incontiguous") << ")" << std::endl; + + // batch pointer in memory [input_index] + std::vector batch_data(num_inputs); + + // gather samples + size_t memory_index = device->GetMemoryIndex(concurrency_index); + // might use batch_memory + std::unique_lock batch_memory_lock{batch_memory_mutex[memory_index], std::defer_lock}; + for (size_t i = 0; i < num_inputs; i++) { + // if input is contiguous, use input directly as batch address + // otherwise, gather a batch to batch_memory + if (contiguous) { + batch_data[i] = GetMemoryAddress(i, memory_index, node->index_in_memory); + } else { + // copy data if not contiguous + batch_memory_lock.lock(); + for (size_t k = 0; k < batch.size(); k++) { + const mlperf::QuerySample &sample = batch[k]; + void *sample_address = GetMemoryAddress(i, memory_index, sample_map[sample.index].index_in_memory); + void *batch_sample_address = GetBatchMemoryAddress(i, memory_index, k); + device->Copy(memory_index, batch_sample_address, sample_address, GetSampleSize(sample.index, i)); + } + batch_data[i] = batch_memory[i][memory_index]; + } + } + + RunInference(concurrency_index, batch, batch_data); + } + + void *GetMemoryAddress(size_t input_index, size_t memory_index, size_t index_in_memory) const { + size_t offset = sample_memory_offset[input_index][index_in_memory]; + return static_cast(sample_memory[input_index][memory_index]) + offset; + } + + void *GetBatchMemoryAddress(size_t input_index, size_t memory_index, size_t index_in_memory) const { + size_t offset = index_in_memory * model->input_sizes[input_index]; + return static_cast(batch_memory[input_index][memory_index]) + offset; + } + + const std::vector &GetSampleShape(mlperf::QuerySampleIndex sample_index, size_t input_index) { + return sample_map[sample_index].shape[input_index]; + } + + size_t GetSampleSize(mlperf::QuerySampleIndex sample_index, size_t input_index) { + return sample_map[sample_index].size[input_index]; + } + + void SetDeviceConcurrencyIndex(size_t concurrency_index) { + device->SetConcurrencyIndex(concurrency_index); + } + + /** + * @brief Runs inference on a batch of samples and calls mlperf::QuerySamplesComplete + * + * @param concurrency_index which device concurrency (device/core/thread) to run on + * @param batch the indices of the input + * @param batch_data pointer to inputs in the device memory + */ + virtual void RunInference( + size_t concurrency_index, + const std::vector &batch, + std::vector &batch_data) = 0; + +protected: + std::shared_ptr model; + std::shared_ptr device; + size_t performance_sample_count; + size_t batch_size; + size_t num_memory; + size_t num_inputs; + +private: + // sample_memory[input_index][memory_index] points to loaded input buffer in device memory + std::vector> sample_memory; + // sample_memory_size[input_index] is current # bytes in this buffer + std::vector sample_memory_size; + // sample_memory_offset[input_index][index_in_memory] is the offset to a sample input + std::vector> sample_memory_offset; + // batch_memory[input_index][memory_index] points to working memory for a batch + std::vector> batch_memory; + + // batch_memory_mutex[memory_index] is mutex for using batch_memory + std::vector batch_memory_mutex; + + // smallest unit of input stored in memory + struct Sample { + mlperf::QuerySampleIndex index; + size_t index_in_memory; + // sample data and sizes indexed by input_index + std::vector> shape; + std::vector size; + }; + // information of samples currently in memory (ordered) + std::vector samples; + // number of samples 
currently in memory
+    size_t num_samples_in_memory{0};
+    // sample_map[sample_id] last recorded sample with sample_id in memory
+    std::map<mlperf::QuerySampleIndex, Sample> sample_map;
+
+    // tree for determining batch contiguity (index tree)
+    struct Trie {
+        // index_in_memory is location of a contiguous batch from root to this node
+        size_t index_in_memory;
+        std::map<mlperf::QuerySampleIndex, Trie> children;
+    };
+    Trie batches;
+};
+
+class DummyBackend : public Backend {
+public:
+    DummyBackend(
+        std::shared_ptr<Model> &model, std::shared_ptr<Device> &device,
+        size_t performance_sample_count, size_t batch_size)
+        : Backend(model, device, performance_sample_count, batch_size) {}
+
+    void RunInference(
+        size_t concurrency_index,
+        const std::vector<mlperf::QuerySample> &batch,
+        std::vector<void *> &batch_data) override {
+        size_t size = batch.size();
+        std::vector<mlperf::QuerySampleResponse> response(size);
+        for (size_t i = 0; i < size; i++) {
+            response[i].id = batch[i].id;
+            response[i].data = reinterpret_cast<uintptr_t>(&dummy_response);
+            response[i].size = sizeof(int64_t);
+        }
+        mlperf::QuerySamplesComplete(response.data(), response.size());
+    }
+
+private:
+    // labels for ImageNet samples #1, #324
+    int64_t dummy_response{65};
+};
+
+#endif  // BACKEND_H_
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/common.h b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/common.h
new file mode 100644
index 000000000..f572cdd13
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/common.h
@@ -0,0 +1,5 @@
+std::string getenv(const std::string& name, const std::string& default_value) {
+    const char* value = std::getenv(name.c_str());
+    return value ? value : default_value;
+}
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/device.h b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/device.h
new file mode 100644
index 000000000..7f68027b6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/device.h
@@ -0,0 +1,64 @@
+#ifndef DEVICE_H_
+#define DEVICE_H_
+
+#include <cstdlib>
+#include <cstring>
+#include <vector>
+
+/**
+ * This class represents device and memory that the benchmark is run on.
+ *
+ * It assumes there are NumMemory() separate memories on the device(s),
+ * and NumConcurrency() concurrencies for running the model,
+ * each concurrency with access to the memory GetMemoryIndex(concurrency_index).
+ *
+ * For example, a single CPU can have 1 memory (RAM),
+ * any number of concurrencies (may be number of cores), and each concurrency
+ * with access to the only memory.
+ * 2 GPUs can have 2 memories (one for each GPU), 2 concurrencies (one for each GPU).
+ *
+ * The Alloc, Free, Read, Write, Copy operations are for the corresponding device memory.
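+ *
+ * As a concrete illustration, the CPUDevice implementation below reports a
+ * fixed number of concurrencies that all share memory index 0, while GPUDevice
+ * (in gpu_device.h) maps each concurrency to its own CUDA device and memory.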
+ */
+class Device {
+public:
+    virtual size_t NumConcurrency() const = 0;
+    virtual size_t NumMemory() const = 0;
+    virtual size_t GetMemoryIndex(size_t concurrency_index) const = 0;
+    virtual void *Alloc(size_t memory_index, size_t size) = 0;
+    virtual void Free(size_t memory_index, void *data) = 0;
+    virtual void Read(size_t memory_index, std::vector<char> &to, const void *from, size_t size) = 0;
+    virtual void Write(size_t memory_index, void *to, const std::vector<char> &from) = 0;
+    virtual void Copy(size_t memory_index, void *to, const void *from, size_t size) = 0;
+    // This is specifically for CUDA, which needs to set a device index for each host thread
+    virtual void SetConcurrencyIndex(size_t concurrency_index) {}
+};
+
+class CPUDevice : public Device {
+    size_t NumConcurrency() const override {
+        return 2; // std::thread::hardware_concurrency();
+    }
+    size_t NumMemory() const override {
+        return 2;
+    }
+    size_t GetMemoryIndex(size_t concurrency_index) const override {
+        return 0;
+    }
+    void *Alloc(size_t memory_index, size_t size) override {
+        return std::malloc(size);
+    }
+    void Free(size_t memory_index, void *data) override {
+        std::free(data);
+    }
+    void Read(size_t memory_index, std::vector<char> &to, const void *from, size_t size) override {
+        to.resize(size);
+        std::memcpy(to.data(), from, size);
+    }
+    void Write(size_t memory_index, void *to, const std::vector<char> &from) override {
+        std::memcpy(to, from.data(), from.size());
+    }
+    void Copy(size_t memory_index, void *to, const void *from, size_t size) override {
+        std::memcpy(to, from, size);
+    }
+};
+
+#endif  // DEVICE_H_
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/gpu_device.h b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/gpu_device.h
new file mode 100644
index 000000000..e451417f5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/gpu_device.h
@@ -0,0 +1,53 @@
+#ifndef GPU_DEVICE_H_
+#define GPU_DEVICE_H_
+
+#include <iostream>
+
+#include "cuda.h"
+#include "cuda_runtime.h"
+
+#include "device.h"
+
+#define CHECK_CUDA_SUCCESS(x) if ((x) != cudaSuccess) std::cerr << "encountered CUDA error" << std::endl;
+
+class GPUDevice : public Device {
+    size_t NumConcurrency() const override {
+        return NumMemory();
+    }
+    size_t NumMemory() const override {
+        int num_devices;
+        CHECK_CUDA_SUCCESS(cudaGetDeviceCount(&num_devices));
+        return num_devices;
+    }
+    size_t GetMemoryIndex(size_t concurrency_index) const override {
+        return concurrency_index;
+    }
+    void *Alloc(size_t memory_index, size_t size) override {
+        void *data;
+        CHECK_CUDA_SUCCESS(cudaSetDevice(memory_index));
+        CHECK_CUDA_SUCCESS(cudaMalloc(&data, size));
+        return data;
+    }
+    void Free(size_t memory_index, void *data) override {
+        CHECK_CUDA_SUCCESS(cudaSetDevice(memory_index));
+        CHECK_CUDA_SUCCESS(cudaFree(data));
+    }
+    void Read(size_t memory_index, std::vector<char> &to, const void *from, size_t size) override {
+        to.resize(size);
+        CHECK_CUDA_SUCCESS(cudaSetDevice(memory_index));
+        CHECK_CUDA_SUCCESS(cudaMemcpy(to.data(), from, size, cudaMemcpyDeviceToHost));
+    }
+    void Write(size_t memory_index, void *to, const std::vector<char> &from) override {
+        CHECK_CUDA_SUCCESS(cudaSetDevice(memory_index));
+        CHECK_CUDA_SUCCESS(cudaMemcpy(to, from.data(), from.size(), cudaMemcpyHostToDevice));
+    }
+    void Copy(size_t memory_index, void *to, const void *from, size_t size) override {
+        CHECK_CUDA_SUCCESS(cudaSetDevice(memory_index));
+        CHECK_CUDA_SUCCESS(cudaMemcpy(to, from, size, cudaMemcpyDeviceToDevice));
+    }
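+    // CUDA selects the device per host thread, so each worker thread calls
+    // this once up front; the Alloc/Read/Write/Copy methods above also
+    // re-select the device defensively before every CUDA API call.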
+ void SetConcurrencyIndex(size_t concurrency_index) override { + CHECK_CUDA_SUCCESS(cudaSetDevice(concurrency_index)); + } +}; + +#endif // GPU_DEVICE_H_ diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/model.h b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/model.h new file mode 100644 index 000000000..163869890 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/model.h @@ -0,0 +1,124 @@ +#ifndef MODEL_H_ +#define MODEL_H_ + +#include +#include +#include +#include + +#include "query_sample.h" + +class Model { +public: + Model( + std::string model_path, + size_t num_inputs, std::vector input_names, + std::vector input_sizes, std::vector> input_shapes, + size_t num_outputs, std::vector output_names, + std::vector output_sizes, std::vector> output_shapes) : + model_path(model_path), + num_inputs(num_inputs), input_names(input_names), input_sizes(input_sizes), input_shapes(input_shapes), + num_outputs(num_outputs), output_names(output_names), output_sizes(output_sizes), output_shapes(output_shapes) {} + + std::string model_path; + + size_t num_inputs; + std::vector input_names; + // maximum size for memory allocation purpose + std::vector input_sizes; + // input shape, if fixed + std::vector> input_shapes; + + size_t num_outputs; + std::vector output_names; + // output size & shape, if fixed + std::vector output_sizes; + std::vector> output_shapes; + + /** + * @brief Post-process raw output from model and store in LoadGen response + * + * @param index query sample index + * @param raw raw outputs + * @param raw_shapes shapes of corresponding outputs + * @param response_buffer response buffer to write to + */ + virtual void PostProcess( + mlperf::QuerySampleIndex index, + const std::vector &raw, + const std::vector> &raw_shapes, + std::vector &response_buffer) = 0; +}; + +class Resnet50 : public Model { +public: + Resnet50(std::string model_path, int64_t argmax_shift) : + Model( + model_path, + 1, {"input_tensor:0"}, {3 * 224 * 224 * sizeof(float)}, {{3, 224, 224}}, + 1, {"ArgMax:0"}, {sizeof(int64_t)}, {{1}}), + argmax_shift(argmax_shift) {} + + void PostProcess( + mlperf::QuerySampleIndex index, + const std::vector &raw, + const std::vector> &raw_shapes, + std::vector &response_buffer) override { + response_buffer.resize(sizeof(int64_t)); + int64_t *buffer = reinterpret_cast(response_buffer.data()); + buffer[0] = *static_cast(raw.front()) + argmax_shift; + } + +private: + int64_t argmax_shift; +}; + +class Retinanet : public Model { +public: + Retinanet(std::string model_path, size_t width, size_t height, float score_threshold) : + Model( + model_path, + 1, {"images"}, {3 * width * height * sizeof(float)}, {{3, width, height}}, + // no fixed output sizes/shapes + 3, {"boxes", "scores", "labels"}, {0, 0, 0}, {{0, 4}, {0}, {0}}), + width(width), height(height), + score_threshold(score_threshold) {} + + void PostProcess( + mlperf::QuerySampleIndex index, + const std::vector &raw, + const std::vector> &raw_shapes, + std::vector &response_buffer) override { + float *const boxes = static_cast(raw.at(0)); + float *const scores = static_cast(raw.at(1)); + int64_t *const labels = static_cast(raw.at(2)); + const std::vector &boxes_shape = raw_shapes.at(0); + const std::vector &scores_shape = raw_shapes.at(1); + const std::vector &labels_shape = raw_shapes.at(2); + + size_t keep = 0; + while (keep < scores_shape[0] && scores[keep] >= score_threshold) + keep++; + + response_buffer.resize(7 * keep * 
sizeof(float)); + float *buffer = reinterpret_cast(response_buffer.data()); + for (size_t i = 0; i < keep; i++) { + int64_t label = labels[i]; + float *const box = &boxes[4 * i]; + buffer[7 * i + 0] = static_cast(index); + buffer[7 * i + 1] = box[1] / 800.0f; + buffer[7 * i + 2] = box[0] / 800.0f; + buffer[7 * i + 3] = box[3] / 800.0f; + buffer[7 * i + 4] = box[2] / 800.0f; + buffer[7 * i + 5] = scores[i]; + buffer[7 * i + 6] = static_cast(label); + } + } + +private: + size_t width; + size_t height; + float score_threshold; +}; + +#endif // MODEL_H_ diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/npy.h b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/npy.h new file mode 100644 index 000000000..cb69db53b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/npy.h @@ -0,0 +1,143 @@ +/* + * Adapted from NVIDIA code. Original copyright notice: + * + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NPY_H_ +#define NPY_H_ + +#include +#include +#include +#include +#include +#include +#include + +// patch glog +#include +#define CHECK(x) if (x) {} else std::cerr + +namespace npy { + class NpyFile { + private: + std::string m_Path; + std::ifstream m_FStream; + size_t m_HeaderSize; + std::string m_Header; + size_t m_TensorSize; + size_t m_ElementSize; + std::vector m_TensorDims; + std::vector m_Cache; + public: + NpyFile(const std::string& path, bool cache = false) : m_Path(path), m_FStream(m_Path) { + // magic and fixed header + char b[256]; + m_FStream.read(b, 10); + CHECK(m_FStream) << "Unable to parse: " << m_Path; + + // check magic + CHECK(static_cast(b[0]) == 0x93 && b[1] == 'N' && b[2] == 'U' && b[3] == 'M' && b[4] == 'P' && b[5] == 'Y') << "Bad magic: " << m_Path; + + // get header + auto major = *reinterpret_cast(b + 6); + //auto minor = *reinterpret_cast(b + 7); + CHECK(major == 1) << "Only npy version 1 is supported: " << m_Path; + m_HeaderSize = *reinterpret_cast(b + 8); + m_Header.resize(m_HeaderSize); + // const cast for c++14 + m_FStream.read(const_cast(m_Header.data()), m_HeaderSize); + + // get file size + auto cur = m_FStream.tellg(); + m_FStream.seekg(0, std::ios::end); + auto size = m_FStream.tellg(); + m_TensorSize = size - cur; + + // cache result + if (cache) { + m_FStream.seekg(10 + m_HeaderSize, std::ios::beg); + m_Cache.resize(m_TensorSize); + m_FStream.read(m_Cache.data(), m_TensorSize); + CHECK(m_FStream) << "Unable to parse: " << m_Path; + CHECK(m_FStream.peek() == EOF) << "Did not consume full file: " << m_Path; + } + + // parse header + std::regex re(R"re(\{'descr': '[<|][fi]([\d])', 'fortran_order': False, 'shape': \(([\d, ]*)\), \} +\n)re"); + std::smatch matches; + CHECK(std::regex_match(m_Header, matches, re)) << "Cannot parse numpy header: " << m_Path; + CHECK(matches.size() == 3) << "Cannot parse numpy header: " << m_Path; + m_ElementSize = std::stoi(matches[1]); + std::vector dims 
= splitString(matches[2], ", "); + m_TensorDims.resize(dims.size()); + std::transform(dims.begin(), dims.end(), m_TensorDims.begin(), [](const std::string& s){ return std::stoi(s); }); + + // check header sanity + size_t tensorSize = std::accumulate(m_TensorDims.begin(), m_TensorDims.end(), m_ElementSize, std::multiplies()); + CHECK(tensorSize == m_TensorSize) << "Header description does not match file size: " << m_Path; + + } + ~NpyFile() { + m_FStream.close(); + }; + std::vector getDims() { + return m_TensorDims; + } + size_t getTensorSize() { + return m_TensorSize; + } + // load the entire tensor + void loadAll(std::vector& dst) { + m_FStream.seekg(10 + m_HeaderSize, std::ios::beg); + dst.resize(m_TensorSize); + m_FStream.read(dst.data(), m_TensorSize); + CHECK(m_FStream) << "Unable to parse: " << m_Path; + CHECK(m_FStream.peek() == EOF) << "Did not consume full file: " << m_Path; + } + // cache the entire tensor + void cacheAll() { + loadAll(m_Cache); + } + // load only selected indices from the Tensor, assuming that the first dim is batch dim. + void loadSamples(std::vector& dst, const std::vector& indices) { + if (m_Cache.empty()) { + cacheAll(); + } + size_t sampleSize = std::accumulate(m_TensorDims.begin() + 1, m_TensorDims.end(), m_ElementSize, std::multiplies()); + dst.resize(sampleSize * indices.size()); + for (size_t i = 0; i < indices.size(); i++) { + std::memcpy(dst.data() + i * sampleSize, m_Cache.data() + indices[i] * sampleSize, sampleSize); + } + } + // helper function to split a string based on a delimiting character + std::vector splitString(const std::string& input, const std::string& delimiter) + { + std::vector result; + size_t start = 0; + size_t next = 0; + while(next != std::string::npos) + { + next = input.find(delimiter, start); + result.emplace_back(input, start, next - start); + start = next + 1; + } + return result; + } + }; +} + +#endif // NPY_H_ diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/onnxruntime_backend.h b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/onnxruntime_backend.h new file mode 100644 index 000000000..eab583897 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/onnxruntime_backend.h @@ -0,0 +1,132 @@ +#ifndef ONNXRUNTIME_BACKEND_H_ +#define ONNXRUNTIME_BACKEND_H_ + +#include +#include +#include + +#include "onnxruntime_cxx_api.h" + +#include "loadgen.h" + +#include "backend.h" + +class OnnxRuntimeBackend : public Backend { +public: + OnnxRuntimeBackend( + std::shared_ptr &model, std::shared_ptr &device, + size_t performance_sample_count, size_t batch_size, + bool use_cuda) + : Backend(model, device, performance_sample_count, batch_size) + , env(ORT_LOGGING_LEVEL_WARNING, "env") { + for (size_t i = 0; i < device->NumMemory(); i++) { + memory_infos.emplace_back( + use_cuda ? 
"Cuda" : "Cpu", + OrtAllocatorType::OrtArenaAllocator, i, OrtMemTypeDefault); + } + + for (size_t i = 0; i < device->NumConcurrency(); i++) { + Ort::SessionOptions session_options; + // arm64 does not work with optimization level 3 (ORT_ENABLE_ALL) + if (getenv("ORT_ENABLE_ALL", "") == "no") + session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED); + + const auto &api = Ort::GetApi(); + + std::vector keys{"device_id"}; + std::vector values{std::to_string(i).c_str()}; + + OrtCUDAProviderOptionsV2 *cuda_options = nullptr; + if (use_cuda) { + Ort::ThrowOnError(api.CreateCUDAProviderOptions(&cuda_options)); + + Ort::ThrowOnError(api.UpdateCUDAProviderOptions(cuda_options, keys.data(), values.data(), keys.size())); + + Ort::ThrowOnError(api.SessionOptionsAppendExecutionProvider_CUDA_V2( + static_cast(session_options), + cuda_options)); + } + + sessions.emplace_back(env, model->model_path.c_str(), session_options); + bindings.emplace_back(sessions[i]); + + if (use_cuda) { + api.ReleaseCUDAProviderOptions(cuda_options); + } + } + } + + void RunInference( + size_t concurrency_index, + const std::vector &batch, + std::vector &batch_data) override { + Ort::Session &session = sessions[concurrency_index]; + Ort::IoBinding &binding = bindings[concurrency_index]; + size_t memory_index = device->GetMemoryIndex(concurrency_index); + + for (size_t i = 0; i < model->num_inputs; i++) { + size_t size = batch.size() * GetSampleSize(batch.front().index, i); + const std::vector &shape = GetSampleShape(batch.front().index, i); + std::vector input_shape; + input_shape.push_back(batch.size()); + for (size_t dim : shape) + input_shape.push_back(dim); + ONNXTensorElementDataType input_type = + session.GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetElementType(); + Ort::Value value = Ort::Value::CreateTensor( + memory_infos[memory_index], + batch_data[i], size, + input_shape.data(), input_shape.size(), + input_type); + binding.BindInput(model->input_names[i].c_str(), value); + } + + for (std::string &output : model->output_names) + binding.BindOutput(output.c_str(), memory_info_cpu); + + session.Run(Ort::RunOptions(), binding); + + std::vector outputs = binding.GetOutputValues(); + std::vector responses(batch.size()); + std::vector> response_buffers(batch.size()); + for (size_t i = 0; i < batch.size(); i++) { + // get output data and shapes + std::vector output_buffers(outputs.size()); + std::vector> output_shapes(outputs.size()); + for (size_t j = 0; j < outputs.size(); j++) { + // assume ith position in output is ith sample in batch + output_buffers[j] = + static_cast(outputs[j].GetTensorMutableData()) + + i * model->output_sizes[j]; + size_t rank = outputs[j].GetTensorTypeAndShapeInfo().GetDimensionsCount(); + std::vector output_shape(rank); + outputs[j].GetTensorTypeAndShapeInfo().GetDimensions(output_shape.data(), rank); + output_shapes[j].resize(rank); + for (size_t k = 0; k < rank; k++) + output_shapes[j][k] = output_shape[k]; + } + + model->PostProcess( + batch[i].index, output_buffers, output_shapes, response_buffers[i]); + + responses[i].id = batch[i].id; + responses[i].data = reinterpret_cast(response_buffers[i].data()); + responses[i].size = response_buffers[i].size(); + } + + mlperf::QuerySamplesComplete(responses.data(), responses.size()); + + binding.ClearBoundInputs(); + binding.ClearBoundOutputs(); + }; + +private: + Ort::Env env; + std::vector sessions; + std::vector bindings; + std::vector memory_infos; + Ort::MemoryInfo memory_info_cpu{ + 
Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemTypeDefault)}; +}; + +#endif // ONNXRUNTIME_BACKEND_H_ diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/sample_library.h b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/sample_library.h new file mode 100644 index 000000000..045ddc961 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/sample_library.h @@ -0,0 +1,181 @@ +#ifndef SAMPLE_LIBRARY_H_ +#define SAMPLE_LIBRARY_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "query_sample_library.h" + +#include "backend.h" +#include "npy.h" + +/** + * SampleLibrary reads stored samples on request of LoadGen and passes + * them to Backend. Derived classes specify how samples are read (e.g. from .npy files) + */ +class SampleLibrary : public mlperf::QuerySampleLibrary { +public: + SampleLibrary( + const std::string &name, std::shared_ptr &backend, + size_t max_sample_count, size_t num_inputs) + : name(name), backend(backend), max_sample_count(max_sample_count), num_inputs(num_inputs) {} + + const std::string &Name() override { return name; } + size_t PerformanceSampleCount() override { return backend->PerformanceSampleCount(); } + size_t TotalSampleCount() override { + return max_sample_count != 0 ? std::min(max_sample_count, NumSamples()) : NumSamples(); + } + + void LoadSamplesToRam(const std::vector &samples) override { + std::cerr << "loading samples to ram with total sample size: " << samples.size()<< std::endl; + for (size_t i = 0; i < samples.size(); i++) { + mlperf::QuerySampleIndex sample = samples[i]; + std::vector> input_datas(num_inputs); + std::vector input_sizes(num_inputs); + std::vector> input_shapes(num_inputs); + for (size_t j = 0; j < num_inputs; j++) { + GetSample(sample, j, input_datas[j], input_sizes[j], input_shapes[j]); + } + backend->LoadSampleToRam(sample, input_datas, input_sizes, input_shapes); + } + backend->FinishLoading(); + } + + void UnloadSamplesFromRam(const std::vector &samples) override { + for (mlperf::QuerySampleIndex sample : samples){ + backend->UnloadSampleFromRam(sample); + } + } + + virtual size_t NumSamples() = 0; + + virtual void GetSample( + mlperf::QuerySampleIndex sample_index, + size_t input_index, + std::vector &data, + size_t &size, + std::vector &shape) = 0; + +protected: + std::shared_ptr backend; + size_t max_sample_count; + size_t num_inputs; + +private: + std::string name{"SampleLibrary"}; +}; + +class NumpyLibrary : public SampleLibrary { +public: + /** + * @brief Constructs a QSL with .npy files in a directory + * + * @param backend backend to use + * @param max_sample_count maximum library size (use 0 for unlimited size) + * @param preprocessed_path path to directory containing .npy files + * @param filenames filenames of npy files: / + */ + NumpyLibrary( + std::shared_ptr &backend, size_t max_sample_count, + std::string preprocessed_path, + const std::vector &filenames) + : SampleLibrary("NumpyLibrary", backend, max_sample_count, 1) { + for (std::string filename : filenames) { + std::string file_path = preprocessed_path + "/" + filename; + + std::ifstream f(file_path); + if (f.good()) + file_paths.push_back(file_path); + } + } + + size_t NumSamples() override { + return file_paths.size(); + } + + void GetSample( + mlperf::QuerySampleIndex sample_index, + size_t input_index, + std::vector &data, + size_t &size, + std::vector &shape) override { + npy::NpyFile 
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/system.h b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/system.h
new file mode 100644
index 000000000..b2bab3904
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/system.h
@@ -0,0 +1,135 @@
+#ifndef SYSTEM_H_
+#define SYSTEM_H_
+
+#include <algorithm>
+#include <condition_variable>
+#include <deque>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <thread>
+#include <vector>
+
+#include "system_under_test.h"
+
+#include "backend.h"
+
+/**
+ * A System class represents the policy by which requests from LoadGen are handled.
+ *
+ * StreamSUT immediately takes any LoadGen requests to device concurrency 0
+ * (this is for single-stream).
+ *
+ * QueueSUT maintains a queue of LoadGen requests and dequeues requests to
+ * any available device concurrency.
+ */
+class System : public mlperf::SystemUnderTest {
+public:
+  System(const std::string &name, std::shared_ptr<Backend> &backend)
+      : name(name), backend(backend) {}
+  const std::string &Name() override { return name; }
+
+protected:
+  std::shared_ptr<Backend> backend;
+
+private:
+  std::string name;
+};
+
+class StreamSUT : public System {
+public:
+  StreamSUT(std::shared_ptr<Backend> &backend) : System("StreamSUT", backend) {}
+
+  void IssueQuery(const std::vector<mlperf::QuerySample> &samples) override {
+    size_t max_batch_size = backend->MaxBatchSize();
+    size_t size = samples.size();
+
+    // for CUDA, set the device to use for this thread
+    backend->SetDeviceConcurrencyIndex(0);
+
+    for (auto batch_begin = samples.begin(); batch_begin < samples.end(); batch_begin += max_batch_size) {
+      auto batch_end = std::min(batch_begin + max_batch_size, samples.end());
+      const std::vector<mlperf::QuerySample> batch_queries(batch_begin, batch_end);
+      backend->IssueBatch(0, batch_queries);
+    }
+  }
+
+  void FlushQueries() override {}
+};
+
+class QueueSUT : public System {
+public:
+  QueueSUT(std::shared_ptr<Backend> &backend)
+      : System("QueueSUT", backend) {
+    // launch threads
+    size_t num_threads = backend->NumConcurrency();
+    for (size_t i = 0; i < num_threads; i++)
+      threads.emplace_back(&QueueSUT::ThreadStart, this, i);
+  }
+
+  ~QueueSUT() override {
+    {
+      std::lock_guard<std::mutex> lock(queries_mutex);
+      done = true;
+    }
+    has_queries.notify_all();
+
+    for (std::thread &thread : threads)
+      thread.join();
+  }
+
+  void IssueQuery(const std::vector<mlperf::QuerySample> &samples) override {
+    // enqueue queries
+    {
+      std::lock_guard<std::mutex> lock(queries_mutex);
+      queries.insert(queries.end(), samples.begin(), samples.end());
+    }
+    has_queries.notify_one();
+  }
+
+  void FlushQueries() override {}
+
+private:
+  void ThreadStart(size_t concurrency_index) {
+    size_t max_batch_size = backend->MaxBatchSize();
+    std::vector<mlperf::QuerySample> batch_queries;
+    batch_queries.reserve(max_batch_size);
+    size_t size;
+
+    // for CUDA, set the device to use for this thread
+    backend->SetDeviceConcurrencyIndex(concurrency_index);
+
+    while (true) {
+      // dequeue queries
+      {
+        std::unique_lock<std::mutex> lock(queries_mutex);
+        has_queries.wait(lock, [&]() { return !queries.empty() || done; });
+
+        if (done)
+          break;
+
+        // load a batch of queries to batch_queries
+        size = std::min(max_batch_size, queries.size());
+        auto begin = queries.begin();
+        auto end = begin + size;
+        batch_queries.assign(begin, end);
+        queries.erase(begin, end);
+      }
+      has_queries.notify_one();
+
+      // compute response from batch_queries
+      // and log to LoadGen
+      backend->IssueBatch(concurrency_index, batch_queries);
+    }
+  }
+
+  // queue of incoming queries
+  std::deque<mlperf::QuerySample> queries;
+  std::mutex queries_mutex;
+  std::condition_variable has_queries;
+
+  // worker threads to process queries
+  std::vector<std::thread> threads;
+
+  bool done{false};
+};
+
+#endif // SYSTEM_H_
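The QueueSUT policy above is easiest to see stripped of the C++ plumbing. Below is a minimal Python sketch of the same worker loop, with `backend.issue_batch(...)` standing in for `Backend::IssueBatch`; the names are illustrative, not part of the harness:

```python
import threading
from collections import deque

class QueueSUTSketch:
    """Worker threads drain a shared queue in batches of at most max_batch_size."""

    def __init__(self, backend, num_threads, max_batch_size):
        self.backend = backend
        self.max_batch_size = max_batch_size
        self.queries = deque()
        self.has_queries = threading.Condition()
        self.done = False
        self.threads = [threading.Thread(target=self._worker, args=(i,))
                        for i in range(num_threads)]
        for t in self.threads:
            t.start()

    def issue_query(self, samples):
        # LoadGen thread: enqueue and wake one worker.
        with self.has_queries:
            self.queries.extend(samples)
            self.has_queries.notify()

    def _worker(self, concurrency_index):
        while True:
            with self.has_queries:
                self.has_queries.wait_for(lambda: self.queries or self.done)
                if self.done:
                    return
                n = min(self.max_batch_size, len(self.queries))
                batch = [self.queries.popleft() for _ in range(n)]
                self.has_queries.notify()  # let another worker grab the rest
            # compute responses outside the lock, as in the C++ version
            self.backend.issue_batch(concurrency_index, batch)

    def shutdown(self):
        with self.has_queries:
            self.done = True
            self.has_queries.notify_all()
        for t in self.threads:
            t.join()
```

The design point is the same in both versions: the lock is held only while moving samples out of the queue, so several device concurrencies can run inference on their batches in parallel.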
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/tflite_backend.h b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/tflite_backend.h
new file mode 100644
index 000000000..eab583897
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/inc/tflite_backend.h
@@ -0,0 +1,132 @@
+#ifndef ONNXRUNTIME_BACKEND_H_
+#define ONNXRUNTIME_BACKEND_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "onnxruntime_cxx_api.h"
+
+#include "loadgen.h"
+
+#include "backend.h"
+
+class OnnxRuntimeBackend : public Backend {
+public:
+  OnnxRuntimeBackend(
+      std::shared_ptr<Model> &model, std::shared_ptr<Device> &device,
+      size_t performance_sample_count, size_t batch_size,
+      bool use_cuda)
+      : Backend(model, device, performance_sample_count, batch_size)
+      , env(ORT_LOGGING_LEVEL_WARNING, "env") {
+    for (size_t i = 0; i < device->NumMemory(); i++) {
+      memory_infos.emplace_back(
+          use_cuda ? "Cuda" : "Cpu",
+          OrtAllocatorType::OrtArenaAllocator, i, OrtMemTypeDefault);
+    }
+
+    for (size_t i = 0; i < device->NumConcurrency(); i++) {
+      Ort::SessionOptions session_options;
+      // arm64 does not work with optimization level 3 (ORT_ENABLE_ALL)
+      if (getenv("ORT_ENABLE_ALL", "") == "no")
+        session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
+
+      const auto &api = Ort::GetApi();
+
+      std::vector<const char *> keys{"device_id"};
+      std::vector<const char *> values{std::to_string(i).c_str()};
+
+      OrtCUDAProviderOptionsV2 *cuda_options = nullptr;
+      if (use_cuda) {
+        Ort::ThrowOnError(api.CreateCUDAProviderOptions(&cuda_options));
+
+        Ort::ThrowOnError(api.UpdateCUDAProviderOptions(cuda_options, keys.data(), values.data(), keys.size()));
+
+        Ort::ThrowOnError(api.SessionOptionsAppendExecutionProvider_CUDA_V2(
+            static_cast<OrtSessionOptions *>(session_options),
+            cuda_options));
+      }
+
+      sessions.emplace_back(env, model->model_path.c_str(), session_options);
+      bindings.emplace_back(sessions[i]);
+
+      if (use_cuda) {
+        api.ReleaseCUDAProviderOptions(cuda_options);
+      }
+    }
+  }
+
+  void RunInference(
+      size_t concurrency_index,
+      const std::vector<mlperf::QuerySample> &batch,
+      std::vector<void *> &batch_data) override {
+    Ort::Session &session = sessions[concurrency_index];
+    Ort::IoBinding &binding = bindings[concurrency_index];
+    size_t memory_index = device->GetMemoryIndex(concurrency_index);
+
+    for (size_t i = 0; i < model->num_inputs; i++) {
+      size_t size = batch.size() * GetSampleSize(batch.front().index, i);
+      const std::vector<size_t> &shape = GetSampleShape(batch.front().index, i);
+      std::vector<int64_t> input_shape;
+      input_shape.push_back(batch.size());
+      for (size_t dim : shape)
+        input_shape.push_back(dim);
+      ONNXTensorElementDataType input_type =
+          session.GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetElementType();
+      Ort::Value value = Ort::Value::CreateTensor(
+          memory_infos[memory_index],
+          batch_data[i], size,
+          input_shape.data(), input_shape.size(),
+          input_type);
+      binding.BindInput(model->input_names[i].c_str(), value);
+    }
+
+    for (std::string &output : model->output_names)
+      binding.BindOutput(output.c_str(), memory_info_cpu);
+
+    session.Run(Ort::RunOptions(), binding);
+
+    std::vector<Ort::Value> outputs = binding.GetOutputValues();
+    std::vector<mlperf::QuerySampleResponse> responses(batch.size());
+    std::vector<std::vector<uint8_t>> response_buffers(batch.size());
+    for (size_t i = 0; i < batch.size(); i++) {
+      // get output data and shapes
+      std::vector<void *> output_buffers(outputs.size());
+      std::vector<std::vector<size_t>> output_shapes(outputs.size());
+      for (size_t j = 0; j < outputs.size(); j++) {
+        // assume ith position in output is ith sample in batch
+        output_buffers[j] =
+            static_cast<uint8_t *>(outputs[j].GetTensorMutableData<void>()) +
+            i * model->output_sizes[j];
+        size_t rank = outputs[j].GetTensorTypeAndShapeInfo().GetDimensionsCount();
+        std::vector<int64_t> output_shape(rank);
+        outputs[j].GetTensorTypeAndShapeInfo().GetDimensions(output_shape.data(), rank);
+        output_shapes[j].resize(rank);
+        for (size_t k = 0; k < rank; k++)
+          output_shapes[j][k] = output_shape[k];
+      }
+
+      model->PostProcess(
+          batch[i].index, output_buffers, output_shapes, response_buffers[i]);
+
+      responses[i].id = batch[i].id;
+      responses[i].data = reinterpret_cast<uintptr_t>(response_buffers[i].data());
+      responses[i].size = response_buffers[i].size();
+    }
+
+    mlperf::QuerySamplesComplete(responses.data(), responses.size());
+
+    binding.ClearBoundInputs();
+    binding.ClearBoundOutputs();
+  };
+
+private:
+  Ort::Env env;
+  std::vector<Ort::Session> sessions;
+  std::vector<Ort::IoBinding> bindings;
+  std::vector<Ort::MemoryInfo> memory_infos;
+  Ort::MemoryInfo memory_info_cpu{
+      Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemTypeDefault)};
+};
+
+#endif // ONNXRUNTIME_BACKEND_H_
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/src/main.cpp b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/src/main.cpp
new file mode 100644
index 000000000..c5a3c809e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/src/main.cpp
@@ -0,0 +1,214 @@
+#include <algorithm>
+#include <climits>
+#include <iostream>
+#include <memory>
+#include <string>
+
+#include "loadgen.h"
+#include "test_settings.h"
+#include "common.h"
+#include "backend.h"
+#include "device.h"
+#include "model.h"
+#include "sample_library.h"
+#include "system.h"
+#ifdef CM_MLPERF_DEVICE_GPU
+  #include "gpu_device.h"
+#endif
+
+#ifdef CM_MLPERF_BACKEND_ONNXRUNTIME
+  #include "onnxruntime_backend.h"
+#endif
+
+class InputSettings {
+
+public:
+  InputSettings() {
+    mlperf_conf_path = getenv("CM_MLPERF_CONF", "../inference/mlperf.conf");
+    user_conf_path = getenv("CM_MLPERF_USER_CONF", "../inference/vision/classification_and_detection/user.conf");
+    audit_conf_path = getenv("CM_MLPERF_INFERENCE_AUDIT_PATH", "");
+    output_dir = getenv("CM_MLPERF_OUTPUT_DIR", ".");
+    backend_name = getenv("CM_MLPERF_BACKEND", "onnxruntime");
+    device_name = getenv("CM_MLPERF_DEVICE", "cpu");
+    model_name = getenv("CM_MODEL", "resnet50");
+    model_path = getenv("CM_ML_MODEL_FILE_WITH_PATH", "");
+    dataset_preprocessed_path = getenv("CM_DATASET_PREPROCESSED_PATH", "");
+    dataset_path = getenv("CM_DATASET_PATH", "");
+    dataset_list = getenv("CM_DATASET_LIST", "");
+    imagenet_val_path = getenv("CM_DATASET_AUX_PATH", "") + "/val.txt";
+    scenario_name = getenv("CM_MLPERF_LOADGEN_SCENARIO", "Offline");
+    mode_name = getenv("CM_MLPERF_LOADGEN_MODE", "PerformanceOnly");
+    if (mode_name == "accuracy")
+      mode_name = "AccuracyOnly";
+    if (mode_name == "performance")
+      mode_name = "PerformanceOnly";
+    query_count_override = std::stol(getenv("CM_MLPERF_LOADGEN_QUERY_COUNT", "0"));
+    // the value read above is immediately reset to 0, i.e. the override is effectively disabled
+    query_count_override = 0;
+    performance_sample_count = std::stol(getenv("CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT", "0"));
+    batch_size = std::stol(getenv("CM_MLPERF_LOADGEN_MAX_BATCHSIZE", "32"));
+    std::cout << "MLPerf Conf path: " << mlperf_conf_path << std::endl;
+    std::cout << "User Conf path: " << user_conf_path << std::endl;
+    std::cout << "Dataset Preprocessed path: " << dataset_preprocessed_path << std::endl;
+    std::cout << "Dataset List filepath: " << dataset_list << std::endl;
+    std::cout << "Scenario: " << scenario_name << std::endl;
+    std::cout << "Mode: " << mode_name << std::endl;
+    std::cout << "Batch size: " << batch_size << std::endl;
+    std::cout << "Query count override: " << query_count_override << std::endl;
+    std::cout << "Performance sample count override in application: " << performance_sample_count << std::endl;
+  }
+
+  std::string mlperf_conf_path;
+  std::string user_conf_path;
+  std::string audit_conf_path;
+  std::string output_dir;
+  std::string backend_name;
+  std::string device_name;
+  std::string model_name;
+  std::string model_path;
+  std::string dataset_preprocessed_path;
+  std::string dataset_path;
+  std::string dataset_list;
+  std::string imagenet_val_path;
+  std::string scenario_name;
+  std::string mode_name;
+  size_t performance_sample_count;
+  size_t batch_size;
+  size_t query_count_override;
+};
+
+int main(int argc, const char *argv[]) {
+  // configure test settings
+  InputSettings input_settings;
+  mlperf::TestSettings test_settings;
+  test_settings.scenario =
+      input_settings.scenario_name == "SingleStream" ? mlperf::TestScenario::SingleStream :
+      input_settings.scenario_name == "MultiStream" ? mlperf::TestScenario::MultiStream :
+      input_settings.scenario_name == "Server" ? mlperf::TestScenario::Server :
+      input_settings.scenario_name == "Offline" ? mlperf::TestScenario::Offline :
+      mlperf::TestScenario::SingleStream;
+  test_settings.mode =
+      input_settings.mode_name == "SubmissionRun" ? mlperf::TestMode::SubmissionRun :
+      input_settings.mode_name == "AccuracyOnly" ? mlperf::TestMode::AccuracyOnly :
+      input_settings.mode_name == "PerformanceOnly" ? mlperf::TestMode::PerformanceOnly :
+      input_settings.mode_name == "FindPeakPerformance" ? mlperf::TestMode::FindPeakPerformance :
+      mlperf::TestMode::SubmissionRun;
+
+  // read test settings from mlperf.conf and user.conf
+  if (test_settings.FromConfig(input_settings.mlperf_conf_path, input_settings.model_name, input_settings.scenario_name)) {
+    std::cerr << "Could not read mlperf.conf at " << input_settings.mlperf_conf_path << std::endl;
+    return 1;
+  }
+  if (test_settings.FromConfig(input_settings.user_conf_path, input_settings.model_name, input_settings.scenario_name)) {
+    std::cerr << "Could not read user.conf at " << input_settings.user_conf_path << std::endl;
+    return 1;
+  }
+
+  // configure log settings
+  mlperf::LogSettings log_settings;
+  log_settings.log_output.outdir = input_settings.output_dir;
+
+  // build model
+  std::shared_ptr<Model> model;
+  if (input_settings.model_name == "resnet50") {
+    model.reset(new Resnet50(input_settings.model_path, -1));
+    // can change model params here
+    // e.g. if (backend == "torch") {
+    //   model.reset(new Resnet50(input_settings.model_path, 0));
+    //   model->input_names = {"image"};
+    // }
+  } else if (input_settings.model_name == "retinanet") {
+    // onnx retinanet requires batch size 1
+    if (input_settings.backend_name == "onnxruntime" && input_settings.batch_size != 1) {
+      std::cerr << "onnx retinanet requires batch size 1"
+                << " (current batch size: " << input_settings.batch_size << ")" << std::endl;
+      return 1;
+    }
+    model.reset(new Retinanet(input_settings.model_path, 800, 800, 0.05f));
+  } else {
+    std::cerr << "model (" << input_settings.model_name << ") not supported" << std::endl;
+    return 1;
+  }
+
+  // build device
+  std::shared_ptr<Device> device;
+  if (input_settings.device_name == "cpu") {
+    device.reset(new CPUDevice());
+  } else if (input_settings.device_name == "gpu") {
+#ifdef CM_MLPERF_DEVICE_GPU
+    device.reset(new GPUDevice());
+#endif
+  } else {
+    std::cerr << "device (" << input_settings.device_name << ") not supported" << std::endl;
+    return 1;
+  }
+
+  // get counts
+  if (input_settings.query_count_override != 0)
+    test_settings.max_query_count = input_settings.query_count_override;
+  size_t max_sample_count = test_settings.max_query_count;
+  size_t performance_sample_count =
+      test_settings.performance_sample_count_override != 0 ?
+      test_settings.performance_sample_count_override :
+      input_settings.performance_sample_count;
+
+  if (performance_sample_count != 0) { // it's changed from user.conf
+    //test_settings.performance_sample_count_override = performance_sample_count;
+  }
+  if (max_sample_count != 0)
+    performance_sample_count =
+        std::min(performance_sample_count, max_sample_count);
+  if (max_sample_count == 0)
+    max_sample_count = INT_MAX;
+
+  // build backend
+  std::shared_ptr<Backend> backend;
+  if (input_settings.backend_name == "onnxruntime") {
+#ifdef CM_MLPERF_BACKEND_ONNXRUNTIME
+    backend.reset(new OnnxRuntimeBackend(
+        model, device, performance_sample_count, input_settings.batch_size,
+        input_settings.device_name == "gpu"));
+#endif
+  } else {
+    std::cerr << "backend (" << input_settings.backend_name << ") not supported" << std::endl;
+    return 1;
+  }
+
+  // build QSL
+  std::shared_ptr<SampleLibrary> qsl;
+  if (input_settings.model_name == "resnet50") {
+    qsl.reset(new Imagenet(
+        backend, max_sample_count,
+        input_settings.dataset_preprocessed_path,
+        input_settings.imagenet_val_path));
+  } else if (input_settings.model_name == "retinanet") {
+    qsl.reset(new Openimages(
+        backend, max_sample_count,
+        input_settings.dataset_preprocessed_path,
+        input_settings.dataset_list));
+  } else {
+    std::cerr << "dataset for model ("
+              << input_settings.model_name << ") not supported" << std::endl;
+    return 1;
+  }
+
+  // sanity check: common problem in workflow
+  if (qsl->TotalSampleCount() == 0) {
+    std::cerr << "error: 0 samples found in dataset" << std::endl;
+    return 1;
+  }
+  if (qsl->PerformanceSampleCount() == 0) {
+    std::cerr << "error: performance sample count = 0" << std::endl;
+    return 1;
+  }
+
+  // build SUT
+  // using QueueSUT for all scenarios except for StreamSUT for single-stream
+  std::shared_ptr<System> sut;
+  if (input_settings.scenario_name == "SingleStream") {
+    sut.reset(new StreamSUT(backend));
+  } else {
+    sut.reset(new QueueSUT(backend));
+  }
+
+  // start benchmark
+  std::cerr << "starting benchmark" << std::endl;
+  mlperf::StartTest(sut.get(), qsl.get(), test_settings, log_settings, input_settings.audit_conf_path);
+}
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/tests/win.bat b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/tests/win.bat
new file mode 100644
index 000000000..08dc944a4
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-cpp/tests/win.bat
@@ -0,0 +1,8 @@
+rem TBD: currently not compiling - need to check ...
+
+cmr "install llvm prebuilt" --version=16.0.4
+cmr "install llvm prebuilt" --version=17.0.6
+
+cmr "get lib onnxruntime lang-cpp _cpu" --version=1.11.1
+cmr "get lib onnxruntime lang-cpp _cpu" --version=1.13.1
+cmr "get lib onnxruntime lang-cpp _cpu" --version=1.15.1
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
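As the `InputSettings` constructor above shows, the compiled harness is configured entirely through `CM_*` environment variables. A minimal Python sketch of driving it by hand follows; the binary name and all path values are illustrative only, since in practice the CM workflow derives and exports them for you:

```python
import os
import subprocess

# Illustrative values only: the CM script normally resolves these paths.
env = dict(os.environ,
           CM_MLPERF_BACKEND="onnxruntime",
           CM_MLPERF_DEVICE="cpu",
           CM_MODEL="resnet50",
           CM_ML_MODEL_FILE_WITH_PATH="/path/to/resnet50_v1.onnx",
           CM_DATASET_PREPROCESSED_PATH="/path/to/preprocessed/imagenet",
           CM_DATASET_AUX_PATH="/path/to/imagenet-aux",   # must contain val.txt
           CM_MLPERF_LOADGEN_SCENARIO="Offline",
           CM_MLPERF_LOADGEN_MODE="PerformanceOnly",
           CM_MLPERF_OUTPUT_DIR=os.getcwd())

# "./main" stands in for the binary built from src/main.cpp above.
subprocess.run(["./main"], env=env, check=True)
```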
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/README-about.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/README-about.md
new file mode 100644
index 000000000..77ba7ea07
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/README-about.md
@@ -0,0 +1,7 @@
+This portable CM script is being developed by the [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md)
+to modularize the *python reference implementations* of the [MLPerf inference benchmark](https://github.com/mlcommons/inference)
+using the [MLCommons CM automation meta-framework](https://github.com/mlcommons/ck).
+The goal is to make it easier to run, optimize and reproduce MLPerf benchmarks
+across diverse platforms with continuously changing software and hardware.
+
+See the current coverage of different models, devices and backends [here](README-extra.md#current-coverage).
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/README-extra.md
new file mode 100644
index 000000000..4a9706638
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/README-extra.md
@@ -0,0 +1,235 @@
+# About
+
+This portable CM script is being developed by the [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md)
+to modularize the *python reference implementations* of the [MLPerf inference benchmark](https://github.com/mlcommons/inference)
+using the [MLCommons CM automation meta-framework](https://github.com/mlcommons/ck).
+The goal is to make it easier to run, optimize and reproduce MLPerf benchmarks
+across diverse platforms with continuously changing software and hardware.
+
+# Current Coverage
+
+| Model | Device | Backend | Model Precision | Status | Comments |
+| --------- | ---- | ----------- | ---- | - | ------------------------------------------ |
+| ResNet50 | CPU | Onnxruntime | fp32 | | Works on all tested versions |
+| ResNet50 | CPU | Tensorflow | fp32 | | Works on all tested versions |
+| ResNet50 | CPU | Pytorch | | N | Reference Implementation missing |
+| ResNet50 | CUDA | Onnxruntime | fp32 | | Works on all tested versions |
+| ResNet50 | CUDA | Tensorflow | fp32 | | Works on all tested versions |
+| ResNet50 | CUDA | Pytorch | | N | Reference Implementation missing |
+| RetinaNet | CPU | Onnxruntime | fp32 | | Works on all tested versions |
+| RetinaNet | CPU | Tensorflow | fp32 | | Not Implemented |
+| RetinaNet | CPU | Pytorch | fp32 | | Works on all tested versions |
+| RetinaNet | CUDA | Onnxruntime | fp32 | | Works on all tested versions |
+| RetinaNet | CUDA | Tensorflow | fp32 | | Not Implemented |
+| RetinaNet | CUDA | Pytorch | fp32 | | Works on all tested versions |
+| Bert | CPU | Onnxruntime | fp32 | | Works on all tested versions |
+| Bert | CPU | Onnxruntime | int8 | | Works on all tested versions |
+| Bert | CPU | Tensorflow | fp32 | | Works with protobuf 3.19. Issue mentioned here |
+| Bert | CPU | Pytorch | fp32 | | Works on all tested versions |
+| Bert | CUDA | Onnxruntime | fp32 | | Works on all tested versions |
+| Bert | CUDA | Onnxruntime | int8 | | Works on all tested versions |
+| Bert | CUDA | Tensorflow | fp32 | | Not tested |
+| Bert | CUDA | Pytorch | fp32 | | Works on all tested versions |
+| 3d-unet | CPU | Onnxruntime | fp32 | | Works on all tested versions |
+| 3d-unet | CPU | Tensorflow | fp32 | | Works on all tested versions |
+| 3d-unet | CPU | Pytorch | fp32 | | Works on all tested versions |
+| 3d-unet | CUDA | Onnxruntime | fp32 | | Works on all tested versions |
+| 3d-unet | CUDA | Tensorflow | fp32 | | Works on all tested versions |
+| 3d-unet | CUDA | Pytorch | fp32 | | Works on all tested versions |
+| Rnnt | CPU | Pytorch | fp32 | | Works on all tested versions |
+| DLRM | CPU | Pytorch | fp32 | | Works with torch 1.10 and numpy 1.19 |
+| DLRM | CUDA | Pytorch | fp32 | ? | Needs GPU with high memory capacity |
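A quick way to try one of the working rows above is through the CM Python API. The tags and flags below mirror the variations and `input_mapping` defined in this script's `_cm.yaml`; treat the exact combination as an illustrative sketch rather than a tested command:

```python
import cmind

# Offline accuracy run of the reference ResNet50 on CPU with ONNX Runtime.
# Variation tags (_python, _resnet50, _onnxruntime, _cpu, _offline) select
# framework/model/device; flags map to CM_MLPERF_* env vars via input_mapping.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'app,mlperf,inference,reference,_python,_resnet50,_onnxruntime,_cpu,_offline',
                  'mode': 'accuracy',
                  'test_query_count': '10',
                  'quiet': True})
if r['return'] > 0:
    print(r.get('error', 'unknown error'))
```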
    + +Please follow our R&D roadmap [here](https://github.com/mlcommons/ck/issues/536). + + + diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/README.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/README.md new file mode 100644 index 000000000..459df30c8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-python](https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-python) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/_cm.yaml new file mode 100644 index 000000000..85fddc989 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/_cm.yaml @@ -0,0 +1,1387 @@ +# Identification of this CM script +alias: app-mlperf-inference-mlcommons-python +uid: ff149e9781fc4b65 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf inference benchmark pipeline" + +developers: "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin)" + +# User-friendly tags to find this CM script +tags: + - app + - vision + - language + - mlcommons + - mlperf + - inference + - reference + - ref + +# Default environment +default_env: + CM_MLPERF_LOADGEN_MODE: accuracy + CM_MLPERF_LOADGEN_SCENARIO: Offline + CM_OUTPUT_FOLDER_NAME: test_results + CM_MLPERF_RUN_STYLE: test + CM_TEST_QUERY_COUNT: '10' + CM_MLPERF_QUANTIZATION: off + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference + CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: '' + +docker: + real_run: False + +# Map script inputs to environment variables +input_mapping: + count: CM_MLPERF_LOADGEN_QUERY_COUNT + docker: CM_RUN_DOCKER_CONTAINER + hw_name: CM_HW_NAME + imagenet_path: IMAGENET_PATH + max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + mode: CM_MLPERF_LOADGEN_MODE + num_threads: CM_NUM_THREADS + threads: CM_NUM_THREADS + dataset: CM_MLPERF_VISION_DATASET_OPTION + model: CM_MLPERF_CUSTOM_MODEL_PATH + output_dir: OUTPUT_BASE_DIR + power: CM_MLPERF_POWER + power_server: CM_MLPERF_POWER_SERVER_ADDRESS + ntp_server: CM_MLPERF_POWER_NTP_SERVER + max_amps: CM_MLPERF_POWER_MAX_AMPS + max_volts: CM_MLPERF_POWER_MAX_VOLTS + regenerate_files: CM_REGENERATE_MEASURE_FILES + rerun: CM_RERUN + scenario: CM_MLPERF_LOADGEN_SCENARIO + test_query_count: CM_TEST_QUERY_COUNT + clean: CM_MLPERF_CLEAN_SUBMISSION_DIR + dataset_args: CM_MLPERF_EXTRA_DATASET_ARGS + target_qps: CM_MLPERF_LOADGEN_TARGET_QPS + target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY + offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS + singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + network: CM_NETWORK_LOADGEN + sut_servers: CM_NETWORK_LOADGEN_SUT_SERVERS + + +# Duplicate CM environment variables to the ones used in native apps +env_key_mappings: + CM_HOST_: HOST_ + CM_ML_: ML_ + CM_MLPERF_TVM: MLPERF_TVM + CM_MLPERF_DELETE: MLPERF_DELETE + +# Env keys which are exposed to higher level scripts +new_env_keys: + 
- CM_MLPERF_* + - CM_DATASET_* + - CM_HW_NAME + - CM_ML_MODEL_* + - CM_MAX_EXAMPLES + - CM_VLLM_* +new_state_keys: + - mlperf-inference-implementation + - CM_SUT_* + +# Dependencies on other CM scripts +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect/install python + - tags: get,python + names: + - python + - python3 + + # Detect CUDA if required + - tags: get,cuda,_cudnn + names: + - cuda + enable_if_env: + CM_MLPERF_DEVICE: + - gpu + CM_MLPERF_BACKEND: + - onnxruntime + - tf + - tflite + - pytorch + + # Detect TensorRT if required + - tags: get,nvidia,tensorrt + enable_if_env: + CM_MLPERF_BACKEND: + - tensorrt + + + + + ######################################################################## + # Install ML engines via CM + + ## Onnx CPU Runtime + - tags: get,generic-python-lib,_onnxruntime + names: + - ml-engine-onnxruntime + - onnxruntime + enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + - tvm-onnx + CM_MLPERF_DEVICE: + - cpu + - rocm + + ## Onnx CUDA Runtime + - tags: get,generic-python-lib,_onnxruntime_gpu + names: + - ml-engine-onnxruntime-cuda + enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + - tvm-onnx + CM_MLPERF_DEVICE: + - gpu + skip_if_env: + CM_MODEL: + - 3d-unet-99 + - 3d-unet-99.9 + + ## resnet50 and 3d-unet need both onnxruntime and onnxruntime_gpu on cuda + - tags: get,generic-python-lib,_onnxruntime + enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + CM_MLPERF_DEVICE: + - gpu + CM_MODEL: + - 3d-unet-99 + - 3d-unet-99.9 + - resnet50 + - tags: get,generic-python-lib,_onnxruntime_gpu + env: + CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: "" + enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + CM_MLPERF_DEVICE: + - gpu + CM_MODEL: + - 3d-unet-99 + - 3d-unet-99.9 + - resnet50 + + ## Pytorch (CPU) + - tags: get,generic-python-lib,_torch + names: + - torch + - ml-engine-pytorch + - pytorch + skip_if_env: + CM_MODEL: + - dlrm-v2-99 + - dlrm-v2-99.9 + enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + - tvm-pytorch + CM_MLPERF_DEVICE: + - cpu + - rocm + + ## Pytorch (CUDA) + - tags: get,generic-python-lib,_torch_cuda + names: + - ml-engine-pytorch + - pytorch + enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + - tvm-pytorch + - ray + CM_MLPERF_DEVICE: + - gpu + + ## Torchvision (CPU) + - tags: get,generic-python-lib,_torchvision + names: + - ml-engine-torchvision + - torchvision + skip_if_env: + CM_MODEL: + - dlrm-v2-99 + - dlrm-v2-99.9 + enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + - tvm-pytorch + CM_MLPERF_DEVICE: + - cpu + + ## Torchvision (CUDA) + - tags: get,generic-python-lib,_torchvision_cuda + names: + - ml-engine-torchvision + - torchvision + enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + - tvm-pytorch + - ray + CM_MLPERF_DEVICE: + - gpu + + ## tensorrt + - tags: get,generic-python-lib,_tensorrt + names: + - ml-engine-tensorrt + enable_if_env: + CM_MLPERF_BACKEND: + - ray + + ## torch_tensorrt + - tags: get,generic-python-lib,_torch_tensorrt + names: + - ml-engine-torch_tensorrt + enable_if_env: + CM_MLPERF_BACKEND: + - ray + + ## Ray + - tags: get,generic-python-lib,_ray + names: + - ray + enable_if_env: + CM_MLPERF_BACKEND: + - ray + + ## async_timeout (for multi-node) + # NOTE. This is a bug in ray 2.8.0. Ray 2.8.0 needs the pip package + # async_timeout to be installed, so we need to install it manually. 
+ - tags: get,generic-python-lib,_async_timeout + names: + - async_timeout + enable_if_env: + CM_MLPERF_BACKEND: + - ray + + ## Transformers + - tags: get,generic-python-lib,_transformers + names: + - ml-engine-transformers + enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + - gptj-99 + - gptj-99.9 + + ## Tensorflow + - tags: get,generic-python-lib,_tensorflow + names: + - ml-engine-tensorflow + - tensorflow + enable_if_env: + CM_MLPERF_BACKEND: + - tf + + ## NCNN + - tags: get,generic-python-lib,_package.ncnn + names: + - ml-engine-ncnn + enable_if_env: + CM_MLPERF_BACKEND: + - ncnn + + - tags: get,tensorflow,lib,_tflite + names: + - ml-engine-tflite + enable_if_env: + CM_MLPERF_BACKEND: + - tflite + + + ######################################################################## + # Install ML models + + - tags: get,ml-model,neural-magic,zoo + # sets CM_MLPERF_CUSTOM_MODEL_PATH + names: + - custom-ml-model + enable_if_env: + CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB: + - "on" + update_tags_from_env_with_prefix: + "_model-stub.": + - CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB + + ## ResNet50 + - tags: get,ml-model,image-classification,resnet50 + names: + - ml-model + - resnet50-model + enable_if_env: + CM_MODEL: + - resnet50 + skip_if_env: + CM_MLPERF_CUSTOM_MODEL_PATH: + - "on" + + ## RetinaNet + - tags: get,ml-model,object-detection,retinanet + names: + - ml-model + - retinanet-model + enable_if_env: + CM_MODEL: + - retinanet + + ## GPT-J + - tags: get,ml-model,large-language-model,gptj + names: + - ml-model + - gptj-model + - gpt-j-model + enable_if_env: + CM_MODEL: + - gptj-99 + - gptj-99.9 + skip_if_env: + CM_NETWORK_LOADGEN: + - lon + + + + ## RetinaNet (PyTorch weights, FP32) + - tags: get,ml-model,object-detection,resnext50,fp32,_pytorch-weights + names: + - ml-model + - retinanet-model + enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + CM_MLPERF_IMPLEMENTATION: + - nvidia + CM_MODEL: + - retinanet + + ## BERT + - tags: get,ml-model,language-processing,bert-large + names: + - ml-model + - bert-model + enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + skip_if_env: + CM_MLPERF_CUSTOM_MODEL_PATH: + - "on" + + ## SDXL + - tags: get,ml-model,stable-diffusion,text-to-image,sdxl + names: + - ml-model + - sdxl-model + - ml-model-float16 + enable_if_env: + CM_MODEL: + - stable-diffusion-xl + skip_if_any_env: + CM_MLPERF_CUSTOM_MODEL_PATH: + - "on" + skip_if_env: + CM_RUN_STATE_DOCKER: + - 'yes' + CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: + - 'yes' + + ## LLAMA2-70B + - tags: get,ml-model,llama2 + names: + - ml-model + - llama2-model + enable_if_env: + CM_MODEL: + - llama2-70b-99 + - llama2-70b-99.9 + skip_if_any_env: + CM_MLPERF_CUSTOM_MODEL_PATH: + - "on" + CM_MLPERF_INFERENCE_API_SERVER: + - "on" + skip_if_env: + CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST: + - 'yes' + CM_RUN_STATE_DOCKER: + - 'yes' + + ## mixtral-8x7b + - tags: get,ml-model,mixtral + names: + - ml-model + - mixtral-model + enable_if_env: + CM_MODEL: + - mixtral-8x7b + skip_if_any_env: + CM_MLPERF_CUSTOM_MODEL_PATH: + - "on" + skip_if_env: + CM_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST: + - 'yes' + CM_RUN_STATE_DOCKER: + - 'yes' + + ## 3d-unet + - tags: get,ml-model,medical-imaging,3d-unet + names: + - ml-model + - 3d-unet-model + enable_if_env: + CM_MODEL: + - 3d-unet-99 + - 3d-unet-99.9 + + ## Rnnt + - tags: get,ml-model,speech-recognition,rnnt + names: + - ml-model + - rnnt-model + enable_if_env: + CM_MODEL: + - rnnt + + ## Dlrm + - tags: get,ml-model,recommendation,dlrm + names: + - ml-model + - dlrm-model + enable_if_env: 
+ CM_MODEL: + - dlrm-99 + - dlrm-99.9 + - dlrm-v2-99 + - dlrm-v2-99.9 + skip_if_env: + CM_ML_MODEL_FILE_WITH_PATH: + - 'on' + + + ## RGAT + - tags: get,ml-model,rgat + names: + - ml-model + - rgat-model + enable_if_env: + CM_MODEL: + - rgat + skip_if_env: + RGAT_CHECKPOINT_PATH: + - 'on' + + ######################################################################## + # Install datasets + + ## ImageNet (small for tests) + - tags: get,dataset,image-classification,imagenet,preprocessed + names: + - imagenet-preprocessed + enable_if_env: + CM_MODEL: + - resnet50 + skip_if_env: + CM_MLPERF_VISION_DATASET_OPTION: + - on + + - tags: get,dataset,image-classification,imagenet,preprocessed,_pytorch + names: + - imagenet-preprocessed + enable_if_env: + CM_MODEL: + - resnet50 + CM_MLPERF_VISION_DATASET_OPTION: + - imagenet_pytorch + + - tags: get,dataset-aux,image-classification,imagenet-aux + enable_if_env: + CM_MODEL: + - resnet50 + + ## Open Images for RetinaNet + - tags: get,dataset,object-detection,open-images,openimages,preprocessed,_validation + names: + - openimages-preprocessed + enable_if_env: + CM_MODEL: + - retinanet + + ## CNNDM for Large Language Model + - tags: get,dataset,cnndm,_validation + names: + - cnndm-original + enable_if_env: + CM_MODEL: + - gptj-99 + - gptj-99.9 + + ## Squad for BERT + - tags: get,dataset,squad,original + names: + - squad-original + enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + + - tags: get,dataset-aux,squad-vocab + enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + + ## COCO for SDXL + - tags: get,dataset,coco2014,_validation + names: + - coco2014-preprocessed + - coco2014-dataset + enable_if_env: + CM_MODEL: + - stable-diffusion-xl + + ## OpenOrca for LLAMA2-70b + - tags: get,preprocessed,dataset,openorca,_validation,_mlcommons + names: + - openorca-preprocessed + enable_if_env: + CM_MODEL: + - llama2-70b-99 + - llama2-70b-99.9 + + ## OpenOrca,mbxp,gsm8k combined dataset for mixtral-8x7b + - tags: get,dataset-mixtral,openorca-mbxp-gsm8k-combined + names: + - openorca-mbxp-gsm8k-combined-preprocessed + enable_if_env: + CM_MODEL: + - mixtral-8x7b + skip_if_env: + CM_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST: + - 'yes' + + ## Kits19 for 3d-unet + - tags: get,dataset,kits19,preprocessed + names: + - kits19-preprocessed + enable_if_env: + CM_MODEL: + - 3d-unet-99 + - 3d-unet-99.9 + skip_if_env: + CM_MLPERF_DATASET_3DUNET_DOWNLOAD_TO_HOST: + - 'yes' + + ## Librispeech for rnnt + - tags: get,dataset,librispeech,preprocessed + names: + - librispeech-preprocessed + enable_if_env: + CM_MODEL: + - rnnt + + ## Criteo for dlrm + - tags: get,dataset,criteo,preprocessed,_mlc + names: + - criteo-preprocessed + enable_if_env: + CM_MODEL: + - dlrm-v2-99 + - dlrm-v2-99.9 + skip_if_env: + CM_CRITEO_PREPROCESSED_PATH: + - on + + ## igbh for rgat + - tags: get,dataset,mlperf,inference,igbh + names: + - igbh-dataset + - illinois-graph-benchmark-heterogeneous + enable_if_env: + CM_MODEL: + - rgat + + ######################################################################## + # Install MLPerf inference dependencies + + # Creates user conf for given SUT + - tags: generate,user-conf,mlperf,inference + names: + - user-conf-generator + skip_if_env: + CM_RUN_STATE_DOCKER: + - 'yes' + + # Install MLPerf loadgen + - tags: get,loadgen + names: + - loadgen + - mlperf-inference-loadgen + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + env: + 
CM_GET_MLPERF_IMPLEMENTATION_ONLY: 'yes' + names: + - mlperf-implementation + + - tags: get,generic-python-lib,_package.psutil + +prehook_deps: + - names: + - remote-run-cmds + tags: remote,run,cmds + enable_if_env: + CM_ASSH_RUN_COMMANDS: + - "on" + +posthook_deps: + - names: + - mlperf-runner + tags: benchmark-mlperf + skip_if_env: + CM_MLPERF_SKIP_RUN: + - "on" + +post_deps: + - tags: save,mlperf,inference,state + names: + - save-mlperf-inference-state + +# Variations to customize dependencies +variations: + python: + group: implementation + default: true, + add_deps_recursive: + imagenet-accuracy-script: + tags: _float32 + env: + CM_MLPERF_PYTHON: 'yes' + CM_MLPERF_IMPLEMENTATION: reference + + # ML engine + onnxruntime: + group: framework + default: true + add_deps_recursive: + imagenet-preprocessed: + tags: _NCHW + openimages-preprocessed: + tags: _NCHW + ml-model: + tags: raw,_onnx + numpy: + version_max: "1.26.4" + version_max_usable: "1.26.4" + env: + CM_MLPERF_BACKEND: onnxruntime + + onnxruntime,cpu: + env: + CM_MLPERF_BACKEND_VERSION: <<>> + + onnxruntime,cuda: + env: + CM_MLPERF_BACKEND_VERSION: <<>> + ONNXRUNTIME_PREFERRED_EXECUTION_PROVIDER: "CUDAExecutionProvider" + + pytorch: + group: framework + add_deps_recursive: + imagenet-preprocessed: + tags: _NCHW + openimages-preprocessed: + tags: _NCHW + ml-model: + tags: raw,_pytorch + numpy: + version_max: "1.26.4" + version_max_usable: "1.26.4" + env: + CM_MLPERF_BACKEND: pytorch + CM_MLPERF_BACKEND_VERSION: <<>> + + pytorch,rocm: + add_deps_recursive: + pytorch: + tags: _rocm + torchvision: + tags: _rocm + + ray: + group: framework + add_deps_recursive: + imagenet-preprocessed: + tags: _NCHW + openimages-preprocessed: + tags: _NCHW + ml-model: + tags: raw,_pytorch + env: + CM_MLPERF_BACKEND: ray + CM_MLPERF_BACKEND_VERSION: <<>> + + tf,rocm: + add_deps_recursive: + tensorflow: + tags: _rocm + env: + CM_MLPERF_BACKEND_VERSION: <<>> + + onnxruntime,rocm: + add_deps_recursive: + onnxruntime: + tags: _rocm + env: + ONNXRUNTIME_PREFERRED_EXECUTION_PROVIDER: "ROCMExecutionProvider" + CM_MLPERF_BACKEND_VERSION: <<>> + + ncnn: + group: framework + add_deps_recursive: + imagenet-preprocessed: + tags: _NCHW + ml-model: + tags: raw,_ncnn + env: + CM_MLPERF_BACKEND: ncnn + CM_MLPERF_BACKEND_VERSION: <<>> + CM_MLPERF_VISION_DATASET_OPTION: imagenet_pytorch + + tflite: + group: framework + add_deps_recursive: + imagenet-preprocessed: + tags: _NHWC + ml-model: + tags: raw,_tflite,_no-argmax + env: + CM_MLPERF_BACKEND: tflite + CM_MLPERF_BACKEND_VERSION: <<>> + CM_MLPERF_VISION_DATASET_OPTION: imagenet_tflite_tpu + + tf: + group: framework + add_deps_recursive: + imagenet-preprocessed: + tags: _NHWC + ml-model: + tags: raw,_tf + env: + CM_MLPERF_BACKEND: tf + CM_MLPERF_BACKEND_VERSION: <<>> + + tensorflow: + alias: tf + + deepsparse: + group: framework + env: + CM_MLPERF_BACKEND: deepsparse + CM_MLPERF_BACKEND_VERSION: <<>> + deps: + - tags: get,generic-python-lib,_deepsparse + skip_if_env: + CM_HOST_PLATFORM_FLAVOR: + - aarch64 + - tags: get,generic-python-lib,_package.deepsparse-nightly + enable_if_env: + CM_HOST_PLATFORM_FLAVOR: + - aarch64 + add_deps_recursive: + mlperf-implementation: + version: deepsparse + ml-model: + tags: raw,_deepsparse + + tvm-onnx: + group: framework + env: + CM_MLPERF_BACKEND: tvm-onnx + CM_MLPERF_BACKEND_VERSION: <<>> + deps: + - tags: get,generic-python-lib,_onnx + - tags: get,generic-python-lib,_numpy + version_max: "1.26.4" + version_max_usable: "1.26.4" + - tags: get,tvm + names: + - tvm + - tags: 
get,tvm-model,_onnx + names: + - tvm-model + update_tags_from_env_with_prefix: + _model.: + - CM_MODEL + + + tvm-tflite: + group: framework + env: + CM_MLPERF_BACKEND: tvm-tflite + CM_MLPERF_BACKEND_VERSION: <<>> + deps: + - tags: get,generic-python-lib,_tflite + - tags: get,tvm + names: + - tvm + - tags: get,tvm-model,_tflite + names: + - tvm-model + update_tags_from_env_with_prefix: + _model.: + - CM_MODEL + + tvm-pytorch: + group: framework + env: + CM_MLPERF_BACKEND: tvm-pytorch + CM_MLPERF_BACKEND_VERSION: <<>> + CM_PREPROCESS_PYTORCH: 'yes' + MLPERF_TVM_TORCH_QUANTIZED_ENGINE: qnnpack + deps: + - tags: get,generic-python-lib,_torch + names: + - torch + - pytorch + - tags: get,tvm + names: + - tvm + - tags: get,tvm-model,_pytorch + names: + - tvm-model + update_tags_from_env_with_prefix: + _model.: + - CM_MODEL + + # Reference MLPerf models + gptj-99.9: + group: models + base: + - gptj_ + env: + CM_MODEL: gptj-99.9 + + gptj-99: + group: models + base: + - gptj_ + env: + CM_MODEL: gptj-99 + + gptj_: + deps: + - tags: get,generic-python-lib,_package.datasets + - tags: get,generic-python-lib,_package.attrs + - tags: get,generic-python-lib,_package.accelerate + + bert-99.9: + group: models + base: + - bert + env: + CM_MODEL: bert-99.9 + + bert-99: + group: models + base: + - bert + env: + CM_MODEL: bert-99 + + bert: + env: + CM_MLPERF_MODEL_SKIP_BATCHING: true + deps: + - tags: get,generic-python-lib,_package.pydantic + - tags: get,generic-python-lib,_tokenization + - tags: get,generic-python-lib,_six + - tags: get,generic-python-lib,_package.absl-py + - tags: get,generic-python-lib,_protobuf + names: + - protobuf + version_max: "3.19" + enable_if_env: + CM_MLPERF_BACKEND: + - tf + - tflite + - tags: get,generic-python-lib,_boto3 + enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + - tags: get,generic-python-lib,_torch + names: + - ml-engine-pytorch + - pytorch + skip_if_env: + CM_MLPERF_DEVICE: + - gpu + add_deps_recursive: + inference-src: + tags: _deeplearningexamples + + sdxl: + group: models + env: + CM_MODEL: stable-diffusion-xl + CM_NUM_THREADS: "1" + deps: + - tags: get,generic-python-lib,_package.diffusers + names: + - diffusers + - tags: get,generic-python-lib,_package.transformers + names: + - transformers + - tags: get,generic-python-lib,_package.torchvision + names: + - torchvision + - tags: get,generic-python-lib,_package.accelerate + names: + - accelerate + - tags: get,generic-python-lib,_package.torchmetrics + names: + - torchmetrics + - tags: get,generic-python-lib,_package.torch-fidelity + names: + - torch-fidelity + - tags: get,generic-python-lib,_package.open_clip_torch + names: + - open-clip + - tags: get,generic-python-lib,_package.opencv-python + names: + - opencv-python + - tags: get,generic-python-lib,_package.scipy + names: + - scipy + - tags: get,generic-python-lib,_package.pandas + names: + - pandas + + llama2-70b_: + env: + CM_MLPERF_MODEL_SKIP_BATCHING: false + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/b18ff890ff559e21d2e27a3b54cd26467ac1fd9e/script/get-ml-model-llama2/_cm.json#L51" + deps: + - tags: get,generic-python-lib,_package.transformers + names: + - transformers + - tags: get,generic-python-lib,_package.datasets + names: + - datasets + - tags: get,generic-python-lib,_package.sentencepiece + names: + - sentencepiece + - tags: get,generic-python-lib,_package.protobuf + names: + - protobuf + - tags: get,generic-python-lib,_package.accelerate + names: + - accelerate + - tags: get,generic-python-lib,_package.absl-py 
+ names: + - absl-py + - tags: get,generic-python-lib,_package.evaluate + names: + - evaluate + - tags: get,generic-python-lib,_package.nltk + names: + - nltk + version_max: 3.8.1 + version_max_usable: 3.8.1 + - tags: get,generic-python-lib,_package.numpy + names: + - numpy + - tags: get,generic-python-lib,_package.rouge-score + names: + - rouge-score + - tags: get,generic-python-lib,_package.more-itertools + names: + - more-itertools + - tags: get,generic-python-lib,_package.compressed_tensors + names: + - compressed_tensors + + llama2-70b-99: + group: models + env: + CM_MODEL: llama2-70b-99 + base: + - llama2-70b_ + + llama2-70b_,cuda: + default_env: + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: 8 + + llama2-70b-99.9: + group: models + env: + CM_MODEL: llama2-70b-99.9 + base: + - llama2-70b_ + + mixtral-8x7b: + group: models + env: + CM_MODEL: mixtral-8x7b + deps: + - tags: get,rust-compiler + names: + - rustup + - tags: get,generic-python-lib,_package.transformers + names: + - transformers + - tags: get,generic-python-lib,_package.datasets + names: + - datasets + - tags: get,generic-python-lib,_package.sentencepiece + names: + - sentencepiece + - tags: get,generic-python-lib,_package.protobuf + names: + - protobuf + - tags: get,generic-python-lib,_package.accelerate + names: + - accelerate + - tags: get,generic-python-lib,_package.absl-py + names: + - absl-py + - tags: get,generic-python-lib,_package.evaluate + names: + - evaluate + - tags: get,generic-python-lib,_package.nltk + names: + - nltk + - tags: get,generic-python-lib,_package.rouge-score + names: + - rouge-score + - tags: get,generic-python-lib,_package.pybind11 + names: + - rouge-score + - tags: get,generic-python-lib,_mxeval + names: + - rouge-score + + mixtral-8x7b,cuda: + default_env: + CM_MLPERF_LOADGEN_BATCH_SIZE: 1 + + 3d-unet-99.9: + group: models + base: + - 3d-unet + env: + CM_MODEL: 3d-unet-99.9 + + 3d-unet-99: + group: models + base: + - 3d-unet + env: + CM_MODEL: 3d-unet-99 + + 3d-unet: + env: + CM_TMP_IGNORE_MLPERF_QUERY_COUNT: true + CM_MLPERF_MODEL_SKIP_BATCHING: true + deps: + - tags: get,generic-python-lib,_package.nibabel + - tags: get,generic-python-lib,_package.scipy + names: + - scipy + version: 1.10.1 + + dlrm-v2-99.9: + group: models + base: + - dlrm-v2_ + env: + CM_MODEL: dlrm-v2-99.9 + + dlrm-v2-99: + group: models + base: + - dlrm-v2_ + env: + CM_MODEL: dlrm-v2-99 + + dlrm-v2_: + env: + CM_MLPERF_MODEL_SKIP_BATCHING: true + CM_ML_MODEL_DATASET_TYPE: multihot-criteo + + dlrm-v2_,pytorch: + deps: + - tags: get,dlrm,src + names: + - dlrm-src + # to force the version + - tags: get,generic-python-lib,_torch + names: + - torch + - pytorch + - ml-engine-pytorch + - tags: get,generic-python-lib,_mlperf_logging + - tags: get,generic-python-lib,_opencv-python + - tags: get,generic-python-lib,_tensorboard + - tags: get,generic-python-lib,_protobuf + - tags: get,generic-python-lib,_scikit-learn + - tags: get,generic-python-lib,_tqdm + - tags: get,generic-python-lib,_onnx + - tags: get,generic-python-lib,_numpy + names: + - numpy + - tags: get,generic-python-lib,_package.pyre-extensions + - tags: get,generic-python-lib,_package.torchsnapshot + - tags: get,generic-python-lib,_package.torchmetrics + - tags: get,generic-python-lib,_package.torchrec + - tags: get,generic-python-lib,_package.fbgemm-gpu + - tags: get,generic-python-lib,_package.fbgemm-gpu-cpu + - tags: get,generic-python-lib,_package.fvcore + - tags: set,user,limit,_large-nofile + + + rnnt: + group: models + env: + CM_MODEL: rnnt + CM_MLPERF_MODEL_SKIP_BATCHING: 
true + CM_TMP_IGNORE_MLPERF_QUERY_COUNT: true + deps: + - tags: get,generic-python-lib,_package.pydantic + version_max: "1.10.9" + - tags: get,generic-python-lib,_librosa + names: + - librosa + - tags: get,generic-python-lib,_inflect + - tags: get,generic-python-lib,_unidecode + - tags: get,generic-python-lib,_toml + + retinanet: + group: models + deps: + - tags: get,generic-python-lib,_opencv-python + - tags: get,generic-python-lib,_numpy + names: + - numpy + - tags: get,generic-python-lib,_pycocotools + + env: + CM_MODEL: retinanet + CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: 'yes' + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: '1' + + resnet50: + group: models + default: true + env: + CM_MODEL: resnet50 + CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: 'yes' + deps: + - tags: get,generic-python-lib,_opencv-python + - tags: get,generic-python-lib,_numpy + names: + - numpy + - tags: get,generic-python-lib,_pycocotools + prehook_deps: + - tags: get,generic-python-lib,_protobuf + names: + - protobuf + version_max: "4.23.4" + version_max_usable: "4.23.4" + enable_if_env: + CM_MLPERF_BACKEND: + - tf + - tflite + + rgat: + group: models + env: + CM_MODEL: rgat + deps: + - tags: get,generic-python-lib,_package.colorama + - tags: get,generic-python-lib,_package.tqdm + - tags: get,generic-python-lib,_package.requests + - tags: get,generic-python-lib,_package.torchdata + - tags: get,generic-python-lib,_package.torch-geometric + - tags: get,generic-python-lib,_package.torch-scatter + - tags: get,generic-python-lib,_package.torch-sparse + - tags: get,generic-python-lib,_package.pybind11 + - tags: get,generic-python-lib,_package.PyYAML + - tags: get,generic-python-lib,_package.pydantic + - tags: get,generic-python-lib,_package.igb,_url.git+https://github.com/IllinoisGraphBenchmark/IGB-Datasets.git + - tags: get,generic-python-lib,_package.dgl,_find_links_url.https://data.dgl.ai/wheels/torch-2.1/repo.html + enable_if_env: + CM_MLPERF_DEVICE: + - cpu + - tags: get,generic-python-lib,_package.dgl,_find_links_url.https://data.dgl.ai/wheels/torch-2.1/cu121/repo.html + enable_if_env: + CM_MLPERF_DEVICE: + - gpu + + + # Target devices + cpu: + group: device + default: true + env: + CM_MLPERF_DEVICE: cpu + CUDA_VISIBLE_DEVICES: '' + USE_CUDA: no + USE_GPU: no + + cuda: + group: device + env: + CM_MLPERF_DEVICE: gpu + USE_CUDA: yes + USE_GPU: yes + + rocm: + group: device + env: + CM_MLPERF_DEVICE: rocm + USE_GPU: yes + + tpu: + group: device + env: + CM_MLPERF_DEVICE: tpu + + tpu,tflite: + add_deps_recursive: + imagenet-preprocessed: + tags: _tflite_tpu + + # Loadgen scenarios + offline: + env: + CM_MLPERF_LOADGEN_SCENARIO: Offline + multistream: + env: + CM_MLPERF_LOADGEN_SCENARIO: MultiStream + singlestream: + env: + CM_MLPERF_LOADGEN_SCENARIO: SingleStream + server: + env: + CM_MLPERF_LOADGEN_SCENARIO: Server + + # Model precision + fp32: + group: precision + default: true + add_deps_recursive: + ml-model: + tags: + _fp32 + env: + CM_MLPERF_QUANTIZATION: off + CM_MLPERF_MODEL_PRECISION: float32 + + # Model precision + float16: + group: precision + add_deps_recursive: + ml-model-float16: + tags: + _fp16 + env: + CM_MLPERF_QUANTIZATION: off + CM_MLPERF_MODEL_PRECISION: float16 + + # Model precision + bfloat16: + group: precision + add_deps_recursive: + ml-model-float16: + tags: + _fp16 + env: + CM_MLPERF_QUANTIZATION: off + CM_MLPERF_MODEL_PRECISION: bfloat16 + + int8: + group: precision + env: + CM_MLPERF_QUANTIZATION: on + CM_MLPERF_MODEL_PRECISION: int8 + add_deps_recursive: + ml-model: + tags: + _int8 + + quantized: + alias: int8 + + 
batch_size.#: + group: batch-size + env: + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: "#" + add_deps_recursive: + ml-model: + tags: + _batch_size.# + tvm-model: + tags: + _batch_size.# + + network-sut: + group: network + deps: + - tags: get,generic-python-lib,_package.flask + names: + - flask + env: + CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: network_sut + CM_NETWORK_LOADGEN: sut + + network-lon: + group: network + env: + CM_NETWORK_LOADGEN: lon + CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: network_loadgen + + beam_size.#: + env: + GPTJ_BEAM_SIZE: "#" + + # Reproducibility (past submissions) + r2.1_default: + add_deps_recursive: + compiler: + tags: llvm + inference-src: + tags: _octoml + loadgen: + version: r2.1 + env: + CM_RERUN: 'yes' + CM_SKIP_SYS_UTILS: 'yes' + CM_TEST_QUERY_COUNT: '100' diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/customize.py new file mode 100644 index 000000000..87e09151b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/customize.py @@ -0,0 +1,525 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import json +import shutil +import subprocess + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + script_path = i['run_script_input']['path'] + + if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + return {'return': 0} + + if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + return {'return': 0} + + if env.get('CM_MLPERF_POWER', '') == "yes": + power = "yes" + else: + power = "no" + + rerun = True if env.get("CM_RERUN", "") != '' else False + + if 'CM_MLPERF_LOADGEN_SCENARIO' not in env: + env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline" + + if 'CM_MLPERF_LOADGEN_MODE' not in env: + env['CM_MLPERF_LOADGEN_MODE'] = "accuracy" + + if 'CM_MODEL' not in env: + return { + 'return': 1, 'error': "Please select a variation specifying the model to run"} + + # if env['CM_MODEL'] == "resnet50": + # cmd = "cp " + os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") + " " + os.path.join(env['CM_DATASET_PATH'], + # "val_map.txt") + # ret = os.system(cmd) + + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] = " " + \ + env.get('CM_MLPERF_LOADGEN_EXTRA_OPTIONS', '') + " " + + if 'CM_MLPERF_LOADGEN_QPS' not in env: + env['CM_MLPERF_LOADGEN_QPS_OPT'] = "" + else: + env['CM_MLPERF_LOADGEN_QPS_OPT'] = " --qps " + \ + env['CM_MLPERF_LOADGEN_QPS'] + + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['CM_MLPERF_LOADGEN_QPS_OPT'] + + if 'CM_NUM_THREADS' not in env: + if 'CM_MINIMIZE_THREADS' in env: + env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // + (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) + else: + env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') + + if env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and str(env.get( + 'CM_MLPERF_MODEL_SKIP_BATCHING', False)).lower() not in ["true", "1", "yes"]: + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --max-batchsize " + \ + 
str(env['CM_MLPERF_LOADGEN_MAX_BATCHSIZE']) + + if env.get('CM_MLPERF_LOADGEN_BATCH_SIZE', '') != '': + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --batch-size " + \ + str(env['CM_MLPERF_LOADGEN_BATCH_SIZE']) + + if env.get('CM_MLPERF_LOADGEN_QUERY_COUNT', '') != '' and not env.get('CM_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and ( + env['CM_MLPERF_LOADGEN_MODE'] == 'accuracy' or 'gptj' in env['CM_MODEL'] or 'llama2' in env['CM_MODEL'] or 'mixtral' in env['CM_MODEL']) and env.get('CM_MLPERF_RUN_STYLE', '') != "valid": + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + \ + env['CM_MLPERF_LOADGEN_QUERY_COUNT'] + + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + + if 'CM_MLPERF_CONF' not in env: + env['CM_MLPERF_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + + x = "" if os_info['platform'] == 'windows' else "'" + + inference_src_version = env.get('CM_MLPERF_INFERENCE_SOURCE_VERSION', '') + version_tuple = None + if inference_src_version: + version_tuple = tuple(map(int, inference_src_version.split('.'))) + + if version_tuple and version_tuple >= (4, 1, 1): + pass # mlperf_conf is automatically loaded by the loadgen + else: + if "llama2-70b" in env['CM_MODEL'] or "mixtral-8x7b" in env["CM_MODEL"]: + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf-conf " + \ + x + env['CM_MLPERF_CONF'] + x + else: + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf " + \ + x + env['CM_MLPERF_CONF'] + x + + if env.get('CM_NETWORK_LOADGEN', '') != "lon" and env.get( + 'CM_MLPERF_INFERENCE_API_SERVER', '') == '' and "llama2-70b" not in env['CM_MODEL']: + env['MODEL_DIR'] = env.get('CM_ML_MODEL_PATH') + if not env['MODEL_DIR']: + env['MODEL_DIR'] = os.path.dirname( + env.get( + 'CM_MLPERF_CUSTOM_MODEL_PATH', + env.get( + 'CM_ML_MODEL_FILE_WITH_PATH', + ''))) + + RUN_CMD = "" + state['RUN'] = {} + + scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] + state['RUN'][scenario] = {} + scenario_extra_options = '' + + NUM_THREADS = env['CM_NUM_THREADS'] + if int(NUM_THREADS) > 2 and env['CM_MLPERF_DEVICE'] == "gpu": + NUM_THREADS = "2" # Don't use more than 2 threads when run on GPU + + if env['CM_MODEL'] in ['resnet50', 'retinanet', 'stable-diffusion-xl']: + scenario_extra_options += " --threads " + NUM_THREADS + + ml_model_name = env['CM_MODEL'] + if 'CM_MLPERF_USER_CONF' in env: + user_conf_path = env['CM_MLPERF_USER_CONF'] + x = "" if os_info['platform'] == 'windows' else "'" + if 'llama2-70b' in env['CM_MODEL'] or "mixtral-8x7b" in env["CM_MODEL"]: + scenario_extra_options += " --user-conf " + x + user_conf_path + x + else: + scenario_extra_options += " --user_conf " + x + user_conf_path + x + + mode = env['CM_MLPERF_LOADGEN_MODE'] + mode_extra_options = "" + + if 'CM_DATASET_PREPROCESSED_PATH' in env and env['CM_MODEL'] in [ + 'resnet50', 'retinanet']: + # dataset_options = " --use_preprocessed_dataset --preprocessed_dir "+env['CM_DATASET_PREPROCESSED_PATH'] + if env.get('CM_MLPERF_LAST_RELEASE') not in ["v2.0", "v2.1"]: + dataset_options = " --use_preprocessed_dataset --cache_dir " + \ + env['CM_DATASET_PREPROCESSED_PATH'] + else: + dataset_options = "" + if env['CM_MODEL'] == "retinanet": + dataset_options += " --dataset-list " + \ + env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + elif env['CM_MODEL'] == "resnet50": + dataset_options += " --dataset-list " + \ + os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") + env['DATA_DIR'] = env.get('CM_DATASET_PREPROCESSED_PATH') + else: + if 'CM_DATASET_PREPROCESSED_PATH' in env: + 
env['DATA_DIR'] = env.get('CM_DATASET_PREPROCESSED_PATH') + else: + env['DATA_DIR'] = env.get('CM_DATASET_PATH') + + if "dlrm" in env['CM_MODEL']: + env['DATA_DIR'] = env['CM_CRITEO_PREPROCESSED_PATH'] + + dataset_options = '' + + if env.get('CM_MLPERF_EXTRA_DATASET_ARGS', '') != '': + dataset_options += " " + env['CM_MLPERF_EXTRA_DATASET_ARGS'] + + if mode == "accuracy": + mode_extra_options += " --accuracy" + + elif mode == "performance": + pass + + elif mode == "compliance": + + audit_full_path = env['CM_MLPERF_INFERENCE_AUDIT_PATH'] + mode_extra_options = " --audit '" + audit_full_path + "'" + + if env.get('CM_MLPERF_OUTPUT_DIR', '') == '': + env['CM_MLPERF_OUTPUT_DIR'] = os.getcwd() + + mlperf_implementation = env.get('CM_MLPERF_IMPLEMENTATION', 'reference') + cmd, run_dir = get_run_cmd(os_info, env, scenario_extra_options, + mode_extra_options, dataset_options, mlperf_implementation) + + if env.get('CM_NETWORK_LOADGEN', '') == "lon": + + run_cmd = i['state']['mlperf_inference_run_cmd'] + env['CM_SSH_RUN_COMMANDS'] = [] + env['CM_SSH_RUN_COMMANDS'].append( + run_cmd.replace( + "--network=lon", + "--network=sut") + " &") + + env['CM_MLPERF_RUN_CMD'] = cmd + env['CM_RUN_DIR'] = run_dir + env['CM_RUN_CMD'] = cmd + env['CK_PROGRAM_TMP_DIR'] = env.get('CM_ML_MODEL_PATH') # for tvm + + if env.get('CM_HOST_PLATFORM_FLAVOR', '') == "arm64": + env['CM_HOST_PLATFORM_FLAVOR'] = "aarch64" + + return {'return': 0} + + +def get_run_cmd(os_info, env, scenario_extra_options, + mode_extra_options, dataset_options, implementation="reference"): + if implementation == "reference": + return get_run_cmd_reference( + os_info, env, scenario_extra_options, mode_extra_options, dataset_options) + if implementation == "nvidia": + return get_run_cmd_nvidia( + os_info, env, scenario_extra_options, mode_extra_options, dataset_options) + return "", os.getcwd() + + +def get_run_cmd_reference( + os_info, env, scenario_extra_options, mode_extra_options, dataset_options): + + if env['CM_MODEL'] in ["gptj-99", "gptj-99.9"]: + + env['RUN_DIR'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j") + if env.get('CM_NETWORK_LOADGEN', '') != "lon": + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + \ + " main.py --model-path=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['CM_DATASET_EVAL_PATH'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \ + scenario_extra_options + mode_extra_options + dataset_options + else: + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + \ + " main.py" + ' --dataset-path=' + env['CM_DATASET_EVAL_PATH'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \ + scenario_extra_options + mode_extra_options + dataset_options + cmd = cmd.replace("--count", "--max_examples") + if env['CM_MLPERF_DEVICE'] == "gpu": + gpu_options = " --gpu" + env['CUDA_VISIBLE_DEVICES'] = "0" + else: + gpu_options = "" + cmd = cmd + gpu_options + env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR'] + + if env['CM_MODEL'] in ["resnet50", "retinanet"]: + + env['RUN_DIR'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "vision", + "classification_and_detection") + env['OUTPUT_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] + if env.get('CM_MLPERF_VISION_DATASET_OPTION', '') == '' and env.get( + 'CM_MLPERF_DEVICE') != "tpu": + if os_info['platform'] == 'windows': + cmd = "python python/main.py --profile " + 
env['CM_MODEL'] + "-" + env['CM_MLPERF_BACKEND'] + \ + " --model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + ' --dataset-path=' + env['CM_DATASET_PREPROCESSED_PATH'] + \ + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + \ + " --output " + env['OUTPUT_DIR'] + " " + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + dataset_options + else: + cmd = "./run_local.sh " + env['CM_MLPERF_BACKEND'] + ' ' + \ + env['CM_MODEL'] + ' ' + env['CM_MLPERF_DEVICE'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + dataset_options + return cmd, env['RUN_DIR'] + + if env['CM_MLPERF_BACKEND'] == "ncnn": + env['MODEL_FILE'] = os.path.join( + os.path.dirname( + env.get('CM_ML_MODEL_FILE_WITH_PATH')), + "resnet50_v1") + else: + env['MODEL_FILE'] = env.get( + 'CM_MLPERF_CUSTOM_MODEL_PATH', + env.get('CM_ML_MODEL_FILE_WITH_PATH')) + if not env['MODEL_FILE']: + return {'return': 1, 'error': 'No valid model file found!'} + + env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR'] + + extra_options = " --output " + env['CM_MLPERF_OUTPUT_DIR'] + " --model-name resnet50 --dataset " + env['CM_MLPERF_VISION_DATASET_OPTION'] + ' --max-batchsize ' + env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \ + " --dataset-path " + env['CM_DATASET_PREPROCESSED_PATH'] + " --model " + env['MODEL_FILE'] + \ + " --preprocessed_dir " + env['CM_DATASET_PREPROCESSED_PATH'] + + if env.get('CM_MLPERF_DEVICE') == "tpu": + cmd = "cd '" + os.path.join(env['RUN_DIR'], "python") + "' && " + env.get('CM_SUDO', "") + " " + env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " +\ + "--backend " + env['CM_MLPERF_BACKEND'] + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + " --device tpu " + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \ + mode_extra_options + dataset_options + extra_options + else: + cmd = "cd '" + os.path.join(env['RUN_DIR'], "python") + "' && " + env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " +\ + "--backend " + env['CM_MLPERF_BACKEND'] + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \ + mode_extra_options + dataset_options + extra_options + env['SKIP_VERIFY_ACCURACY'] = True + + elif "bert" in env['CM_MODEL']: + + env['RUN_DIR'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "language", "bert") + env['MODEL_FILE'] = env.get( + 'CM_MLPERF_CUSTOM_MODEL_PATH', + env.get('CM_ML_MODEL_FILE_WITH_PATH')) + if not env['MODEL_FILE']: + return {'return': 1, 'error': 'No valid model file found!'} + if env.get('CM_MLPERF_QUANTIZATION') in ["on", True, "1", "True"]: + quantization_options = " --quantized" + else: + quantization_options = "" + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " run.py --backend=" + env['CM_MLPERF_BACKEND'] + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + scenario_extra_options + \ + mode_extra_options + dataset_options + quantization_options + if env['CM_MLPERF_BACKEND'] == "deepsparse": + cmd += " --batch_size=" + \ + env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \ + " --model_path=" + env['MODEL_FILE'] + + if env.get('CM_MLPERF_CUSTOM_MODEL_PATH', '') != '': + env['CM_ML_MODEL_FILE_WITH_PATH'] = env['MODEL_FILE'] + + cmd = cmd.replace("--count", "--max_examples") + env['VOCAB_FILE'] = env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] + env['DATASET_FILE'] = env['CM_DATASET_SQUAD_VAL_PATH'] + env['LOG_PATH'] = 
env['CM_MLPERF_OUTPUT_DIR'] + env['SKIP_VERIFY_ACCURACY'] = True + + elif "rnnt" in env['CM_MODEL']: + + env['RUN_DIR'] = env['CM_MLPERF_INFERENCE_RNNT_PATH'] + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " run.py --backend " + env['CM_MLPERF_BACKEND'] + \ + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + " --manifest " + env['CM_DATASET_PREPROCESSED_JSON'] + \ + " --dataset_dir " + os.path.join(env['CM_DATASET_PREPROCESSED_PATH'], "..") + \ + " --pytorch_config_toml " + os.path.join("pytorch", "configs", "rnnt.toml") + \ + " --pytorch_checkpoint " + env['CM_ML_MODEL_FILE_WITH_PATH'] + \ + " --log_dir " + env['CM_MLPERF_OUTPUT_DIR'] + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + dataset_options + env['SKIP_VERIFY_ACCURACY'] = True + + elif "stable-diffusion-xl" in env['CM_MODEL']: + env['RUN_DIR'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image") + if env.get('+PYTHONPATH', '') == '': + env['+PYTHONPATH'] = [] + env['+PYTHONPATH'].append( + os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "tools", + "fid")) + + backend = env['CM_MLPERF_BACKEND'] + device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] not in [ + "gpu", "rocm"] else "cuda" + max_batchsize = env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \ + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + " --profile " + 'stable-diffusion-xl-pytorch ' + \ + " --dataset " + 'coco-1024' + \ + " --dataset-path " + env['CM_DATASET_PATH_ROOT'] + \ + ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'].replace("bfloat", "bf").replace("float", "fp") + \ + " --device " + device + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + \ + " --output " + env['CM_MLPERF_OUTPUT_DIR'] + \ + " --model-path " + env['CM_ML_MODEL_PATH'] + if "--max-batchsize" not in cmd: + cmd += " --max-batchsize " + max_batchsize + if env.get('CM_COCO2014_SAMPLE_ID_PATH', '') != '': + cmd += " --ids-path " + env['CM_COCO2014_SAMPLE_ID_PATH'] + + elif "llama2-70b" in env['CM_MODEL']: + env['RUN_DIR'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "language", + "llama2-70b") + backend = env['CM_MLPERF_BACKEND'] + device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] != "gpu" else "cuda" + + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \ + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + " --dataset-path " + env['CM_DATASET_PREPROCESSED_PATH'] + \ + " --device " + device.replace("cuda", "cuda:0") + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + \ + " --output-log-dir " + env['CM_MLPERF_OUTPUT_DIR'] + \ + ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + + if env.get('CM_MLPERF_INFERENCE_API_SERVER', '') != '': + env['CM_VLLM_SERVER_MODEL_NAME'] = env.get( + "CM_VLLM_SERVER_MODEL_NAME") or "NousResearch/Meta-Llama-3-8B-Instruct" + # env['CM_MLPERF_INFERENCE_API_SERVER'] = "http://localhost:8000" + cmd += f" --api-server {env['CM_MLPERF_INFERENCE_API_SERVER']} --model-path {env['CM_VLLM_SERVER_MODEL_NAME']} --api-model-name {env['CM_VLLM_SERVER_MODEL_NAME']} --vllm " + else: + cmd += f" --model-path {env['LLAMA2_CHECKPOINT_PATH']}" + + if env.get('CM_MLPERF_INFERENCE_NUM_WORKERS', '') != '': + cmd += f" --num-workers {env['CM_MLPERF_INFERENCE_NUM_WORKERS']}" + + cmd = cmd.replace("--count", "--total-sample-count") + cmd = cmd.replace("--max-batchsize", "--batch-size") + + elif "mixtral-8x7b" in 
env['CM_MODEL']: + env['RUN_DIR'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "language", + "mixtral-8x7b") + backend = env['CM_MLPERF_BACKEND'] + device = env['CM_MLPERF_DEVICE'] if env['CM_MLPERF_DEVICE'] != "gpu" else "cuda" + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \ + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + " --dataset-path " + env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] + \ + " --device " + device.replace("cuda", "cuda:0") + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + \ + " --output-log-dir " + env['CM_MLPERF_OUTPUT_DIR'] + \ + ' --dtype ' + env['CM_MLPERF_MODEL_PRECISION'] + \ + " --model-path " + env['MIXTRAL_CHECKPOINT_PATH'] + cmd = cmd.replace("--count", "--total-sample-count") + cmd = cmd.replace("--max-batchsize", "--batch-size") + + elif "3d-unet" in env['CM_MODEL']: + + env['RUN_DIR'] = env['CM_MLPERF_INFERENCE_3DUNET_PATH'] + backend = env['CM_MLPERF_BACKEND'] if env['CM_MLPERF_BACKEND'] != 'tf' else 'tensorflow' + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " run.py --backend=" + backend + " --scenario=" + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + " --model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + \ + " --preprocessed_data_dir=" + env['CM_DATASET_KITS19_PREPROCESSED_PATH'] + \ + scenario_extra_options + mode_extra_options + dataset_options + + env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR'] + env['SKIP_VERIFY_ACCURACY'] = True + + elif "dlrm" in env['CM_MODEL']: # DLRM is in draft stage + + env['RUN_DIR'] = os.path.join( + env['CM_MLPERF_INFERENCE_DLRM_V2_PATH'], "pytorch") + if 'multihot-criteo-sample' in env['CM_ML_MODEL_DATASET_TYPE']: + dataset = "multihot-criteo-sample" + elif 'multihot-criteo' in env['CM_ML_MODEL_DATASET_TYPE']: + dataset = "multihot-criteo" + + env['MODEL_DIR'] = os.path.join(env['MODEL_DIR'], "model_weights") + + if env.get('CM_MLPERF_BIN_LOADER', '') == 'yes': + mlperf_bin_loader_string = " --mlperf-bin-loader" + else: + mlperf_bin_loader_string = "" + if env.get('CM_ML_MODEL_DEBUG', '') == 'yes': + config = " --max-ind-range=10000000 --data-sub-sample-rate=0.875 " + else: + config = " --max-ind-range=40000000 " + + if env['CM_MLPERF_DEVICE'] == "gpu": + gpu_options = "" + env['CUDA_VISIBLE_DEVICES'] = "0" + else: + gpu_options = "" + env['WORLD_SIZE'] = "1" + + if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy" and env['CM_MLPERF_LOADGEN_SCENARIO'] == "Offline": + mode_extra_options += " --samples-per-query-offline=1" + + cmd = " ./run_local.sh " + env['CM_MLPERF_BACKEND'] + \ + ' dlrm ' + dataset + ' ' + env['CM_MLPERF_DEVICE'] + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + " " + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + config + mlperf_bin_loader_string + \ + ' --samples-to-aggregate-quantile-file=./tools/dist_quantile.txt ' + \ + scenario_extra_options + mode_extra_options + dataset_options + gpu_options + cmd = cmd.replace("--count", "--count-queries") + env['OUTPUT_DIR'] = env['CM_MLPERF_OUTPUT_DIR'] + + elif "rgat" in env['CM_MODEL']: + env['RUN_DIR'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "graph", + "R-GAT") + backend = env['CM_MLPERF_BACKEND'] + + dtype_rgat = env['CM_MLPERF_MODEL_PRECISION'].replace("float", "fp") + + if env.get('CM_MLPERF_SUBMISSION_GENERATION_STYLE', '') == "full": + mode_extra_options += " --dataset igbh-dgl --profile rgat-dgl-full " + else: + mode_extra_options += " --dataset igbh-dgl-tiny --profile debug-dgl " + + device = env['CM_MLPERF_DEVICE'] if 
env['CM_MLPERF_DEVICE'] != "gpu" else "cuda" + # have to add the condition for running in debug mode or real run mode + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " main.py " \ + " --scenario " + env['CM_MLPERF_LOADGEN_SCENARIO'] + \ + " --dataset-path " + env['CM_IGBH_DATASET_PATH'] + \ + " --device " + device.replace("cuda", "cuda:0") + \ + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \ + scenario_extra_options + mode_extra_options + \ + " --output " + env['CM_MLPERF_OUTPUT_DIR'] + \ + ' --dtype ' + dtype_rgat + \ + " --model-path " + env['RGAT_CHECKPOINT_PATH'] + \ + " --mlperf_conf " + \ + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + + if env.get('CM_NETWORK_LOADGEN', '') in ["lon", "sut"]: + cmd = cmd + " " + "--network " + env['CM_NETWORK_LOADGEN'] + if env.get('CM_NETWORK_LOADGEN_SUT_SERVERS', []): + sut_servers = env['CM_NETWORK_LOADGEN_SUT_SERVERS'] + cmd += " --sut_server '" + "','".join(sut_servers) + "' " + + return cmd, env['RUN_DIR'] + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + inp = i['input'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py new file mode 100644 index 000000000..090d1b072 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py @@ -0,0 +1,620 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
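+
+# High-level flow of this utility (summary of the code below):
+#  1. TRTTester builds a TensorRT engine for RetinaNet from ONNX (optionally
+#     with int8 calibration) or loads a prebuilt engine, then runs it over
+#     the OpenImages validation set via EngineRunner.
+#  2. PytorchTester runs the MLCommons reference PyTorch model instead.
+#  3. Both paths dump COCO-format detections to a JSON file and score them
+#     with pycocotools' COCOeval, reporting mAP against the 0.375 reference.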
+ + +__doc__ = """Scripts that take a retinanet engine and openImage input, infer the output and test the accuracy +""" + +import argparse +import json +import os +import sys +import glob +import random +import time +import pycuda +from PIL import Image +from importlib import import_module +from typing import Dict, Tuple, List, Optional + +from code.common.fix_sys_path import ScopedRestrictedImport +# with ScopedRestrictedImport(): +import numpy as np +import torch # Retinanet model source requires GPU installation of PyTorch 1.10 +from torchvision.transforms import functional as F +import onnx +import tensorrt as trt +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval + +from code.common import logging +from code.common.constants import TRT_LOGGER, Scenario +from code.common.systems.system_list import DETECTED_SYSTEM +from code.common.runner import EngineRunner, get_input_format +from code.common.systems.system_list import SystemClassifications +from code.plugin import load_trt_plugin +RetinanetEntropyCalibrator = import_module( + "code.retinanet.tensorrt.calibrator").RetinanetEntropyCalibrator + +G_RETINANET_NUM_CLASSES = 264 +G_RETINANET_IMG_SIZE = (800, 800) +G_RETINANET_INPUT_SHAPE = (3, 800, 800) +G_OPENIMAGE_CALSET_PATH = "build/data/open-images-v6-mlperf/calibration/train/data" +G_OPENIMAGE_CALMAP_PATH = "data_maps/open-images-v6-mlperf/cal_map.txt" +G_OPENIMAGE_VALSET_PATH = os.path.join( + os.environ.get( + "CM_DATASET_PATH", + "build/data/open-images-v6-mlperf"), + "validation", + "data") +G_OPENIMAGE_VALMAP_PATH = "data_maps/open-images-v6-mlperf/val_map.txt" +G_OPENIMAGE_ANNO_PATH = os.path.join( + os.environ.get( + "CM_DATASET_PATH", + "build/data/open-images-v6-mlperf"), + "annotations", + "openimages-mlperf.json") +G_OPENIMAGE_PREPROCESSED_INT8_PATH = "build/preprocessed_data/open-images-v6-mlperf/validation/Retinanet/int8_linear" +# Using EfficientNMS now +G_RETINANET_CALIBRATION_CACHE_PATH = "code/retinanet/tensorrt/calibrator.cache" + + +def load_img_pytorch(fpath: str, do_transform=False) -> torch.tensor: + """ + Load the image from file into torch tensor + From mlcommon training repo: + https://github.com/mlcommons/training/blob/master/single_stage_detector/ssd/model/transform.py#L66 + + Args: + fpath (str): the path to the image file + do_transform (bool): whether to do postprocess (resize + normalize) + """ + loaded_tensor = F.to_tensor(Image.open(fpath).convert("RGB")) + if do_transform: + dtype = torch.float32 + device = torch.device("cpu") + image_size = [800, 800] + image_std = [0.229, 0.224, 0.225] + image_mean = [0.485, 0.456, 0.406] + mean = torch.as_tensor(image_mean, dtype=dtype, device=device) + std = torch.as_tensor(image_std, dtype=dtype, device=device) + img_norm = (loaded_tensor - mean[:, None, None]) / std[:, None, None] + img_resize = torch.nn.functional.interpolate(img_norm[None], size=image_size, scale_factor=None, mode='bilinear', + recompute_scale_factor=None, align_corners=False)[0] + return img_resize + + return loaded_tensor + + +class FirstLayerConvActPoolTacticSelector(trt.IAlgorithmSelector): + def select_algorithms(self, ctx, choices): + if "Conv_0 + 1783 + Mul_1 + 1785 + Add_2 + Relu_3 + MaxPool_4" in ctx.name: # Apply to the first layer + # MLPINF-1833: Disabled CaskConvActPool for TRT 8.5.0.4 + # TRT 8.5.0.4 has a bug with CaskConvActPool which has been fixed + # since 8.5.0.5 + forbidden_set = { + -3689373275198309793, # 0xccccb68da7fc3a5f + -4219016963003938541, # 0xc5730a6ceacd8913 + -4709698786673109216, # 
0xbea3c9e81542d720 + 8863348452769974412, # 0x7b00f0752fdcc88c + -216502845640484144, # 0xfcfed3cf18bcdad0 + -2840175123683203852, # 0xd895abc5dcf624f4 + 4391967500208500226, # 0x3cf3672bfafcee02 + -3076721233724812250, # 0xd54d4a56ceee5426 + 8268411641074121664, # 0x72bf4c9462ed7bc0 + 3484514246525022387, # 0x305b7b3ed6e970b3 + 679919370278938099, # 0x096f8f109d6225f3 + 1531503914513228020, # 0x1540feb22cae60f4 + 8162590574723450606, # 0x714758e16557c6ee + 6137316588591593674, # 0x552c20eba11d38ca + -5252194382421728148, # 0xb71c75095873646c + -2136593403804660582, # 0xe2594b9e90c7cc9a + 58603908831090367, # 0x00d033f1d05396bf + 1454666201826561687, # 0x1430033412a38e97 + -7506077189063215810, # 0xd43db7d0f0e3ba45 + -3153162056066942395, # 0x9521940f435d0c18 + -7700711094551245800, # 0xf126325c0aa4aa02 + -1070112490556970494, # 0x97d50e90c139753e + } + filtered_idxs = [idx for idx, choice in enumerate( + choices) if choice.algorithm_variant.tactic not in forbidden_set] + to_ret = filtered_idxs + else: + # By default, say that all tactics are acceptable: + to_ret = [idx for idx, _ in enumerate(choices)] + return to_ret + + def report_algorithms(self, ctx, choices): + pass + + +class TRTTester: + + def __init__(self, engine_file, batch_size, precision, onnx_path, + skip_engine_build=False, verbose=False, + output_file="build/retinanet_trt.out" + ): + """ + Test the accuracy using the onnx file and TensorRT runtime. + The tester is able to build the engine from onnx. + """ + self.batch_size = batch_size + self.verbose = verbose + self.onnx_path = onnx_path + self.engine_file = engine_file + self.cache_file = G_RETINANET_CALIBRATION_CACHE_PATH + self.precision = precision + + # TensorRT engine related fields + # Not supported on dla + self.dla_core = None + + # Initiate the plugin and logger + # Use the global singleton, which is required by TRT. + self.logger = TRT_LOGGER + self.logger.min_severity = trt.Logger.VERBOSE if self.verbose else trt.Logger.INFO + load_trt_plugin("retinanet") + trt.init_libnvinfer_plugins(self.logger, "") + + if self.onnx_path is not None and not skip_engine_build: + print(f"Creating engines from onnx: {self.onnx_path}") + self.create_trt_engine() + else: + if not os.path.exists(engine_file): + raise RuntimeError( + f"Cannot find engine file {engine_file}. 
Please supply the onnx file or engine file.") + + self.runner = EngineRunner(self.engine_file, verbose=verbose) + + # OpenImage related fields + self.image_dir = G_OPENIMAGE_VALSET_PATH + self.val_annotate = G_OPENIMAGE_ANNO_PATH + self.output_file = output_file + + def apply_flag(self, flag): + """Apply a TRT builder flag.""" + self.builder_config.flags = ( + self.builder_config.flags) | ( + 1 << int(flag)) + + def clear_flag(self, flag): + """Clear a TRT builder flag.""" + self.builder_config.flags = ( + self.builder_config.flags) & ~( + 1 << int(flag)) + + # Helper function to build a TRT engine from ONNX file + def create_trt_engine(self): + self.builder = trt.Builder(self.logger) + self.builder_config = self.builder.create_builder_config() + self.builder_config.max_workspace_size = 8 << 30 + self.builder_config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED if self.verbose else trt.ProfilingVerbosity.LAYER_NAMES_ONLY + + # Precision flags + self.clear_flag(trt.BuilderFlag.TF32) + if self.precision == "fp32": + self.input_dtype = "fp32" + self.input_format = "linear" + elif self.precision == "int8": + self.input_dtype = "int8" + self.input_format = "linear" + self.apply_flag(trt.BuilderFlag.INT8) + + # Calibrator for int8 + preprocessed_data_dir = "build/preprocessed_data" + calib_image_dir = os.path.join( + preprocessed_data_dir, + "open-images-v6-mlperf/calibration/Retinanet/fp32") + self.calibrator = RetinanetEntropyCalibrator(data_dir=calib_image_dir, + cache_file=self.cache_file, batch_size=10, max_batches=50, + force_calibration=False, calib_data_map=G_OPENIMAGE_CALMAP_PATH) + self.builder_config.int8_calibrator = self.calibrator + + # Apply tactic selector bypassing conv act pool for Orin: + if SystemClassifications.is_orin(): + tactic_selector = FirstLayerConvActPoolTacticSelector() + self.builder_config.algorithm_selector = tactic_selector + else: + raise Exception(f"{self.precision} not supported yet.") + + self.network = self.builder.create_network( + 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + model = onnx.load(self.onnx_path) + parser = trt.OnnxParser(self.network, self.logger) + success = parser.parse(onnx._serialize(model)) + if not success: + err_desc = parser.get_error(0).desc() + raise RuntimeError( + f"Retinanet onnx model processing failed! 
Error: {err_desc}") + + # Set the network input type + if self.precision == "int8": + self.network.get_input(0).dtype = trt.int8 + + # Add obey_precision_constraints flag to suppress reformat + self.apply_flag(trt.BuilderFlag.OBEY_PRECISION_CONSTRAINTS) + + # Prepare the optimization profiles + self.profiles = [] + self.num_profiles = 1 # Can create more profiles here if needed + if self.dla_core is None: + for i in range(self.num_profiles): + profile = self.builder.create_optimization_profile() + for input_idx in range(self.network.num_inputs): + input_shape = self.network.get_input(input_idx).shape + input_name = self.network.get_input(input_idx).name + min_shape = trt.Dims(input_shape) + min_shape[0] = 1 + max_shape = trt.Dims(input_shape) + max_shape[0] = self.batch_size + profile.set_shape( + input_name, min_shape, max_shape, max_shape) + if not profile: + raise RuntimeError("Invalid optimization profile!") + self.builder_config.add_optimization_profile(profile) + self.profiles.append(profile) + else: + # Use fixed batch size if on DLA + for input_idx in range(self.network.num_inputs): + input_shape = self.network.get_input(input_idx).shape + input_shape[0] = self.batch_size + self.network.get_input(input_idx).shape = input_shape + + engine = self.builder.build_engine(self.network, self.builder_config) + engine_inspector = engine.create_engine_inspector() + layer_info = engine_inspector.get_engine_information( + trt.LayerInformationFormat.ONELINE) + logging.info("========= TensorRT Engine Layer Information =========") + logging.info(layer_info) + + buf = engine.serialize() + logging.info(f"Writing built engine to {self.engine_file}") + with open(self.engine_file, 'wb') as f: + f.write(buf) + + def run_openimage(self, num_samples=8): + cocoGt = COCO(annotation_file=self.val_annotate) + image_ids = cocoGt.getImgIds() + cat_ids = cocoGt.getCatIds() + num_images = min(num_samples, len(image_ids)) + print( + f"Total number of images: {len(image_ids)}, number of categories: {len(cat_ids)}, running num_images: {num_images}") + + detections = [] + batch_idx = 0 + for image_idx in range(0, num_images, self.batch_size): + # Print Progress + if batch_idx % 20 == 0: + print( + f"Processing batch: {batch_idx} image: {image_idx}/{num_images}") + + end_idx = min(image_idx + self.batch_size, num_images) + imgs = [] + img_original_sizes = [] + for idx in range(image_idx, end_idx): + image_id = image_ids[idx] + if self.precision == "fp32": + # Load the image using pytorch routine, but perform extra + # resize+normalize steps + img = load_img_pytorch( + os.path.join( + self.image_dir, + cocoGt.imgs[image_id]["file_name"]), + do_transform=True).numpy() + elif self.precision == "int8": + img = np.load( + os.path.join( + G_OPENIMAGE_PREPROCESSED_INT8_PATH, + cocoGt.imgs[image_id]["file_name"] + + '.npy')) + else: + raise Exception(f"Unsupported precision {self.precision}") + imgs.append(img) + img_original_sizes.append( + [cocoGt.imgs[image_id]["height"], cocoGt.imgs[image_id]["width"]]) + + if self.precision == "fp32": + imgs = np.ascontiguousarray(np.stack(imgs), dtype=np.float32) + elif self.precision == "int8": + imgs = np.stack(imgs) + + start_time = time.time() + outputs = self.runner([imgs], batch_size=self.batch_size) + + if self.verbose: + duration = time.time() - start_time + logging.info( + f"Batch {batch_idx} >>> Inference time: {duration}") + + # Concatted outputs is in the shape of [BS, 7001] + # image_ids (duplicate of score for loadgen): [BS, 1000, 1] + # loc: [BS, 1000, 4] + # score: [BS, 
1000, 1] + # label: [BS, 1000, 1] + # Concatted into [BS, 1000, 7] then reshape to [BS, 7000] + # keep_count: [BS, 1] + concat_output = outputs[0] + + for idx in range(0, end_idx - image_idx): + # keep_count = keep_counts[idx] + keep_count = int(concat_output[idx * 7001 + 7000]) + image_height = img_original_sizes[idx][0] + image_width = img_original_sizes[idx][1] + + for prediction_idx in range(0, keep_count): + # Each detection is in the order of [dummy_image_idx, xmin, ymin, xmax, ymax, score, label] + # This is pre-callback (otherwise x and y are swapped). + single_detection = concat_output[idx * + 7001 + + prediction_idx * + 7: idx * + 7001 + + prediction_idx * + 7 + + 7] + loc = single_detection[1:5] + label = single_detection[6] + score = single_detection[5] + + # Scale the image output from [0, 1] to (img_h, img_w) + # [ymin, xmin, ymax, xmax] + scale_h = image_height + scale_w = image_width + loc[0::2] = loc[0::2] * scale_h + loc[1::2] = loc[1::2] * scale_w + loc = loc.tolist() + + # Convert from ltrb_xyinverted to [xmin, ymin, w, h] + bbox_coco_fmt = [ + loc[1], + loc[0], + loc[3] - loc[1], + loc[2] - loc[0], + ] + + coco_detection = { + "image_id": image_ids[image_idx + idx], + "category_id": cat_ids[int(label)], + "bbox": bbox_coco_fmt, + "score": float(score), + } + detections.append(coco_detection) + batch_idx += 1 + + output_dir = os.path.dirname(self.output_file) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + with open(self.output_file, "w") as f: + json.dump(detections, f) + cocoDt = cocoGt.loadRes(self.output_file) + e = COCOeval(cocoGt, cocoDt, 'bbox') + e.params.imgIds = image_ids[:num_images] + e.evaluate() + e.accumulate() + e.summarize() + map_score = e.stats[0] + return map_score + + +class PytorchTester: + """ + The reference implementation of the retinanet from the mlcommon-training repo, from: + https://github.com/mlcommons/training/tree/master/single_stage_detector/ssd/model + + To run this tester, you would need to clone the repo, and mount it to the container. + """ + + def __init__(self, pyt_ckpt_path, training_repo_path, + batch_size=8, output_file="build/retinanet_pytorch.out"): + ssd_model_path = os.path.join( + training_repo_path, "single_stage_detector", "ssd") + with ScopedRestrictedImport([ssd_model_path] + sys.path): + from model.retinanet import retinanet_from_backbone + pyt_model = retinanet_from_backbone( + backbone="resnext50_32x4d", + num_classes=G_RETINANET_NUM_CLASSES, + image_size=[800, 800], + data_layout="channels_last", + pretrained=None, + trainable_backbone_layers=3 + ) + + self.training_repo_path = training_repo_path + self.device = torch.device("cuda:0") + pyt_model.to(self.device) + if pyt_model.data_layout == "channels_last": + pyt_model = pyt_model.to(memory_format=torch.channels_last) + cpt = torch.load(pyt_ckpt_path, map_location='cpu') + pyt_model.load_state_dict(cpt["model"]) + self.pyt_model = pyt_model + self.val_annotate = G_OPENIMAGE_ANNO_PATH + self.batch_size = batch_size + self.output_file = output_file + self.image_dir = G_OPENIMAGE_VALSET_PATH + + def run_openimage(self, num_samples=8): + """ + Use openimage raw input to run the pytorch referene model for images. + Note that the input image will be of different sizes, and the output bboxes are not normalized to 800,800 + The pytorch model handles the resize and postprocess internally. 
For more details, see: + https://github.com/mlcommons/training/blob/master/single_stage_detector/ssd/model/retinanet.py#L475 + """ + self.pyt_model.eval() + cocoGt = COCO(annotation_file=self.val_annotate) + image_ids = cocoGt.getImgIds() + cat_ids = cocoGt.getCatIds() + num_images = min(num_samples, len(image_ids)) + print( + f"Total number of images: {len(image_ids)}, number of categories: {len(cat_ids)}, running num_images: {num_images}") + + coco_detections = [] + for image_idx in range(0, num_images, self.batch_size): + end_idx = min(image_idx + self.batch_size, num_images) + # Load image and transfer to tensor (original image size) + imgs = [] + for idx in range(image_idx, end_idx): + image_id = image_ids[idx] + image_path = os.path.join( + self.image_dir, cocoGt.imgs[image_id]["file_name"]) + img = load_img_pytorch(image_path).to(self.device) + imgs.append(img) + # print(cocoGt.imgs[image_id]["height"], cocoGt.imgs[image_id]["width"]) + + img = [] + for idx in range(image_idx, end_idx): + image_id = image_ids[idx] + tensor = load_img_pytorch( + os.path.join( + self.image_dir, + cocoGt.imgs[image_id]["file_name"]), + do_transform=True).numpy() + print(tensor.shape) + img.append(tensor) + img = np.ascontiguousarray(np.stack(img), dtype=np.float32) + + start_time = time.time() + with torch.no_grad(): + detections = self.pyt_model(imgs) + + for idx in range(0, end_idx - image_idx): + boxes = detections[idx]["boxes"].detach().cpu().numpy() + scores = detections[idx]["scores"].detach().cpu().numpy() + labels = detections[idx]["labels"].detach().cpu().numpy() + + num_preds = boxes.shape[0] + for pred_idx in range(num_preds): + # Convert from lrtb to [xmin, ymin, w, h] for cocoeval + box_pred = boxes[pred_idx][:] + xmin, ymin, xmax, ymax = box_pred + box_pred = np.array( + [xmin, ymin, xmax - xmin, ymax - ymin], dtype=np.float32) + score_pred = float(scores[pred_idx]) + label_pred = int(labels[pred_idx]) + coco_detection = { + "image_id": image_ids[image_idx + idx], + "category_id": cat_ids[label_pred], + "bbox": box_pred.tolist(), # Convert ndarray to list + "score": score_pred + } + coco_detections.append(coco_detection) + + output_dir = os.path.dirname(self.output_file) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + with open(self.output_file, "w") as f: + json.dump(coco_detections, f) + cocoDt = cocoGt.loadRes(self.output_file) + e = COCOeval(cocoGt, cocoDt, 'bbox') + e.params.imgIds = image_ids[:num_images] + e.evaluate() + e.accumulate() + e.summarize() + map_score = e.stats[0] + + # Uncomment below to call reference implementation evaluate. + # Import extra helper function from training repo. 
+ # ssd_model_path = os.path.join(self.training_repo_path, "single_stage_detector", "ssd") + # with ScopedRestrictedImport([ssd_model_path] + sys.path): + # from coco_utils import get_openimages + # import presets + # from utils import collate_fn + # from engine import evaluate + # from coco_utils import get_coco_api_from_dataset + # from coco_eval import DefaultCocoEvaluator + # coco_evaluator = evaluate(self.pyt_model, data_loader_test, device=self.device, epoch=None, args=Args) + # map_score = coco_evaluator.get_stats()['bbox'][0] + return map_score + + +def main(): + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument("--engine_file", + help="Specify where the retinanet engine file is", + required=False) + parser.add_argument("--onnx_path", + help="The path to the onnx, if building from onnx", + default="build/models/retinanet-resnext50-32x4d/submission/retinanet_resnext50_32x4d_efficientNMS.800x800.onnx", + required=False) + parser.add_argument("--pyt_ckpt_path", + help="Specify where the PyTorch checkpoint file is", + default="build/models/retinanet-resnext50-32x4d/new/retinanet_model_10.pth") + parser.add_argument("--training_repo_path", + help="Specify where the MLCommons training directory is (from https://github.com/mlcommons/training)", + default="/home/scratch.zhihanj_sw/gitlab_root/mlcommons-training" + ) + parser.add_argument("--batch_size", + help="batch size", + type=int, + default=8) + parser.add_argument("--num_samples", + help="Number of samples to run. We have 24781 in total for openImages", + type=int, + default=24781) + parser.add_argument("--trt_precision", + help="Run TensorRT in the specified precision", + choices=("fp32", "fp16", "int8"), + default="fp32") + parser.add_argument("--skip_engine_build", + help="Skip the TRT engine build phase if possible.", + action="store_true") + parser.add_argument("--pytorch", + help="whether to run pytorch inference", + action="store_true") + parser.add_argument("--verbose", + help="verbose output", + action="store_true") + args = parser.parse_args() + + # Pytorch Tester + if args.pytorch: + # TODO: Check existence of training repo. 
+        logging.info(
+            "Running accuracy test for the PyTorch reference implementation.")
+        if args.training_repo_path is None or not os.path.exists(
+                args.training_repo_path):
+            raise RuntimeError(
+                "Please pull the MLCommons training repo from https://github.com/mlcommons/training and specify it with --training_repo_path")
+        pt_tester = PytorchTester(
+            args.pyt_ckpt_path,
+            args.training_repo_path,
+            args.batch_size)
+        pt_acc = pt_tester.run_openimage(args.num_samples)
+        logging.info(
+            f"Pytorch mAP Score: {pt_acc}, Reference: 0.375, % of ref: {pt_acc / 0.375}")
+    else:
+        # TRT Tester
+        logging.info(
+            f"Running accuracy test for retinanet using {args.engine_file} ...")
+        tester = TRTTester(
+            args.engine_file,
+            args.batch_size,
+            args.trt_precision,
+            args.onnx_path,
+            args.skip_engine_build,
+            args.verbose)
+        acc = tester.run_openimage(args.num_samples)
+        logging.info(
+            f"mAP Score: {acc}, Reference: 0.375, % of ref: {acc / 0.375}")
+
+    # To run the TRT tester:
+    # python3 -m code.retinanet.tensorrt.infer --engine_file /tmp/retina.b8.int8.engine --num_samples=1200 --batch_size=8 --trt_precision int8
+    # To run the pytorch tester:
+    # python3 -m code.retinanet.tensorrt.infer --pytorch --num_samples=1200 --batch_size=8
+
+
+if __name__ == "__main__":
+    main()
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/README-about.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/README-about.md
new file mode 100644
index 000000000..b78d64b62
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/README-about.md
@@ -0,0 +1,137 @@
+This script is a CM wrapper around the official [Nvidia submission code](https://github.com/mlcommons/inference_results_v3.0/tree/master/closed/NVIDIA) used for MLPerf inference submissions.
+
+
+
+## Download the needed files
+
+* Please ask privately in [this discord channel](https://discord.gg/y7hupJsUNb) if you would like access to an Amazon S3 bucket containing all the needed files, for convenience. Otherwise, you can download them from the links below.
+
+For x86 machines, please download the latest install tar files from the sites below:
+1. [cuDNN](https://developer.nvidia.com/cudnn) (for CUDA 11)
+2. [TensorRT](https://developer.nvidia.com/tensorrt)
+3. ImageNet validation set (unfortunately not available via a public URL), following the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md)
+
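+After downloading, a quick sanity check can save debugging later. The filenames below match the versions used in the examples in this document; substitute whatever versions you actually downloaded:
+
+```bash
+# Confirm the downloaded archives and the ImageNet folder are where the
+# later commands expect them (here: the user's home directory)
+ls $HOME/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
+   $HOME/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
+   $HOME/imagenet-2012-val
+```
+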
+
+
+
+## Using Docker (Recommended on x86 systems)
+
+
+Assuming all the downloaded files are in the user's home directory, please follow these steps:
+
+1. Download CUDA 11.8:
+    ```
+    wget https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
+    ```
+2. [Install docker](https://docs.docker.com/engine/install/) and the [Nvidia container toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
+
+3. Give docker permission to the current user:
+    ```
+    sudo usermod -aG docker $USER
+    ```
+    Log out and log back in. Restart docker if required, and confirm that the Nvidia container toolkit is working:
+    ```
+    nvidia-ctk --version
+    ```
+4. Check that the Nvidia driver is working properly on the host:
+    ```
+    nvidia-smi
+    ```
+    If the above command produces an error, you'll need to install the Nvidia drivers on the host. You can do this via CM if you have sudo access:
+    ```
+    cmr "install cuda prebuilt _driver" --version=11.8.0
+    ```
+5. Build the docker container and mount the paths from the host machine.
+    **You may want to change the `scratch_path` location as it can take hundreds of GBs.**
+    ```bash
+    cm docker script --tags=build,nvidia,inference,server \
+    --cuda_run_file_path=$HOME/cuda_11.8.0_520.61.05_linux.run \
+    --tensorrt_tar_file_path=$HOME/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
+    --cudnn_tar_file_path=$HOME/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
+    --imagenet_path=$HOME/imagenet-2012-val \
+    --scratch_path=$HOME/mlperf_scratch \
+    --docker_cm_repo=mlcommons@cm4mlops \
+    --results_dir=$HOME/results_dir \
+    --submission_dir=$HOME/submission_dir \
+    --adr.compiler.tags=gcc
+    ```
+    * Use `--docker_cache=no` to turn off docker caching
+    * Use `--docker_run_cmd_prefix="cm pull repo mlcommons@cm4mlops --checkout=dev"` to update the CK repository when docker caching is used
+    * Use `--custom_system=no` if you are using a system similar to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems)
+
+6. At the end of the build you'll get the following prompt unless you have chosen `--custom_system=no`. Please give a system name and say yes to generating the configuration files.
+    ### Example output
+    ```
+    ============================================
+    => A system ID is a string containing only letters, numbers, and underscores
+    => that is used as the human-readable name of the system. It is also used as
+    => the system name when creating the measurements/ and results/ entries.
+    => This string should also start with a letter to be a valid Python enum member name.
+    => Specify the system ID to use for the current system: phoenix
+    => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix
+    => This script will generate Benchmark Configuration stubs for the detected system.
+    Continue? [y/n]: y
+    ```
+    Now you'll be inside the CM Nvidia docker container and can run further scripts.
+
+7. Once the build is complete, you can proceed with any further CM scripts, such as those for MLPerf inference. You can also save the container at this stage using [docker commit](https://docs.docker.com/engine/reference/commandline/commit/) so that it can be launched later without having to go through the previous steps, as shown in the example below.
+
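+    A sketch of that save/relaunch flow (`mlperf-nvidia:custom` is a placeholder image tag; take the container ID from `docker ps`):
+    ```bash
+    docker ps                                    # note the ID of the running CM container
+    docker commit <container_id> mlperf-nvidia:custom
+    docker run -it --gpus all mlperf-nvidia:custom
+    ```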
    + +
+
+
+
+## Without Docker
+
+
+1. Install CUDA
+    If CUDA is not detected, CM should download and install it automatically when you run the workflow.
+    **Nvidia drivers are expected to be installed on the system.**
+
+2. Install cuDNN:
+    ```bash
+    cmr "get cudnn" --tar_file=<path to the cuDNN tar file>
+    ```
+3. Install TensorRT:
+    ```bash
+    cmr "get tensorrt _dev" --tar_file=<path to the TensorRT tar file>
+    ```
+    On non-x86 systems like Nvidia Orin, you can do a package-manager install instead, and CM should pick up the installation automatically during the workflow run.
+
+4. Build the Nvidia inference server:
+    ```
+    cmr "build nvidia inference server" \
+    --adr.install-cuda-prebuilt.local_run_file_path=/data/cuda_11.8.0_520.61.05_linux.run \
+    --adr.tensorrt.tar_file=/data/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
+    --adr.cudnn.tar_file=/data/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
+    --adr.compiler.tags=gcc \
+    [--custom_system=no]
+    ```
+    Use `--custom_system=no` if you are using a system similar to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems).
+
+5. At the end of the build you'll get the following prompt unless you have chosen `--custom_system=no`. Please give a system name and say yes to generating the configuration files.
+
+    ### Example output
+    ```
+    ============================================
+    => A system ID is a string containing only letters, numbers, and underscores
+    => that is used as the human-readable name of the system. It is also used as
+    => the system name when creating the measurements/ and results/ entries.
+    => This string should also start with a letter to be a valid Python enum member name.
+    => Specify the system ID to use for the current system: phoenix
+    => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix
+    => This script will generate Benchmark Configuration stubs for the detected system.
+    Continue? [y/n]: y
+    ```
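+
+Once the server is built, you can proceed with the CM scripts that run MLPerf inference benchmarks natively. The command below is only a sketch based on the commonly documented CM MLPerf run front-end; the exact tags and flags vary across cm4mlops versions, so please verify them against the MLPerf inference documentation:
+
+```bash
+cmr "generate-run-cmds inference _find-performance" \
+    --model=resnet50 --implementation=nvidia-original \
+    --device=cuda --backend=tensorrt --quiet
+```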
    + + +## Acknowledgments + +* A common CM interface and automation for MLPerf inference benchmarks was developed by Arjun Suresh and Grigori Fursin + sponsored by the [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https://cKnowledge.org). +* Nvidia's MLPerf inference implementation was developed by Zhihan Jiang, Ethan Cheng, Yiheng Zhang and Jinho Suh. + diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/README.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/README.md new file mode 100644 index 000000000..3ffb30814 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Reproduce-MLPerf-benchmarks/app-mlperf-inference-nvidia](https://docs.mlcommons.org/cm4mlops/scripts/Reproduce-MLPerf-benchmarks/app-mlperf-inference-nvidia) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/_cm.yaml new file mode 100644 index 000000000..0547783f6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/_cm.yaml @@ -0,0 +1,1853 @@ +# Identification of this CM script +alias: app-mlperf-inference-nvidia +uid: bc3b17fb430f4732 +cache: false +can_force_cache: true + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Reproduce MLPerf benchmarks" + + +# User-friendly tags to find this CM script +tags: + - reproduce + - mlcommons + - mlperf + - inference + - harness + - nvidia-harness + - nvidia + +# Default environment +default_env: + CM_BATCH_COUNT: '1' + CM_BATCH_SIZE: '1' + CM_FAST_COMPILATION: 'yes' + CM_MLPERF_LOADGEN_SCENARIO: Offline + CM_MLPERF_LOADGEN_MODE: performance + # SKIP_POLICIES: '1' + CM_SKIP_PREPROCESS_DATASET: 'no' + CM_SKIP_MODEL_DOWNLOAD: 'no' + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: nvidia_original + CM_MLPERF_SKIP_RUN: 'no' +env: + CM_CALL_MLPERF_RUNNER: 'no' + +# Map script inputs to environment variables +input_mapping: + count: CM_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: CM_MLPERF_CONF + mode: CM_MLPERF_LOADGEN_MODE + output_dir: CM_MLPERF_OUTPUT_DIR + scenario: CM_MLPERF_LOADGEN_SCENARIO + user_conf: CM_MLPERF_USER_CONF + devices: CM_MLPERF_NVIDIA_HARNESS_DEVICES + skip_preprocess: CM_SKIP_PREPROCESS_DATASET + skip_preprocessing: CM_SKIP_PREPROCESS_DATASET + target_qps: CM_MLPERF_LOADGEN_TARGET_QPS + offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS + target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY + singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + use_triton: CM_MLPERF_NVIDIA_HARNESS_USE_TRITON + gpu_copy_streams: CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS + gpu_inference_streams: CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS + gpu_batch_size: CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE + dla_copy_streams: CM_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS + dla_inference_streams: CM_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS + dla_batch_size: CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE + input_format: CM_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT + performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + workspace_size: CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE + log_dir: CM_MLPERF_NVIDIA_HARNESS_LOG_DIR + use_graphs: 
CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS + run_infer_on_copy_streams: CM_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS + start_from_device: CM_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE + end_on_device: CM_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE + max_dlas: CM_MLPERF_NVIDIA_HARNESS_MAX_DLAS + power_setting: CM_MLPERF_NVIDIA_HARNESS_POWER_SETTING + make_cmd: MLPERF_NVIDIA_RUN_COMMAND + rerun: CM_RERUN + extra_run_options: CM_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS + use_deque_limit: CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT + deque_timeout_usec: CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC + use_cuda_thread_per_device: CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE + num_warmups: CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS + graphs_max_seqlen: CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN + num_issue_query_threads: CM_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS + soft_drop: CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP + use_small_tile_gemm_plugin: CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN + audio_buffer_num_lines: CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES + use_fp8: CM_MLPERF_NVIDIA_HARNESS_USE_FP8 + enable_sort: CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT + num_sort_segments: CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS + skip_postprocess: CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS + embedding_weights_on_gpu_part: CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART + sdxl_batcher_time_limit: CM_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Get Nvidia scratch space where data and models get downloaded + - tags: get,mlperf,inference,nvidia,scratch,space + names: + - nvidia-scratch-space + + # Get MLPerf logging library + - tags: get,generic-python-lib,_mlperf_logging + names: + - mlperf-logging + + ######################################################################## + # Install ResNet50 model (ONNX) and ImageNet + + - enable_if_env: + CM_MODEL: + - resnet50 + skip_if_env: + CM_USE_DATASET_FROM_HOST: + - 'yes' + CM_RUN_STATE_DOCKER: + - 'yes' + names: + - imagenet-original + tags: get,dataset,original,imagenet,_full + + - enable_if_env: + CM_MODEL: + - resnet50 + names: + - resnet50-model + - ml-model + tags: get,ml-model,resnet50,_fp32,_onnx,_opset-8 + + ######################################################################## + # Install kits19 dataset + + - enable_if_env: + CM_MODEL: + - 3d-unet-99-disabled + - 3d-unet-99.9-disabled + names: + - kits19-original + tags: get,dataset,original,kits19 + + + ######################################################################## + # Install librispeech dataset + + - enable_if_env: + CM_MODEL: + - rnnt + names: + - librispeech-original + tags: get,dataset,original,librispeech + + ######################################################################## + # Install criteo dataset + + - enable_if_env: + CM_MODEL: + - dlrm-v2-99 + - dlrm-v2-99.9 + skip_if_any_env: + DLRM_DATA_PATH: + - 'on' + CM_RUN_STATE_DOCKER: + - 'yes' + names: + - criteo-preprocessed + tags: get,dataset,preprocessed,criteo + + ######################################################################## + # Install dlrm model + - enable_if_env: + CM_MODEL: + - dlrm-v2-99 + - dlrm-v2-99.9 + skip_if_any_env: + DLRM_DATA_PATH: + - on + CM_RUN_STATE_DOCKER: + - 'yes' + names: + - dlrm-model + tags: get,ml-model,dlrm,_pytorch + + 
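+  # Note on the model-specific dependencies in this section: enable_if_env
+  # makes an entry run only when CM_MODEL matches one of the listed values,
+  # while skip_if_env / skip_if_any_env suppress downloads that are already
+  # provided by the host or by an earlier docker build stage.
+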
######################################################################## + # Install bert models + - enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + names: + - bert-model + - bert-model-fp32 + tags: get,ml-model,bert,_onnx,_fp32 + + - enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + names: + - bert-model + - bert-model-int8 + tags: get,ml-model,bert,_onnx,_int8 + + - enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + names: + - bert-vocab + tags: get,squad-vocab + + ######################################################################## + # Install OpenImages + + - enable_if_env: + CM_MODEL: + - retinanet + skip_if_env: + CM_USE_DATASET_FROM_HOST: + - 'yes' + CM_RUN_STATE_DOCKER: + - 'yes' + names: + - openimages-original + tags: get,dataset,original,openimages,_validation,_full,_custom-annotations + + - enable_if_env: + CM_MODEL: + - retinanet + skip_if_env: + CM_USE_DATASET_FROM_HOST: + - 'yes' + CM_RUN_STATE_DOCKER: + - 'yes' + names: + - openimages-calibration + tags: get,dataset,original,openimages,_calibration + + ######################################################################## + # Install MLPerf inference dependencies + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + + # Download Nvidia Submission Code + - tags: get,nvidia,mlperf,inference,common-code + names: + - nvidia-inference-common-code + + - tags: pull,git,repo + env: + CM_GIT_CHECKOUT_PATH: '<<>>' + enable_if_env: + CM_MLPERF_INFERENCE_PULL_CODE_CHANGES: + - 'yes' + + # Creates user conf for given SUT + - tags: generate,user-conf,mlperf,inference + names: + - user-conf-generator + enable_if_env: + CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: + - run_harness + + - tags: get,generic-python-lib,_package.pycuda + version: "2022.2.2" + + - tags: get,generic-python-lib,_package.nvmitten + update_tags_from_env_with_prefix: + _path.: + - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH + enable_if_env: + CM_RUN_STATE_DOCKER: + - 'yes' + - True + - 'True' + + - tags: get,nvidia,mitten + skip_if_env: + CM_RUN_STATE_DOCKER: + - 'yes' + - True + - 'True' + +prehook_deps: + ######################################################################## + # Install GPTJ-6B model + - enable_if_env: + CM_REQUIRE_GPTJ_MODEL_DOWNLOAD: + - 'yes' + CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: + - download_model + - preprocess_data + names: + - gptj-model + tags: get,ml-model,gptj,_pytorch,_rclone + + # Download model for sdxl + - enable_if_env: + CM_MODEL: + - stable-diffusion-xl + CM_REQUIRE_SDXL_MODEL_DOWNLOAD: + - 'yes' + names: + - stable-diffusion-xl + - sdxl-model + - ml-model + tags: get,ml-model,sdxl,_fp16,_rclone + skip_if_env: + CM_RUN_STATE_DOCKER: + - 'yes' + CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: + - 'yes' + + # Install coco2014 dataset + - enable_if_env: + CM_REQUIRE_COCO2014_DOWNLOAD: + - 'yes' + CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: + - preprocess_data + names: + - coco2014-dataset + tags: get,dataset,coco2014,_validation + +# Post dependencies to run this app including for power measurement +post_deps: + + - names: + - runner + - mlperf-runner + skip_if_env: + CM_MLPERF_SKIP_RUN: + - 'yes' + - yes + tags: benchmark-mlperf + enable_if_env: + CM_CALL_MLPERF_RUNNER: + - yes + - tags: save,mlperf,inference,state + names: + - save-mlperf-inference-state + +# Variations to customize dependencies +variations: + # MLPerf inference version + v4.1: + group: version + env: + CM_MLPERF_INFERENCE_CODE_VERSION: "v4.1" + CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized + adr: + pytorch: + tags: 
_for-nvidia-mlperf-inference-v4.1 + + v4.1-dev: + group: version + default: true + env: + CM_MLPERF_INFERENCE_CODE_VERSION: "v4.0" + CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized + adr: + pytorch: + tags: _for-nvidia-mlperf-inference-v4.0 + + v4.0: + group: version + env: + CM_MLPERF_INFERENCE_CODE_VERSION: "v4.0" + CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized + adr: + pytorch: + tags: _for-nvidia-mlperf-inference-v4.0 + v3.1: + env: + CM_MLPERF_INFERENCE_CODE_VERSION: "v3.1" + CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-07142023.pth + adr: + pytorch: + tags: _for-nvidia-mlperf-inference-v3.1 + + # Target devices + cpu: + group: device + env: + CM_MLPERF_DEVICE: cpu + cuda: + group: device + default: true + env: + CM_MLPERF_DEVICE: gpu + CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + + tensorrt: + group: backend + default: true + env: + CM_MLPERF_BACKEND: tensorrt + CM_MLPERF_BACKEND_NAME: TensorRT + + # Reference MLPerf models + resnet50: + group: model + default: true + env: + CM_MODEL: resnet50 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: 10 + deps: + - tags: get,generic-python-lib,_onnx-graphsurgeon + version: 0.3.27 + - tags: get,generic-python-lib,_package.onnx + version: 1.13.1 + + retinanet: + group: model + env: + CM_MODEL: retinanet + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth" + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + deps: + - tags: get,generic-python-lib,_Pillow + - tags: get,generic-python-lib,_opencv-python + - tags: get,generic-python-lib,_numpy + - tags: get,generic-python-lib,_pycocotools + - tags: get,generic-python-lib,_onnx-graphsurgeon + - tags: get,generic-python-lib,_package.onnx + version: 1.14.1 + - tags: get,generic-python-lib,_package.sympy + + sdxl: + new_env_keys: + - CM_SDXL_ACCURACY_RUN_DEVICE + group: model + env: + CM_MODEL: stable-diffusion-xl + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/main/script/get-ml-model-stable-diffusion/_cm.json#L174" + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "quantization, affine fusion" + CM_ML_MODEL_INPUTS_DATA_TYPE: int32 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_SDXL_ACCURACY_RUN_DEVICE: "gpu" + deps: + - tags: get,generic-python-lib,_package.diffusers + names: + - diffusers + version_max: "0.30.3" + version_max_usable: "0.30.3" + - tags: get,generic-python-lib,_package.transformers + names: + - transformers + - tags: get,generic-python-lib,_package.accelerate + names: + - accelerate + - tags: get,generic-python-lib,_package.torchmetrics + names: + - torchmetrics + - tags: get,generic-python-lib,_package.torch-fidelity + names: + - torch-fidelity + - tags: get,generic-python-lib,_package.open_clip_torch + names: + - open-clip + - tags: get,generic-python-lib,_package.opencv-python + names: + - opencv-python + - tags: get,generic-python-lib,_package.polygraphy + names: + - polygraphy + - tags: get,generic-python-lib,_package.nvtx + names: + - nvtx + - tags: get,generic-python-lib,_package.cuda-python + names: + - cuda-python + - tags: get,generic-python-lib,_package.ninja + names: + - ninja + - tags: get,generic-python-lib,_package.onnxruntime + names: + - onnxruntime + - tags: get,generic-python-lib,_package.colored + names: + - colored + - tags: 
get,generic-python-lib,_package.nvidia-ammo + names: + - nvidia-ammo + version: 0.7.4 + env: + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: "https://pypi.nvidia.com" + CM_GENERIC_PYTHON_PIP_EXTRA: "--no-cache-dir" + - tags: get,generic-python-lib,_package.optimum + names: + - optimum + - tags: get,generic-python-lib,_package.onnx + names: + - onnx + version: 1.14.0 + - tags: get,generic-python-lib,_package.scipy + names: + - scipy + version: 1.10.1 + - tags: get,generic-python-lib,_package.numpy + names: + - numpy + version_max: 1.22.99 + version_max_usable: "1.22" + + sdxl,v4.1: + deps: + - tags: get,generic-python-lib,_package.torchrec + version: 0.4.0 + - tags: get,generic-python-lib,_package.torchmetrics + version: 1.0.3 + - tags: get,generic-python-lib,_package.typeguard + + bert_: + deps: + - tags: get,generic-python-lib,_transformers + - tags: get,generic-python-lib,_safetensors + - tags: get,generic-python-lib,_onnx + - tags: get,generic-python-lib,_package.sympy + - tags: get,generic-python-lib,_onnx-graphsurgeon + + bert-99: + group: model + base: + - bert_ + env: + CM_MODEL: bert-99 + CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx" + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: int32 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + + bert-99.9: + group: model + base: + - bert_ + env: + CM_MODEL: bert-99.9 + CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: int32 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + + 3d-unet_: + deps: + - tags: get,generic-python-lib,_transformers + - tags: get,generic-python-lib,_package.nibabel + - tags: get,generic-python-lib,_pandas + version_max: "1.5.3" + - tags: get,generic-python-lib,_onnx-graphsurgeon + version: 0.3.27 + - tags: get,generic-python-lib,_package.onnx + version: 1.13.1 + + 3d-unet-99: + group: model + base: + - 3d-unet_ + env: + CM_MODEL: 3d-unet-99 + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx" + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + + 3d-unet-99.9: + group: model + base: + - 3d-unet_ + env: + CM_MODEL: 3d-unet-99.9 + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx" + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + + rnnt: + group: model + env: + CM_MODEL: rnnt + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt" + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: fp16 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + deps: + - tags: get,generic-python-lib,_toml + - tags: get,generic-python-lib,_torchvision_cuda + names: + - torchvision + - tags: get,generic-python-lib,_torch_cuda + - tags: get,generic-python-lib,_nvidia-apex + - tags: get,generic-python-lib,_unidecode + - tags: get,generic-python-lib,_inflect + - tags: get,generic-python-lib,_librosa + names: + - librosa + - tags: get,generic-python-lib,_sox + - tags: get,generic-sys-util,_sox + + dlrm_: + new_env_keys: + - CM_DLRM_V2_DAY23_FILE_PATH + - CM_DLRM_V2_AGGREGATION_TRACE_FILE_PATH + deps: 
+ - tags: get,dlrm,data,mlperf,inference,_nvidia + - tags: get,generic-python-lib,_package.torchsnapshot + - tags: get,generic-python-lib,_package.torchrec + version: 0.3.2 + - tags: get,generic-python-lib,_package.fbgemm-gpu + version: 0.3.2 + - tags: get,generic-python-lib,_onnx-graphsurgeon + - tags: get,generic-python-lib,_package.scikit-learn + + dlrm-v2-99: + group: model + base: + - dlrm_ + env: + CM_MODEL: dlrm-v2-99 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + + dlrm-v2-99.9: + group: model + base: + - dlrm_ + env: + CM_MODEL: dlrm-v2-99.9 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + + llama2-70b_: + deps: + - tags: get,generic-python-lib,_package.transformers + names: + - transformers + - tags: get,generic-python-lib,_package.datasets + names: + - datasets + - tags: get,generic-python-lib,_package.sentencepiece + names: + - sentencepiece + - tags: get,generic-python-lib,_package.protobuf + names: + - protobuf + - tags: get,generic-python-lib,_package.accelerate + names: + - accelerate + - tags: get,generic-python-lib,_package.absl-py + names: + - absl-py + - tags: get,generic-python-lib,_package.evaluate + names: + - evaluate + - tags: get,generic-python-lib,_package.nltk + names: + - nltk + - tags: get,generic-python-lib,_package.numpy + names: + - numpy + - tags: get,generic-python-lib,_package.rouge-score + names: + - rouge-score + env: + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/b18ff890ff559e21d2e27a3b54cd26467ac1fd9e/script/get-ml-model-llama2/_cm.json#L51" + CM_ML_MODEL_INPUTS_DATA_TYPE: int32 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + + llama2-70b-99: + group: model + base: + - llama2-70b_ + env: + CM_MODEL: llama2-70b-99 + + llama2-70b-99.9: + group: model + base: + - llama2-70b_ + env: + CM_MODEL: llama2-70b-99.9 + + gptj_: + deps: + - tags: get,generic-python-lib,_package.datasets + - tags: get,generic-python-lib,_package.simplejson + - tags: get,generic-python-lib,_onnx + - tags: get,generic-python-lib,_transformers + - tags: get,generic-python-lib,_onnx-graphsurgeon + - tags: get,generic-python-lib,_package.sympy + env: + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download" + + gptj_,build: + deps: + - tags: install,pytorch,from.src + names: + - pytorch + - tags: get,cmake + version_min: "3.25.0" + + gptj_,build_engine: + deps: + - tags: install,pytorch,from.src + names: + - pytorch + - tags: get,cmake + version_min: "3.25.0" + + gptj-99: + group: model + base: + - gptj_ + env: + CM_MODEL: gptj-99 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: int32 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + + gptj-99.9: + group: model + base: + - gptj_ + env: + CM_MODEL: gptj-99.9 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, affine fusion + CM_ML_MODEL_INPUTS_DATA_TYPE: int32 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp16 + + batch_size.#: + group: batch-size + env: + CM_MODEL_BATCH_SIZE: "#" + CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "#" + #CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: "gpu_batch_size.#" + + dla_batch_size.#: + group: dla-batch-size + env: + CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE: "#" + CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2: "dla_batch_size.#" + adr: + build-engine: + tags: _dla_batch_size.# + + use_triton: 
+ group: triton + env: + CM_MLPERF_NVIDIA_HARNESS_USE_TRITON: "yes" + CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX3: "using_triton" + + use-graphs: + group: graphs + env: + CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: "yes" + + prebuild: + group: run-mode + env: + MLPERF_NVIDIA_RUN_COMMAND: prebuild + CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: prebuild + + build: + group: run-mode + env: + MLPERF_NVIDIA_RUN_COMMAND: build + CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: build + deps: + - tags: get,cmake + version_min: "3.18" + + # Detect Google Logger + - tags: get,generic,sys-util,_glog-dev + + # Detect GFlags + - tags: get,generic,sys-util,_gflags-dev + + # Detect libgmock-dev + - tags: get,generic,sys-util,_libgmock-dev + + # Detect libre2-dev + - tags: get,generic,sys-util,_libre2-dev + + # Detect libnuma-dev + - tags: get,generic,sys-util,_libnuma-dev + + # Detect libboost-all-dev + - tags: get,generic,sys-util,_libboost-all-dev + + # Detect rapidjson-dev + - tags: get,generic,sys-util,_rapidjson-dev + + # Detect CUDA + - names: + - cuda + tags: get,cuda,_cudnn + + # Detect Tensorrt + - names: + - tensorrt + tags: get,tensorrt + + # Build nvidia inference server + - names: + - nvidia-inference-server + tags: build,nvidia,inference,server + + + maxq: + group: power-mode + env: + CM_MLPERF_NVIDIA_HARNESS_MAXQ: yes + + maxn: + group: power-mode + env: + CM_MLPERF_NVIDIA_HARNESS_MAXN: yes + + preprocess-data: + alias: preprocess_data + + preprocess_data: + group: run-mode + env: + MLPERF_NVIDIA_RUN_COMMAND: preprocess_data + CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: preprocess_data + + download-model: + alias: download_model + + download_model: + group: run-mode + env: + MLPERF_NVIDIA_RUN_COMMAND: download_model + CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: download_model + deps: + - tags: get,generic-python-lib,_torch_cuda + enable_if_env: + CM_MODEL: + - retinanet + + calibrate: + group: run-mode + env: + MLPERF_NVIDIA_RUN_COMMAND: calibrate + CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: calibrate + deps: + - tags: reproduce,mlperf,inference,nvidia,harness,_download_model + inherit_variation_tags: true + force_cache: true + skip_inherit_variation_groups: + - run-mode + - loadgen-scenario + - device-memory + - gpu-name + - power-mode + - batch-size + - triton + skip_if_env: + CM_MODEL: + - retinanet_old + - resnet50 + - bert-99 + - bert-99.9 + - dlrm-v2-99 + - dlrm-v2-99.9 + + build-engine: + alias: build_engine + + build_engine: + group: run-mode + default_variations: + loadgen-scenario: offline + env: + MLPERF_NVIDIA_RUN_COMMAND: generate_engines + CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: generate_engines + deps: + # Detect CUDA + - names: + - cuda + tags: get,cuda,_cudnn + + # Detect Tensorrt + - names: + - tensorrt + tags: get,tensorrt + + # Build nvidia inference server + - names: + - nvidia-inference-server + tags: build,nvidia,inference,server + + - tags: reproduce,mlperf,inference,nvidia,harness,_preprocess_data + names: + - nvidia-preprocess-data + inherit_variation_tags: true + force_cache: true + skip_inherit_variation_groups: + - run-mode + - loadgen-scenario + - device-memory + - gpu-name + - batch-size + - num-gpus + - triton + - build-engine-options + skip_if_env: + CM_MODEL: + - dlrm-v2-99 + - dlrm-v2-99.9 + + - tags: reproduce,mlperf,inference,nvidia,harness,_download_model + inherit_variation_tags: true + force_cache: true + skip_inherit_variation_groups: + - run-mode + - loadgen-scenario + - device-memory + - gpu-name + - num-gpus + - batch-size + - triton + - power-mode + - build-engine-options + skip_if_env: + CM_MODEL: + - 
retinanet_old + - resnet50 + - bert-99 + - bert-99.9 + - dlrm-v2-99 + - dlrm-v2-99.9 + + - tags: reproduce,mlperf,inference,nvidia,harness,_calibrate + inherit_variation_tags: true + enable_if_env: + CM_MODEL: + - retinanet + force_cache: true + skip_inherit_variation_groups: + - run-mode + - loadgen-scenario + - device-memory + - device-type + - num-gpus + - power-mode + - gpu-name + - triton + - batch-size + - build-engine-options + + + singlestream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: SingleStream + CUDA_VISIBLE_DEVICES_NOT_USED: "0" + multistream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: MultiStream + offline: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Offline + server: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Server + + run-harness: + alias: run_harness + + run_harness: + group: run-mode + default: true + default_variations: + loadgen-scenario: offline + deps: + # Detect CUDA + - names: + - cuda + tags: get,cuda,_cudnn + + # Detect Tensorrt + - names: + - tensorrt + tags: get,tensorrt + + # Build nvidia inference server + - names: + - nvidia-inference-server + tags: build,nvidia,inference,server + - tags: reproduce,mlperf,inference,nvidia,harness,_build_engine + inherit_variation_tags: true + names: + - build-engine + skip_inherit_variation_groups: + - run-mode + - gpu-name + - num-gpus + - device-memory + force_cache: true + + - tags: reproduce,mlperf,inference,nvidia,harness,_preprocess_data + inherit_variation_tags: true + names: + - nvidia-preprocess-data + skip_inherit_variation_groups: + - run-mode + - loadgen-scenario + - num-gpus + - device-memory + - power-mode + - gpu-name + - batch-size + - triton + - build-engine-options + force_cache: true + skip_if_env: + CM_MODEL: + - dlrm-v2-99 + - dlrm-v2-99.9 + + - tags: reproduce,mlperf,inference,nvidia,harness,_download_model + inherit_variation_tags: true + skip_inherit_variation_groups: + - run-mode + - loadgen-scenario + - device-memory + - gpu-name + - num-gpus + - power-mode + - batch-size + - build-engine-options + force_cache: true + skip_if_env: + CM_MODEL: + - retinanet + - resnet50 + - bert-99 + - bert-99.9 + - dlrm-v2-99 + - dlrm-v2-99.9 + - stable-diffusion-xl + env: + CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: run_harness + MLPERF_NVIDIA_RUN_COMMAND: run_harness + CM_CALL_MLPERF_RUNNER: 'yes' + new_env_keys: + - CM_MLPERF_* + - CM_DATASET_* + - CM_ML_MODEL_* + - CM_HW_NAME + - CM_MAX_EXAMPLES + new_state_keys: + - mlperf-inference-implementation + - CM_SUT_* + + build_engine_options.#: + group: build-engine-options + env: + CM_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS: "#" + + gpu_memory.16: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "16" + gpu_memory.24: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "24" + gpu_memory.8: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "8" + gpu_memory.32: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "32" + gpu_memory.40: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "40" + gpu_memory.48: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "48" + gpu_memory.80: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "80" + gpu_memory.#: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "#" + + singlestream,resnet50: + env: + CM_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK: yes + SKIP_POLICIES: '0' # skip_policies used to give better latency but is not working with 4.0 and later Nvidia codes + + server,resnet50: + env: + 
CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000 + CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: True + CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE: True + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: 9 + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: 2 + + multistream,resnet50: + env: + CM_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK: yes + SKIP_POLICIES: '0' + + singlestream,run_harness: + default_variations: + batch-size: batch_size.1 + + llama2-70b_,run_harness: + env: + CM_MLPERF_NVIDIA_HARNESS_USE_FP8: 'True' + + gptj_,run_harness: + deps: + - tags: install,pytorch,from.src + names: + - pytorch + - tags: get,cmake + version_min: "3.25.0" + env: + CM_MLPERF_NVIDIA_HARNESS_USE_FP8: 'True' + CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT: 'True' + CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS: '2' + CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS: True + + gpu_memory.80,num-gpus.2,llama2-70b,offline,run_harness: + default_variations: + batch-size: batch_size.896 + + gpu_memory.16,gptj_,offline,run_harness: + default_variations: + batch-size: batch_size.4 + + gpu_memory.24,gptj_,offline,run_harness: + default_variations: + batch-size: batch_size.7 + + gpu_memory.32,gptj_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.48,gptj_,offline,run_harness: + default_variations: + batch-size: batch_size.14 + + gpu_memory.40,gptj_,offline,run_harness: + default_variations: + batch-size: batch_size.10 + + gpu_memory.80,gptj_,offline,run_harness: + default_variations: + batch-size: batch_size.32 + + gpu_memory.16,sdxl,offline,run_harness: + default_variations: + batch-size: batch_size.2 + + gpu_memory.24,sdxl,offline,run_harness: + default_variations: + batch-size: batch_size.2 + + gpu_memory.32,sdxl,offline,run_harness: + default_variations: + batch-size: batch_size.3 + + gpu_memory.80,sdxl,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.96,sdxl,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.96,sdxl,server,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.80,sdxl,server,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.140,sdxl,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.8,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.256 + + gpu_memory.16,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.256 + + gpu_memory.24,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.256 + + gpu_memory.32,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.256 + + gpu_memory.48,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.1024 + + gpu_memory.40,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.256 + + gpu_memory.80,bert_,server,run_harness: + default_variations: + batch-size: batch_size.64 + + gpu_memory.8,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.64 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" + + gpu_memory.16,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.1024 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" + + gpu_memory.40,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + gpu_memory.24,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.64 + + gpu_memory.32,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + 
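Taken together, keys such as `gpu_memory.24,resnet50,offline,run_harness` above and below form a tuned batch-size matrix: a combined variation applies only when every comma-separated tag in its key is active, and its `default_variations` then fill in any group (here `batch-size`) that the user did not set explicitly. A minimal Python sketch of this resolution logic (illustrative only; the function name and data are hypothetical, not CM's actual implementation):

```python
# Illustrative sketch of combined-variation resolution (not CM code):
# a combo key applies when all of its comma-separated tags are active.
def resolve_defaults(active_tags, combos):
    """Merge default_variations from every matching combo key."""
    merged = {}
    for key, defaults in combos.items():
        if set(key.split(",")) <= set(active_tags):
            merged.update(defaults)
    return merged

combos = {
    "gpu_memory.24,resnet50,offline,run_harness": {"batch-size": "batch_size.64"},
    "gpu_memory.80,resnet50,offline,run_harness": {"batch-size": "batch_size.2048"},
}

active = {"gpu_memory.24", "resnet50", "offline", "run_harness"}
print(resolve_defaults(active, combos))  # -> {'batch-size': 'batch_size.64'}
```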
gpu_memory.48,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + gpu_memory.80,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + num-gpus.#: + group: num-gpus + env: + CM_NVIDIA_NUM_GPUS: "#" + + num-gpus.1: + group: num-gpus + default: true + env: + CM_NVIDIA_NUM_GPUS: "1" + + resnet50,server,run_harness: + default_variations: + batch-size: batch_size.64 + + resnet50,multistream,run_harness,num-gpus.1: + default_variations: + batch-size: batch_size.8 + + resnet50,multistream,run_harness,num-gpus.2: + default_variations: + batch-size: batch_size.4 + + retinanet,multistream,run_harness: + default_variations: + batch-size: batch_size.2 + + gpu_memory.8,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.2 + + gpu_memory.16,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.2 + + gpu_memory.40,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.4 + + gpu_memory.32,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.4 + + gpu_memory.48,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.4 + + gpu_memory.24,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.2 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + + gpu_memory.80,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + retinanet,server,run_harness: + default_variations: + batch-size: batch_size.8 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + + gpu_memory.8,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.1024 + + gpu_memory.16,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.1024 + + gpu_memory.40,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + gpu_memory.24,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + gpu_memory.32,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + gpu_memory.48,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + gpu_memory.80,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + gpu_memory.8,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.4 + + gpu_memory.16,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.4 + + gpu_memory.40,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.24,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.80,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.32,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.48,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + gpu_memory.16,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + + gpu_memory.40,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + env: + CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.40" + + gpu_memory.24,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + env: + CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.30" + + gpu_memory.32,dlrm_,offline,run_harness: + default_variations: + 
batch-size: batch_size.1400 + + gpu_memory.48,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + env: + CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.50" + + gpu_memory.80,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + + orin: + group: gpu-name + env: + CM_NVIDIA_CUSTOM_GPU: "yes" + CM_MODEL_BATCH_SIZE: "" #we pick from nvidia config + CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "<<>>" + + orin,rnnt,singlestream,run_harness: + env: + CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: "1" + + orin,sdxl,offline,run_harness: + default_variations: + batch-size: batch_size.1 + + rtx_4090: + group: gpu-name + env: + CM_NVIDIA_CUSTOM_GPU: "yes" + + rtx_4090,sdxl,offline,run_harness: + default_variations: + batch-size: batch_size.2 + graphs: use-graphs + + rtx_4090,sdxl,server,run_harness: + default_variations: + batch-size: batch_size.2 + graphs: use-graphs + + rtx_4090,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.64 + + rtx_4090,resnet50,server,run_harness: + default_variations: + batch-size: batch_size.32 + graphs: use-graphs + + rtx_4090,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.2 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + + rtx_4090,retinanet,server,run_harness: + default_variations: + batch-size: batch_size.2 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + + rtx_4090,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.256 + + rtx_4090,bert_,server,run_harness: + default_variations: + batch-size: batch_size.256 + + rtx_4090,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + rtx_4090,3d-unet_,server,run_harness: + default_variations: + batch-size: batch_size.8 + + rtx_4090,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + rtx_4090,rnnt,server,run_harness: + default_variations: + batch-size: batch_size.2048 + + rtx_4090,gptj_,offline,run_harness: + default_variations: + batch-size: batch_size.7 + + rtx_4090,gptj_,server,run_harness: + default_variations: + batch-size: batch_size.7 + + rtx_4090,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + env: + CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART: "0.30" + + rtx_a6000: + group: gpu-name + env: + CM_NVIDIA_CUSTOM_GPU: "yes" + + rtx_a6000,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.64 + + rtx_a6000,resnet50,server,run_harness: + default_variations: + batch-size: batch_size.32 + + rtx_a6000,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.2 + + rtx_a6000,retinanet,server,run_harness: + default_variations: + batch-size: batch_size.2 + + rtx_a6000,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.256 + + rtx_a6000,bert_,server,run_harness: + default_variations: + batch-size: batch_size.256 + + rtx_a6000,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + rtx_a6000,3d-unet_,server,run_harness: + default_variations: + batch-size: batch_size.8 + + rtx_a6000,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + rtx_a6000,rnnt,server,run_harness: + default_variations: + batch-size: batch_size.512 + + rtx_a6000,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + + rtx_6000_ada: + group: gpu-name + env: + 
CM_NVIDIA_CUSTOM_GPU: "yes" + + rtx_6000_ada,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.64 + + rtx_6000_ada,resnet50,server,run_harness: + default_variations: + batch-size: batch_size.32 + + rtx_6000_ada,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.2 + + rtx_6000_ada,retinanet,server,run_harness: + default_variations: + batch-size: batch_size.2 + + rtx_6000_ada,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.256 + + rtx_6000_ada,bert_,server,run_harness: + default_variations: + batch-size: batch_size.256 + + rtx_6000_ada,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + rtx_6000_ada,3d-unet_,server,run_harness: + default_variations: + batch-size: batch_size.8 + + rtx_6000_ada,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.512 + + rtx_6000_ada,rnnt,server,run_harness: + default_variations: + batch-size: batch_size.512 + + rtx_6000_ada,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + + l4: + group: gpu-name + env: + CM_NVIDIA_CUSTOM_GPU: "yes" + + l4,sdxl,offline,run_harness: + default_variations: + batch-size: batch_size.1 + env: + CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 0.6 + + l4,sdxl,offline,run_harness,num-gpus.8: + default_variations: + batch-size: batch_size.1 + env: + CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 4.8 + + l4,sdxl,server,run_harness,num-gpus.1: + default_variations: + batch-size: batch_size.1 + env: + CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + CM_MLPERF_LOADGEN_SERVER_TARGET_QPS: 0.55 + CM_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT: 0 + + l4,sdxl,server,run_harness,num-gpus.8: + default_variations: + batch-size: batch_size.1 + env: + CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + CM_MLPERF_LOADGEN_SERVER_TARGET_QPS: 5.05 + CM_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT: 0 + + l4,resnet50: + default_env: + CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 10500 + CM_MLPERF_LOADGEN_SERVER_TARGET_QPS: 9000 + CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY: 0.35 + CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY: 1 + + l4,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.32 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "1" + CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + + l4,resnet50,server,run_harness: + default_variations: + batch-size: batch_size.16 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "9" + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True' + CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000 + CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE: 'True' + + l4,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.2 + + l4,retinanet,server,run_harness: + default_variations: + batch-size: batch_size.2 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True' + CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 30000 + CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: 20000000000 + + l4,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.16 + + l4,bert_,server,run_harness: + default_variations: + batch-size: batch_size.16 + env: + CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN: "200" + 
CM_MLPERF_NVIDIA_HARNESS_SERVER_NUM_ISSUE_QUERY_THREADS: "1" + CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP: "1.0" + CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN: "True" + + l4,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.1 + + l4,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.512 + + l4,rnnt,server,run_harness: + default_variations: + batch-size: batch_size.512 + env: + CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "64" + CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES: "1024" + CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS: "1024" + + l4,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + t4: + group: gpu-name + env: + CM_NVIDIA_CUSTOM_GPU: "yes" + + t4,resnet50: + default_env: + CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS: 4900 + CM_MLPERF_LOADGEN_SERVER_TARGET_QPS: 4000 + CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY: 0.6 + CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY: 2 + + t4,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.256 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" + + t4,resnet50,server,run_harness: + default_variations: + batch-size: batch_size.26 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" + CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: True + CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 2000 + CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP: "0.993" + + t4,retinanet,offline,run_harness: + default_variations: + batch-size: batch_size.4 + + t4,retinanet,server,run_harness: + default_variations: + batch-size: batch_size.2 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT: 'True' + CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC: 20000 + CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: 20000000000 + + t4,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.256 + + t4,bert_,server,run_harness: + default_variations: + batch-size: batch_size.4 + env: + CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN: "240" + CM_MLPERF_NVIDIA_HARNESS_SERVER_NUM_ISSUE_QUERY_THREADS: "0" + CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN: "no" + + t4,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + t4,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" + CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "128" + CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN: "True" + + t4,rnnt,server,run_harness: + default_variations: + batch-size: batch_size.2048 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "4" + CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS: 'True' + CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE: "128" + CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN: "True" + + t4,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + + pcie: + group: gpu-connection + + sxm: + group: gpu-connection + + custom: + group: gpu-name + env: + CM_NVIDIA_CUSTOM_GPU: "yes" + CM_MODEL_BATCH_SIZE: "" #we pick from nvidia config + CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: "<<>>" + + a100: + default_variation: + gpu-connection: sxm + group: gpu-name + env: + CM_NVIDIA_CUSTOM_GPU: "yes" + + a100,sxm,resnet50,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + env: + CM_MLPERF_PERFORMANCE_SAMPLE_COUNT: "2048" + + a100,sxm,retinanet,offline,run_harness: + default_variations: + 
batch-size: batch_size.32 + env: + CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS: "2" + CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE: "300000000000" + + a100,sxm,bert_,offline,run_harness: + default_variations: + batch-size: batch_size.256 + + a100,sxm,3d-unet_,offline,run_harness: + default_variations: + batch-size: batch_size.8 + + a100,sxm,rnnt,offline,run_harness: + default_variations: + batch-size: batch_size.2048 + + a100,sxm,dlrm_,offline,run_harness: + default_variations: + batch-size: batch_size.1400 + +docker: + real_run: False diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/customize.py new file mode 100644 index 000000000..0ede381f8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/customize.py @@ -0,0 +1,722 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + env = i['env'] + + if str(env.get('CM_RUN_STATE_DOCKER', '')).lower() in ['1', 'true', 'yes']: + return {'return': 0} + + if env.get('CM_MODEL', '') == '': + return { + 'return': 1, 'error': 'Please select a variation specifying the model to run'} + + make_command = env['MLPERF_NVIDIA_RUN_COMMAND'] + + if env.get('CM_MLPERF_DEVICE', '') == '': + return { + 'return': 1, 'error': 'Please select a variation specifying the device to run on'} + + if env.get('CM_MLPERF_SKIP_RUN', + '') == "yes" and make_command == "run_harness": + return {'return': 0} + + env['MLPERF_SCRATCH_PATH'] = env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] + + cmds = [] + scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] + mode = env['CM_MLPERF_LOADGEN_MODE'] + + make_command = env['MLPERF_NVIDIA_RUN_COMMAND'] + + if make_command == "prebuild": + cmds.append(f"make prebuild NETWORK_NODE=SUT") + + if env['CM_MODEL'] == "resnet50": + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'data', 'imagenet') + if not os.path.exists(target_data_path): + cmds.append( + f"ln -sf {env['CM_DATASET_IMAGENET_PATH']} {target_data_path}") + + model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'ResNet50', + 'resnet50_v1.onnx') + + if not os.path.exists(os.path.dirname(model_path)): + cmds.append(f"mkdir -p {os.path.dirname(model_path)}") + + if not os.path.exists(model_path): + cmds.append( + f"ln -sf {env['CM_ML_MODEL_FILE_WITH_PATH']} {model_path}") + model_name = "resnet50" + + elif "bert" in env['CM_MODEL']: + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'data', 'squad') + if not os.path.exists(target_data_path): + cmds.append("make download_data BENCHMARKS='bert'") + + fp32_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'bert', + 'bert_large_v1_1.onnx') + int8_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'bert', + 'bert_large_v1_1_fake_quant.onnx') + vocab_path = os.path.join( + 
env['MLPERF_SCRATCH_PATH'], + 'models', + 'bert', + 'vocab.txt') + + if not os.path.exists(os.path.dirname(fp32_model_path)): + cmds.append(f"mkdir -p {os.path.dirname(fp32_model_path)}") + + if not os.path.exists(fp32_model_path): + cmds.append( + f"ln -sf {env['CM_ML_MODEL_BERT_LARGE_FP32_PATH']} {fp32_model_path}") + if not os.path.exists(int8_model_path): + cmds.append( + f"ln -sf {env['CM_ML_MODEL_BERT_LARGE_INT8_PATH']} {int8_model_path}") + if not os.path.exists(vocab_path): + cmds.append( + f"ln -sf {env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH']} {vocab_path}") + model_name = "bert" + model_path = fp32_model_path + + elif "stable-diffusion" in env["CM_MODEL"]: + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'data', 'coco', 'SDXL') + tsv_file = os.path.join(target_data_path, "captions_5k_final.tsv") + if os.path.exists(tsv_file): + with open(tsv_file, "r") as file: + line_count = sum(1 for line in file) + if env.get('CM_MLPERF_SUBMISSION_GENERATION_STYLE', '') == 'full': + if line_count < 5000: + shutil.rmtree(target_data_path) + if not os.path.exists(tsv_file): + os.makedirs(target_data_path, exist_ok=True) + # cmds.append("make download_data BENCHMARKS='stable-diffusion-xl'") + env['CM_REQUIRE_COCO2014_DOWNLOAD'] = 'yes' + cmds.append( + f"cp -r \\$CM_DATASET_PATH_ROOT/captions/captions.tsv {target_data_path}/captions_5k_final.tsv") + cmds.append( + f"cp -r \\$CM_DATASET_PATH_ROOT/latents/latents.pt {target_data_path}/latents.pt") + fp16_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'SDXL', + 'official_pytorch', + 'fp16', + 'stable_diffusion_fp16') + + if not os.path.exists(os.path.dirname(fp16_model_path)): + cmds.append(f"mkdir -p {os.path.dirname(fp16_model_path)}") + + if not os.path.exists(fp16_model_path): + if os.path.islink(fp16_model_path): + cmds.append(f"rm -f {fp16_model_path}") + env['CM_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes' + cmds.append(f"cp -r \\$SDXL_CHECKPOINT_PATH {fp16_model_path}") + + model_name = "stable-diffusion-xl" + model_path = fp16_model_path + + elif "3d-unet" in env['CM_MODEL']: + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'data', + 'KiTS19', + 'kits19', + 'data') + target_data_path_base_dir = os.path.dirname(target_data_path) + if not os.path.exists(target_data_path_base_dir): + cmds.append(f"mkdir -p {target_data_path_base_dir}") + + inference_cases_json_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'data', 'KiTS19', 'inference_cases.json') + calibration_cases_json_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'data', 'KiTS19', 'calibration_cases.json') + + if not os.path.exists(target_data_path) or not os.path.exists( + inference_cases_json_path) or not os.path.exists(calibration_cases_json_path): + # cmds.append(f"ln -sf {env['CM_DATASET_PATH']} {target_data_path}") + cmds.append("make download_data BENCHMARKS='3d-unet'") + + model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + '3d-unet-kits19', + '3dUNetKiTS19.onnx') + model_name = "3d-unet" + + elif "rnnt" in env['CM_MODEL']: + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'data', + 'LibriSpeech', + 'dev-clean') + target_data_path_base_dir = os.path.dirname(target_data_path) + if not os.path.exists(target_data_path_base_dir): + cmds.append(f"mkdir -p {target_data_path_base_dir}") + if not os.path.exists(target_data_path): + # cmds.append(f"ln -sf {env['CM_DATASET_LIBRISPEECH_PATH']} {target_data_path}") + cmds.append("make download_data BENCHMARKS='rnnt'") + + model_path = os.path.join( 
+ env['MLPERF_SCRATCH_PATH'], + 'models', + 'rnn-t', + 'DistributedDataParallel_1576581068.9962234-epoch-100.pt') + model_name = "rnnt" + + elif "pdlrm" in env['CM_MODEL']: + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'data', 'criteo') + if not os.path.exists(target_data_path): + cmds.append( + f"ln -sf {env['CM_DATASET_PREPROCESSED_PATH']} {target_data_path}") + + model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'dlrm', + 'tb00_40M.pt') + if not os.path.exists(os.path.dirname(model_path)): + cmds.append(f"mkdir -p {os.path.dirname(model_path)}") + + if not os.path.exists(model_path): + cmds.append( + f"ln -sf {env['CM_ML_MODEL_FILE_WITH_PATH']} {model_path}") + model_name = "dlrm" + + elif "dlrm-v2" in env['CM_MODEL']: + model_name = "dlrm-v2" + + elif env['CM_MODEL'] == "retinanet": + # print(env) + dataset_path = env['CM_DATASET_OPENIMAGES_PATH'] + # return {'return': 1, 'error': 'error'} + + annotations_path = env['CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH'] + target_data_path_dir = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'data', 'open-images-v6-mlperf') + if not os.path.exists(target_data_path_dir): + cmds.append(f"mkdir -p {target_data_path_dir}") + target_data_path = os.path.join(target_data_path_dir, 'annotations') + if not os.path.exists(target_data_path): + cmds.append(f"ln -sf {annotations_path} {target_data_path}") + + target_data_path_dir = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'data', + 'open-images-v6-mlperf', + 'validation') + if not os.path.exists(target_data_path_dir): + cmds.append(f"mkdir -p {target_data_path_dir}") + target_data_path = os.path.join(target_data_path_dir, 'data') + if not os.path.exists(target_data_path): + cmds.append(f"ln -sf {dataset_path} {target_data_path}") + + calibration_dataset_path = env['CM_OPENIMAGES_CALIBRATION_DATASET_PATH'] + target_data_path_dir = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'data', + 'open-images-v6-mlperf', + 'calibration', + 'train') + if not os.path.exists(target_data_path_dir): + cmds.append(f"mkdir -p {target_data_path_dir}") + target_data_path = os.path.join(target_data_path_dir, 'data') + if not os.path.exists(target_data_path): + cmds.append( + f"ln -sf {calibration_dataset_path} {target_data_path}") + + preprocessed_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], 'preprocessed_data') + target_model_path_dir = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'retinanet-resnext50-32x4d') + if not os.path.exists(target_model_path_dir): + cmds.append(f"mkdir -p {target_model_path_dir}") + model_path = os.path.join( + target_model_path_dir, + 'retinanet-fpn-torch2.1-postprocessed.onnx') + alt_model_path = os.path.join( + target_model_path_dir, + 'retinanet-fpn-torch2.2-postprocessed.onnx') + if not os.path.exists(model_path) and os.path.exists(alt_model_path): + cmds.append(f"ln -s {alt_model_path} {model_path}") + + model_name = "retinanet" + + elif "gptj" in env['CM_MODEL']: + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'data', + 'cnn-daily-mail', + 'cnn_eval.json') + if not os.path.exists(target_data_path): + cmds.append("make download_data BENCHMARKS='gptj'") + + fp32_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'GPTJ-6B', + 'checkpoint-final') + fp8_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'GPTJ-6B', + 'fp8-quantized-ammo', + env['CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX']) + vocab_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'bert', + 'vocab.txt') 
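+
+        # Layout note: every GPT-J artifact resolved here lives under
+        # MLPERF_SCRATCH_PATH. The FP32 checkpoint ('checkpoint-final') is
+        # only the input for quantization; the harness runs from the FP8
+        # directory, whose name comes from CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX
+        # (set by the version variations in _cm.yaml), and model_path is
+        # pointed at it below.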
+ + if not os.path.exists(os.path.dirname(fp32_model_path)): + cmds.append(f"mkdir -p {os.path.dirname(fp32_model_path)}") + if not os.path.exists(os.path.dirname(fp8_model_path)): + cmds.append(f"mkdir -p {os.path.dirname(fp8_model_path)}") + + if not os.path.exists(fp32_model_path): + # download via prehook_deps + env['CM_REQUIRE_GPTJ_MODEL_DOWNLOAD'] = 'yes' + if make_command == "build_engine": + cmds.append( + f"cp -r $CM_ML_MODEL_FILE_WITH_PATH {fp32_model_path}") + + model_name = "gptj" + model_path = fp8_model_path + + elif "llama2" in env["CM_MODEL"]: + # path to which the data file is present + target_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'preprocessed_data', + 'open_orca') + # path to the dataset file + target_data_file_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'preprocessed_data', + 'open_orca', + 'open_orca_gpt4_tokenized_llama.sampled_24576.pkl') + tmp_tp_size = env['CM_NVIDIA_TP_SIZE'] + if tmp_tp_size == "1": + fp8_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'Llama2', + 'fp8-quantized-ammo', + f'llama2-70b-chat-hf-tp{tmp_tp_size}pp1-fp8-02072024') + else: + fp8_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'Llama2', + 'fp8-quantized-ammo', + f'llama2-70b-chat-hf-tp{tmp_tp_size}pp1-fp8') + if not os.path.exists(target_data_file_path): + if env.get('CM_NVIDIA_LLAMA_DATASET_FILE_PATH', '') == '': + return { + 'return': 1, 'error': 'Please specify the path to LLAMA2 dataset (pickle file)'} + if not os.path.exists(target_data_path): + cmds.append(f"mkdir {target_data_path}") + cmds.append( + f"ln -sf {env['CM_NVIDIA_LLAMA_DATASET_FILE_PATH']} {target_data_file_path}") + + model_name = "llama2-70b" + model_path = fp8_model_path + + # cmds.append(f"make prebuild") + if make_command == "download_model": + if not os.path.exists(model_path): + if "llama2" in env['CM_MODEL']: + if not os.path.exists(os.path.join(model_path, 'config.json')): + return { + 'return': 1, 'error': f'Quantised model absent - did not detect config.json in path {model_path}'} + else: + cmds.append(f"make download_model BENCHMARKS='{model_name}'") + elif "stable-diffusion" in env['CM_MODEL']: + folders = ["clip1", "clip2", "unetxl", "vae"] + for folder in folders: + onnx_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'SDXL', + 'onnx_models', + folder, + 'model.onnx') + if not os.path.exists(onnx_model_path): + env['CM_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes' + cmds.append( + f"make download_model BENCHMARKS='{model_name}'") + break + if scenario.lower() == "singlestream": + ammo_model_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'models', + 'SDXL', + 'ammo_models', + 'unetxl.int8', + 'unet.onnx') + if not os.path.exists(ammo_model_path): + env['CM_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes' + cmds.append( + f"make download_model BENCHMARKS='{model_name}'") + else: + return {'return': 0} + + elif make_command == "preprocess_data": + if env['CM_MODEL'] == "rnnt": + cmds.append( + f"rm -rf {os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'rnnt_dev_clean_500_raw')}") + cmds.append( + f"rm -rf {os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'rnnt_train_clean_512_wav')}") + if "llama2" in env["CM_MODEL"]: + # Preprocessing script in the inference results repo is not checking whether the preprocessed + # file is already there, so we are handling it here. 
+ target_preprocessed_data_path = os.path.join( + env['MLPERF_SCRATCH_PATH'], + 'preprocessed_data', + 'open_orca', + 'input_ids_padded.npy') + if not os.path.exists(target_preprocessed_data_path): + cmds.append(f"make preprocess_data BENCHMARKS='{model_name}'") + else: + cmds.append(f"make preprocess_data BENCHMARKS='{model_name}'") + + else: + scenario = scenario.lower() + + if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy": + test_mode = "AccuracyOnly" + elif env['CM_MLPERF_LOADGEN_MODE'] == "performance": + test_mode = "PerformanceOnly" + elif env['CM_MLPERF_LOADGEN_MODE'] == "compliance": + test_mode = "" + test_name = env.get( + 'CM_MLPERF_LOADGEN_COMPLIANCE_TEST', + 'test01').lower() + env['CM_MLPERF_NVIDIA_RUN_COMMAND'] = "run_audit_{}_once".format( + test_name) + make_command = "run_audit_{}_once".format(test_name) + else: + return {'return': 1, 'error': 'Unsupported mode: {}'.format( + env['CM_MLPERF_LOADGEN_MODE'])} + + run_config = '' + + target_qps = env.get('CM_MLPERF_LOADGEN_TARGET_QPS') + offline_target_qps = env.get('CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS') + server_target_qps = env.get('CM_MLPERF_LOADGEN_SERVER_TARGET_QPS') + if target_qps: + target_qps = int(float(target_qps)) + if scenario == "offline" and not offline_target_qps: + run_config += f" --offline_expected_qps={target_qps}" + elif scenario == "server" and not server_target_qps: + run_config += f" --server_target_qps={target_qps}" + + if offline_target_qps: + offline_target_qps = int(float(offline_target_qps)) + run_config += f" --offline_expected_qps={offline_target_qps}" + if server_target_qps: + server_target_qps = int(float(server_target_qps)) + run_config += f" --server_target_qps={server_target_qps}" + + target_latency = env.get('CM_MLPERF_LOADGEN_TARGET_LATENCY') + singlestream_target_latency = env.get( + 'CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY') + multistream_target_latency = env.get( + 'CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY') + if target_latency: + target_latency_ns = int(float(target_latency) * 1000000) + if scenario == "singlestream" and not singlestream_target_latency: + run_config += f" --single_stream_expected_latency_ns={target_latency_ns}" + elif scenario == "multistream" and not multistream_target_latency: + run_config += f" --multi_stream_expected_latency_ns={target_latency_ns}" + + if singlestream_target_latency: + singlestream_target_latency_ns = int( + float(singlestream_target_latency) * 1000000) + run_config += f" --single_stream_expected_latency_ns={singlestream_target_latency_ns}" + if multistream_target_latency: + multistream_target_latency_ns = int( + float(multistream_target_latency) * 1000000) + run_config += f" --multi_stream_expected_latency_ns={multistream_target_latency_ns}" + + high_accuracy = "99.9" in env['CM_MODEL'] + + config_ver_list = [] + + use_lon = env.get('CM_MLPERF_NVIDIA_HARNESS_LON') + if use_lon: + config_ver_list.append("lon_node") + # run_config += " --lon_node" + + maxq = env.get('CM_MLPERF_NVIDIA_HARNESS_MAXQ') + if maxq: + config_ver_list.append("maxq") + + if high_accuracy: + config_ver_list.append("high_accuracy") + + use_triton = env.get('CM_MLPERF_NVIDIA_HARNESS_USE_TRITON') + if use_triton: + run_config += " --use_triton " + config_ver_list.append("triton") + + if config_ver_list: + run_config += f" --config_ver={'_'.join(config_ver_list)}" + + user_conf_path = env.get('CM_MLPERF_USER_CONF') + if user_conf_path and env['CM_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness": + run_config += f" --user_conf_path={user_conf_path}" + + 
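+
+        # Each optional CM_MLPERF_NVIDIA_HARNESS_* variable below follows the
+        # same pattern: when it is set in the CM environment, append the
+        # matching " --<flag>=<value>" (or a boolean switch) to run_config,
+        # which is passed to Nvidia's Makefile harness via RUN_ARGS at the
+        # end of this function.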
mlperf_conf_path = env.get('CM_MLPERF_INFERENCE_CONF_PATH') + if mlperf_conf_path and env['CM_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness": + run_config += f" --mlperf_conf_path={mlperf_conf_path}" + + power_setting = env.get('CM_MLPERF_NVIDIA_HARNESS_POWER_SETTING') + if power_setting and env['CM_MLPERF_NVIDIA_HARNESS_RUN_MODE'] == "run_harness": + run_config += f" --power_setting={power_setting}" + + gpu_copy_streams = env.get('CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS') + if gpu_copy_streams: + run_config += f" --gpu_copy_streams={gpu_copy_streams}" + + gpu_inference_streams = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS') + if gpu_inference_streams: + run_config += f" --gpu_inference_streams={gpu_inference_streams}" + + dla_copy_streams = env.get('CM_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS') + if dla_copy_streams: + run_config += f" --dla_copy_streams={dla_copy_streams}" + + dla_inference_streams = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS') + if dla_inference_streams: + run_config += f" --dla_inference_streams={dla_inference_streams}" + + gpu_batch_size = env.get('CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE') + if gpu_batch_size: + run_config += f" --gpu_batch_size={gpu_batch_size}" + + dla_batch_size = env.get('CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE') + if dla_batch_size: + run_config += f" --dla_batch_size={dla_batch_size}" + + input_format = env.get('CM_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT') + if input_format: + run_config += f" --input_format={input_format}" + + performance_sample_count = env.get( + 'CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT') + if performance_sample_count: + run_config += f" --performance_sample_count={performance_sample_count}" + + devices = env.get('CM_MLPERF_NVIDIA_HARNESS_DEVICES') + if devices: + run_config += f" --devices={devices}" + + audio_batch_size = env.get('CM_MLPERF_NVIDIA_HARNESS_AUDIO_BATCH_SIZE') + if audio_batch_size: + run_config += f" --audio_batch_size={audio_batch_size}" + + disable_encoder_plugin = str( + env.get('CM_MLPERF_NVIDIA_HARNESS_DISABLE_ENCODER_PLUGIN', '')) + if disable_encoder_plugin and disable_encoder_plugin.lower() not in [ + "no", "false", "0", ""]: + run_config += " --disable_encoder_plugin" + + disable_beta1_smallk = str( + env.get('CM_MLPERF_NVIDIA_HARNESS_DISABLE_BETA1_SMALLK', '')) + if disable_beta1_smallk and disable_beta1_smallk.lower() in [ + "yes", "true", "1"]: + run_config += " --disable_beta1_smallk" + + workspace_size = env.get('CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE') + if workspace_size: + run_config += f" --workspace_size={workspace_size}" + + if env.get('CM_MLPERF_LOADGEN_LOGS_DIR'): + env['MLPERF_LOADGEN_LOGS_DIR'] = env['CM_MLPERF_LOADGEN_LOGS_DIR'] + + log_dir = env.get('CM_MLPERF_NVIDIA_HARNESS_LOG_DIR') + if log_dir: + run_config += f" --log_dir={log_dir}" + + use_graphs = str(env.get('CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS', '')) + if use_graphs and use_graphs.lower() not in ["no", "false", "0", ""]: + run_config += " --use_graphs" + + use_deque_limit = str( + env.get('CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT')) + if use_deque_limit and use_deque_limit.lower() not in [ + "no", "false", "0"]: + run_config += " --use_deque_limit" + + deque_timeout_usec = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC') + if deque_timeout_usec: + run_config += f" --deque_timeout_usec={deque_timeout_usec}" + + use_cuda_thread_per_device = str( + env.get('CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE', '')) + if use_cuda_thread_per_device and use_cuda_thread_per_device.lower() 
not in [ + "no", "false", "0", ""]: + run_config += " --use_cuda_thread_per_device" + + run_infer_on_copy_streams = str( + env.get('CM_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS', '')) + if run_infer_on_copy_streams and run_infer_on_copy_streams.lower() not in [ + "no", "false", "0", ""]: + run_config += " --run_infer_on_copy_streams" + + start_from_device = str( + env.get( + 'CM_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE', + '')) + if start_from_device and start_from_device.lower() not in [ + "no", "false", "0", ""]: + run_config += " --start_from_device" + + end_on_device = str( + env.get( + 'CM_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE', + '')) + if end_on_device and end_on_device.lower() not in [ + "no", "false", "0", ""]: + run_config += " --end_on_device" + + max_dlas = env.get('CM_MLPERF_NVIDIA_HARNESS_MAX_DLAS') + if max_dlas: + run_config += f" --max_dlas={max_dlas}" + + graphs_max_seqlen = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN') + if graphs_max_seqlen: + run_config += f" --graphs_max_seqlen={graphs_max_seqlen}" + + num_issue_query_threads = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS') + if num_issue_query_threads: + run_config += f" --num_issue_query_threads={num_issue_query_threads}" + + soft_drop = env.get('CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP') + if soft_drop: + run_config += f" --soft_drop={soft_drop}" + + use_small_tile_gemm_plugin = str( + env.get('CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN', '')) + if use_small_tile_gemm_plugin and use_small_tile_gemm_plugin.lower() not in [ + "no", "false", "0", ""]: + run_config += f" --use_small_tile_gemm_plugin" + + audio_buffer_num_lines = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES') + if audio_buffer_num_lines: + run_config += f" --audio_buffer_num_lines={audio_buffer_num_lines}" + + use_fp8 = str(env.get('CM_MLPERF_NVIDIA_HARNESS_USE_FP8', '')) + if use_fp8 and use_fp8.lower() not in ["no", "false", "0", ""]: + run_config += f" --use_fp8" + + if "llama2" in env["CM_MODEL"]: + run_config += f" --fp8_quant_model_path={fp8_model_path}" + run_config += f" --tensor_parallelism={tmp_tp_size}" + + enable_sort = env.get('CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT') + if enable_sort and enable_sort.lower() not in ["no", "false", "0"]: + run_config += f" --enable_sort" + + sdxl_server_batcher_time_limit = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_SDXL_SERVER_BATCHER_TIME_LIMIT') + if sdxl_server_batcher_time_limit: + run_config += f" --sdxl_batcher_time_limit {sdxl_server_batcher_time_limit}" + + num_sort_segments = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS') + if num_sort_segments: + run_config += f" --num_sort_segments={num_sort_segments}" + + embedding_weights_on_gpu_part = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART', '') + if embedding_weights_on_gpu_part != '': + run_config += f" --embedding_weights_on_gpu_part={embedding_weights_on_gpu_part}" + + num_warmups = env.get('CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS', '') + if num_warmups != '': + run_config += f" --num_warmups={num_warmups}" + + skip_postprocess = str( + env.get( + 'CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS', + '')) + if skip_postprocess and skip_postprocess.lower() not in [ + "no", "false", "0", ""]: + run_config += f" --skip_postprocess" + + if test_mode: + test_mode_string = " --test_mode={}".format(test_mode) + else: + test_mode_string = "" + + extra_build_engine_options_string = env.get( + 'CM_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS', '') + + extra_run_options_string = env.get( + 
'CM_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS', + '') # will be ignored during build engine + + if "stable-diffusion" in env["CM_MODEL"]: + extra_build_engine_options_string += f" --model_path {os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'SDXL/')}" + + run_config += " --no_audit_verify" + + cmds.append(f"make {make_command} RUN_ARGS=' --benchmarks={model_name} --scenarios={scenario} {test_mode_string} {run_config} {extra_build_engine_options_string} {extra_run_options_string}'") + + run_cmd = " && ".join(cmds) + env['CM_MLPERF_RUN_CMD'] = run_cmd + env['CM_RUN_CMD'] = run_cmd + env['CM_RUN_DIR'] = env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] + +# print(env) + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/run.sh new file mode 100644 index 000000000..ddcd0b550 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-nvidia/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then + cd ${CM_RUN_DIR} + cmd=${CM_RUN_CMD} + echo "${cmd}" + eval "${cmd}" + test $? -eq 0 || exit $? +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/README.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/README.md new file mode 100644 index 000000000..15766a227 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-qualcomm](https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-qualcomm) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md new file mode 100644 index 000000000..311b3b182 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/README_aws_dl2q.24xlarge.md @@ -0,0 +1,97 @@ +# MLPerf Inference Benchmarking on AWS dl2q.24xlarge instance using 8 QAIC Cloud AI 100 + +The `dl2q.24xlarge` instance is available in `us-west-2d`; it has 96 vCPUs and 768 GB of memory. + +[Deep Learning Base Qualcomm AMI (Amazon Linux 2) 20240110, ami-0799a42a111b1b87a](https://us-west-2.console.aws.amazon.com/ec2/home?region=us-west-2#LaunchInstances:ami=ami-0799a42a111b1b87a) +image from the Community AMIs is the recommended OS image, as it comes with the QAIC SDKs (both Apps and Platform) preinstalled.
+
+* A 300 GB root disk is recommended.
+
+
+## System setup
+```
+sudo yum install -y python38-devel git
+python3.8 -m pip install cmind
+cm pull repo mlcommons@cm4mlops
+cm run script --tags=get,python --version_min=3.8.1
+```
+
+## Bert-99
+
+### Quick performance run
+```
+cm run script --tags=generate-run-cmds,inference,_performance-only --device=qaic \
+--backend=glow --scenario=Offline --implementation=kilt --model=bert-99 \
+--test_query_count=40000 --precision=uint8 --rerun --quiet \
+--adr.mlperf-inference-implementation.tags=_loadgen-batch-size.4096,_dl2q.24xlarge \
+--adr.compiler.tags=gcc --execution-mode=test
+```
+
+### Full valid run
+```
+cm run script --tags=generate-run-cmds,inference,_submission --device=qaic \
+--backend=glow --scenario=Offline --implementation=kilt --model=bert-99 --precision=uint8 \
+--adr.mlperf-inference-implementation.tags=_loadgen-batch-size.4096,_dl2q.24xlarge \
+--rerun --quiet --execution-mode=valid
+```
+
+The expected performance is ~5700 QPS.
+The expected accuracy is ~90 (F1 score).
+* Use `--scenario=Server --server_target_qps=5200` to run the server scenario.
+
+
+## ResNet50
+
+(Optional)
+If you already have the ImageNet 2012 validation dataset downloaded, you can register it in CM as follows. This step is optional and avoids downloading the dataset from the public URL, which can be slow at times.
+```
+cm run script --tags=get,dataset,imagenet,original,_full --env.IMAGENET_PATH=`pwd`/imagenet-2012-val
+```
+
+### Quick performance run
+
+```
+cm run script --tags=generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \
+--scenario=Offline --implementation=kilt --model=resnet50 \
+--test_query_count=400000 --precision=uint8 --rerun --adr.compiler.tags=gcc \
+--adr.mlperf-inference-implementation.tags=_bs.8,_dl2q.24xlarge --execution-mode=test --quiet
+```
+
+### Full valid run
+
+```
+cm run script --tags=generate-run-cmds,inference,_submission --device=qaic --backend=glow \
+--scenario=Offline --implementation=kilt --model=resnet50 \
+--precision=uint8 --rerun --adr.compiler.tags=gcc \
+--adr.mlperf-inference-implementation.tags=_bs.8,_dl2q.24xlarge --execution-mode=valid --quiet
+```
+The expected performance is ~157500 QPS.
+The expected accuracy is 75.936%.
+
+* Use `--scenario=Server --server_target_qps=152000` to run the server scenario.
+
+
+## RetinaNet
+
+### Quick performance run
+
+```
+cm run script --tags=generate-run-cmds,inference,_performance-only --device=qaic --backend=glow \
+--scenario=Offline --implementation=kilt --model=retinanet --test_query_count=40000 --precision=uint8 \
+--rerun --quiet --adr.mlperf-inference-implementation.tags=_loadgen-batch-size.1,_dl2q.24xlarge,_bs.1 \
+--adr.compiler.tags=gcc --execution-mode=test
+```
+
+### Full valid run
+
+```
+cm run script --tags=generate-run-cmds,inference,_submission --device=qaic --backend=glow \
+--scenario=Offline --implementation=kilt --model=retinanet \
+--precision=uint8 --rerun --adr.compiler.tags=gcc --adr.dataset-preprocessed.tags=_custom-annotations \
+--adr.mlperf-inference-implementation.tags=_bs.1,_dl2q.24xlarge --execution-mode=valid --quiet
+```
+The expected performance is ~2200 QPS.
+The expected accuracy is 37.234 (mAP).
+
+* Use `--scenario=Server --server_target_qps=2050` to run the server scenario.
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/_cm.yaml
new file mode 100644
index 000000000..8de84ac08
--- /dev/null
+++ 
b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/_cm.yaml @@ -0,0 +1,775 @@ +# Identification of this CM script +alias: app-mlperf-inference-qualcomm +uid: eef1aca5d7c0470e +cache: false +can_force_cache: true + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf benchmarks" + + +# User-friendly tags to find this CM script +tags: + - reproduce + - mlcommons + - mlperf + - inference + - harness + - qualcomm-harness + - qualcomm + - kilt-harness + - kilt + +# Default environment +default_env: + CM_BATCH_COUNT: '1' + CM_BATCH_SIZE: '1' + CM_FAST_COMPILATION: 'yes' + CM_MLPERF_LOADGEN_SCENARIO: Offline + CM_MLPERF_LOADGEN_MODE: performance + CM_SKIP_PREPROCESS_DATASET: 'no' + CM_SKIP_MODEL_DOWNLOAD: 'no' + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: kilt + CM_MLPERF_SKIP_RUN: 'no' + CM_KILT_REPO_URL: https://github.com/GATEOverflow/kilt-mlperf + CM_QAIC_DEVICES: "0" + kilt_max_wait_abs: 10000 + verbosity: 0 + loadgen_trigger_cold_run: 0 + +env: + CM_CALL_MLPERF_RUNNER: 'no' + +# Map script inputs to environment variables +input_mapping: + count: CM_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: CM_MLPERF_CONF + mode: CM_MLPERF_LOADGEN_MODE + output_dir: CM_MLPERF_OUTPUT_DIR + performance_sample_count: CM_MLPERF_PERFORMANCE_SAMPLE_COUNT + scenario: CM_MLPERF_LOADGEN_SCENARIO + user_conf: CM_MLPERF_USER_CONF + devices: CM_QAIC_DEVICES + skip_preprocess: CM_SKIP_PREPROCESS_DATASET + skip_preprocessing: CM_SKIP_PREPROCESS_DATASET + target_qps: CM_MLPERF_LOADGEN_TARGET_QPS + offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS + target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY + singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + rerun: CM_RERUN + +new_state_keys: + - mlperf-inference-implementation + - CM_SUT_* + +# Env keys which are exposed to higher level scripts +new_env_keys: + - CM_MLPERF_* + - CM_DATASET_* + - CM_HW_NAME + - CM_ML_MODEL_* + - CM_MAX_EXAMPLES + - CM_IMAGENET_ACCURACY_DTYPE + - CM_SQUAD_ACCURACY_DTYPE + + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + - tags: get,git,repo + names: + - kilt-repo + update_tags_from_env_with_prefix: + _repo.: + - CM_KILT_REPO_URL + extra_cache_tags: kilt,kilt-repo + env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_KILT_CHECKOUT_PATH + + ######################################################################## + # Install MLPerf inference dependencies + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + + # Download MLPerf inference loadgen + - tags: get,mlcommons,inference,loadgen + names: + - inference-loadgen + + # Creates user conf for given SUT + - tags: generate,user-conf,mlperf,inference + names: + - user-conf-generator + + # Get MLPerf logging library + - tags: get,generic-python-lib,_mlperf_logging + names: + - mlperf-logging + + ######################################################################## + # Install ResNet50 model (ONNX) and ImageNet + + - enable_if_env: + CM_MODEL: + - resnet50 + skip_if_env: + CM_MLPERF_DEVICE: + - qaic + names: + - resnet50-model + - ml-model + tags: 
get,ml-model,resnet50,_fp32,_onnx,_from-tf + + - enable_if_env: + CM_MODEL: + - resnet50 + CM_MLPERF_DEVICE: + - qaic + tags: compile,qaic,model,_resnet50 + names: + - qaic-model-compiler + - resnet50-compiler + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + + - enable_if_env: + CM_MODEL: + - resnet50 + names: + - imagenet-preprocessed + - dataset-preprocessed + tags: get,dataset,imagenet,preprocessed,_for.resnet50,_NHWC,_full + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + + + + ######################################################################## + # Install bert dependencies + + - enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + names: + - bert-vocab + tags: get,squad-vocab + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + + - enable_if_env: + CM_MODEL: + - bert-99 + - bert-99.9 + names: + - squad-tokenized + tags: get,dataset,tokenized,squad,_raw + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + + ######################################################################## + # Install OpenImages + + - enable_if_env: + CM_MODEL: + - retinanet + CM_MLPERF_DEVICE: + - qaic + tags: compile,qaic,model,_retinanet + names: + - qaic-model-compiler + - retinanet-compiler + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + + - enable_if_env: + CM_MODEL: + - retinanet + names: + - openimages-preprocessed + - dataset-preprocessed + tags: get,dataset,preprocessed,openimages,_for.retinanet.onnx,_NCHW,_validation,_custom-annotations + update_tags_from_env_with_prefix1: #disabling now to prevent unnecessary preprocessing + _quant-scale.: + - CM_QAIC_MODEL_RETINANET_IMAGE_OFFSET + _quant-offset.: + - CM_QAIC_MODEL_RETINANET_IMAGE_SCALE + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + + + + +######################################################################## + # Install ML engines via CM + - enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + CM_MLPERF_DEVICE: + - cpu + tags: get,lib,onnxruntime,lang-cpp,_cpu + + - enable_if_env: + CM_MLPERF_BACKEND: + - onnxruntime + CM_MLPERF_DEVICE: + - gpu + tags: get,lib,onnxruntime,lang-cpp,_cuda + + +# Post dependencies to run this app including for power measurement +post_deps: + + - names: + - compile-program + tags: compile,cpp-program + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + + - names: + - runner + - mlperf-runner + skip_if_env: + CM_MLPERF_SKIP_RUN: + - 'yes' + - yes + tags: benchmark-mlperf + + - tags: save,mlperf,inference,state + names: + - save-mlperf-inference-state + +# Variations to customize dependencies +variations: + # Target devices + cpu: + group: device + default: true + env: + CM_MLPERF_DEVICE: cpu + kilt_backend_type: cpu + cuda: + group: device + env: + CM_MLPERF_DEVICE: gpu + CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + kilt_backend_type: gpu + qaic: + group: device + env: + CM_MLPERF_DEVICE: qaic + CM_MLPERF_DEVICE_LIB_NAMESPEC: QAic + kilt_backend_type: qaic + deps: + - tags: get,qaic,platform,sdk + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + - tags: get,lib,protobuf,_tag.v3.11.4 + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + - tags: set,device,mode,qaic + enable_if_env: + CM_QAIC_VC: + "on" + update_tags_from_env_with_prefix": + _vc.: + - CM_QAIC_VC + - tags: set,device,mode,qaic,_ecc + enable_if_env: + CM_QAIC_ECC: + "yes" + + tensorrt: + group: framework + env: + CM_MLPERF_BACKEND: tensorrt + device: tensorrt + CM_MLPERF_BACKEND_NAME: TensorRT + + # ML engine + onnxruntime: + group: framework + default: true + env: + device: onnxrt + CM_MLPERF_BACKEND: onnxruntime + CM_MLPERF_BACKEND_LIB_NAMESPEC: onnxruntime + + glow: + group: framework 
+ env: + device: qaic + CM_MLPERF_BACKEND: glow + CM_MLPERF_BACKEND_LIB_NAMESPEC: QAic + + bs.#: + group: batch-size + env: + kilt_model_batch_size: "#" + adr: + qaic-model-compiler: + tags: "_bs.#" + + bs.0: + group: batch-size + env: + kilt_model_batch_size: "1" + + # Reference MLPerf models + resnet50: + group: model + default: true + env: + CM_MODEL: resnet50 + kilt_model_name: resnet50 + kilt_input_count: 1 + kilt_output_count: 1 + kilt_input_format: "FLOAT32,-1,224,224,3" + kilt_output_format: "INT64,-1" + dataset_imagenet_preprocessed_input_square_side: 224 + ml_model_has_background_class: "YES" + ml_model_image_height: 224 + loadgen_buffer_size: 1024 + loadgen_dataset_size: 50000 + CM_BENCHMARK: STANDALONE_CLASSIFICATION + + resnet50,uint8: + env: + kilt_input_format: "UINT8,-1,224,224,3" + kilt_device_qaic_skip_stage: convert + CM_IMAGENET_ACCURACY_DTYPE: int8 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + + bert-99,qaic: + deps: + - tags: compile,qaic,model,_bert-99,_pc.99.9980 + names: + - qaic-model-compiler + - bert-99-compiler + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + env: + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int32 + CM_ML_MODEL_INPUTS_DATA_TYPE: int8,fp16 + + bert-99.9,qaic: + deps: + - tags: compile,qaic,model,_bert-99.9 + names: + - qaic-model-compiler + - bert-99.9-compiler + skip_if_env: + CM_MLPERF_SKIP_RUN: + - yes + env: + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int32 + CM_ML_MODEL_INPUTS_DATA_TYPE: fp16 + + retinanet: + group: model + base: + - bs.1 + env: + CM_MODEL: retinanet + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth" + kilt_model_name: retinanet + kilt_input_count: 1 + #kilt_model_disable_nms: '' + kilt_model_max_detections: 600 + kilt_output_count: 1 + kilt_input_format: "FLOAT32,-1,3,800,800" + kilt_output_format: "INT64,-1" + dataset_imagenet_preprocessed_input_square_side: 224 + ml_model_image_height: 800 + ml_model_image_width: 800 + loadgen_buffer_size: 64 + loadgen_dataset_size: 24576 + CM_BENCHMARK: STANDALONE_OBJECT_DETECTION + + deps: + - tags: get,generic-python-lib,_Pillow + - tags: get,generic-python-lib,_torch + - tags: get,generic-python-lib,_torchvision + - tags: get,generic-python-lib,_opencv-python + - tags: get,generic-python-lib,_numpy + - tags: get,generic-python-lib,_pycocotools + + retinanet,qaic,uint8: + env: + kilt_device_qaic_skip_stage: 'convert' + kilt_input_format: "UINT8,1,3,800,800" + kilt_output_format: "INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,1000:INT8,1,4,1000:INT8,14,1000:INT8,1,4,1000:INT8,1,4,1000:INT8,1,4,1000" + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: "https://github.com/mlcommons/inference_results_v3.1/blob/main/closed/Qualcomm/calibration.md" + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + + + bert_: + deps: + - tags: get,generic-python-lib,_transformers + - tags: get,generic-python-lib,_safetensors + - tags: get,generic-python-lib,_onnx + env: + CM_BENCHMARK: STANDALONE_BERT + kilt_model_name: bert + kilt_model_seq_length: 384 + 
kilt_model_bert_variant: BERT_PACKED + kilt_input_format: "INT64,1,384:INT64,1,8:INT64,1,384:INT64,1,384" + kilt_output_format: "FLOAT32,1,384:FLOAT32,1,384" + dataset_squad_tokenized_max_seq_length: 384 + loadgen_buffer_size: 10833 + loadgen_dataset_size: 10833 + + bert_,qaic: + default_variations: + batch-size: bs.0 + env: + kilt_model_batch_size: 1 + kilt_input_format: "UINT32,1,384:UINT32,1,8:UINT32,1,384:UINT32,1,384" + kilt_input_formata: "UINT32,1,384:UINT32,1,384:UINT32,1,384" + kilt_output_formatia: "UINT8,1,384:UINT8,1,384" + kilt_device_qaic_skip_stage: 'convert' + + standalone: + group: run-mode + default: true + env: + CM_RUN_MODE: standalone + + network-server: + group: run-mode + env: + CM_RUN_MODE: network-server + + network-client: + group: run-mode + env: + CM_RUN_MODE: network-client + + bert_,network-server: + env: + CM_BENCHMARK: NETWORK_BERT_SERVER + + bert_,network-client: + env: + CM_BENCHMARK: NETWORK_BERT_CLIENT + + bert_,singlestream: + env: + kilt_model_batch_size: 1 + + bert-99: + group: model + base: + - bert_ + env: + CM_MODEL: bert-99 + CM_SQUAD_ACCURACY_DTYPE: float32 + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx" + + bert-99.9: + group: model + base: + - bert_ + env: + CM_MODEL: bert-99.9 + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://zenodo.org/record/3733910/files/model.onnx" + + loadgen-batch-size.#: + group: loadgen-batch-size + env: + CM_MLPERF_LOADGEN_BATCH_SIZE: "#" + + bert-99,offline: + default_variations: + loadgen-batch-size: loadgen-batch-size.4096 + + bert-99.9,offline: + default_variations: + loadgen-batch-size: loadgen-batch-size.4096 + + activation-count.#: + env: + CM_MLPERF_QAIC_ACTIVATION_COUNT: "#" + #CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: "activation_count.#" + + maxq: + group: power-mode + env: + CM_MLPERF_NVIDIA_HARNESS_MAXQ: yes + + maxn: + group: power-mode + env: + CM_MLPERF_NVIDIA_HARNESS_MAXN: yes + + singlestream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: SingleStream + adr: + qaic-model-compiler: + tags: _singlestream + singlestream,resnet50: + default_variations: + batch-size: bs.1 + + singlestream,retinanet: + default_variations: + batch-size: bs.1 + + multistream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: MultiStream + adr: + qaic-model-compiler: + tags: _multistream + offline: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Offline + adr: + qaic-model-compiler: + tags: _offline + server: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Server + adr: + qaic-model-compiler: + tags: _server + + uint8: + group: precision + adr: + dataset-preprocessed: + tags: _uint8,_rgb8 + fp16: + group: precision + fp32: + group: precision + adr: + dataset-preprocessed: + tags: _float32,_rgb32 + env: + CM_IMAGENET_ACCURACY_DTYPE: float32 + + nsp.14: + group: nsp + adr: + qaic-model-compiler: + tags: _nsp.14 + + nsp.16: + group: nsp + base: + - pro + adr: + qaic-model-compiler: + tags: _nsp.14 + + nsp.#: + group: nsp + adr: + qaic-model-compiler: + tags: _nsp.# + + + dl2q.24xlarge: + group: sut + base: + - nsp.14 + env: + CM_QAIC_DEVICES: "0,1,2,3,4,5,6,7" + qaic_queue_length: 4 + + dl2q.24xlarge,singlestream: + env: + CM_QAIC_DEVICES: 0 + qaic_activation_count: "1" + + dl2q.24xlarge,resnet50,offline: + default_variations: + batch-size: bs.8 + env: + qaic_activation_count: "3" + + dl2q.24xlarge,bert-99.9,offline: + env: + qaic_activation_count: "7" + + dl2q.24xlarge,bert-99,offline: + env: + 
qaic_activation_count: "14" + + dl2q.24xlarge,retinanet,offline: + env: + qaic_activation_count: "14" + + dl2q.24xlarge,resnet50,server: + default_variations: + batch-size: bs.8 + env: + qaic_activation_count: "3" + + dl2q.24xlarge,bert-99.9,server: + env: + qaic_activation_count: "7" + + dl2q.24xlarge,retinanet,server: + default_variations: + batch-size: bs.1 + env: + qaic_activation_count: "14" + + dl2q.24xlarge,resnet50,multistream: + default_variations: + batch-size: bs.1 + env: + qaic_activation_count: "1" + + pro: + env: + qaic_queue_length: 10 + + num-devices.4: + env: + CM_QAIC_DEVICES: "0,1,2,3" + + pro,num-devices.4,singlestream: + env: + CM_QAIC_DEVICES: "0" + qaic_activation_count: "1" + + pro,num-devices.4,resnet50,offline: + default_variations: + batch-size: bs.8 + env: + qaic_activation_count: "4" + deps: + - tags: set,device,qaic,_vc.16 + + pro,num-devices.4,bert-99,offline: + default_variations: + loadgen-batch-size: loadgen-batch-size.4096 + env: + qaic_activation_count: "16" + deps: + - tags: set,device,qaic,_vc.15 + + pro,num-devices.4,bert-99.9,offline: + default_variations: + loadgen-batch-size: loadgen-batch-size.4096 + env: + qaic_activation_count: "8" + deps: + - tags: set,device,qaic,_vc.13 + + pro,num-devices.4,bert-99,server: + default_variations: + loadgen-batch-size: loadgen-batch-size.1024 + env: + qaic_activation_count: "16" + deps: + - tags: set,device,qaic,_vc.13 + + pro,num-devices.4,bert-99.9,server: + default_variations: + loadgen-batch-size: loadgen-batch-size.1024 + env: + qaic_activation_count: "8" + deps: + - tags: set,device,qaic,_vc.13 + + pro,num-devices.4,retinanet,offline: + default_variations: + batch-size: bs.1 + env: + qaic_activation_count: "16" + deps: + - tags: set,device,qaic,_vc.17 + + pro,num-devices.4,resnet50,server: + default_variations: + batch-size: bs.8 + env: + qaic_activation_count: "4" + + pro,num-devices.4,retinanet,server: + default_variations: + batch-size: bs.1 + env: + qaic_activation_count: "16" + + rb6: + group: sut + base: + - nsp.9 + env: + CM_QAIC_DEVICES: "0" + qaic_queue_length: 6 + + rb6,singlestream: + env: + qaic_activation_count: "1" + + rb6,resnet50,offline: + default_variations: + batch-size: bs.8 + env: + qaic_activation_count: "2" + + rb6,resnet50,multistream: + default_variations: + batch-size: bs.4 + env: + qaic_activation_count: "2" + + rb6,bert-99,offline: + env: + qaic_activation_count: "9" + + rb6,retinanet,offline: + env: + qaic_activation_count: "9" + + rb6,retinanet,multistream: + env: + qaic_activation_count: "8" + +docker: + real_run: False diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/customize.py new file mode 100644 index 000000000..68343b491 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/customize.py @@ -0,0 +1,247 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is 
not supported in this script yet'} + env = i['env'] + + if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + return {'return': 0} + + if 'CM_MODEL' not in env: + return { + 'return': 1, 'error': 'Please select a variation specifying the model to run'} + if 'CM_MLPERF_BACKEND' not in env: + return {'return': 1, + 'error': 'Please select a variation specifying the backend'} + if 'CM_MLPERF_DEVICE' not in env: + return { + 'return': 1, 'error': 'Please select a variation specifying the device to run on'} + + kilt_root = env['CM_KILT_CHECKOUT_PATH'] + + print(f"Harness Root: {kilt_root}") + + source_files = [] + env['CM_SOURCE_FOLDER_PATH'] = env['CM_KILT_CHECKOUT_PATH'] + + env['kilt_model_root'] = env.get('CM_ML_MODEL_FILE_WITH_PATH') + + if env.get('CM_MLPERF_LOADGEN_BATCH_SIZE', '') != '': + env['kilt_model_batch_size'] = env['CM_MLPERF_LOADGEN_BATCH_SIZE'] + + if env.get('CM_QAIC_DEVICES', '') != '': + env['kilt_device_ids'] = env['CM_QAIC_DEVICES'] + + if '+ CXXFLAGS' not in env: + env['+ CXXFLAGS'] = [] + + if '+CPLUS_INCLUDE_PATH' not in env: + env['+CPLUS_INCLUDE_PATH'] = [] + + if env['CM_MLPERF_DEVICE'] == "qaic": + env['kilt_model_root'] = os.path.dirname( + env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH']) + + if env.get('CM_MODEL') == "resnet50": + env['dataset_imagenet_preprocessed_subset_fof'] = env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST'] + env['dataset_imagenet_preprocessed_dir'] = env['CM_DATASET_PREPROCESSED_PATH'] + + elif "bert" in env.get('CM_MODEL'): + env['dataset_squad_tokenized_max_seq_length'] = env['CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH'] + env['dataset_squad_tokenized_root'] = env['CM_DATASET_SQUAD_TOKENIZED_ROOT'] + env['dataset_squad_tokenized_input_ids'] = os.path.basename( + env['CM_DATASET_SQUAD_TOKENIZED_INPUT_IDS']) + env['dataset_squad_tokenized_input_mask'] = os.path.basename( + env['CM_DATASET_SQUAD_TOKENIZED_INPUT_MASK']) + env['dataset_squad_tokenized_segment_ids'] = os.path.basename( + env['CM_DATASET_SQUAD_TOKENIZED_SEGMENT_IDS']) + + elif "retinanet" in env.get('CM_MODEL'): + env['kilt_prior_bin_path'] = os.path.join( + kilt_root, "plugins", "nms-abp", "data") + env['kilt_object_detection_preprocessed_subset_fof'] = os.path.basename( + env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST']) + env['kilt_object_detection_preprocessed_dir'] = env['CM_DATASET_PREPROCESSED_PATH'] + env['+ CXXFLAGS'].append("-DMODEL_RX50") + env['+ CXXFLAGS'].append("-DSDK_1_11_X") + + loc_offset = env.get('CM_QAIC_MODEL_RETINANET_LOC_OFFSET') + if loc_offset: + env['+ CXXFLAGS'].append("-DMODEL_RX50") + + keys = ['LOC_OFFSET', 'LOC_SCALE', 'CONF_OFFSET', 'CONF_SCALE'] + + if env.get('CM_RETINANET_USE_MULTIPLE_SCALES_OFFSETS', '') == 'yes': + env['+ CXXFLAGS'].append("-DUSE_MULTIPLE_SCALES_OFFSETS=1") + for j in range(0, 4): + keys.append(f'LOC_OFFSET{j}') + keys.append(f'LOC_SCALE{j}') + keys.append(f'CONF_OFFSET{j}') + keys.append(f'CONF_SCALE{j}') + + for key in keys: + value = env.get('CM_QAIC_MODEL_RETINANET_' + key, '') + if value != '': + env['+ CXXFLAGS'].append(f" -D{key}_={value} ") + + if env.get('CM_BENCHMARK', '') == 'NETWORK_BERT_SERVER': + source_files.append( + os.path.join( + kilt_root, + "benchmarks", + "network", + "bert", + "server", + "pack.cpp")) + source_files.append( + os.path.join( + kilt_root, + "benchmarks", + "network", + "bert", + "server", + "server.cpp")) + env['+ CXXFLAGS'].append("-DNETWORK_DIVISION=1") + elif env.get('CM_BENCHMARK', '') == 'NETWORK_BERT_CLIENT': + # source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", 
"client", "pack.cpp")) + # env['+CPLUS_INCLUDE_PATH'].append(kilt_root) + # source_files.append(os.path.join(kilt_root, "benchmarks", "network", "bert", "client", "client.cpp")) + env['+ CXXFLAGS'].append("-DNETWORK_DIVISION") + elif env.get('CM_BENCHMARK', '') == 'STANDALONE_BERT': + source_files.append( + os.path.join( + kilt_root, + "benchmarks", + "standalone", + "bert", + "pack.cpp")) + + script_path = i['run_script_input']['path'] + if env['CM_MODEL'] == "retinanet": + env['CM_DATASET_LIST'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + + for file in os.listdir(env['CM_SOURCE_FOLDER_PATH']): + if file.endswith(".c") or file.endswith(".cpp"): + source_files.append(file) + + if 'SERVER' not in env.get('CM_BENCHMARK', ''): + source_files.append( + os.path.join( + kilt_root, + "benchmarks", + "harness", + "harness.cpp")) + + # source_files.append(env['CM_QAIC_API_SRC_FILE']) + + env['+CPLUS_INCLUDE_PATH'].append(kilt_root) + env['+C_INCLUDE_PATH'].append(kilt_root) + + if env['CM_MLPERF_DEVICE'] == 'gpu': + env['+C_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) + env['+CPLUS_INCLUDE_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) + env['+LD_LIBRARY_PATH'].append(env['CM_CUDA_PATH_LIB']) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(env['CM_CUDA_PATH_INCLUDE']) + + elif env['CM_MLPERF_DEVICE'] == 'qaic': + source_files.append( + os.path.join( + kilt_root, + "devices", + "qaic", + "api", + "master", + "QAicInfApi.cpp")) + + print(f"Compiling the source files: {source_files}") + env['CM_CXX_SOURCE_FILES'] = ";".join(source_files) + + env['+ CXXFLAGS'].append("-std=c++17") + env['+ CXXFLAGS'].append("-fpermissive") + + env['+ CXXFLAGS'].append("-DKILT_CONFIG_FROM_ENV") + env['+ CXXFLAGS'].append("-DKILT_CONFIG_TRANSLATE_X") + env['+ CXXFLAGS'].append("-DKILT_BENCHMARK_" + env['CM_BENCHMARK']) + env['+ CXXFLAGS'].append("-DKILT_DEVICE_" + env['device'].upper()) + + # add preprocessor flag like "#define CM_MODEL_RESNET50" + # env['+ CXXFLAGS'].append('-DCM_MODEL_' + env['CM_MODEL'].upper()) + # add preprocessor flag like "#define CM_MLPERF_BACKEND_ONNXRUNTIME" + env['+ CXXFLAGS'].append('-DCM_MLPERF_BACKEND_' + + env['CM_MLPERF_BACKEND'].upper()) + # add preprocessor flag like "#define CM_MLPERF_DEVICE_CPU" + env['+ CXXFLAGS'].append('-DCM_MLPERF_DEVICE_' + + env['CM_MLPERF_DEVICE'].upper()) + + if '+ LDCXXFLAGS' not in env: + env['+ LDCXXFLAGS'] = [] + + env['+ LDCXXFLAGS'] += [ + "-lmlperf_loadgen", + "-lpthread", + "-ldl" + ] + # e.g. -lonnxruntime + if 'CM_MLPERF_BACKEND_LIB_NAMESPEC' in env: + env['+ LDCXXFLAGS'].append('-l' + + env['CM_MLPERF_BACKEND_LIB_NAMESPEC']) + # e.g. 
-lcudart + if 'CM_MLPERF_DEVICE_LIB_NAMESPEC' in env: + env['+ LDCXXFLAGS'].append('-l' + env['CM_MLPERF_DEVICE_LIB_NAMESPEC']) + + if '-DPRINT_NETWORK_DESCRIPTOR' in env['+ CXXFLAGS']: + env['+ LDCXXFLAGS'].append('-lprotobuf') + + env['CM_LINKER_LANG'] = 'CXX' + env['CM_RUN_DIR'] = env.get('CM_MLPERF_OUTPUT_DIR', os.getcwd()) + + if 'CM_MLPERF_CONF' not in env: + env['CM_MLPERF_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") + if 'CM_MLPERF_USER_CONF' not in env: + env['CM_MLPERF_USER_CONF'] = os.path.join( + env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "user.conf") + + # to LOADGEN_MLPERF_CONF + env['loadgen_mlperf_conf_path'] = env['CM_MLPERF_CONF'] + # to LOADGEN_USER_CONF + env['loadgen_user_conf_path'] = env['CM_MLPERF_USER_CONF'] + env['loadgen_scenario'] = env['CM_MLPERF_LOADGEN_SCENARIO'] + + loadgen_mode = env['CM_MLPERF_LOADGEN_MODE'] + if loadgen_mode == 'performance': + kilt_loadgen_mode = 'PerformanceOnly' + elif loadgen_mode == 'accuracy': + kilt_loadgen_mode = 'AccuracyOnly' + elif loadgen_mode == 'compliance': + kilt_loadgen_mode = 'PerformanceOnly' + else: + return {'return': 1, 'error': 'Unknown loadgen mode'} + env['loadgen_mode'] = kilt_loadgen_mode + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/run.sh new file mode 100644 index 000000000..ddcd0b550 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-qualcomm/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then + cd ${CM_RUN_DIR} + cmd=${CM_RUN_CMD} + echo "${cmd}" + eval "${cmd}" + test $? -eq 0 || exit $? +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/_cm.yaml new file mode 100644 index 000000000..75f460f37 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/_cm.yaml @@ -0,0 +1,308 @@ +# Identification of this CM script +alias: app-mlperf-inference-redhat +uid: 82c9bb3c222447ca +cache: false + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf benchmarks" + + +# User-friendly tags to find this CM script +tags: + - reproduce + - mlcommons + - mlperf + - inference + - harness + - redhat-harness + - redhat + +# Default environment +default_env: + CM_MLPERF_LOADGEN_SCENARIO: Offline + CM_MLPERF_LOADGEN_MODE: performance + CM_SKIP_PREPROCESS_DATASET: 'no' + CM_SKIP_MODEL_DOWNLOAD: 'no' + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: redhat_harness + CM_MLPERF_SKIP_RUN: 'no' + +env: + CM_CALL_MLPERF_RUNNER: 'no' + +# Map script inputs to environment variables +input_mapping: + count: CM_MLPERF_LOADGEN_QUERY_COUNT + max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + mlperf_conf: CM_MLPERF_CONF + mode: CM_MLPERF_LOADGEN_MODE + output_dir: CM_MLPERF_OUTPUT_DIR + performance_sample_count: CM_MLPERF_PERFORMANCE_SAMPLE_COUNT + scenario: CM_MLPERF_LOADGEN_SCENARIO + user_conf: CM_MLPERF_USER_CONF + skip_preprocess: CM_SKIP_PREPROCESS_DATASET + skip_preprocessing: CM_SKIP_PREPROCESS_DATASET + target_qps: CM_MLPERF_LOADGEN_TARGET_QPS + offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS + target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY + singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + rerun: CM_RERUN + results_repo: CM_MLPERF_INFERENCE_RESULTS_REPO + +new_state_keys: + - mlperf-inference-implementation + - CM_SUT_* + +# Env keys which are exposed to higher level scripts +new_env_keys: + - CM_MLPERF_* + - CM_DATASET_* + - CM_HW_NAME + - CM_ML_MODEL_* + - CM_MAX_EXAMPLES + - CM_IMAGENET_ACCURACY_DTYPE + - CM_SQUAD_ACCURACY_DTYPE + + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + + ######################################################################## + # Install MLPerf inference dependencies + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + + # Download MLPerf inference loadgen + - tags: get,mlcommons,inference,loadgen + names: + - inference-loadgen + + # Creates user conf for given SUT + - tags: generate,user-conf,mlperf,inference + names: + - user-conf-generator + + # Get MLPerf logging library + - tags: get,generic-python-lib,_mlperf_logging + names: + - mlperf-logging + + - tags: get,git,repo + names: + - inference-results + - inference-code + update_tags_from_env_with_prefix: + _repo.: + - CM_MLPERF_INFERENCE_RESULTS_REPO + env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO + extra_cache_tags: results,repo,mlperf + +# Post dependencies to run this app including for power measurement +post_deps: + + - names: + - runner + - mlperf-runner + skip_if_env: + CM_MLPERF_SKIP_RUN: + - 'yes' + - yes + tags: benchmark-mlperf + + - tags: save,mlperf,inference,state 
+      names:
+        - save-mlperf-inference-state
+
+# Variations to customize dependencies
+variations:
+  # Target devices
+  cpu:
+    group: device
+    default: true
+    env:
+      CM_MLPERF_DEVICE: cpu
+  cuda:
+    group: device
+    env:
+      CM_MLPERF_DEVICE: gpu
+      CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart
+
+  openshift:
+    group: backend
+    default: true
+    env:
+      CM_MLPERF_BACKEND: openshift
+
+  pytorch:
+    group: backend
+    env:
+      CM_MLPERF_BACKEND: pytorch
+
+  pytorch,cuda:
+    deps:
+      - tags: get,generic-python-lib,_torch_cuda
+
+  pytorch,cpu:
+    deps:
+      - tags: get,generic-python-lib,_torch
+
+  bs.#:
+    group: batch-size
+
+
+  # Reference MLPerf models
+  resnet50:
+    group: model
+    default: true
+    env:
+      CM_MODEL: resnet50
+
+  retinanet:
+    group: model
+    base:
+      - bs.1
+    env:
+      CM_MODEL: retinanet
+
+  bert_:
+    {}
+
+  bert-99:
+    group: model
+    base:
+      - bert_
+    env:
+      CM_MODEL: bert-99
+      CM_SQUAD_ACCURACY_DTYPE: float32
+
+  bert-99.9:
+    group: model
+    base:
+      - bert_
+    env:
+      CM_MODEL: bert-99.9
+
+  gptj_:
+    deps:
+      - tags: get,ml-model,gptj
+        names:
+          - gptj-model
+      - tags: get,dataset,cnndm,_validation
+
+  gptj-99:
+    group: model
+    base:
+      - gptj_
+    env:
+      CM_MODEL: gptj-99
+      CM_SQUAD_ACCURACY_DTYPE: float32
+
+  gptj-99.9:
+    group: model
+    base:
+      - gptj_
+    env:
+      CM_MODEL: gptj-99.9
+
+  llama2-70b_:
+    deps:
+      - tags: get,dataset,openorca,language-processing,original,_redhat
+    env:
+      CM_MLPERF_IMPLEMENTATION: redhat
+      CM_VLLM_SERVER_MODEL_NAME: NousResearch/Meta-Llama-3-8B-Instruct # assigned just for testing purpose
+
+  llama2-70b-99:
+    group: model
+    base:
+      - llama2-70b_
+    env:
+      CM_MODEL: llama2-70b-99
+
+  llama2-70b-99.9:
+    group: model
+    base:
+      - llama2-70b_
+    env:
+      CM_MODEL: llama2-70b-99.9
+
+  singlestream:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: SingleStream
+
+  singlestream,resnet50:
+    default_variations:
+      batch-size: bs.1
+
+  singlestream,retinanet:
+    default_variations:
+      batch-size: bs.1
+
+  multistream:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: MultiStream
+
+  offline:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: Offline
+
+  server:
+    group: loadgen-scenario
+    env:
+      CM_MLPERF_LOADGEN_SCENARIO: Server
+
+  uint8:
+    group: precision
+  fp16:
+    group: precision
+  fp32:
+    group: precision
+
+  r4.1-dev_default:
+    group: version
+    default: true
+    env:
+      CM_MLPERF_INFERENCE_RESULTS_REPO: https://github.com/mlcommons/inference_results_v4.0
+
+docker:
+  real_run: False
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/customize.py
new file mode 100644
index 000000000..e15e57d09
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/customize.py
@@ -0,0 +1,116 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+import shutil
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    if os_info['platform'] == 'windows':
+        return {'return': 1, 'error': 'Windows is not supported in this script yet'}
+    env = i['env']
+
+    if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
+        return {'return': 0}
+
+    if 'CM_MODEL' not in env:
+        return {
+            'return': 1, 'error': 'Please select a variation specifying the model to run'}
+    if 'CM_MLPERF_BACKEND' not in env:
+        return {'return': 1,
+                'error': 'Please select a variation specifying the backend'}
+    if 'CM_MLPERF_DEVICE' not in env:
+        return {
+            'return': 1, 'error': 'Please select a variation specifying the device to run on'}
+
+    r = get_run_cmd(env['CM_MODEL'], i)
+    if r['return'] > 0:
+        return r
+    run_cmd = r['run_cmd']
+    run_dir = r['run_dir']
+    print(run_cmd)
+    print(run_dir)
+    env['CM_MLPERF_RUN_CMD'] = run_cmd
+    env['CM_RUN_DIR'] = run_dir
+    env['CM_RUN_CMD'] = run_cmd
+
+    return {'return': 0}
+
+
+def get_run_cmd(model, i):
+    env = i['env']
+    if "gptj" in model:
+        scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
+        device = env['CM_MLPERF_DEVICE']
+        mode = env['CM_MLPERF_LOADGEN_MODE']
+        outdir = env['CM_MLPERF_OUTPUT_DIR']
+        mlperf_conf_path = env['CM_MLPERF_CONF']
+        user_conf_path = env['CM_MLPERF_USER_CONF']
+        api_server = env.get('CM_MLPERF_INFERENCE_API_SERVER', 'localhost')
+        model_path = env['GPTJ_CHECKPOINT_PATH']
+        dataset_path = env['CM_DATASET_CNNDM_EVAL_PATH']
+        precision = env['CM_MLPERF_MODEL_PRECISION']
+        if mode == "accuracy":
+            accuracy_string = " --accuracy "
+        else:
+            accuracy_string = ""
+
+        run_cmd = f"python3 -u main.py --scenario {scenario} --model-path {model_path} --api-server {api_server} --api-model-name gpt-j-cnn --mlperf-conf {mlperf_conf_path} {accuracy_string} --vllm --user-conf {user_conf_path} --dataset-path {dataset_path} --output-log-dir {outdir} --dtype float32 --device {device} "
+        submitter = "CTuning"
+        run_dir = os.path.join(
+            env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'],
+            "open",
+            submitter,
+            "code",
+            "gptj-99")
+
+        return {'return': 0, 'run_cmd': run_cmd, 'run_dir': run_dir}
+
+    if "llama2" in model:
+        scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
+        device = env['CM_MLPERF_DEVICE']
+        mode = env['CM_MLPERF_LOADGEN_MODE']
+        outdir = env['CM_MLPERF_OUTPUT_DIR']
+        mlperf_conf_path = env['CM_MLPERF_CONF']
+        user_conf_path = env['CM_MLPERF_USER_CONF']
+        api_server = env.get(
+            'CM_MLPERF_INFERENCE_API_SERVER',
+            'localhost:8000/v1')
+        api_model_name = env['CM_VLLM_SERVER_MODEL_NAME']
+        dataset_path = env['CM_DATASET_OPENORCA_PATH']
+        precision = env['CM_MLPERF_MODEL_PRECISION']
+        if mode == "accuracy":
+            accuracy_string = " --accuracy "
+        else:
+            accuracy_string = ""
+
+        run_cmd = f"python3 -u 'main.py' --scenario {scenario} --model-path {api_model_name} --api-model-name {api_model_name} --api-server {api_server} --mlperf-conf {mlperf_conf_path} {accuracy_string} --vllm --user-conf {user_conf_path} --dataset-path {dataset_path} --output-log-dir {outdir} --dtype float32 --device {device} "
+        submitter = "RedHat-Supermicro"
+        run_dir = os.path.join(
+            env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'],
+            "open",
+            submitter,
+            "code",
+            model)
+
+        return {'return': 0, 'run_cmd': run_cmd, 'run_dir': run_dir}
+
+    # No run command is defined for other models yet; return an explicit
+    # error instead of falling through and returning None (the caller
+    # indexes into this result).
+    return {'return': 1,
+            'error': f"Run command is not implemented for model '{model}'"}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/run.sh
new file mode 100644
index 000000000..ddcd0b550
--- /dev/null
+++ 
b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference-redhat/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then + cd ${CM_RUN_DIR} + cmd=${CM_RUN_CMD} + echo "${cmd}" + eval "${cmd}" + test $? -eq 0 || exit $? +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/README-about.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/README-about.md new file mode 100644 index 000000000..987c4e79c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/README-about.md @@ -0,0 +1,22 @@ +This CM script provides a unified interface to prepare and run a modular version of the [MLPerf inference benchmark](https://arxiv.org/abs/1911.02549) +across diverse ML models, data sets, frameworks, libraries, run-time systems and platforms +using the [cross-platform automation meta-framework (MLCommons CM)](https://github.com/mlcommons/ck). + +It is assembled from reusable and interoperable [CM scripts for DevOps and MLOps](../list_of_scripts.md) +being developed by the [open MLCommons taskforce on automation and reproducibility](../mlperf-education-workgroup.md). + +It is a higher-level wrapper to several other CM scripts modularizing the MLPerf inference benchmark: +* [Reference Python implementation](../app-mlperf-inference-reference) +* [Universal C++ implementation](../app-mlperf-inference-cpp) +* [TFLite C++ implementation](../app-mlperf-inference-tflite-cpp) +* [NVidia optimized implementation](app-mlperf-inference-nvidia) + +See [this SCC'23 tutorial](https://github.com/mlcommons/ck/blob/master/docs/tutorials/sc22-scc-mlperf.md) +to use this script to run a reference (unoptimized) Python implementation of the MLPerf object detection benchmark +with RetinaNet model, Open Images dataset, ONNX runtime and CPU target. + +See this [CM script](../run-mlperf-inference-app) to automate and validate your MLPerf inference submission. + +Get in touch with the [open taskforce on automation and reproducibility at MLCommons](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md) +if you need help with your submission or if you would like to participate in further modularization of MLPerf +and collaborative design space exploration and optimization of ML Systems. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/README-extra.md new file mode 100644 index 000000000..e661f3e53 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/README-extra.md @@ -0,0 +1,131 @@ +# Examples + +## MLPerf object detection with python, RetinaNet, Open Images, ONNX runtime (CPU), Ubuntu + +This example shows how to use this CM script to run the reference python implementation +of the MLPerf inference benchmark for object detection, RetinaNet, ONNX run-time (CPU) and Ubuntu. + +Install the MLCommons CM automation meta-framework as described [here]( https://github.com/mlcommons/ck/blob/master/cm/docs/installation.md ). 
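+
+CM itself is distributed as the `cmind` Python package. Once it is installed, a minimal sanity check (assuming `pip` placed the `cm` entry point on your `PATH`) is:
+
+```bash
+python3 -m pip show cmind   # should print the installed cmind package version
+cm                          # prints the top-level CM help if the CLI is reachable
+```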
+ +Here is the typical installation on Ubuntu 20.04: + +```bash +sudo apt install python3 python3-pip git wget +python3 -m pip install cmind +source .profile +``` + +Next you need to install a CM repository with [cross-platform CM scripts](https://github.com/mlcommons/cm4mlops/tree/main/script) for ML Systems: + +```bash +cm pull repo mlcommons@cm4mlops --checkout=dev +``` + +Note that you can fork [this repository](https://github.com/mlcommons/cm4mlops) and use it instead of mlcommons@cm4mlops +to add CM scripts for your own public and private ML models, data sets, software and hardware. +In such case, just change mlcommons@cm4mlops to your own fork in the above command. + +You can find the location of this repository on your system as follows: +```bash +cm find repo mlcommons@cm4mlops +``` + +Now we suggest you to set up a virtual python via CM to avoid mixing up your native Python installation: +```bash +cm run script "install python-venv" --name=mlperf +``` + +If you need a specific python version use this command: +```bash +cm run script "install python-venv" --name=mlperf --version=3.10.7 +``` + +You can now test the MLPerf inference benchmark with RetinaNet and ONNX runtime CPU using just one CM command: + +```bash +cm run script "app mlperf inference generic reference _python _retinanet _onnxruntime _cpu" \ + --adr.python.name=mlperf \ + --adr.compiler.tags=gcc \ + --scenario=Offline \ + --mode=accuracy \ + --test_query_count=10 \ + --quiet +``` + +The first run of this CM script takes around 25 minutes on a GCP instance with 16 cores and 64GB of memory because +CM will automatically detect, install and cache all the necessary ML components +while adapting them to your system using [portable CM scripts](https://github.com/mlcommons/cm4mlops/tree/main/script). + +These dependencies are described using [this simple YAML file](https://github.com/octoml/ck/blob/master/cm-mlops/script/app-mlperf-inference-reference/_cm.yaml#L57) +and can be turned on or off using different environment variables passed to this CM script using `--env.KEY=VALUE`. + +You should see the following output in the end: +```txt + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.654 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.827 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.654 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.657 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.566 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.705 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.735 + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.738 + +mAP=65.417% + +``` + +Any other run will automatically pick up all dependencies from the CM cache while setting up all environment variables and files +to launch the prepared MLPerf inference benchmark. 
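+
+If you want to see what was cached, or force a clean rebuild, the CM cache can be listed and reset (a short sketch; the exact tags present on your system may differ):
+
+```bash
+cm show cache                   # list all cached artifacts
+cm show cache --tags=ml-model   # filter cache entries by tags
+cm rm cache -f                  # remove the whole cache to force re-detection of all dependencies
+```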
+For example, you can now run the same benchmark in performance mode as follows:
+
+```bash
+cm run script "app mlperf inference generic reference _python _retinanet _onnxruntime _cpu" \
+   --adr.python.name=mlperf \
+   --adr.compiler.tags=gcc \
+   --scenario=Offline \
+   --mode=performance \
+   --test_query_count=10 \
+   --rerun
+```
+
+You should see the following output:
+```txt
+TestScenario.Offline qps=0.89, mean=8.6960, time=11.180, acc=31.661%, mAP=65.417%, queries=10, tiles=50.0:8.8280,80.0:9.0455,90.0:9.1450,95.0:9.2375,99.0:9.3114,99.9:9.3281
+```
+
+
+
+### Using Docker
+
+Please check the prototype of Docker containers with the CM automation meta-framework
+for modular MLPerf [here](https://github.com/mlcommons/ck/tree/master/docker)
+(on-going work).
+
+```bash
+docker build -f dockerfiles/resnet50/ubuntu_20.04_python_onnxruntime_cpu.Dockerfile -t resnet50_onnxruntime:ubuntu20.04 .
+```
+
+```bash
+docker run -it --rm resnet50_onnxruntime:ubuntu20.04 -c "cm run script --tags=app,mlperf,inference,reference,_python,_resnet50,_onnxruntime,_cpu --scenario=Offline --mode=accuracy"
+```
+
+
+
+
+# Future work
+
+* See the current coverage of different models, devices and backends [here](README-extra.md#current-coverage).
+
+* See the development roadmap [here](https://github.com/mlcommons/ck/issues/536).
+
+* See extension projects to enable collaborative benchmarking, design space exploration and optimization of ML and AI Systems [here](https://github.com/mlcommons/ck/issues/627).
+
+
+# Developers
+
+[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh),
+[Grigori Fursin](https://cKnowledge.org/gfursin)
+and [individual contributors](https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md).
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/README.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/README.md
new file mode 100644
index 000000000..5808cacc9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference](https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/_cm.yaml new file mode 100644 index 000000000..6e95a0082 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/_cm.yaml @@ -0,0 +1,1787 @@ +# Identification of this CM script +alias: app-mlperf-inference +uid: d775cac873ee4231 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf inference benchmark pipeline" + +developers: "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin)" + +# User-friendly tags to find this CM script +tags: + - app + - vision + - language + - mlcommons + - mlperf + - inference + - generic + +# Default environment +default_env: + CM_MLPERF_LOADGEN_MODE: accuracy + CM_MLPERF_LOADGEN_SCENARIO: Offline + CM_OUTPUT_FOLDER_NAME: test_results + CM_MLPERF_RUN_STYLE: test + CM_TEST_QUERY_COUNT: '10' + CM_MLPERF_QUANTIZATION: off + CM_GET_PLATFORM_DETAILS: yes + +env: + CM_MLPERF_PRINT_SUMMARY: "no" + CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'no' + +# Map script inputs to environment variables +input_mapping: + count: CM_MLPERF_LOADGEN_QUERY_COUNT + docker: CM_RUN_DOCKER_CONTAINER + hw_name: CM_HW_NAME + imagenet_path: IMAGENET_PATH + max_batchsize: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + mode: CM_MLPERF_LOADGEN_MODE + num_threads: CM_NUM_THREADS + output_dir: OUTPUT_BASE_DIR + power: CM_MLPERF_POWER + power_server: CM_MLPERF_POWER_SERVER_ADDRESS + ntp_server: CM_MLPERF_POWER_NTP_SERVER + max_amps: CM_MLPERF_POWER_MAX_AMPS + max_volts: CM_MLPERF_POWER_MAX_VOLTS + regenerate_files: CM_REGENERATE_MEASURE_FILES + rerun: CM_RERUN + scenario: CM_MLPERF_LOADGEN_SCENARIO + test_query_count: CM_TEST_QUERY_COUNT + clean: CM_MLPERF_CLEAN_SUBMISSION_DIR + target_qps: CM_MLPERF_LOADGEN_TARGET_QPS + target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY + offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS + singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + readme: CM_MLPERF_README + debug: CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM + gpu_name: CM_NVIDIA_GPU_NAME + nvidia_llama2_dataset_file_path: CM_NVIDIA_LLAMA_DATASET_FILE_PATH + tp_size: CM_NVIDIA_TP_SIZE + use_dataset_from_host: CM_USE_DATASET_FROM_HOST + +# Duplicate CM environment variables to the ones used in native apps +env_key_mappings: + CM_HOST_: HOST_ + CM_ML_: ML_ + CM_MLPERF_TVM: MLPERF_TVM + +# Env keys which are exposed to higher level scripts +new_env_keys: + - CM_MLPERF_* + +new_state_keys: + - app_mlperf_inference_* + - cm-mlperf-inference-results* + +# Dependencies on other CM scripts +deps: + + # Detect host OS features + - tags: detect,os + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect/install python + - tags: get,python + names: + - python + - python3 + + + ######################################################################## + # Install MLPerf inference dependencies + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + - tags: pull,git,repo + env: + CM_GIT_CHECKOUT_PATH: '<<>>' + enable_if_env: + CM_MLPERF_INFERENCE_PULL_SRC_CHANGES: + - 'yes' + - tags: get,mlperf,inference,utils + + - tags: install,pip-package,for-cmind-python,_package.pandas + enable_if_env: + CM_PROFILE_NVIDIA_POWER: 
+ - on + +posthook_deps: + - tags: get,mlperf,sut,description #populate system meta information like framework + - tags: get,platform,details + enable_if_any_env: + CM_GET_PLATFORM_DETAILS: + - yes + skip_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + env: + CM_PLATFORM_DETAILS_FILE_PATH: '<<>>/system_info.txt' + +post_deps: + - tags: draw,graph,from-json + enable_if_env: + CM_MLPERF_RUN_JSON_VERSION_INFO_FILE: + - on + env: + CM_JSON_INPUT_FILE: <<>> + CM_OUTPUT_IMAGE_PATH: <<>> + CM_OUTPUT_MERMAID_PATH: <<>> + +# Order of variations for documentation +variation_groups_order: + - implementation + - backend + - device + - model + - precision + - execution-mode + - reproducibility + +# Variations to customize dependencies +variations: + # Implementation (cpp, reference/python, nvidia, tflite-cpp) + cpp: + group: + implementation + add_deps_recursive: + imagenet-accuracy-script: + tags: _int64 + env: + CM_MLPERF_CPP: 'yes' + CM_MLPERF_IMPLEMENTATION: mlcommons_cpp + CM_IMAGENET_ACCURACY_DTYPE: float32 + CM_OPENIMAGES_ACCURACY_DTYPE: float32 + prehook_deps: + - names: + - cpp-mlperf-inference + - mlperf-inference-implementation + tags: app,mlperf,cpp,inference + skip_if_env: + CM_SKIP_RUN: + - yes + + mil: + alias: cpp + + mlcommons-cpp: + alias: cpp + + ctuning-cpp-tflite: + alias: tflite-cpp + + tflite-cpp: + default_variations: + backend: tflite + device: cpu + group: + implementation + add_deps_recursive: + imagenet-accuracy-script: + tags: _float32 + env: + CM_MLPERF_TFLITE_CPP: 'yes' + CM_MLPERF_CPP: 'yes' + CM_MLPERF_IMPLEMENTATION: ctuning_cpp_tflite + CM_IMAGENET_ACCURACY_DTYPE: float32 + prehook_deps: + - names: + - tflite-cpp-mlperf-inference + - mlperf-inference-implementation + tags: app,mlperf,tflite-cpp,inference + skip_if_env: + CM_SKIP_RUN: + - yes + + reference: + group: + implementation + default: + true + default_variations: + reproducibility: r4.1-dev_default + add_deps_recursive: + imagenet-accuracy-script: + tags: _float32 + squad-accuracy-script: + tags: _float32 + librispeech-accuracy-script: + tags: _int32 + cnndm-accuracy-script: + tags: _int32 + env: + CM_MLPERF_PYTHON: 'yes' + CM_MLPERF_IMPLEMENTATION: mlcommons_python + CM_SQUAD_ACCURACY_DTYPE: float32 + CM_IMAGENET_ACCURACY_DTYPE: float32 + CM_OPENIMAGES_ACCURACY_DTYPE: float32 + CM_LIBRISPEECH_ACCURACY_DTYPE: float32 + CM_CNNDM_ACCURACY_DTYPE: int32 + prehook_deps: + - names: + - python-reference-mlperf-inference + - mlperf-inference-implementation + tags: app,mlperf,reference,inference + skip_if_env: + CM_SKIP_RUN: + - yes + + neuralmagic: + alias: reference + + all-models: {} + + python: + alias: reference + + nvidia: + alias: nvidia-original + + mlcommons-python: + alias: reference + + reference,gptj_: + default_variations: + backend: pytorch + + reference,rgat: + default_variations: + backend: pytorch + + reference,sdxl_: + default_variations: + backend: pytorch + + reference,dlrm-v2_: + default_variations: + backend: pytorch + + reference,llama2-70b_: + default_variations: + backend: pytorch + + reference,mixtral-8x7b: + default_variations: + backend: pytorch + + reference,resnet50: + default_variations: + backend: onnxruntime + + reference,retinanet: + default_variations: + backend: onnxruntime + + reference,bert_: + default_variations: + backend: onnxruntime + + all-models,nvidia-original: + docker: + deps: + - tags: get,ml-model,gptj,raw + skip_if_env: + CM_MLPERF_NVIDIA_SKIP_GPTJ: + - "yes" + - tags: get,ml-model,gptj,_nvidia,_fp8 + skip_if_env: + CM_MLPERF_NVIDIA_SKIP_GPTJ: + - "yes" + - tags: 
get,ml-model,llama2-70b,_nvidia,_fp8 + update_tags_from_env_with_prefix: + _tp-size.: + - CM_NVIDIA_TP_SIZE + skip_if_env: + CM_MLPERF_NVIDIA_SKIP_LLAMA2_70B: + - "yes" + - tags: get,dataset,imagenet,validation,original,_full + names: + - imagenet-original + - dataset-original + skip_if_env: + CM_MLPERF_NVIDIA_SKIP_RESNET50: + - "yes" + - tags: get,dlrm,data,mlperf,inference,_nvidia + skip_if_env: + CM_MLPERF_NVIDIA_SKIP_DLRM: + - "yes" + - enable_if_env: + CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: + - 'yes' + tags: get,ml-model,sdxl,_fp16,_rclone + skip_if_env: + CM_MLPERF_NVIDIA_SKIP_SDXL: + - "yes" + env: + BUILD_TRTLLM: 1 + + nvidia-original,r4.1-dev_default: + docker: + build_deps: + - tags: detect,os + image_name: mlperf-inference-nvidia-v4.1-dev-common + update_meta_if_env: + - enable_if_env: + CM_HOST_PLATFORM_FLAVOR: + - x86_64 + docker: + base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v4.0-cuda12.2-cudnn8.9-x86_64-ubuntu20.04-public + + - skip_if_env: + CM_HOST_PLATFORM_FLAVOR: + - x86_64 + docker: + base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v4.1-cuda12.4-pytorch24.04-ubuntu22.04-aarch64-GraceHopper-release + + + + nvidia-original,gptj_: + env: + BUILD_TRTLLM: 1 + + nvidia-original,llama2-70b_: + env: + BUILD_TRTLLM: 1 + + nvidia-original,mixtral-8x7b: + env: + BUILD_TRTLLM: 1 + + nvidia-original,r4.1-dev_default,gptj_: + docker: + image_name: mlperf-inference-nvidia-v4.1-dev-llm + deps: + - tags: get,ml-model,gptj,_nvidia,_fp8 + update_tags_from_env_with_prefix: + _tp-size.: + - CM_NVIDIA_TP_SIZE + + nvidia-original,r4.1_default: + docker: + base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v4.1-cuda12.4-pytorch24.04-ubuntu22.04-x86_64-release + + nvidia-original,r4.1_default,gptj_: + docker: + deps: + - tags: get,ml-model,gptj,_nvidia,_fp8 + update_tags_from_env_with_prefix: + _tp-size.: + - CM_NVIDIA_TP_SIZE + + + nvidia-original,r4.1-dev_default,llama2-70b_: + docker: + image_name: mlperf-inference-nvidia-v4.1-dev-llm + deps: + - tags: get,ml-model,llama2-70b,_nvidia,_fp8 + update_tags_from_env_with_prefix: + _tp-size.: + - CM_NVIDIA_TP_SIZE + env: + BUILD_TRTLLM: 1 + + nvidia-original,r4.1_default,llama2-70b_: + docker: + deps: + - tags: get,ml-model,llama2-70b,_nvidia,_fp8 + update_tags_from_env_with_prefix: + _tp-size.: + - CM_NVIDIA_TP_SIZE + env: + BUILD_TRTLLM: 1 + + nvidia-original: + docker: + interactive: True + extra_run_args: ' --ulimit memlock=-1 --cap-add SYS_ADMIN --cap-add SYS_TIME --security-opt apparmor=unconfined --security-opt seccomp=unconfined' + base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v3.1-cuda12.2-cudnn8.9-x86_64-ubuntu20.04-l4-public + os: "ubuntu" + os_version: "20.04" + deps: + - tags: get,mlperf,inference,nvidia,scratch,space + names: + - mlperf-inference-nvidia-scratch-space + - tags: get,nvidia-docker + skip_if_env: + CM_SKIP_GET_NVIDIA_DOCKER: + - yes + mounts: + - "${{ CM_CUDNN_TAR_FILE_PATH }}:${{ CM_CUDNN_TAR_FILE_PATH }}" + - "${{ CM_TENSORRT_TAR_FILE_PATH }}:${{ CM_TENSORRT_TAR_FILE_PATH }}" + - "${{ CUDA_RUN_FILE_LOCAL_PATH }}:${{ CUDA_RUN_FILE_LOCAL_PATH }}" + - "${{ MLPERF_SCRATCH_PATH }}:${{ MLPERF_SCRATCH_PATH }}" + + update_meta_if_env: + - enable_if_env: + CM_HOST_OS_FLAVOR: + - ubuntu + CM_HOST_OS_VERSION: + - 20.04 + docker: + extra_run_args: ' --runtime=nvidia --ulimit memlock=-1 --cap-add SYS_ADMIN --cap-add SYS_TIME --security-opt apparmor=unconfined --security-opt seccomp=unconfined' + + default_variations: + backend: tensorrt + device: cuda + reproducibility: r4.1-dev_default + 
group: + implementation + add_deps_recursive: + imagenet-accuracy-script: + tags: _int32 + squad-accuracy-script: + tags: _float16 + librispeech-accuracy-script: + tags: _int8 + cnndm-accuracy-script: + tags: _int32 + env: + BUILD_TRTLLM: 0 + CM_MLPERF_IMPLEMENTATION: nvidia + CM_SQUAD_ACCURACY_DTYPE: float16 + CM_IMAGENET_ACCURACY_DTYPE: int32 + CM_CNNDM_ACCURACY_DTYPE: int32 + CM_LIBRISPEECH_ACCURACY_DTYPE: int8 + CM_DOCKER_USE_VIRTUAL_PYTHON: no + prehook_deps: + - names: + - nvidia-original-mlperf-inference + - nvidia-harness + - mlperf-inference-implementation + tags: reproduce,mlperf,nvidia,inference,_run_harness + skip_if_env: + CM_SKIP_RUN: + - yes + update_tags_from_env_with_prefix: + "_gpu_memory." : + - CM_NVIDIA_GPU_MEMORY + update_tags_from_env: + - CM_NVIDIA_HARNESS_GPU_VARIATION + + intel: + alias: intel-original + + intel-original: + group: + implementation + docker: + interactive: True + extra_run_args: ' --privileged' + mounts: + - "${{ CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}:${{ CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}" + - "${{ GPTJ_CHECKPOINT_PATH }}:${{ GPTJ_CHECKPOINT_PATH }}" + skip_run_cmd: 'no' + shm_size: '32gb' + os: ubuntu + real_run: false + run: true + docker_input_mapping: + criteo_preprocessed_path: CRITEO_PREPROCESSED_PATH + dlrm_data_path: DLRM_DATA_PATH + intel_gptj_int8_model_path: CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH + default_variations: + device: cpu + backend: pytorch + reproducibility: r4.1-dev_default + prehook_deps: + - names: + - intel + - intel-harness + - mlperf-inference-implementation + tags: reproduce,mlperf,inference,intel + skip_if_env: + CM_SKIP_RUN: + - yes + env: + CM_MLPERF_IMPLEMENTATION: intel + + intel-original,gptj_: + adr: + cnndm-accuracy-script: + tags: _int32 + + amd,r4.1_default: + docker: + base_image: rocm/pytorch:rocm6.1.2_ubuntu20.04_py3.9_pytorch_staging + extra_run_args: ' --device=/dev/kfd --device=/dev/dri --device=/dev/mem' + + amd: + group: + implementation + docker: + interactive: True + extra_run_args: ' --privileged' + mounts: + - "${{ LLAMA2_CHECKPOINT_PATH }}:${{ LLAMA2_CHECKPOINT_PATH }}" + - "${{ GPTJ_CHECKPOINT_PATH }}:${{ GPTJ_CHECKPOINT_PATH }}" + skip_run_cmd: 'no' + shm_size: '32gb' + os: ubuntu + real_run: false + run: true + default_variations: + device: cpu + backend: pytorch + reproducibility: r4.1-dev_default + prehook_deps: + - names: + - amd + - amd-harness + - mlperf-inference-implementation + tags: reproduce,mlperf,inference,amd + skip_if_env: + CM_SKIP_RUN: + - yes + env: + CM_MLPERF_IMPLEMENTATION: amd + + redhat: + group: + implementation + default_variations: + device: cuda + backend: openshift + reproducibility: r4.1-dev_default + prehook_deps: + - names: + - redhat + - redhat-harness + - mlperf-inference-implementation + tags: reproduce,mlperf,inference,redhat + skip_if_env: + CM_SKIP_RUN: + - yes + env: + CM_MLPERF_IMPLEMENTATION: redhat + docker: + interactive: True + + qualcomm: + alias: kilt + + kilt: + group: + implementation + default_variations: + device: qaic + backend: glow + reproducibility: r4.1-dev_default + prehook_deps: + - names: + - kilt + - kilt-harness + - mlperf-inference-implementation + tags: reproduce,mlperf,inference,kilt + skip_if_env: + CM_SKIP_RUN: + - yes + env: + CM_MLPERF_IMPLEMENTATION: qualcomm + docker: + interactive: True + + kilt,qaic,resnet50: + default_variations: + precision: uint8 + + kilt,qaic,retinanet: + default_variations: + precision: uint8 + + kilt,qaic,bert-99: + default_variations: + precision: uint8 + + 
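+  # bert-99.9 below raises the default precision to float16: the uint8
+  # default above is enough for the 99% accuracy targets but not for 99.9%.
+  # Illustrative selection of these variations (exact top-level tags may
+  # differ): cm run script --tags=app,mlperf,inference,_kilt,_qaic,_bert-99.9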
kilt,qaic,bert-99.9: + default_variations: + precision: float16 + + intel-original,resnet50: + default_variations: + precision: int8 + add_deps_recursive: + imagenet-accuracy-script: + tags: _int32 + + intel-original,retinanet: + default_variations: + precision: int8 + + intel-original,bert-99: + default_variations: + precision: int8 + + intel-original,bert-99.9: + default_variations: + precision: int8 + + intel-original,gptj-99: + default_variations: + precision: int4 + + intel-original,gptj-99.9: + default_variations: + precision: bfloat16 + + resnet50: + group: + model + default: + true + env: + CM_MODEL: + resnet50 + deps: + - tags: get,dataset-aux,imagenet-aux + add_deps_recursive: + mlperf-inference-implementation: + tags: _resnet50 + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - mlperf-accuracy-script + - imagenet-accuracy-script + tags: run,accuracy,mlperf,_imagenet + docker: + deps: + - tags: get,dataset,imagenet,validation,original,_full + enable_if_env: + CM_USE_DATASET_FROM_HOST: + - 'yes' + names: + - imagenet-original + - dataset-original + + retinanet: + group: + model + env: + CM_MODEL: + retinanet + add_deps_recursive: + mlperf-inference-implementation: + tags: _retinanet + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - mlperf-accuracy-script + - openimages-accuracy-script + tags: run,accuracy,mlperf,_openimages + + docker: + deps: + - names: + - openimages-original + enable_if_env: + CM_USE_DATASET_FROM_HOST: + - 'yes' + tags: get,dataset,original,openimages,_validation,_full,_custom-annotations + - names: + - openimages-calibration + enable_if_env: + CM_USE_DATASET_FROM_HOST: + - 'yes' + tags: get,dataset,original,openimages,_calibration + + 3d-unet-99: + group: + model + base: + - 3d-unet_ + env: + CM_MODEL: + 3d-unet-99 + add_deps_recursive: + mlperf-inference-implementation: + tags: _3d-unet-99 + + 3d-unet-99.9: + group: + model + base: + - 3d-unet_ + env: + CM_MODEL: + 3d-unet-99.9 + add_deps_recursive: + mlperf-inference-implementation: + tags: _3d-unet-99.9 + + 3d-unet_: + default_env: + CM_MLPERF_INFERENCE_TEST_QPS: "0.01" + env: + CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes' + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + skip_if_env: + CM_MLPERF_IMPLEMENTATION: + - nvidia + names: + - mlperf-accuracy-script + - 3d-unet-accuracy-script + tags: run,accuracy,mlperf,_kits19,_int8 + + 3d-unet_,reference: + docker: + image_name: mlperf-inference-mlcommons-python-implementation-3d-unet + deps: + - enable_if_env: + CM_MLPERF_DATASET_3DUNET_DOWNLOAD_TO_HOST: + - 'yes' + tags: get,dataset,kits19,preprocessed + + rgat: + group: + model + add_deps_recursive: + mlperf-inference-implementation: + tags: _rgat + env: + CM_MODEL: + rgat + + sdxl: + group: + model + env: + CM_MODEL: stable-diffusion-xl + CM_MLPERF_INFERENCE_TEST_QPS: "0.05" + default_variations: + precision: float32 + add_deps_recursive: + mlperf-inference-implementation: + tags: _sdxl + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - mlperf-accuracy-script + - coco2014-accuracy-script + tags: run,accuracy,mlperf,_coco2014 + + sdxl,nvidia-original: + docker: + deps: + - enable_if_any_env: + CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST: + - 'yes' + CM_USE_MODEL_FROM_HOST: + - 'yes' 
+        tags: get,ml-model,sdxl,_fp16,_rclone
+
+  sdxl,reference,float16:
+    docker:
+      image_name: mlperf-inference-mlcommons-python-implementation-sdxl-float16
+      deps:
+      - enable_if_any_env:
+          CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
+          - 'yes'
+          CM_USE_MODEL_FROM_HOST:
+          - 'yes'
+        tags: get,ml-model,sdxl,_fp16,_rclone
+
+  sdxl,reference,bfloat16:
+    docker:
+      image_name: mlperf-inference-mlcommons-python-implementation-sdxl-bfloat16
+      deps:
+      - enable_if_any_env:
+          CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
+          - 'yes'
+          CM_USE_MODEL_FROM_HOST:
+          - 'yes'
+        tags: get,ml-model,sdxl,_fp16,_rclone
+
+  sdxl,reference,float32:
+    docker:
+      image_name: mlperf-inference-mlcommons-python-implementation-sdxl-float32
+      deps:
+      - enable_if_any_env:
+          CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST:
+          - 'yes'
+          CM_USE_MODEL_FROM_HOST:
+          - 'yes'
+        tags: get,ml-model,sdxl,_fp32,_rclone
+
+  llama2-70b_:
+    default_env:
+      CM_MLPERF_INFERENCE_TEST_QPS: "0.01"
+    env:
+      CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes'
+    posthook_deps:
+    - enable_if_env:
+        CM_MLPERF_LOADGEN_MODE:
+        - accuracy
+        - all
+        CM_MLPERF_ACCURACY_RESULTS_DIR:
+        - 'on'
+      skip_if_env:
+        CM_MLPERF_IMPLEMENTATION:
+        - nvidia
+      names:
+      - mlperf-accuracy-script
+      - open-orca-accuracy-script
+      tags: run,accuracy,mlperf,_open-orca,_int32
+
+  llama2-70b-99:
+    group:
+      model
+    base:
+    - llama2-70b_
+    env:
+      CM_MODEL:
+        llama2-70b-99
+    add_deps_recursive:
+      mlperf-inference-implementation:
+        tags: _llama2-70b-99
+
+  llama2-70b-99.9:
+    group:
+      model
+    base:
+    - llama2-70b_
+    env:
+      CM_MODEL:
+        llama2-70b-99.9
+    add_deps_recursive:
+      mlperf-inference-implementation:
+        tags: _llama2-70b-99.9
+
+  llama2-70b_,reference:
+    docker:
+      image_name: mlperf-inference-mlcommons-python-implementation-llama2-70b
+      deps:
+      - enable_if_any_env:
+          CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
+          - 'yes'
+          CM_USE_MODEL_FROM_HOST:
+          - 'yes'
+        tags: get,ml-model,llama2
+
+  llama2-70b_,amd:
+    docker:
+      image_name: mlperf-inference-amd-python-implementation-llama2-70b
+      mounts:
+      - "${{ CM_LLAMA2_FINAL_SAFE_TENSORS_PATH }}:${{ CM_LLAMA2_FINAL_SAFE_TENSORS_PATH }}"
+      deps:
+      - enable_if_any_env:
+          CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
+          - 'yes'
+          CM_USE_MODEL_FROM_HOST:
+          - 'yes'
+        tags: get,ml-model,llama2,_amd,_pytorch
+
+  mixtral-8x7b:
+    group:
+      model
+    env:
+      CM_MODEL:
+        mixtral-8x7b
+      CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes'
+    add_deps_recursive:
+      mlperf-inference-implementation:
+        tags: _mixtral-8x7b
+    posthook_deps:
+    - enable_if_env:
+        CM_MLPERF_LOADGEN_MODE:
+        - accuracy
+        - all
+        CM_MLPERF_ACCURACY_RESULTS_DIR:
+        - 'on'
+      skip_if_env:
+        CM_MLPERF_IMPLEMENTATION:
+        - nvidia
+      names:
+      - mlperf-accuracy-script
+      - openorca-gsm8k-mbxp-combined-accuracy-script
+      tags: run,accuracy,mlperf,_openorca-gsm8k-mbxp,_int32
+
+  mixtral-8x7b,reference:
+    docker:
+      image_name: mlperf-inference-mlcommons-python-implementation-mixtral-8x7b
+      deps:
+      - tags: get,ml-model,mixtral
+        names:
+        - ml-model
+        - mixtral-model
+        enable_if_any_env:
+          CM_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST:
+          - 'yes'
+          CM_USE_MODEL_FROM_HOST:
+          - 'yes'
+      - tags: get,dataset-mixtral,openorca-mbxp-gsm8k-combined
+        names:
+        - openorca-mbxp-gsm8k-combined-preprocessed
+        enable_if_env:
+          CM_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST:
+          - 'yes'
+      mounts:
+      - "${{ MIXTRAL_CHECKPOINT_PATH }}:${{ MIXTRAL_CHECKPOINT_PATH }}"
+      - "${{ CM_DATASET_MIXTRAL_PREPROCESSED_PATH }}:${{ CM_DATASET_MIXTRAL_PREPROCESSED_PATH }}"
+
+  rnnt:
+    group:
+      model
+    env:
+      CM_MODEL:
+        rnnt
+    add_deps_recursive:
mlperf-inference-implementation: + tags: _rnnt + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + skip_if_env: + CM_MLPERF_IMPLEMENTATION: + - nvidia + names: + - mlperf-accuracy-script + - librispeech-accuracy-script + tags: run,accuracy,mlperf,_librispeech + + rnnt,reference: + env: + CM_MLPERF_PRINT_SUMMARY: "no" + + gptj-99: + group: + model + base: + - gptj_ + env: + CM_MODEL: + gptj-99 + add_deps_recursive: + mlperf-inference-implementation: + tags: _gptj-99 + + gptj-99.9: + group: + model + base: + - gptj_ + env: + CM_MODEL: + gptj-99.9 + add_deps_recursive: + mlperf-inference-implementation: + tags: _gptj-99.9 + + gptj: + alias: gptj_ + + gptj_: + docker: + deps: + - tags: get,ml-model,gptj,raw + env: + CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes' + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - cnndm-accuracy-script + - mlperf-accuracy-script + tags: run,accuracy,mlperf,_cnndm + + bert_: + deps: + - skip_if_env: + CM_DATASET_SQUAD_VAL_PATH: "on" + tags: get,dataset,squad,language-processing + - skip_if_env: + CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH: "on" + tags: get,dataset-aux,squad-vocab + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - squad-accuracy-script + - mlperf-accuracy-script + tags: run,accuracy,mlperf,_squad + add_deps_recursive: + inference-src: + tags: _deeplearningexamples + + bert-99: + group: + model + base: + - bert_ + env: + CM_MODEL: + bert-99 + add_deps_recursive: + mlperf-inference-implementation: + tags: _bert-99 + + bert-99.9: + group: + model + base: + - bert_ + env: + CM_MODEL: + bert-99.9 + add_deps_recursive: + mlperf-inference-implementation: + tags: _bert-99.9 + + dlrm_: + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - terabyte-accuracy-script + - mlperf-accuracy-script + tags: run,accuracy,mlperf,_terabyte,_float32 + + dlrm-v2-99: + group: + model + base: + - dlrm_ + env: + CM_MODEL: + dlrm-v2-99 + add_deps_recursive: + mlperf-inference-implementation: + tags: _dlrm-v2-99 + + dlrm-v2-99.9: + group: + model + base: + - dlrm_ + env: + CM_MODEL: + dlrm-v2-99.9 + add_deps_recursive: + mlperf-inference-implementation: + tags: _dlrm-v2-99.9 + + dlrm_,nvidia: + docker: + deps: + - tags: get,dlrm,data,mlperf,inference,_nvidia + mounts: + - "${{ DLRM_DATA_PATH }}:/home/mlperf_inf_dlrmv2" + + dlrm_,intel: + docker: + deps: + - tags: get,preprocessed,dataset,criteo,_mlc + mounts: + - "${{ DLRM_DATA_PATH }}:${{ DLRM_DATA_PATH }}" + + dlrm_,reference: + docker: + deps: + - tags: get,preprocessed,dataset,criteo,_mlc + - tags: get,ml-model,dlrm,_pytorch,_fp32 + mounts: + - "${{ CM_ML_MODEL_FILE_WITH_PATH }}:${{ CM_ML_MODEL_FILE_WITH_PATH }}" + - "${{ DLRM_DATA_PATH }}:${{ DLRM_DATA_PATH }}" + dockerfile_env: + CM_ML_MODEL_FILE_WITH_PATH: "on" + + + mobilenet: + group: + model + env: + CM_MODEL: + mobilenet + add_deps_recursive: + mlperf-inference-implementation: + tags: _mobilenet + deps: + - tags: get,dataset-aux,imagenet-aux + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - mlperf-accuracy-script + - imagenet-accuracy-script + tags: run,accuracy,mlperf,_imagenet + + efficientnet: + group: + model + env: + CM_MODEL: + efficientnet + 
add_deps_recursive: + mlperf-inference-implementation: + tags: _efficientnet + deps: + - tags: get,dataset-aux,imagenet-aux + posthook_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - accuracy + - all + CM_MLPERF_ACCURACY_RESULTS_DIR: + - 'on' + names: + - mlperf-accuracy-script + - imagenet-accuracy-script + tags: run,accuracy,mlperf,_imagenet + + onnxruntime: + group: backend + env: + CM_MLPERF_BACKEND: + onnxruntime + add_deps_recursive: + mlperf-inference-implementation: + tags: _onnxruntime + + tensorrt: + group: backend + env: + CM_MLPERF_BACKEND: + tensorrt + add_deps_recursive: + mlperf-inference-implementation: + tags: _tensorrt + + tensorflow: + alias: tf + + tf: + group: backend + env: + CM_MLPERF_BACKEND: + tf + add_deps_recursive: + mlperf-inference-implementation: + tags: _tf + + pytorch: + group: backend + env: + CM_MLPERF_BACKEND: + pytorch + add_deps_recursive: + mlperf-inference-implementation: + tags: _pytorch + + openshift: + group: backend + env: + CM_MLPERF_BACKEND: + openshift + add_deps_recursive: + mlperf-inference-implementation: + tags: _openshift + + ncnn: + group: backend + env: + CM_MLPERF_BACKEND: + ncnn + add_deps_recursive: + mlperf-inference-implementation: + tags: _ncnn + + deepsparse: + group: backend + default_variations: + precision: int8 + env: + CM_MLPERF_BACKEND: + deepsparse + add_deps_recursive: + mlperf-inference-implementation: + tags: _deepsparse + + tflite: + group: backend + env: + CM_MLPERF_BACKEND: tflite + add_deps_recursive: + mlperf-inference-implementation: + tags: _tflite + + glow: + group: backend + env: + CM_MLPERF_BACKEND: glow + add_deps_recursive: + mlperf-inference-implementation: + tags: _glow + + tvm-onnx: + group: backend + base: + - batch_size.1 + env: + CM_MLPERF_BACKEND: tvm-onnx + add_deps_recursive: + mlperf-inference-implementation: + tags: _tvm-onnx + + tvm-pytorch: + group: backend + base: + - batch_size.1 + env: + CM_MLPERF_BACKEND: tvm-pytorch + add_deps_recursive: + mlperf-inference-implementation: + tags: _tvm-pytorch + + tvm-tflite: + group: backend + base: + - batch_size.1 + env: + CM_MLPERF_BACKEND: tvm-tflite + add_deps_recursive: + mlperf-inference-implementation: + tags: _tvm-tflite + + ray: + group: backend + env: + CM_MLPERF_BACKEND: + ray + add_deps_recursive: + mlperf-inference-implementation: + tags: _ray + + cpu: + group: + device + default: + True + env: + CM_MLPERF_DEVICE: + cpu + add_deps_recursive: + mlperf-inference-implementation: + tags: _cpu + + cuda,reference: + docker: + base_image: nvcr.io/nvidia/pytorch:24.03-py3 + + cuda: + docker: + all_gpus: 'yes' + deps: + - tags: get,nvidia-docker + skip_if_env: + CM_SKIP_GET_NVIDIA_DOCKER: + - yes + group: + device + env: + CM_MLPERF_DEVICE: + gpu + add_deps_recursive: + mlperf-inference-implementation: + tags: _cuda + deps: + - tags: get,cuda-devices,_with-pycuda + skip_if_env: + CM_CUDA_DEVICE_PROP_GLOBAL_MEMORY: + - "yes" + - "on" + rocm: + docker: + all_gpus: 'yes' + group: + device + env: + CM_MLPERF_DEVICE: + rocm + add_deps_recursive: + mlperf-inference-implementation: + tags: _rocm + qaic: + group: + device + env: + CM_MLPERF_DEVICE: + qaic + add_deps_recursive: + mlperf-inference-implementation: + tags: _qaic + + tpu: + group: + device + env: + CM_MLPERF_DEVICE: + tpu + add_deps_recursive: + mlperf-inference-implementation: + tags: _tpu + + # Execution modes + fast: + group: execution-mode + env: + CM_FAST_FACTOR: '5' + CM_OUTPUT_FOLDER_NAME: fast_results + CM_MLPERF_RUN_STYLE: fast + + test: + group: execution-mode + default: true + 
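+    # "test" is the default execution mode: quick, non-submittable runs with
+    # a reduced query count (see CM_TEST_QUERY_COUNT in the reproducibility
+    # variations below); use "valid" for full submission-grade runs.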
env: + CM_OUTPUT_FOLDER_NAME: test_results + CM_MLPERF_RUN_STYLE: test + + valid,retinanet: + adr: + openimages-accuracy-script-disabled: + tags: _nvidia-pycocotools + + valid: + group: execution-mode + env: + CM_OUTPUT_FOLDER_NAME: valid_results + CM_MLPERF_RUN_STYLE: valid + + # Model precision + quantized: + alias: int8 + + fp32: + alias: float32 + + float32: + group: precision + default: true + env: + CM_MLPERF_QUANTIZATION: off + CM_MLPERF_MODEL_PRECISION: float32 + add_deps_recursive: + python-reference-mlperf-inference: + tags: _fp32 + kilt-harness: + tags: _fp32 + + float16: + group: precision + env: + CM_MLPERF_QUANTIZATION: off + CM_MLPERF_MODEL_PRECISION: float16 + add_deps_recursive: + python-reference-mlperf-inference: + tags: _float16 + kilt-harness: + tags: _fp16 + + bfloat16: + group: precision + env: + CM_MLPERF_QUANTIZATION: off + CM_MLPERF_MODEL_PRECISION: bfloat16 + add_deps_recursive: + python-reference-mlperf-inference: + tags: _bfloat16 + + int4: + group: precision + env: + CM_MLPERF_QUANTIZATION: on + CM_MLPERF_MODEL_PRECISION: int4 + add_deps_recursive: + mlperf-inference-implementation: + tags: _int4 + int8: + group: precision + env: + CM_MLPERF_QUANTIZATION: on + CM_MLPERF_MODEL_PRECISION: int8 + add_deps_recursive: + mlperf-inference-implementation: + tags: _int8 + kilt-harness: + tags: _int8 + + uint8: + group: precision + env: + CM_MLPERF_QUANTIZATION: on + CM_MLPERF_MODEL_PRECISION: uint8 + add_deps_recursive: + mlperf-inference-implementation: + tags: _uint8 + kilt-harness: + tags: _uint8 + + offline: + group: loadgen-scenario + default: true + env: + CM_MLPERF_LOADGEN_SCENARIO: Offline + add_deps_recursive: + mlperf-inference-implementation: + tags: _offline + multistream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: MultiStream + add_deps_recursive: + mlperf-inference-implementation: + tags: _multistream + singlestream: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: SingleStream + add_deps_recursive: + mlperf-inference-implementation: + tags: _singlestream + server: + group: loadgen-scenario + env: + CM_MLPERF_LOADGEN_SCENARIO: Server + add_deps_recursive: + mlperf-inference-implementation: + tags: _server + + power: + env: + CM_MLPERF_POWER: 'yes' + CM_SYSTEM_POWER: 'yes' + add_deps_recursive: + mlperf-runner: + tags: + _power + + batch_size.#: + group: batch_size + env: + CM_MLPERF_LOADGEN_MAX_BATCHSIZE: '#' + add_deps_recursive: + mlperf-inference-implementation: + tags: _batch_size.# + + # Reproducibility (past submissions) + r2.1_default: + group: + reproducibility + add_deps_recursive: + compiler: + tags: llvm + inference-src: + tags: _octoml + loadgen: + version: r2.1 + nvidia-inference-common-code: + version: r2.1 + tags: _custom + nvidia-inference-server: + version: r2.1 + tags: _custom + env: + CM_SKIP_SYS_UTILS: 'yes' + CM_TEST_QUERY_COUNT: '100' + + r3.0_default: + group: + reproducibility + add_deps_recursive: + compiler: + tags: gcc + cuda: + version_max: "11.8" + nvidia-inference-common-code: + version: r2.1 + tags: _custom + nvidia-inference-server: + version: r2.1 + tags: _custom + env: + CM_SKIP_SYS_UTILS: 'yes' + + r3.1_default: + group: + reproducibility + add_deps_recursive: + nvidia-inference-common-code: + version: r3.0 + tags: _nvidia-only + nvidia-inference-server: + version: r3.0 + tags: _nvidia-only + default_env: + CM_SKIP_SYS_UTILS: 'yes' + CM_REGENERATE_MEASURE_FILES: 'yes' + env: + CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl' + + r4.0-dev_default: + 
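+    # same dependency pins as r4.0_default below (NVIDIA r3.1 _ctuning
+    # code/server, Intel v3.1 harness); only the release tagging differs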
group: + reproducibility + add_deps_recursive: + nvidia-inference-common-code: + version: r3.1 + tags: _ctuning + nvidia-inference-server: + version: r3.1 + tags: _ctuning + intel-harness: + tags: _v3.1 + default_env: + CM_SKIP_SYS_UTILS: 'yes' + CM_REGENERATE_MEASURE_FILES: 'yes' + env: + CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl' + + r4.0_default: + group: + reproducibility + add_deps_recursive: + nvidia-inference-common-code: + version: r3.1 + tags: _ctuning + nvidia-inference-server: + version: r3.1 + tags: _ctuning + intel-harness: + tags: _v3.1 + default_env: + CM_SKIP_SYS_UTILS: 'yes' + CM_REGENERATE_MEASURE_FILES: 'yes' + env: + CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl' + + #uses public code for inference v4.1 + + r4.1-dev_default: + group: + reproducibility + add_deps_recursive: + nvidia-inference-common-code: + version: r4.0 + tags: _mlcommons + nvidia-inference-server: + version: r4.0 + tags: _mlcommons + intel-harness: + tags: _v4.0 + default_env: + CM_SKIP_SYS_UTILS: 'yes' + CM_REGENERATE_MEASURE_FILES: 'yes' + env: + CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl' + + r4.1_default: + group: + reproducibility + add_deps_recursive: + nvidia-inference-common-code: + version: r4.1 + tags: _go + nvidia-inference-server: + version: r4.1 + tags: _go + intel-harness: + tags: _v4.1 + default_env: + CM_SKIP_SYS_UTILS: 'yes' + CM_REGENERATE_MEASURE_FILES: 'yes' + env: + CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl' + CM_MLPERF_INFERENCE_VERSION: '4.1' + +invalid_variation_combinations: + - + - retinanet + - tf + - + - nvidia-original + - tf + - + - nvidia-original + - onnxruntime + - + - nvidia-original + - pytorch + - + - nvidia + - tf + - + - nvidia + - onnxruntime + - + - nvidia + - pytorch + - + - gptj + - tf + +input_description: + scenario: + desc: "MLPerf inference scenario" + choices: + - Offline + - Server + - SingleStream + - MultiStream + default: Offline + mode: + desc: "MLPerf inference mode" + choices: + - performance + - accuracy + default: accuracy + test_query_count: + desc: "Specifies the number of samples to be processed during a test run" + target_qps: + desc: "Target QPS" + target_latency: + desc: "Target Latency" + max_batchsize: + desc: "Maximum batchsize to be used" + num_threads: + desc: "Number of CPU threads to launch the application with" + hw_name: + desc: "Valid value - any system description which has a config file (under same name) defined [here](https://github.com/mlcommons/cm4mlops/tree/main/script/get-configs-sut-mlperf-inference/configs)" + output_dir: + desc: "Location where the outputs are produced" + rerun: + desc: "Redo the run even if previous run files exist" + boolean: true + default: true + regenerate_files: + desc: "Regenerates measurement files including accuracy.txt files even if a previous run exists. 
This option is redundant if `--rerun` is used"
+    boolean: true
+  adr.python.name:
+    desc: "Python virtual environment name (optional)"
+    default: mlperf
+  adr.python.version_min:
+    desc: "Minimal Python version"
+    default: "3.8"
+  adr.python.version:
+    desc: "Force Python version (must have all system deps)"
+  adr.compiler.tags:
+    desc: "Compiler for loadgen"
+    default: gcc
+  adr.inference-src-loadgen.env.CM_GIT_URL:
+    desc: "Git URL for MLPerf inference sources to build LoadGen (to enable non-reference implementations)"
+  adr.inference-src.env.CM_GIT_URL:
+    desc: "Git URL for MLPerf inference sources to run benchmarks (to enable non-reference implementations)"
+  quiet:
+    desc: "Quiet run (select default values for all questions)"
+    boolean: true
+    default: false
+  readme:
+    desc: "Generate README with the reproducibility report"
+  debug:
+    desc: "Debug MLPerf script"
+
+gui:
+  title: "CM GUI for the MLPerf inference benchmark"
+
+docker:
+  use_host_group_id: True
+  use_host_user_id: True
+  pass_user_group: True  # useful if Docker is run by a different user from the one who built it, under the same group
+  deps:
+  - tags: get,mlperf,inference,results,dir,local
+    names:
+    - get-mlperf-inference-results-dir
+    skip_if_env:
+      OUTPUT_BASE_DIR: [ on ]
+  - tags: get,mlperf,inference,submission,dir,local
+    names:
+    - get-mlperf-inference-submission-dir
+    skip_if_env:
+      CM_MLPERF_INFERENCE_SUBMISSION_DIR: [ on ]
+
+  pre_run_cmds:
+    #- cm pull repo && cm run script --tags=get,git,repo,_repo.https://github.com/GATEOverflow/inference_results_v4.0.git --update
+    - cm pull repo
+    - cm rm cache --tags=inference,src -f
+  mounts:
+    - "${{ CM_DATASET_IMAGENET_PATH }}:${{ CM_DATASET_IMAGENET_PATH }}"
+    - "${{ CM_DATASET_OPENIMAGES_PATH }}:${{ CM_DATASET_OPENIMAGES_PATH }}"
+    - "${{ CM_OPENIMAGES_CALIBRATION_DATASET_PATH }}:${{ CM_OPENIMAGES_CALIBRATION_DATASET_PATH }}"
+    - "${{ CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH }}:${{ CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH }}"
+    - "${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}:${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}"
+    - "${{ OUTPUT_BASE_DIR }}:${{ OUTPUT_BASE_DIR }}"
+    - "${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}"
+    - "${{ GPTJ_CHECKPOINT_PATH }}:${{ GPTJ_CHECKPOINT_PATH }}"
+    - "${{ CM_CRITEO_PREPROCESSED_PATH }}:${{ CM_CRITEO_PREPROCESSED_PATH }}"
+    - "${{ LLAMA2_CHECKPOINT_PATH }}:${{ LLAMA2_CHECKPOINT_PATH }}"
+    - "${{ CM_NVIDIA_LLAMA_DATASET_FILE_PATH }}:${{ CM_NVIDIA_LLAMA_DATASET_FILE_PATH }}"
+    - "${{ SDXL_CHECKPOINT_PATH }}:${{ SDXL_CHECKPOINT_PATH }}"
+    - "${{ CM_DATASET_KITS19_PREPROCESSED_PATH }}:${{ CM_DATASET_KITS19_PREPROCESSED_PATH }}"
+  skip_run_cmd: 'no'
+  shm_size: '32gb'
+  interactive: True
+  extra_run_args: ' --dns 8.8.8.8 --dns 8.8.4.4 --ulimit memlock=-1 --cap-add SYS_ADMIN --cap-add SYS_TIME --security-opt apparmor=unconfined --security-opt seccomp=unconfined'
+  os: ubuntu
+  cm_repo: mlcommons@cm4mlops
+  cm_repo_branch: mlperf-inference
+  real_run: False
+  os_version: '22.04'
+  docker_input_mapping:
+    imagenet_path: IMAGENET_PATH
+    gptj_checkpoint_path: GPTJ_CHECKPOINT_PATH
+    criteo_preprocessed_path: CRITEO_PREPROCESSED_PATH
+    results_dir: RESULTS_DIR
+    submission_dir: SUBMISSION_DIR
+    dlrm_data_path: DLRM_DATA_PATH
+    intel_gptj_int8_model_path: CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH
+    nvidia_llama2_dataset_file_path: CM_NVIDIA_LLAMA_DATASET_FILE_PATH
+    tp_size: CM_NVIDIA_TP_SIZE
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/build_dockerfiles.py
b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/build_dockerfiles.py new file mode 100644 index 000000000..72aeaf766 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/build_dockerfiles.py @@ -0,0 +1,107 @@ +import cmind +import os +import pathlib +current_file_path = pathlib.Path(__file__).parent.resolve() +docker_os = { + "ubuntu": ["18.04", "20.04", "22.04"], + "rhel": ["9"] +} +dataset = { + "resnet50": "imagenet", + "retinanet": "openimages", + "bert-99.9": "squad" +} +variations = { + "resnet50": { + "tensorflow": { + "cpu": ["python"] + }, + "onnxruntime": { + "cpu": ["python", "cpp"] + }, + "pytorch": { + "cpu": [] + } + }, + "retinanet": { + "tensorflow": { + }, + "onnxruntime": { + "cpu": ["python", "cpp"] + }, + "pytorch": { + "cpu": ["python"] + } + }, + "bert-99.9": { + "tensorflow": { + "cpu": ["python"] + }, + "onnxruntime": { + "cpu": ["python"] + }, + "pytorch": { + "cpu": [] + } + } +} + +for _os in docker_os: + for version in docker_os[_os]: + for model in variations: + for backend in variations[model]: + for device in variations[model][backend]: + for implementation in variations[model][backend][device]: + variation_string = ",_" + model + ",_" + \ + backend + ",_" + device + ",_" + implementation + file_name_ext = "_" + implementation + "_" + backend + "_" + device + dockerfile_path = os.path.join( + current_file_path, + 'dockerfiles', + model, + _os + + '_' + + version + + file_name_ext + + '.Dockerfile') + cm_input = {'action': 'run', + 'automation': 'script', + 'tags': 'app,mlperf,inference,generic' + variation_string, + 'adr': {'compiler': + {'tags': 'gcc'}, + 'inference-src': + {'tags': '_octoml'}, + 'openimages-preprocessed': + {'tags': '_50'} + }, + 'print_deps': True, + 'quiet': True, + 'silent': True, + 'fake_run': True + } + r = cmind.access(cm_input) + print_deps = r['new_state']['print_deps'] + comments = ["#RUN " + dep for dep in print_deps] + comments.append("") + comments.append( + "# Run CM workflow for MLPerf inference") + cm_docker_input = {'action': 'run', + 'automation': 'script', + 'tags': 'build,dockerfile', + 'docker_os': _os, + 'docker_os_version': version, + 'file_path': dockerfile_path, + 'comments': comments, + 'run_cmd': 'cm run script --tags=app,mlperf,inference,generic' + variation_string + ' --adr.compiler.tags=gcc --adr.inference-src.tags=_octoml', + 'script_tags': 'app,mlperf,inference,generic', + 'quiet': True, + 'print_deps': True, + 'real_run': True + } + r = cmind.access(cm_docker_input) + if r['return'] > 0: + print(r) + exit(1) + + print('') + print("Dockerfile generated at " + dockerfile_path) diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/customize.py new file mode 100644 index 000000000..07fb7cb4e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/customize.py @@ -0,0 +1,665 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils + +import os +import json +import shutil +import subprocess +import copy +import cmind as cm +import platform +import sys +import 
mlperf_utils +import re +from datetime import datetime, timezone + + +def preprocess(i): + + env = i['env'] + state = i['state'] + + if env.get('CM_MLPERF_IMPLEMENTATION', '') == 'nvidia': + if env.get('CM_NVIDIA_GPU_NAME', '') in [ + "rtx_4090", "a100", "t4", "l4", "orin", "custom"]: + env['CM_NVIDIA_HARNESS_GPU_VARIATION'] = "_" + \ + env['CM_NVIDIA_GPU_NAME'] + env['CM_NVIDIA_GPU_MEMORY'] = '' + else: + gpu_memory = i['state'].get( + 'cm_cuda_device_prop', '').get('Global memory') + gpu_memory_size = str( + int((float(gpu_memory) / (1024 * 1024 * 1024) + 7) / 8) * 8) + env['CM_NVIDIA_GPU_MEMORY'] = gpu_memory_size + env['CM_NVIDIA_HARNESS_GPU_VARIATION'] = '' + + if 'cmd' in i['input']: + state['mlperf_inference_run_cmd'] = "cm run script " + \ + " ".join(i['input']['cmd']) + + state['mlperf-inference-implementation'] = {} + + run_state = i['run_script_input']['run_state'] + state['mlperf-inference-implementation']['script_id'] = run_state['script_id'] + \ + ":" + ",".join(run_state['script_variation_tags']) + + if env.get('CM_VLLM_SERVER_MODEL_NAME', '') != '' and env.get( + 'CM_ML_MODEL_FULL_NAME', '') == '': + env['CM_ML_MODEL_FULL_NAME'] = env['CM_VLLM_SERVER_MODEL_NAME'].replace( + "/", "_") + + return {'return': 0} + + +def postprocess(i): + + os_info = i['os_info'] + + xsep = '^' if os_info['platform'] == 'windows' else '\\' + + env = i['env'] + inp = i['input'] + env['CMD'] = '' + state = i['state'] + + # if env.get('CM_MLPERF_USER_CONF', '') == '': + # return {'return': 0} + + output_dir = env['CM_MLPERF_OUTPUT_DIR'] + + result_sut_folder_path = env['CM_MLPERF_INFERENCE_RESULTS_SUT_PATH'] + + mode = env['CM_MLPERF_LOADGEN_MODE'] + + if not os.path.exists(output_dir) or not os.path.exists( + os.path.join(output_dir, "mlperf_log_summary.txt")): + # No output, fake_run? 
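+        # (e.g. a fake_run dry run like the one issued by build_dockerfiles.py
+        # in this directory), so there is nothing to post-process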
+ return {'return': 0} + + # in power mode copy the log files from tmp_power directory + if env.get('CM_MLPERF_POWER', '') == "yes" and mode == "performance": + mlperf_power_logs_dir = os.path.join( + env['CM_MLPERF_OUTPUT_DIR'], "..", "power") + mlperf_ranging_logs_dir = os.path.join( + env['CM_MLPERF_OUTPUT_DIR'], "..", "ranging") + + if os.path.exists(os.path.join( + env['CM_MLPERF_POWER_LOG_DIR'], "power")): + if os.path.exists(mlperf_power_logs_dir): + shutil.rmtree(mlperf_power_logs_dir) + shutil.copytree( + os.path.join( + env['CM_MLPERF_POWER_LOG_DIR'], + "power"), + mlperf_power_logs_dir) + + if os.path.exists(os.path.join( + env['CM_MLPERF_POWER_LOG_DIR'], "ranging")): + if os.path.exists(mlperf_ranging_logs_dir): + shutil.rmtree(mlperf_ranging_logs_dir) + shutil.copytree( + os.path.join( + env['CM_MLPERF_POWER_LOG_DIR'], + "ranging"), + mlperf_ranging_logs_dir) + + if os.path.exists(os.path.join( + env['CM_MLPERF_POWER_LOG_DIR'], "run_1", "spl.txt")): + shutil.copyfile( + os.path.join( + env['CM_MLPERF_POWER_LOG_DIR'], + "run_1", + "spl.txt"), + os.path.join( + env['CM_MLPERF_OUTPUT_DIR'], + "spl.txt")) + + model = env['CM_MODEL'] + model_full_name = env.get('CM_ML_MODEL_FULL_NAME', model) + + if mode == "accuracy" or mode == "compliance" and env[ + 'CM_MLPERF_LOADGEN_COMPLIANCE_TEST'] == "TEST01": + if model == "resnet50": + accuracy_filename = "accuracy-imagenet.py" + accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + accuracy_filename) + dataset_args = " --imagenet-val-file " + \ + os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt") + accuracy_log_file_option_name = " --mlperf-accuracy-file " + datatype_option = " --dtype " + env['CM_IMAGENET_ACCURACY_DTYPE'] + + elif model == "retinanet": + accuracy_filename = "accuracy-openimages.py" + accuracy_filepath = os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + accuracy_filename) + dataset_args = " --openimages-dir " + \ + os.getcwd() # just to make the script happy + accuracy_log_file_option_name = " --mlperf-accuracy-file " + datatype_option = "" + + elif 'bert' in model: + accuracy_filename = "accuracy-squad.py" + accuracy_filepath = os.path.join( + env['CM_MLPERF_INFERENCE_BERT_PATH'], accuracy_filename) + dataset_args = " --val_data '" + env['CM_DATASET_SQUAD_VAL_PATH'] + "' --vocab_file '" + \ + env['CM_DATASET_SQUAD_VOCAB_PATH'] + \ + "' --out_file predictions.json " + accuracy_log_file_option_name = " --log_file " + datatype_option = " --output_dtype " + \ + env['CM_SQUAD_ACCURACY_DTYPE'] + + elif 'stable-diffusion-xl' in model: + pass # No compliance check for now + elif 'gpt' in model: + pass # No compliance check for now + elif 'llama2-70b' in model: + pass # No compliance check for now + elif 'mixtral-8x7b' in model: + pass # No compliance check for now + else: + pass # Not giving an error now. 
But accuracy paths need to be done for other benchmarks which may need the non-determinism test + # return {'return': 1, 'error': f'Accuracy paths not done for model + # {model}'} + scenario = env['CM_MLPERF_LOADGEN_SCENARIO'] + + if not state.get('cm-mlperf-inference-results'): + state['cm-mlperf-inference-results'] = {} + if not state.get('cm-mlperf-inference-results-last'): + state['cm-mlperf-inference-results-last'] = {} + if not state['cm-mlperf-inference-results'].get( + state['CM_SUT_CONFIG_NAME']): + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']] = {} + if not state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ].get(model): + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model] = {} + if not state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ][model].get(scenario): + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ][model][scenario] = {} + + # if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes" and mode == + # "performance" and scenario != "Server": + if mode == "performance" and scenario != "Server": + os.chdir(output_dir) + if not os.path.exists("mlperf_log_summary.txt"): + return {'return': 0} + + if scenario in ["Offline", "Server"]: + metric = "target_qps" + elif scenario.endswith("Stream"): + metric = "target_latency" + else: + return {'return': 1, + 'error': 'Unsupported scenario: {}'.format(scenario)} + + import re + import yaml + pattern = {} + pattern["Offline"] = "Samples per second: (.*)\n" + pattern["SingleStream"] = "Mean latency \\(ns\\)\\s*:(.*)" + pattern["MultiStream"] = "Mean latency \\(ns\\)\\s*:(.*)" + print("\n") + with open("mlperf_log_summary.txt", "r") as fp: + summary = fp.read() + + result = re.findall(pattern[scenario], summary) + + if not result: + return { + 'return': 1, 'error': f'No {metric} found in performance summary. 
Pattern checked "{pattern[scenario]}"'}
+
+        value = result[0].strip()
+        if "\\(ns\\)" in pattern[scenario]:
+            value = str(float(value) / 1000000)  # convert to milliseconds
+
+        sut_name = state['CM_SUT_CONFIG_NAME']
+        sut_config = state['CM_SUT_CONFIG'][sut_name]
+        sut_config_path = state['CM_SUT_CONFIG_PATH'][sut_name]
+        if scenario not in sut_config[model_full_name]:
+            sut_config[model_full_name][scenario] = {}
+        sut_config[model_full_name][scenario][metric] = value
+
+        print(
+            f"SUT: {sut_name}, model: {model_full_name}, scenario: {scenario}, {metric} updated as {value}")
+        print(f"New config stored in {sut_config_path}")
+        with open(sut_config_path, "w") as f:
+            yaml.dump(sut_config, f)
+
+    if mode in ["performance", "accuracy"]:
+        # if a measurements file exists, read it
+        if os.path.exists("measurements.json"):
+            with open("measurements.json", "r") as file:
+                measurements = json.load(file)  # Load JSON data from the file
+        else:
+            measurements = {}
+        measurements['starting_weights_filename'] = env.get(
+            'CM_ML_MODEL_STARTING_WEIGHTS_FILENAME', env.get(
+                'CM_ML_MODEL_FILE', measurements.get(
+                    'starting_weights_filename', '')))
+        measurements['retraining'] = env.get(
+            'CM_ML_MODEL_RETRAINING', measurements.get(
+                'retraining', 'no'))
+        measurements['input_data_types'] = env.get(
+            'CM_ML_MODEL_INPUTS_DATA_TYPE', measurements.get(
+                'input_data_types', 'fp32'))
+        measurements['weight_data_types'] = env.get(
+            'CM_ML_MODEL_WEIGHTS_DATA_TYPE', measurements.get(
+                'weight_data_types', 'fp32'))
+        measurements['weight_transformations'] = env.get(
+            'CM_ML_MODEL_WEIGHT_TRANSFORMATIONS', measurements.get(
+                'weight_transformations', 'none'))
+
+        os.chdir(output_dir)
+
+        if not os.path.exists("mlperf_log_summary.txt"):
+            return {'return': 0}
+
+        mlperf_log_summary = ''
+        if os.path.isfile("mlperf_log_summary.txt"):
+            with open("mlperf_log_summary.txt", "r") as fp:
+                mlperf_log_summary = fp.read()
+
+        if mlperf_log_summary != '':
+            state['app_mlperf_inference_log_summary'] = {}
+            for x in mlperf_log_summary.split('\n'):
+                y = x.split(': ')
+                if len(y) == 2:
+                    state['app_mlperf_inference_log_summary'][y[0].strip().lower()
+                                                              ] = y[1].strip()
+
+        if env.get("CM_MLPERF_PRINT_SUMMARY", "").lower() not in [
+                "no", "0", "false"]:
+            print("\n")
+            print(mlperf_log_summary)
+
+        with open("measurements.json", "w") as fp:
+            json.dump(measurements, fp, indent=2)
+
+        cm_sut_info = {}
+        cm_sut_info['system_name'] = state['CM_SUT_META']['system_name']
+        cm_sut_info['implementation'] = env['CM_MLPERF_IMPLEMENTATION']
+        cm_sut_info['device'] = env['CM_MLPERF_DEVICE']
+        cm_sut_info['framework'] = state['CM_SUT_META']['framework']
+        cm_sut_info['run_config'] = env['CM_MLPERF_INFERENCE_SUT_RUN_CONFIG']
+        with open(os.path.join(result_sut_folder_path, "cm-sut-info.json"), "w") as fp:
+            json.dump(cm_sut_info, fp, indent=2)
+
+        system_meta = state['CM_SUT_META']
+        with open("system_meta.json", "w") as fp:
+            json.dump(system_meta, fp, indent=2)
+
+        # map the custom model for inference result to the official model
+        # if custom model name is not set, the official model name will be
+        # mapped to itself
+        official_model_name = model
+        model_mapping = {model_full_name: official_model_name}
+        with open("model_mapping.json", "w") as fp:
+            json.dump(model_mapping, fp, indent=2)
+
+        # Add to the state
+        state['app_mlperf_inference_measurements'] = copy.deepcopy(
+            measurements)
+
+        if os.path.exists(env['CM_MLPERF_CONF']):
+            shutil.copy(env['CM_MLPERF_CONF'], 'mlperf.conf')
+
+        if os.path.exists(env['CM_MLPERF_USER_CONF']):
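+            # snapshot the effective LoadGen user settings next to the results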
shutil.copy(env['CM_MLPERF_USER_CONF'], 'user.conf') + + result, valid, power_result = mlperf_utils.get_result_from_log( + env['CM_MLPERF_LAST_RELEASE'], model, scenario, output_dir, mode, env.get('CM_MLPERF_INFERENCE_SOURCE_VERSION')) + power = None + power_efficiency = None + if power_result: + power_result_split = power_result.split(",") + if len(power_result_split) == 2: # power and power efficiency + power = power_result_split[0] + power_efficiency = power_result_split[1] + + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ][model][scenario][mode] = result + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ][model][scenario][mode + '_valid'] = valid.get(mode, False) + + state['cm-mlperf-inference-results-last'][mode] = result + state['cm-mlperf-inference-results-last'][mode + + '_valid'] = valid.get(mode, False) + + if power: + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ][model][scenario]['power'] = power + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ][model][scenario]['power_valid'] = valid['power'] + state['cm-mlperf-inference-results-last']['power'] = power + state['cm-mlperf-inference-results-last']['power_valid'] = valid['power'] + if power_efficiency: + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ][model][scenario]['power_efficiency'] = power_efficiency + state['cm-mlperf-inference-results-last']['power_efficiency'] = power_efficiency + + # Record basic host info + host_info = { + "os_version": platform.platform(), + "cpu_version": platform.processor(), + "python_version": sys.version, + "cm_version": cm.__version__ + } + + x = '' + if env.get('CM_HOST_OS_FLAVOR', '') != '': + x += env['CM_HOST_OS_FLAVOR'] + if env.get('CM_HOST_OS_VERSION', '') != '': + x += ' ' + env['CM_HOST_OS_VERSION'] + if x != '': + host_info['os_version_sys'] = x + + if env.get('CM_HOST_SYSTEM_NAME', '') != '': + host_info['system_name'] = env['CM_HOST_SYSTEM_NAME'] + + # Check CM automation repository + repo_name = 'mlcommons@cm4mlops' + repo_hash = '' + r = cm.access({'action': 'find', 'automation': 'repo', + 'artifact': 'mlcommons@cm4mlops,9e97bb72b0474657'}) + if r['return'] == 0 and len(r['list']) == 1: + repo_path = r['list'][0].path + if os.path.isdir(repo_path): + repo_name = os.path.basename(repo_path) + + # Check dev + # if repo_name == 'cm4mlops': repo_name = 'mlcommons@cm4mlops' + + r = cm.access({'action': 'system', + 'automation': 'utils', + 'path': repo_path, + 'cmd': 'git rev-parse HEAD'}) + if r['return'] == 0 and r['ret'] == 0: + repo_hash = r['stdout'] + + host_info['cm_repo_name'] = repo_name + host_info['cm_repo_git_hash'] = repo_hash + + with open("cm-host-info.json", "w") as fp: + fp.write(json.dumps(host_info, indent=2) + '\n') + + # Prepare README + if "cmd" in inp: + cmd = "cm run script \\\n\t" + " \\\n\t".join(inp['cmd']) + xcmd = "cm run script " + xsep + "\n\t" + \ + (" " + xsep + "\n\t").join(inp['cmd']) + else: + cmd = "" + xcmd = "" + + readme_init = "This experiment is generated using the [MLCommons Collective Mind automation framework (CM)](https://github.com/mlcommons/cm4mlops).\n\n" + + readme_init += "*Check [CM MLPerf docs](https://docs.mlcommons.org/inference) for more details.*\n\n" + + readme_body = "## Host platform\n\n* OS version: {}\n* CPU version: {}\n* Python version: {}\n* MLCommons CM version: {}\n\n".format(platform.platform(), + platform.processor(), sys.version, cm.__version__) + + x = repo_name + if repo_hash != '': + x += ' --checkout=' + 
str(repo_hash) + + readme_body += "## CM Run Command\n\nSee [CM installation guide](https://docs.mlcommons.org/inference/install/).\n\n" + \ + "```bash\npip install -U cmind\n\ncm rm cache -f\n\ncm pull repo {}\n\n{}\n```".format( + x, xcmd) + + readme_body += "\n*Note that if you want to use the [latest automation recipes](https://docs.mlcommons.org/inference) for MLPerf (CM scripts),\n" + \ + " you should simply reload {} without checkout and clean CM cache as follows:*\n\n".format(repo_name) + \ + "```bash\ncm rm repo {}\ncm pull repo {}\ncm rm cache -f\n\n```".format( + repo_name, repo_name) + + extra_readme_init = '' + extra_readme_body = '' + if env.get('CM_MLPERF_README', '') == "yes": + extra_readme_body += "\n## Dependent CM scripts\n\n" + + script_tags = inp['tags'] + script_adr = inp.get('adr', {}) + + cm_input = {'action': 'run', + 'automation': 'script', + 'tags': script_tags, + 'adr': script_adr, + 'print_deps': True, + 'env': env, + 'quiet': True, + 'silent': True, + 'fake_run': True + } + r = cm.access(cm_input) + if r['return'] > 0: + return r + + print_deps = r['new_state']['print_deps'] + count = 1 + for dep in print_deps: + extra_readme_body += "\n\n" + str(count) + ". `" + dep + "`\n" + count = count + 1 + + if state.get( + 'mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('print_deps'): + + extra_readme_body += "\n## Dependent CM scripts for the MLPerf Inference Implementation\n" + + print_deps = state['mlperf-inference-implementation']['print_deps'] + count = 1 + for dep in print_deps: + extra_readme_body += "\n\n" + \ + str(count) + ". `" + dep + "`\n" + count = count + 1 + + readme = readme_init + readme_body + extra_readme = extra_readme_init + extra_readme_body + + with open("README.md", "w") as fp: + fp.write(readme) + if extra_readme: + with open("README-extra.md", "w") as fp: + fp.write(extra_readme) + + elif mode == "compliance": + + test = env.get("CM_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01") + + RESULT_DIR = os.path.split(output_dir)[0] + COMPLIANCE_DIR = output_dir + OUTPUT_DIR = os.path.dirname(COMPLIANCE_DIR) + + SCRIPT_PATH = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "compliance", + "nvidia", + test, + "run_verification.py") + if test == "TEST06": + cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR} --scenario {scenario} --dtype int32" + else: + cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -r {RESULT_DIR} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR}" + + print(cmd) + os.system(cmd) + + if test == "TEST01": + + run_script_input = i['run_script_input'] + automation = i['automation'] + + SCRIPT_PATH = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", test, + "create_accuracy_baseline.sh") + TEST01_DIR = os.path.join(OUTPUT_DIR, "TEST01") + OUTPUT_DIR = os.path.join(OUTPUT_DIR, "TEST01", "accuracy") + if not os.path.exists(OUTPUT_DIR): + os.makedirs(OUTPUT_DIR) + + ACCURACY_DIR = os.path.join(RESULT_DIR, "accuracy") + if not os.path.exists(ACCURACY_DIR): + print("Accuracy run not yet completed") + return { + 'return': 1, 'error': 'TEST01 needs accuracy run to be completed first'} + + cmd = "cd " + TEST01_DIR + " && bash " + SCRIPT_PATH + " " + os.path.join(ACCURACY_DIR, "mlperf_log_accuracy.json") + " " + \ + os.path.join(COMPLIANCE_DIR, "mlperf_log_accuracy.json") + env['CMD'] = cmd + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'verify_accuracy'}) + if r['return'] > 0: + return r + + 
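+            # create_accuracy_baseline.sh writes mlperf_log_accuracy_baseline.json
+            # into TEST01_DIR; the fallback accuracy checks below consume it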
verify_accuracy_file = os.path.join( + TEST01_DIR, "verify_accuracy.txt") + with open(verify_accuracy_file, 'r') as file: + data = file.read().replace('\n', '\t') + + if 'TEST PASS' not in data: + print("\nDeterministic TEST01 failed... Trying with non-determinism.\n") + # #Normal test failed, trying the check with non-determinism + + CMD = "cd " + ACCURACY_DIR + " && " + env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \ + os.path.join(TEST01_DIR, "mlperf_log_accuracy_baseline.json") + dataset_args + datatype_option + " > " + \ + os.path.join(OUTPUT_DIR, "baseline_accuracy.txt") + + env['CMD'] = CMD + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'verify_accuracy'}) + if r['return'] > 0: + return r + + CMD = "cd " + ACCURACY_DIR + " && " + env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \ + os.path.join(TEST01_DIR, "mlperf_log_accuracy.json") + dataset_args + datatype_option + " > " + \ + os.path.join(OUTPUT_DIR, "compliance_accuracy.txt") + + env['CMD'] = CMD + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'verify_accuracy'}) + if r['return'] > 0: + return r + import submission_checker as checker + is_valid = checker.check_compliance_perf_dir( + COMPLIANCE_DIR) if test != "TEST06" else True + state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME'] + ][model][scenario][test] = "passed" if is_valid else "failed" + + # portion of the code where the avg utilisation and system informations are extracted + # NOTE: The section is under development and print statements are added + # for further debugging + if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on": + import pandas as pd + system_utilisation_info_dump = {} + logs_dir = output_dir + # logs_dir = env.get('CM_LOGS_DIR', env['CM_RUN_DIR']) + sys_utilisation_log = pd.read_csv( + os.path.join( + logs_dir, + 'sys_utilisation_info.txt'), + dtype={ + 'cpu_utilisation': float, + 'used_memory_gb': float}) + with open(os.path.join(logs_dir, 'mlperf_log_detail.txt'), 'r') as file: + log_txt = file.read() + # patterns for matching the power_begin and power_end in mlperf log + pattern_begin = r'\"key\"\:\s\"power_begin\"\,\s\"value\"\:\s\"(.*?)\"' + pattern_end = r'\"key\"\:\s\"power_end\"\,\s\"value\"\:\s\"(.*?)\"' + # match the patterns with the text present in the log details file + match_begin = re.findall(pattern_begin, log_txt)[0] + match_end = re.findall(pattern_end, log_txt)[0] + power_begin_time = pd.Timestamp(datetime.strptime( + match_begin, '%m-%d-%Y %H:%M:%S.%f')).replace(tzinfo=timezone.utc) + power_end_time = pd.Timestamp(datetime.strptime( + match_end, '%m-%d-%Y %H:%M:%S.%f')).replace(tzinfo=timezone.utc) + # converts timestamp key value to datetime objects + sys_utilisation_log['timestamp'] = pd.to_datetime( + sys_utilisation_log['timestamp']) + ''' + for i in range(len(sys_utilisation_log['timestamp'])): + print(f"{sys_utilisation_log['timestamp'][i]} {power_begin_time}") + print(sys_utilisation_log['timestamp'][i]>=power_begin_time) + ''' + # print(f"{sys_utilisation_log['timestamp'][0]} {power_begin_time}") + # print(sys_utilisation_log['timestamp'][0]>=power_begin_time) + filtered_log = sys_utilisation_log[(sys_utilisation_log['timestamp'] >= power_begin_time) & + (sys_utilisation_log['timestamp'] <= power_end_time)] + # print(filtered_log) + # Calculate average of cpu_utilisation and used_memory_gb + 
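+        # over the rows that fall inside the [power_begin, power_end] window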
system_utilisation_info_dump["avg_cpu_utilisation"] = filtered_log['cpu_utilisation'].mean(
+        )
+        system_utilisation_info_dump["avg_used_memory_gb"] = filtered_log['used_memory_gb'].mean(
+        )
+        print("\nSystem utilisation info for the current run:")
+        print(system_utilisation_info_dump)
+        print("\n")
+
+    if state.get(
+            'mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('version_info'):
+        env['CM_MLPERF_RUN_JSON_VERSION_INFO_FILE'] = os.path.join(
+            output_dir, "cm-version-info.json")
+        env['CM_MLPERF_RUN_DEPS_GRAPH'] = os.path.join(
+            output_dir, "cm-deps.png")
+        env['CM_MLPERF_RUN_DEPS_MERMAID'] = os.path.join(
+            output_dir, "cm-deps.mmd")
+        with open(os.path.join(output_dir, "cm-version-info.json"), "w") as f:
+            f.write(
+                json.dumps(
+                    state['mlperf-inference-implementation']['version_info'],
+                    indent=2))
+
+    if env.get('CM_DUMP_SYSTEM_INFO', True):
+        dump_script_output(
+            "detect,os",
+            env,
+            state,
+            'new_env',
+            os.path.join(
+                output_dir,
+                "os_info.json"))
+        dump_script_output(
+            "detect,cpu",
+            env,
+            state,
+            'new_env',
+            os.path.join(
+                output_dir,
+                "cpu_info.json"))
+        env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join(
+            env['CM_MLPERF_OUTPUT_DIR'], "pip_freeze.raw")
+        dump_script_output(
+            "dump,pip,freeze",
+            env,
+            state,
+            'new_state',
+            os.path.join(
+                output_dir,
+                "pip_freeze.json"))
+
+    return {'return': 0}
+
+
+def dump_script_output(script_tags, env, state, output_key, dump_file):
+
+    cm_input = {'action': 'run',
+                'automation': 'script',
+                'tags': script_tags,
+                'env': env,
+                'state': state,
+                'quiet': True,
+                'silent': True,
+                }
+    r = cm.access(cm_input)
+    if r['return'] > 0:
+        return r
+    with open(dump_file, "w") as f:
+        f.write(json.dumps(r[output_key], indent=2))
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/run.sh
new file mode 100644
index 000000000..1d0c1244c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/run.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+cmd="${CMD}"
+if [[ -n ${cmd} ]]; then
+  echo "$cmd"
+  eval "$cmd"
+  test $? -eq 0 || exit $?
+fi
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/run_config.yml b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/run_config.yml
new file mode 100644
index 000000000..03ec8b027
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/run_config.yml
@@ -0,0 +1,11 @@
+docker:
+  build: true
+  docker_os: ubuntu
+  docker_os_version: "22.04"
+
+run_with_default_inputs: true  # if false the script won't run automatic tests
+variation_combinations:
+  - _bert,_nvidia-original:
+      minimum_system_requirements:
+        ram: 4  # in GB
+        disk_space: 6  # in GB
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/verify_accuracy.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/verify_accuracy.sh
new file mode 100644
index 000000000..5a8cec92a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-inference/verify_accuracy.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+echo "Running: $CMD"
+eval "$CMD"
+test $? -eq 0 || exit $?
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/README.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/README.md
new file mode 100644
index 000000000..2de199bf8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-nvidia](https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-nvidia) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/_cm.yaml
new file mode 100644
index 000000000..a2fad3584
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/_cm.yaml
@@ -0,0 +1,156 @@
+# Identification of this CM script
+alias: app-mlperf-training-nvidia
+uid: 1e2e357618cc4674
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: "Modular MLPerf training benchmark pipeline"
+
+# User-friendly tags to find this CM script
+tags:
+- app
+- vision
+- language
+- mlcommons
+- mlperf
+- training
+- nvidia
+
+# Default environment
+default_env:
+  CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: nvidia
+
+# Map script inputs to environment variables
+input_mapping:
+  docker: CM_RUN_DOCKER_CONTAINER
+  hw_name: CM_HW_NAME
+  num_threads: CM_NUM_THREADS
+  model: CM_MLPERF_CUSTOM_MODEL_PATH
+  output_dir: OUTPUT_BASE_DIR
+  rerun: CM_RERUN
+  clean: CM_MLPERF_CLEAN_SUBMISSION_DIR
+
+
+# Env keys which are exposed to higher level scripts
+new_env_keys:
+- CM_MLPERF_*
+- CM_DATASET_*
+- CM_HW_NAME
+- CM_ML_MODEL_*
+
+new_state_keys:
+- mlperf-inference-implementation
+- CM_SUT_*
+
+# Dependencies on other CM scripts
+deps:
+
+  # Detect host OS features
+  - tags: detect,os
+
+  # Detect host CPU features
+  - tags: detect,cpu
+
+  # Install system dependencies on a given host
+  - tags: get,sys-utils-cm
+
+  # Detect/install python
+  - tags: get,python
+    names:
+    - python
+    - python3
+
+  # Get MLPerf training source
+  - tags: get,mlperf,training,src
+    names:
+    - training-src
+    - mlperf-training-src
+
+
+  - tags: get,git,repo,_repo.https://github.com/mlcommons/training_results_v2.1
+    extra_cache_tags: mlperf,training,results,v2.1
+    names:
+    - training-results
+    - mlperf-training-results
+
+  # Detect CUDA if required
+  - tags: get,cuda
+    enable_if_env:
+      CM_MLPERF_DEVICE:
+      - cuda
+
+
+  ## Torchvision (CUDA)
+  - tags: get,generic-python-lib,_torchvision_cuda
+    names:
+    - ml-engine-torchvision
+    enable_if_env:
+      CM_MLPERF_BACKEND:
+      - pytorch
+      CM_MLPERF_DEVICE:
+      - cuda
+
+  - tags: get,generic-python-lib,_mlperf_logging
+
+
+  ########################################################################
+  # Prepare Data
+
+  ## BERT
+  - tags: prepare,mlperf,training,data,bert,_nvidia
+    names:
+    - prepare-data
+    - bert-model
+    enable_if_env:
+      CM_MLPERF_MODEL:
+      - bert
+
+
+
+# Variations to customize dependencies
+variations:
+  pytorch:
+    group: framework
+    env:
+      CM_MLPERF_BACKEND: pytorch
+      CM_MLPERF_BACKEND_VERSION: <<<CM_TORCH_VERSION>>>
+
+  tf:
+    group: framework
+    env:
+      CM_MLPERF_BACKEND: tf
+      CM_MLPERF_BACKEND_VERSION: <<<CM_TENSORFLOW_VERSION>>>
+
+  tensorflow:
+    alias: tf
+
+  # Reference MLPerf models
+  bert:
+    env:
+      CM_MLPERF_MODEL: bert
+    deps:
+    - tags: get,generic-python-lib,_protobuf
+      names:
+      - protobuf
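+      # version cap below: protobuf 4.x is assumed incompatible with the
+      # TF/TFLite backends this dependency is gated on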
version_max: "3.19" + enable_if_env: + CM_MLPERF_BACKEND: + - tf + - tflite + - tags: get,generic-python-lib,_torch + names: + - ml-engine-pytorch + tpu: + group: device + env: + CM_MLPERF_DEVICE: tpu + CUDA_VISIBLE_DEVICES: '' + USE_CUDA: no + + cuda: + group: device + default: true + env: + CM_MLPERF_DEVICE: cuda + USE_CUDA: yes diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/customize.py new file mode 100644 index 000000000..1686d4f7c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/customize.py @@ -0,0 +1,82 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import json +import shutil +import subprocess + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + script_path = i['run_script_input']['path'] + + if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + return {'return': 0} + + if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + return {'return': 0} + + if env.get('CM_MLPERF_POWER', '') == "yes": + power = "yes" + else: + power = "no" + + rerun = True if env.get("CM_RERUN", "") != '' else False + + if 'CM_MLPERF_MODEL' not in env: + return { + 'return': 1, 'error': "Please select a variation specifying the model to run"} + + if 'CM_NUM_THREADS' not in env: + if 'CM_MINIMIZE_THREADS' in env: + env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // + (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) + else: + env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') + + print("Using MLCommons Training source from '" + + env['CM_MLPERF_TRAINING_SOURCE'] + "'") + + NUM_THREADS = env['CM_NUM_THREADS'] + + if "bert" in env['CM_MLPERF_MODEL']: + env['CM_RUN_DIR'] = os.path.join( + env['CM_GIT_REPO_CHECKOUT_PATH'], + "NVIDIA", + "benchmarks", + "bert", + "implementations", + "pytorch-22.09") + + if "resnet" in env['CM_MLPERF_MODEL']: + env['CM_RUN_DIR'] = os.path.join( + env['CM_GIT_REPO_CHECKOUT_PATH'], + "NVIDIA", + "benchmarks", + "resnet", + "implementations", + "mxnet-22.04") + + env['CM_RESULTS_DIR'] = os.getcwd() + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/run-bert-training.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/run-bert-training.sh new file mode 100644 index 000000000..1515404f3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/run-bert-training.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +source ./config_DGXA100_1x8x56x1.sh +results_dir=${CM_RESULTS_DIR} +cmd="CONT=mlperf-nvidia:language_model DATADIR=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/training-4320/hdf5_4320_shards_varlength DATADIR_PHASE2=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/training-4320/hdf5_4320_shards_varlength EVALDIR=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/hdf5/eval_varlength/ CHECKPOINTDIR=${results_dir} CHECKPOINTDIR_PHASE1=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/phase1 ./run_with_docker.sh" 
+echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit $? + diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/run.sh new file mode 100644 index 000000000..2f15ea73b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-nvidia/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +cmd="cd ${CM_RUN_DIR}" +echo "$cmd" +eval "$cmd" + +if [[ ${CM_MLPERF_MODEL} == "bert" ]]; then + bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-bert-training.sh + test $? -eq 0 || exit $? +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/README.md b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/README.md new file mode 100644 index 000000000..e4fece2f2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-reference](https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-reference) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/_cm.yaml new file mode 100644 index 000000000..56b4ad05d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/_cm.yaml @@ -0,0 +1,150 @@ +# Identification of this CM script +alias: app-mlperf-training-reference +uid: 0c4b11bdcf494b4f + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular MLPerf training benchmark pipeline" + +# User-friendly tags to find this CM script +tags: + - app + - vision + - language + - mlcommons + - mlperf + - training + - reference + - ref + +# Default environment +default_env: + CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: reference + CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: '' + +# Map script inputs to environment variables +input_mapping: + docker: CM_RUN_DOCKER_CONTAINER + hw_name: CM_HW_NAME + num_threads: CM_NUM_THREADS + model: CM_MLPERF_CUSTOM_MODEL_PATH + output_dir: OUTPUT_BASE_DIR + rerun: CM_RERUN + clean: CM_MLPERF_CLEAN_SUBMISSION_DIR + + +# Env keys which are exposed to higher level scripts +new_env_keys: + - CM_MLPERF_* + - CM_DATASET_* + - CM_HW_NAME + - CM_ML_MODEL_* + +new_state_keys: + - mlperf-inference-implementation + - CM_SUT_* + +# Dependencies on other CM scripts +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect/install python + - tags: get,python + names: + - python + - python3 + + # Get MLPerf training source + - tags: get,mlperf,training,src + names: + - training-src + + # Detect CUDA if required + - tags: get,cuda + enable_if_env: + CM_MLPERF_DEVICE: + - cuda + + + ## Torchvision (CUDA) + - tags: get,generic-python-lib,_torchvision_cuda + names: + - ml-engine-torchvision + 
enable_if_env: + CM_MLPERF_BACKEND: + - pytorch + CM_MLPERF_DEVICE: + - cuda + + - tags: get,generic-python-lib,_mlperf_logging + + + ######################################################################## + # Prepare Data + + ## BERT + - tags: prepare,mlperf,training,data,bert,_reference + names: + - prepare-data + - bert-model + enable_if_env: + CM_MLPERF_MODEL: + - bert + + + +# Variations to customize dependencies +variations: + pytorch: + group: framework + env: + CM_MLPERF_BACKEND: pytorch + CM_MLPERF_BACKEND_VERSION: <<>> + + tf: + group: framework + env: + CM_MLPERF_BACKEND: tf + CM_MLPERF_BACKEND_VERSION: <<>> + + tensorflow: + alias: tf + + # Reference MLPerf models + bert: + env: + CM_MLPERF_MODEL: bert + deps: + - tags: get,generic-python-lib,_protobuf + names: + - protobuf + version_max: "3.19" + enable_if_env: + CM_MLPERF_BACKEND: + - tf + - tflite + - tags: get,generic-python-lib,_torch + names: + - ml-engine-pytorch + tpu: + group: device + env: + CM_MLPERF_DEVICE: tpu + CUDA_VISIBLE_DEVICES: '' + USE_CUDA: no + + cuda: + group: device + default: true + env: + CM_MLPERF_DEVICE: cuda + USE_CUDA: yes diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/customize.py b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/customize.py new file mode 100644 index 000000000..8dfd04c4d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/customize.py @@ -0,0 +1,69 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import json +import shutil +import subprocess + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + script_path = i['run_script_input']['path'] + + if env.get('CM_MLPERF_SKIP_RUN', '') == "yes": + return {'return': 0} + + if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + return {'return': 0} + + if env.get('CM_MLPERF_POWER', '') == "yes": + power = "yes" + else: + power = "no" + + rerun = True if env.get("CM_RERUN", "") != '' else False + + if 'CM_MLPERF_MODEL' not in env: + return { + 'return': 1, 'error': "Please select a variation specifying the model to run"} + + if 'CM_NUM_THREADS' not in env: + if 'CM_MINIMIZE_THREADS' in env: + env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) // + (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1')))) + else: + env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') + + print("Using MLCommons Training source from '" + + env['CM_MLPERF_TRAINING_SOURCE'] + "'") + + NUM_THREADS = env['CM_NUM_THREADS'] + + if "bert" in env['CM_MLPERF_MODEL']: + env['CM_RUN_DIR'] = os.path.join( + env['CM_MLPERF_TRAINING_SOURCE'], + "language_model", + "tensorflow", + "bert") + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/run-bert-training.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/run-bert-training.sh new file mode 100644 index 000000000..08ed5b70a --- /dev/null +++ 
b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/run-bert-training.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +export TF_XLA_FLAGS='--tf_xla_auto_jit=2' +train_batch_size=24 +cmd="python run_pretraining.py \ + --bert_config_file=${CM_MLPERF_TRAINING_BERT_CONFIG_PATH} \ + --output_dir=/tmp/output/ \ + --input_file=${CM_MLPERF_TRAINING_BERT_TFRECORDS_PATH}/part* \ + --nodo_eval \ + --do_train \ + --eval_batch_size=8 \ + --learning_rate=0.0001 \ + --init_checkpoint=${CM_MLPERF_TRAINING_BERT_DATA_PATH}/phase1/model.ckpt-28252 \ + --iterations_per_loop=1000 \ + --max_predictions_per_seq=76 \ + --max_seq_length=512 \ + --num_train_steps=107538 \ + --num_warmup_steps=1562 \ + --optimizer=lamb \ + --save_checkpoints_steps=6250 \ + --start_warmup_step=0 \ + --num_gpus=1 \ + --train_batch_size=${train_batch_size}" +echo "${cmd}" +eval "${cmd}" +test $? -eq 0 || exit $? + diff --git a/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/run.sh new file mode 100644 index 000000000..2f15ea73b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-mlperf-training-reference/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +cmd="cd ${CM_RUN_DIR}" +echo "$cmd" +eval "$cmd" + +if [[ ${CM_MLPERF_MODEL} == "bert" ]]; then + bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-bert-training.sh + test $? -eq 0 || exit $? +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/README-extra.md new file mode 100644 index 000000000..ecab8070e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/README-extra.md @@ -0,0 +1,30 @@ +# Examples + +CM interface for https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/onnx + +```bash +cm run script "install python-venv" --name=sd-test +cm run script "get generic-python-lib _package.optimum[onnxruntime]" --adr.python.name=sd-test +cm run script "activate python-venv" --name=sd-test + +cm run script "python app stable-diffusion onnx" --adr.python.name=sd-test --text="crazy programmer" + +cm rm cache -f +cm run script "python app stable-diffusion onnx _cuda" --adr.python.name=sd-test --text="crazy programmer" + +cm docker script "python app stable-diffusion onnx" --text="crazy programmer" --output=. 
--docker_cm_repo=ctuning@mlcommons-ck --env.CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO=xyz4 + +``` + + + +# Resources + +* https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0 +* https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main +* https://huggingface.co/CompVis/stable-diffusion-v1-4/tree/main +* https://huggingface.co/runwayml/stable-diffusion-v1-5 +* https://huggingface.co/bes-dev/stable-diffusion-v1-4-onnx +* https://onnxruntime.ai/docs/tutorials/csharp/stable-diffusion-csharp.html +* https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main +* https://huggingface.co/docs/optimum/onnxruntime/usage_guides/models diff --git a/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/README.md b/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/README.md new file mode 100644 index 000000000..33366d992 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-stable-diffusion-onnx-py](https://docs.mlcommons.org/cm4mlops/scripts/Modular-AI-ML-application-pipeline/app-stable-diffusion-onnx-py) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/_cm.yaml new file mode 100644 index 000000000..306bebbb5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/_cm.yaml @@ -0,0 +1,110 @@ +alias: app-stable-diffusion-onnx-py +uid: 4d33981ac3534b3b + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Modular AI/ML application pipeline" + +tags: +- app +- modular +- stable +- diffusion +- stable-diffusion +- onnx +- python + +tags_help: "modular python app stable-diffusion onnx" + + +deps: +- tags: detect,os +- tags: get,sys-utils-cm +- names: + - python + - python3 + tags: get,python3 + +- tags: get,cuda + names: + - cuda + enable_if_env: + USE_CUDA: + - yes +- tags: get,cudnn + names: + - cudnn + enable_if_env: + USE_CUDA: + - yes + + + + + + + +- tags: get,generic-python-lib,_package.optimum[onnxruntime] + names: + - optimum + skip_if_env: + USE_CUDA: + - yes + +- tags: get,generic-python-lib,_package.optimum[onnxruntime-gpu] + names: + - optimum + enable_if_env: + USE_CUDA: + - yes + +- tags: get,generic-python-lib,_package.diffusers + names: + - diffusers + + +- tags: get,ml-model,huggingface,zoo,_model-stub.runwayml/stable-diffusion-v1-5 + revision: onnx + model_filename: model_index.json + full_subfolder: . 
+ + +variations: + cuda: + docker: + all_gpus: 'yes' + group: target + env: + USE_CUDA: yes + CM_DEVICE: cuda:0 + + cpu: + group: target + default: yes + env: + USE_CPU: yes + CM_DEVICE: cpu + +input_mapping: + text: CM_APP_STABLE_DIFFUSION_ONNX_PY_TEXT + output: CM_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT + + +input_description: + text: + desc: "Text to generate image" + output: + desc: "Output directory" + + +docker: + skip_run_cmd: 'no' + input_paths: + - output + add_quotes_to_keys: + - text + skip_input_for_fake_run: + - text + - output + - env.CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO diff --git a/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/process.py b/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/process.py new file mode 100644 index 000000000..86bbd3c3b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/process.py @@ -0,0 +1,36 @@ +# https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/onnx + +import os + +from optimum.onnxruntime import ORTStableDiffusionPipeline + +output = os.environ.get('CM_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT', '') + +f = os.path.join(output, 'output.png') + +if os.path.isfile(f): + os.remove(f) + +cm_model_path = os.environ.get('CM_ML_MODEL_PATH', '') +if cm_model_path == '': + print('Error: CM_ML_MODEL_PATH env is not defined') + exit(1) + +device = os.environ.get('CM_DEVICE', '') + +pipeline = ORTStableDiffusionPipeline.from_pretrained( + cm_model_path, local_files_only=True).to(device) + +text = os.environ.get('CM_APP_STABLE_DIFFUSION_ONNX_PY_TEXT', '') +if text == '': + text = "a photo of an astronaut riding a horse on mars" + + +print('') +print('Generating image based on "{}"'.format(text)) + +image = pipeline(text).images[0] + +image.save(f) + +print('Image saved to "{}"'.format(f)) diff --git a/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/run.bat b/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/run.bat new file mode 100644 index 000000000..fbcf3a07e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/run.bat @@ -0,0 +1,2 @@ +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\process.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/run.sh b/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/run.sh new file mode 100644 index 000000000..efffec67f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/app-stable-diffusion-onnx-py/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +${CM_PYTHON_BIN} ${CM_TMP_CURRENT_SCRIPT_PATH}/process.py +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
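The process.py above reduces to a handful of optimum.onnxruntime calls once CM has resolved the environment variables. A minimal standalone sketch of the same pipeline call, assuming a locally downloaded ONNX model directory (the path below is a hypothetical placeholder for what the CM script receives via CM_ML_MODEL_PATH):

```python
# Minimal sketch of the pipeline call wrapped by process.py above.
# "./stable-diffusion-v1-5-onnx" is a hypothetical local model directory;
# in the CM script it is supplied through the CM_ML_MODEL_PATH env variable.
from optimum.onnxruntime import ORTStableDiffusionPipeline

pipeline = ORTStableDiffusionPipeline.from_pretrained(
    "./stable-diffusion-v1-5-onnx", local_files_only=True).to("cpu")

# Same fallback prompt that process.py uses when no --text input is given
image = pipeline("a photo of an astronaut riding a horse on mars").images[0]
image.save("output.png")
```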
diff --git a/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/README.md b/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/README.md new file mode 100644 index 000000000..3b47ec95d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts//authenticate-github-cli](https://docs.mlcommons.org/cm4mlops/scripts//authenticate-github-cli) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/_cm.yaml new file mode 100644 index 000000000..605cc955f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/_cm.yaml @@ -0,0 +1,16 @@ +alias: authenticate-github-cli +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +input_mapping: + with_token: CM_GH_AUTH_TOKEN + with-token: CM_GH_AUTH_TOKEN +tags: +- auth +- authenticate +- github +- gh +- cli +uid: 7b57673ac14a4337 +deps: + - tags: get,gh,cli diff --git a/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/customize.py b/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/customize.py new file mode 100644 index 000000000..661349e6a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/customize.py @@ -0,0 +1,45 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + cmd = "gh auth login" + if env.get('CM_GH_AUTH_TOKEN', '') != '': + if os_info['platform'] == 'windows': + with open("token", "w") as f: + f.write(env['CM_GH_AUTH_TOKEN']) + cmd = f"{cmd} --with-token < token" + else: + cmd = f" echo {env['CM_GH_AUTH_TOKEN']} | {cmd} --with-token" + + env['CM_RUN_CMD'] = cmd + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/run.bat b/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/run.bat new file mode 100644 index 000000000..2366ffc07 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/run.bat @@ -0,0 +1,19 @@ +@echo off +echo Running gh auth: +REM Not printing CM_RUN_CMD as it can contain secret +REM echo %CM_RUN_CMD% +echo. 
+ +REM Check if CM_FAKE_RUN is not equal to "yes" +if not "%CM_FAKE_RUN%"=="yes" ( + + REM Execute the command stored in CM_RUN_CMD + REM %CM_RUN_CMD% + echo %CM_GH_AUTH_TOKEN% | gh auth login --with-token + + REM Check the exit code and exit with error if non-zero + if %ERRORLEVEL% neq 0 ( + exit /b 1 + ) +) + diff --git a/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/run.sh b/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/run.sh new file mode 100644 index 000000000..58c52dad6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/authenticate-github-cli/run.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + +echo "Running gh auth: " #Not printing as it can contain secret +#echo "${CM_RUN_CMD}" +echo "" + +if [[ ${CM_FAKE_RUN} != "yes" ]]; then + eval "${CM_RUN_CMD}" + test $? -eq 0 || exit 1 +fi + diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/README.md b/cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/README.md new file mode 100644 index 000000000..9233bb2e9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/benchmark-any-mlperf-inference-implementation](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/benchmark-any-mlperf-inference-implementation) for the documentation of this CM script. 
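A note on the authenticate-github-cli script above: on Linux the generated command embeds the token in a shell string (echo TOKEN | gh auth login --with-token), which is why run.sh deliberately avoids echoing CM_RUN_CMD. A sketch of an alternative that keeps the token out of the command string entirely by feeding it to gh on stdin (it assumes only that the gh CLI is installed; `gh auth login --with-token` reads the token from standard input):

```python
# Sketch: non-interactive GitHub CLI login without placing the token in a
# shell command string. gh reads the token from stdin with --with-token.
import subprocess

def gh_login(token: str) -> None:
    subprocess.run(["gh", "auth", "login", "--with-token"],
                   input=token.encode(), check=True)
```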
diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/_cm.yaml new file mode 100644 index 000000000..5f1ae4ad6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/_cm.yaml @@ -0,0 +1,318 @@ +alias: benchmark-any-mlperf-inference-implementation +uid: 8d3cd46f54464810 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: false + +category: MLPerf benchmark support + +deps: + - tags: detect,cpu + +default_env: + DIVISION: open + CATEGORY: edge + +input_mapping: + models: MODELS + backends: BACKENDS + devices: DEVICES + division: DIVISION + category: CATEGORY + power_server: POWER_SERVER + power_server_port: POWER_SERVER_PORT + extra_args: EXTRA_ARGS + +tags: + - benchmark + - run + - natively + - all + - inference + - any + - mlperf + - mlperf-implementation + - implementation + - mlperf-models + +variations: + + mil: + group: implementation + env: + IMPLEMENTATION: mil + default_env: + MODELS: resnet50,retinanet + BACKENDS: onnxruntime + DEVICES: cpu,cuda + + reference: + group: implementation + env: + IMPLEMENTATION: reference + default_env: + MODELS: resnet50,retinanet,bert-99,3d-unet-99,rnnt + CATEGORY: edge + + nvidia: + group: implementation + env: + IMPLEMENTATION: nvidia-original + default_env: + MODELS: resnet50,retinanet,bert-99,bert-99.9,3d-unet-99,rnnt,gptj-99,gptj-99.9,dlrmv2-99,dlrmv2-99.9 + BACKENDS: tensorrt + DEVICES: cuda + + qualcomm: + group: implementation + env: + IMPLEMENTATION: qualcomm + default_env: + MODELS: resnet50,retinanet,bert-99,bert-99.9 + DIVISION: closed + BACKENDS: glow + DEVICES: qaic + + intel: + group: implementation + env: + IMPLEMENTATION: intel + default_env: + MODELS: resnet50,retinanet,bert-99,3d-unet-99,rnnt + DIVISION: closed + BACKENDS: pytorch + DEVICES: cpu + + deepsparse: + group: implementation + env: + DIVISION: open + IMPLEMENTATION: deepsparse + default_env: + MODELS: bert-99 + BACKENDS: deepsparse + DEVICES: cpu + + tflite-cpp: + group: implementation + env: + IMPLEMENTATION: tflite_cpp + default_env: + MODELS: mobilenets + CATEGORY: edge + DIVISION: open + BACKENDS: tflite + DEVICES: cpu + + performance-only: + group: power + default: true + + power: + group: power + env: + POWER: yes + default_env: + POWER_SERVER: 192.168.1.166 + POWER_SERVER_PORT: 4950 + + phoenix,power: + default_env: + POWER_SERVER: 192.168.1.79 + rb6,power: + default_env: + POWER_SERVER: 192.168.1.166 + orin,power: + default_env: + POWER_SERVER: 192.168.1.166 + rpi4,power: + default_env: + POWER_SERVER: 192.168.1.166 + mini,power: + default_env: + POWER_SERVER: 192.168.1.166 + rb6: + group: sut + orin: + group: sut + rpi4: + group: sut + mini: + group: sut + phoenix: + group: sut + env: + CATEGORY: edge + DIVISION: closed + state: + resnet50: + cpu: + onnxruntime: + offline_target_qps: 250 + + phoenix,reference: + default_env: + DEVICES: cpu,cuda + + phoenix,nvidia: + default_env: + EXTRA_ARGS: " --gpu_name=rtx_4090" + state: + resnet50: + cuda: + tensorrt: + offline_target_qps: 45000 + retinanet: + cuda: + tensorrt: + offline_target_qps: 850 + bert-99: + cuda: + tensorrt: + offline_target_qps: 4000 + bert-99.9: + cuda: + tensorrt: + offline_target_qps: 2000 + 3d-unet-99.9: + cuda: + tensorrt: + offline_target_qps: 4 + rnnt: + cuda: + tensorrt: + offline_target_qps: 15000 + gptj-99: + cuda: + tensorrt: + offline_target_qps: 4.5 + + sapphire-rapids.24c,nvidia: + default_env: + 
EXTRA_ARGS: " --gpu_name=rtx_4090 --adr.mlperf-inference-implementation.tags=_num-gpus.2" + state: + resnet50: + cuda: + tensorrt: + offline_target_qps: 90000 + server_target_qps: 75000 + retinanet: + cuda: + tensorrt: + offline_target_qps: 1700 + server_target_qps: 1600 + bert-99: + cuda: + tensorrt: + offline_target_qps: 8000 + bert-99.9: + cuda: + tensorrt: + offline_target_qps: 4000 + server_target_qps: 3300 + 3d-unet-99: + cuda: + tensorrt: + offline_target_qps: 8 + singlestream_target_latency: 400 + 3d-unet-99.9: + cuda: + tensorrt: + offline_target_qps: 8 + singlestream_target_latency: 400 + rnnt: + cuda: + tensorrt: + offline_target_qps: 30000 + server_target_qps: 28200 + gptj-99: + cuda: + tensorrt: + offline_target_qps: 9 + server_target_qps: 8.2 + gptj-99.9: + cuda: + tensorrt: + offline_target_qps: 9 + server_target_qps: 8.2 + dlrm-v2-99: + cuda: + tensorrt: + offline_target_qps: 1500 + server_target_qps: 1200 + dlrm-v2-99.9: + cuda: + tensorrt: + offline_target_qps: 1500 + server_target_qps: 1200 + + orin.32g: + group: sut + env: + CATEGORY: edge + DIVISION: closed + + sapphire-rapids.24c: + group: sut + env: + CATEGORY: edge + DIVISION: closed + + macbookpro-m1: + group: sut + env: + CATEGORY: edge + DIVISION: closed + + aws-dl2q.24xlarge: + group: sut + default_env: + EXTRA_ARGS: " --adr.mlperf-inference-implementation.tags=_dl2q.24xlarge" + CATEGORY: datacenter + DIVISION: closed + default_variations: + implementation: qualcomm + + aws-dl2q.24xlarge,qualcomm: + state: + resnet50: + qaic: + glow: + offline_target_qps: 153000 + server_target_qps: 149000 + retinanet: + qaic: + glow: + offline_target_qps: 2500 + server_target_qps: 2200 + bert-99.9: + qaic: + glow: + offline_target_qps: 350 + server_target_qps: 300 + rb6: + group: sut + default_env: + CATEGORY: edge + DIVISION: closed + default_variations: + implementation: qualcomm + + rb6,qualcomm: + default_env: + EXTRA_ARGS: " --adr.mlperf-inference-implementation.tags=_rb6 --env.CM_MLPERF_SHORT_RANGING_RUN=no" + state: + resnet50: + qaic: + glow: + offline_target_qps: 6800 + retinanet: + qaic: + glow: + offline_target_qps: 125 + bert-99: + qaic: + glow: + offline_target_qps: 255 diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/customize.py b/cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/customize.py new file mode 100644 index 000000000..e502ba3bd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/customize.py @@ -0,0 +1,204 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + state = i['state'] + meta = i['meta'] + script_path = i['run_script_input']['path'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + models = env['MODELS'].split(",") + + input_backends = env.get('BACKENDS') + if input_backends: + input_backends = input_backends.split(",") + + devices = env.get('DEVICES') + if devices: + devices = devices.split(",") + + implementation = 
env['IMPLEMENTATION'] + + power = env.get('POWER', '') + + if str(power).lower() in ["yes", "true"]: + POWER_STRING = " --power=yes --adr.mlperf-power-client.power_server=" + env.get( + 'POWER_SERVER', + '192.168.0.15') + " --adr.mlperf-power-client.port=" + str( + env.get( + 'POWER_SERVER_PORT', + '4950')) + " " + else: + POWER_STRING = "" + + if not devices: + return { + 'return': 1, 'error': 'No device specified. Please set one or more (comma separated) of {cpu, qaic, cuda, rocm} for --env.DEVICES=<>'} + + cmds = [] + run_script_content = '#!/bin/bash\n\n' + run_script_content += "POWER_STRING=\"" + POWER_STRING + "\"\n" + run_script_content += "DIVISION=\"" + env['DIVISION'] + "\"\n" + run_script_content += "CATEGORY=\"" + env['CATEGORY'] + "\"\n" + run_script_content += "EXTRA_ARGS=\"" + env.get('EXTRA_ARGS', '') + "\"\n" + run_script_content += 'source ' + \ + os.path.join(script_path, "run-template.sh") + \ + "\nPOWER_STRING=\"" + POWER_STRING + "\"\n\n" + + run_file_name = 'tmp-' + implementation + '-run' + + for model in models: + env['MODEL'] = model + + if "mobilenets" in model: + cmd = 'export extra_option=""' + cmds.append(cmd) + cmd = 'export extra_tags=""' + cmds.append(cmd) + assemble_tflite_cmds(cmds) + cmd = 'export extra_option=" --adr.mlperf-inference-implementation.compressed_dataset=on"' + cmds.append(cmd) + assemble_tflite_cmds(cmds) + + if env.get('CM_HOST_CPU_ARCHITECTURE', '') == "aarch64": + extra_tags = ",_armnn,_use-neon" + cmd = f'export extra_tags="{extra_tags}"' + cmds.append(cmd) + assemble_tflite_cmds(cmds) + cmd = 'export extra_option=" --adr.mlperf-inference-implementation.compressed_dataset=on"' + cmds.append(cmd) + assemble_tflite_cmds(cmds) + + continue + + if not input_backends: + backends = None + if implementation == "reference": + if model == "resnet50": + backends = "tf,onnxruntime" + elif model == "retinanet": + backends = "onnxruntime,pytorch" + elif "bert" in model: + backends = "tf,onnxruntime,pytorch" + elif "3d-unet" in model: + backends = "tf,onnxruntime,pytorch" + elif model == "rnnt": + backends = "pytorch" + elif "gptj" in model: + backends = "pytorch" + elif "stable-diffusion-xl" in model: + backends = "pytorch" + elif "llama2-70b" in model: + backends = "pytorch" + if not backends: + return { + 'return': 1, 'error': f'No backend specified for the model: {model}.'} + backends = backends.split(",") + + else: + backends = input_backends + + for backend in backends: + + for device in devices: + add_to_run_cmd = '' + offline_target_qps = ( + ((state.get( + model, + {})).get( + device, + {})).get( + backend, + {})).get('offline_target_qps') + if offline_target_qps: + add_to_run_cmd += f" --offline_target_qps={offline_target_qps}" + server_target_qps = ( + ((state.get( + model, + {})).get( + device, + {})).get( + backend, + {})).get('server_target_qps') + if server_target_qps: + add_to_run_cmd += f" --server_target_qps={server_target_qps}" + + else: # try to do a test run with reasonable number of samples to get and record the actual system performance + if device == "cpu": + if model == "resnet50": + test_query_count = 1000 + else: + test_query_count = 100 + else: + if model == "resnet50": + test_query_count = 40000 + else: + test_query_count = 2000 + cmd = f'run_test "{model}" "{backend}" "{test_query_count}" "{implementation}" "{device}" "$find_performance_cmd"' + cmds.append(cmd) + # second argument is unused for submission_cmd + cmd = f'run_test "{model}" "{backend}" "100" "{implementation}" "{device}" "$submission_cmd" 
"{add_to_run_cmd}"' + + singlestream_target_latency = ( + ((state.get( + model, + {})).get( + device, + {})).get( + backend, + {})).get('singlestream_target_latency') + if singlestream_target_latency: + cmd += f" --singlestream_target_latency={singlestream_target_latency}" + + cmds.append(cmd) + + run_script_content += "\n\n" + "\n\n".join(cmds) + + with open(os.path.join(script_path, run_file_name + ".sh"), 'w') as f: + f.write(run_script_content) + print(run_script_content) + + run_script_input = i['run_script_input'] + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': run_file_name}) + + if r['return'] > 0: + return r + + return {'return': 0} + + +def assemble_tflite_cmds(cmds): + cmd = 'run "$tflite_accuracy_cmd"' + cmds.append(cmd) + cmd = 'run "$tflite_performance_cmd"' + cmds.append(cmd) + cmd = 'run "$tflite_readme_cmd"' + cmds.append(cmd) + return + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/run-template.sh b/cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/run-template.sh new file mode 100644 index 000000000..17c1ffa00 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-any-mlperf-inference-implementation/run-template.sh @@ -0,0 +1,90 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division=$DIVISION +model=$MODEL +device=$DEVICE +category=$CATEGORY +rerun=$RERUN + +function run_test() { + model=$1 + backend=$2 + test_query_count=$3 + implementation=$4 + device=$5 + EXTRA_RUN_ARGS=$7 + echo "model=$model, backend=$2, test_query_count=$3, implementation=$4, device=$5, EXTRA_RUN_ARGS=$7" + run "$6" +} + +#power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' +results_dir=$HOME/results_dir + +#Add your run commands here... 
+find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun ${EXTRA_ARGS}' + +find_ss_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=SingleStream --quiet --test_query_count=$test_query_count $rerun ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' + +submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' + +submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' + +readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme --scenario=$scenario \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' + +readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet \ +--skip_submission_generation=yes --execution-mode=valid ${POWER_STRING} ${EXTRA_RUN_ARGS} ${EXTRA_ARGS}' + + +tflite_accuracy_cmd='cm run script --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ +--adr.compiler.tags=gcc \ +${extra_option} \ + ${EXTRA_ARGS}' + +tflite_performance_cmd='cm run script --tags=run,mobilenet-models,_tflite,_performance-only$extra_tags \ +${POWER_STRING} \ +--adr.compiler.tags=gcc \ +${extra_option} \ + ${EXTRA_ARGS}' + +tflite_readme_cmd='cm run script --tags=run,mobilenet-models,_tflite,_populate-readme$extra_tags \ +${POWER_STRING} \ +--adr.compiler.tags=gcc \ +${extra_option} \ + ${EXTRA_ARGS}' diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
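The customize.py shown above pulls per-SUT tuning values out of a deeply nested state dictionary through chained .get() calls, so that any missing level falls through to None instead of raising a KeyError. The pattern condenses to a small helper, sketched here for illustration:

```python
# Sketch of the nested lookup used in customize.py above: state is keyed as
# state[model][device][backend][key], and any level may be absent.
def target_value(state, model, device, backend, key):
    return state.get(model, {}).get(device, {}).get(backend, {}).get(key)

# For the phoenix,nvidia variation defined earlier,
# target_value(state, "resnet50", "cuda", "tensorrt", "offline_target_qps")
# would return 45000; it returns None when no tuned value is recorded.
```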
diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/README.md b/cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/README.md new file mode 100644 index 000000000..c01559df1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/benchmark-program-mlperf](https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/benchmark-program-mlperf) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/_cm.yaml new file mode 100644 index 000000000..ed532f8bc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/_cm.yaml @@ -0,0 +1,35 @@ +alias: benchmark-program-mlperf +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Modular MLPerf inference benchmark pipeline +default_env: {} +tags: +- mlperf +- benchmark-mlperf +uid: cfff0132a8aa4018 +variations: + no-power: + default: true + group: power-mode + post_deps: + - names: + - benchmark-program + tags: benchmark-program,program + power: + env: + CM_MLPERF_POWER: 'yes' + CM_SAVE_CONSOLE_LOG: 'no' + group: power-mode + new_env_keys: + - CM_MLPERF_* + post_deps: + - enable_if_env: + CM_MLPERF_LOADGEN_MODE: + - performance + names: + - mlperf-power-client + tags: run,mlperf,power,client + prehook_deps: + - names: + - benchmark-program + tags: benchmark-program,program diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/customize.py b/cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/customize.py new file mode 100644 index 000000000..9fdc936ae --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-program-mlperf/customize.py @@ -0,0 +1,75 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + os_info = i['os_info'] + env = i['env'] + + return {'return': 0} + + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + + env['CM_MLPERF_RUN_CMD'] = env.get('CM_RUN_CMD') + + if env.get('CM_MLPERF_POWER', '') == "yes": + + if env.get('CM_MLPERF_SHORT_RANGING_RUN', '') != 'no': + # Write '0' to the count.txt file in CM_RUN_DIR + count_file = os.path.join(env.get('CM_RUN_DIR', ''), 'count.txt') + with open(count_file, 'w') as f: + f.write('0') + + if os_info['platform'] != 'windows': + # Construct the shell command with proper escaping + env['CM_MLPERF_RUN_CMD'] = r""" +CM_MLPERF_RUN_COUNT=\$(cat \${CM_RUN_DIR}/count.txt); +echo \${CM_MLPERF_RUN_COUNT}; +CM_MLPERF_RUN_COUNT=\$((CM_MLPERF_RUN_COUNT+1)); +echo \${CM_MLPERF_RUN_COUNT} > \${CM_RUN_DIR}/count.txt; + +if [ \${CM_MLPERF_RUN_COUNT} -eq 1 ]; then +export CM_MLPERF_USER_CONF="${CM_MLPERF_RANGING_USER_CONF}"; +else +export CM_MLPERF_USER_CONF="${CM_MLPERF_TESTING_USER_CONF}"; +fi +; + + """ + env.get('CM_RUN_CMD', '').strip() + else: + env['CM_MLPERF_RUN_CMD'] = r""" +:: Read the current count from the file +set /p 
CM_MLPERF_RUN_COUNT=<%CM_RUN_DIR%\count.txt +echo !CM_MLPERF_RUN_COUNT! + +:: Increment the count +set /a CM_MLPERF_RUN_COUNT=!CM_MLPERF_RUN_COUNT! + 1 +echo !CM_MLPERF_RUN_COUNT! > %CM_RUN_DIR%\count.txt + +:: Check the value and set the environment variable accordingly +if !CM_MLPERF_RUN_COUNT! EQU 1 ( + set CM_MLPERF_USER_CONF=%CM_MLPERF_RANGING_USER_CONF% +) else ( + set CM_MLPERF_USER_CONF=%CM_MLPERF_TESTING_USER_CONF% +) + """ + env.get('CM_RUN_CMD', '').strip() + else: + # Just use the existing CM_RUN_CMD if no ranging run is needed + env['CM_MLPERF_RUN_CMD'] = env.get('CM_RUN_CMD', '').strip() + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-program/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-program/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/README-extra.md new file mode 100644 index 000000000..d0cdc3143 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/README-extra.md @@ -0,0 +1,3 @@ +This is a universal script to run and profile programs. + +It is a part of our universal benchmarking and optimization roadmap: https://github.com/mlcommons/cm4mlops/issues/23 diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-program/README.md b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/README.md new file mode 100644 index 000000000..69461d856 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/benchmark-program](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/benchmark-program) for the documentation of this CM script. 
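The ranging/testing switch that benchmark-program-mlperf's customize.py injects as shell (or batch) code boils down to a counter file: customize.py seeds count.txt with 0, the first run after that reset is the power-ranging run, and every subsequent run is a testing run. The same logic, sketched in plain Python:

```python
# Sketch of the counter-file logic embedded in CM_MLPERF_RUN_CMD above.
import os

def pick_user_conf(run_dir, ranging_conf, testing_conf):
    count_file = os.path.join(run_dir, "count.txt")
    with open(count_file) as f:          # customize.py seeds this with '0'
        count = int(f.read().strip()) + 1
    with open(count_file, "w") as f:
        f.write(str(count))
    # First invocation -> ranging user.conf; later invocations -> testing one
    return ranging_conf if count == 1 else testing_conf
```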
diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-program/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/_cm.yaml new file mode 100644 index 000000000..4abb48d60 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/_cm.yaml @@ -0,0 +1,38 @@ +alias: benchmark-program +automation_alias: script +automation_uid: 5b4e0237da074764 +category: DevOps automation +default_env: + CM_ENABLE_NUMACTL: '0' + CM_ENABLE_PROFILING: '0' +deps: +- tags: detect,cpu +- enable_if_env: + CM_SET_PERFORMANCE_MODE: + - 'on' + - 'yes' + - 'True' + - true + tags: set,performance,mode,_performance +new_env_keys: +- CM_RUN_CMD +tags: +- program +- benchmark +- benchmark-program +tags_help: benchmark program +uid: 19f369ef47084895 +variations: + numactl: + default_env: + CM_ENABLE_NUMACTL: 1 + CM_NUMACTL_MEMBIND: --localalloc + numactl-interleave: + default_env: + CM_ENABLE_NUMACTL: 1 + CM_NUMACTL_MEMBIND: --interleave=all + profile: + default_env: + CM_ENABLE_PROFILING: 1 + deps: + - tags: get,profiler diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-program/customize.py b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/customize.py new file mode 100644 index 000000000..b74385895 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/customize.py @@ -0,0 +1,135 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + os_info = i['os_info'] + env = i['env'] + + q = '"' if os_info['platform'] == 'windows' else "'" + + if env.get('CM_RUN_CMD', '') == '': + if env.get('CM_BIN_NAME', '') == '': + x = 'run.exe' if os_info['platform'] == 'windows' else 'run.out' + env['CM_BIN_NAME'] = x + + if os_info['platform'] == 'windows': + env['CM_RUN_CMD'] = env.get( + 'CM_RUN_PREFIX', '') + env['CM_BIN_NAME'] + if env.get('CM_RUN_SUFFIX', '') != '': + env['CM_RUN_CMD'] += ' ' + env['CM_RUN_SUFFIX'] + + else: + if env['CM_ENABLE_NUMACTL'].lower() in ["on", "1", "true", "yes"]: + env['CM_ENABLE_NUMACTL'] = "1" + CM_RUN_PREFIX = "numactl " + env['CM_NUMACTL_MEMBIND'] + ' ' + else: + CM_RUN_PREFIX = '' + + CM_RUN_PREFIX += env.get('CM_RUN_PREFIX', '') + + env['CM_RUN_PREFIX'] = CM_RUN_PREFIX + + CM_RUN_SUFFIX = ( + env['CM_REDIRECT_OUT'] + + ' ') if 'CM_REDIRECT_OUT' in env else '' + CM_RUN_SUFFIX += (env['CM_REDIRECT_ERR'] + + ' ') if 'CM_REDIRECT_ERR' in env else '' + + env['CM_RUN_SUFFIX'] = env['CM_RUN_SUFFIX'] + \ + CM_RUN_SUFFIX if 'CM_RUN_SUFFIX' in env else CM_RUN_SUFFIX + + if env.get('CM_RUN_DIR', '') == '': + env['CM_RUN_DIR'] = os.getcwd() + + env['CM_RUN_CMD'] = CM_RUN_PREFIX + ' ' + os.path.join( + env['CM_RUN_DIR'], env['CM_BIN_NAME']) + ' ' + env['CM_RUN_SUFFIX'] + + x = env.get('CM_RUN_PREFIX0', '') + if x != '': + env['CM_RUN_CMD'] = x + ' ' + env.get('CM_RUN_CMD', '') + + if os_info['platform'] != 'windows' and str( + env.get('CM_SAVE_CONSOLE_LOG', True)).lower() not in ["no", "false", "0"]: + logs_dir = env.get('CM_LOGS_DIR', env['CM_RUN_DIR']) + env['CM_RUN_CMD'] += r" 2>&1 | tee " + q + os.path.join( + logs_dir, "console.out") + q + r"; echo \${PIPESTATUS[0]} > exitstatus" + + # additional arguments 
and tags for measuring system information (only if + # 'CM_PROFILE_NVIDIA_POWER' is 'on') + if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on": + env['CM_SYS_UTILISATION_SCRIPT_TAGS'] = '' + # this section is for selecting the variation + if env.get('CM_MLPERF_DEVICE', '') == "gpu": + env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ',_cuda' + elif env.get('CM_MLPERF_DEVICE', '') == "cpu": + env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ',_cpu' + # this section is for supplying the input arguments/tags + env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ' --log_dir=\'' + \ + logs_dir + '\'' # specify the logs directory + # specifying the interval in which the system information should be + # measured + if env.get('CM_SYSTEM_INFO_MEASUREMENT_INTERVAL', '') != '': + env['CM_SYS_UTILISATION_SCRIPT_TAGS'] += ' --interval=\"' + \ + env['CM_SYSTEM_INFO_MEASUREMENT_INTERVAL'] + '\"' + + # generate the pre run cmd - recording runtime system info + pre_run_cmd = "" + + if env.get('CM_PRE_RUN_CMD_EXTERNAL', '') != '': + pre_run_cmd += env['CM_PRE_RUN_CMD_EXTERNAL'] + + if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on": + if pre_run_cmd != '': + pre_run_cmd += ' && ' + + # running the script as a background process + pre_run_cmd = pre_run_cmd + 'cm run script --tags=runtime,system,utilisation' + \ + env['CM_SYS_UTILISATION_SCRIPT_TAGS'] + ' --quiet & ' + # obtain the pid of the background process + pre_run_cmd += r" cmd_pid=\$! && echo CMD_PID=\$cmd_pid" + print( + f"Pre run command for recording the runtime system information: {pre_run_cmd}") + + env['CM_PRE_RUN_CMD'] = pre_run_cmd + + # generate the post run cmd - for killing the process that records runtime + # system info + post_run_cmd = "" + if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on": + post_run_cmd += r"echo killing process \$cmd_pid && kill -TERM \${cmd_pid}" + print( + f"Post run command for killing the process that measures the runtime system information: {post_run_cmd}") + + env['CM_POST_RUN_CMD'] = post_run_cmd + + # Print info + print('***************************************************************************') + print('CM script::benchmark-program/run.sh') + print('') + print('Run Directory: {}'.format(env.get('CM_RUN_DIR', ''))) + + print('') + print('CMD: {}'.format(env.get('CM_RUN_CMD', ''))) + + print('') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-program/run-ubuntu.sh b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/run-ubuntu.sh new file mode 100644 index 000000000..1f19ed80b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/run-ubuntu.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} + +cd ${CM_TMP_CURRENT_SCRIPT_PATH} +if [ ${CM_ENABLE_NUMACTL} == "1" ]; then + sudo apt-get install numactl +fi + +bash ./run.sh diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-program/run.bat b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/run.bat new file mode 100644 index 000000000..d15449355 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/run.bat @@ -0,0 +1,39 @@ +@echo off + +if "%CM_RUN_DIR%" == "" ( + echo CM_RUN_DIR is not set + exit 1 +) + +cd %CM_RUN_DIR% + +if "%CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM%" == "True" ( + echo ***************************************************** + echo You are now in Debug shell with pre-set CM env and can run the following command line manually: + + echo.
+ if not "%CM_RUN_CMD0%" == "" ( + echo %CM_RUN_CMD0% + ) else ( + echo %CM_RUN_CMD% + ) + + echo. + echo Type exit to return to CM script. + echo. + + cmd + + exit 0 +) + +rem Check CM_RUN_CMD0 +if not "%CM_RUN_CMD0%" == "" ( + echo. + %CM_RUN_CMD0% +) else ( + echo. + %CM_RUN_CMD% +) + +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/benchmark-program/run.sh b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/run.sh new file mode 100644 index 000000000..6eb39d333 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/benchmark-program/run.sh @@ -0,0 +1,102 @@ +#!/bin/bash + +# function to safely exit the background process +safe_exit() { + if [[ "${CM_POST_RUN_CMD}" != "" ]]; then + eval ${CM_POST_RUN_CMD} + if [ $? -eq 0 ]; then + exit 0 + else + exit $? + fi + fi +} + +# trap signals to redirect the execution flow to safe_exit +trap safe_exit SIGINT SIGTERM + +if [[ ${CM_MLPERF_POWER} == "yes" && ${CM_MLPERF_LOADGEN_MODE} == "performance" ]]; then + exit 0 +fi + +# Run +if [ -z ${CM_RUN_DIR} ]; then + echo "CM_RUN_DIR is not set" + exit 1 +fi + +cd ${CM_RUN_DIR} + +if [[ "${CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM}" == "True" ]]; then + echo "*****************************************************" + echo "You are now in Debug shell with pre-set CM env and can run the following command line manually:" + + echo "" + if [[ "${CM_RUN_CMD0}" != "" ]]; then + echo "${CM_RUN_CMD0}" + else + echo "${CM_RUN_CMD}" + fi + + echo "" + echo "Type exit to return to CM script." + echo "" +# echo "You can also run . ./debug-script-benchmark-program.sh to reproduce and customize run." +# echo "" +# +# cp -f tmp-run.sh debug-script-benchmark-program.sh +# +# sed -e 's/CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM="True"/CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM="False"/g' -i debug-script-benchmark-program.sh + + bash + + # do not re-run command below to pick up manual run! + exit 0 +fi + +echo $CM_PRE_RUN_CMD +eval ${CM_PRE_RUN_CMD} + +# Function to run command and check exit status +run_command() { + local cmd="$1" + + if [[ -n "$cmd" ]]; then + echo "$cmd" + eval "$cmd" + exitstatus=$? + + # If 'exitstatus' file exists, overwrite the exit status with its content + if [[ -e exitstatus ]]; then + exitstatus=$(cat exitstatus) + fi + + # If exitstatus is non-zero, exit with that status + if [[ $exitstatus -ne 0 ]]; then + exit $exitstatus + fi + fi +} + +# Run CM_RUN_CMD0 if it exists, otherwise run CM_RUN_CMD +if [[ -n "$CM_RUN_CMD0" ]]; then + run_command "$CM_RUN_CMD0" +fi + +run_command "$CM_RUN_CMD" + + +# Run post-run command if it exists +if [[ -n "$CM_POST_RUN_CMD" ]]; then + eval "$CM_POST_RUN_CMD" + post_exitstatus=$? + # Exit if post-run command fails + if [[ $post_exitstatus -ne 0 ]]; then + exit $post_exitstatus + fi +fi + +# Final check for exitstatus and exit with the appropriate code +if [[ $exitstatus -ne 0 ]]; then + exit $exitstatus +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/README-extra.md new file mode 100644 index 000000000..79b2c1b09 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/README-extra.md @@ -0,0 +1,16 @@ +# Build CM Docker Image +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) builds a Docker image for using CM. + +## How to use +```bash +cm run script --tags=build,docker,image --dockerfile=[DOCKERFILEPATH] --gh_token=[GITHUB_AUTH_TOKEN] --image_repo=[IMAGE_REPO] --image_name=[IMAGE_NAME] --image_tag=[IMAGE_TAG] --cache=[yes,no] +``` +where +* `[DOCKERFILEPATH]` is the path to the dockerfile. If not given, the [dockerfile build script](../build-dockerfile) will be called. +* `[GITHUB_AUTH_TOKEN]` is passed as a build argument to docker build. +* `[IMAGE_REPO]`: Repository name for the docker image. Default is `local`. +* `[IMAGE_NAME]`: Name of the docker image. Default is `cm`. +* `[IMAGE_TAG]`: Tag for the docker image. Default is `latest`. +* `--cache`: If set to `no`, turns off docker build caching. Caching is on by default. +* `[--docker_os, --docker_os_version, --cm_repo and --script_tags]` are additional options which are passed to the [dockerfile build script](../build-dockerfile) if needed. + diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/README.md b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/README.md new file mode 100644 index 000000000..98705ac21 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Docker-automation/build-docker-image](https://docs.mlcommons.org/cm4mlops/scripts/Docker-automation/build-docker-image) for the documentation of this CM script.
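Besides the command line documented above, the same script can be driven programmatically through the cmind API, in the same way other scripts in this patch call cm.access. A hedged sketch (the image_* values are illustrative placeholders, and the input keys are assumed to follow the script's input_mapping shown in the _cm.yaml below):

```python
# Sketch: invoking build-docker-image via the cmind API instead of the CLI.
import cmind

r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'build,docker,image',
                  'image_repo': 'local',     # placeholder values
                  'image_name': 'cm',
                  'image_tag': 'latest',
                  'cache': 'no',             # turn off docker build caching
                  'quiet': True})
if r['return'] > 0:
    raise SystemExit(r.get('error', 'build-docker-image failed'))
```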
diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/_cm.yaml new file mode 100644 index 000000000..a9dc8cb67 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/_cm.yaml @@ -0,0 +1,45 @@ +alias: build-docker-image +uid: 2c3c4ba2413442e7 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: false + +category: Docker automation + +tags: +- build +- docker +- image +- docker-image +- dockerimage + +default_env: + CM_DOCKER_IMAGE_REPO: local + CM_DOCKER_IMAGE_TAG: latest + +input_mapping: + cache: CM_DOCKER_CACHE + cm_repo: CM_MLOPS_REPO + docker_os: CM_DOCKER_OS + docker_os_version: CM_DOCKER_OS_VERSION + dockerfile: CM_DOCKERFILE_WITH_PATH + gh_token: CM_GH_TOKEN + image_name: CM_DOCKER_IMAGE_NAME + image_repo: CM_DOCKER_IMAGE_REPO + image_tag: CM_DOCKER_IMAGE_TAG + post_run_cmds: CM_DOCKER_POST_RUN_COMMANDS + pre_run_cmds: CM_DOCKER_PRE_RUN_COMMANDS + real_run: CM_REAL_RUN + script_tags: CM_DOCKER_RUN_SCRIPT_TAGS + push_image: CM_DOCKER_PUSH_IMAGE + +new_env_keys: +- CM_DOCKER_* + +prehook_deps: +- enable_if_env: + CM_BUILD_DOCKERFILE: + - 'yes' + tags: build,dockerfile diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/customize.py b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/customize.py new file mode 100644 index 000000000..4746e98c3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/customize.py @@ -0,0 +1,147 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +from os.path import exists + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + dockerfile_path = env.get('CM_DOCKERFILE_WITH_PATH', '') + if dockerfile_path != '' and os.path.exists(dockerfile_path): + build_dockerfile = False + env['CM_BUILD_DOCKERFILE'] = "no" + os.chdir(os.path.dirname(dockerfile_path)) + else: + build_dockerfile = True + env['CM_BUILD_DOCKERFILE'] = "yes" + env['CM_DOCKERFILE_BUILD_FROM_IMAGE_SCRIPT'] = "yes" + + CM_DOCKER_BUILD_ARGS = env.get('+ CM_DOCKER_BUILD_ARGS', []) + + if env.get('CM_GH_TOKEN', '') != '': + CM_DOCKER_BUILD_ARGS.append("CM_GH_TOKEN=" + env['CM_GH_TOKEN']) + + if CM_DOCKER_BUILD_ARGS: + build_args = "--build-arg " + \ + " --build-arg ".join(CM_DOCKER_BUILD_ARGS) + else: + build_args = "" + + env['CM_DOCKER_BUILD_ARGS'] = build_args + +# if 'CM_DOCKERFILE_WITH_PATH' not in env or not exists(env['CM_DOCKERFILE_WITH_PATH']): +# env['CM_BUILD_DOCKERFILE'] = "yes" +# else: +# env['CM_BUILD_DOCKERFILE'] = "no" +# + if env.get("CM_DOCKER_IMAGE_REPO", "") == '': + env['CM_DOCKER_IMAGE_REPO'] = "local" + + docker_image_name = env.get('CM_DOCKER_IMAGE_NAME', '') + if docker_image_name == '': + docker_image_name = "cm-script-" + \ + env.get('CM_DOCKER_RUN_SCRIPT_TAGS', '').replace( + ',', '-').replace('_', '-') + env['CM_DOCKER_IMAGE_NAME'] = docker_image_name + + if env.get("CM_DOCKER_IMAGE_TAG", "") == '': + env['CM_DOCKER_IMAGE_TAG'] = "latest" + + if str(env.get("CM_DOCKER_CACHE", "yes")).lower() in ["no", "false", "0"]: + env["CM_DOCKER_CACHE_ARG"] = " --no-cache" + + CMD = '' + + 
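# The lines below assemble the final `docker build` command: they resolve the
+    # fully qualified image name, write a .dockerignore, save helper
+    # <dockerfile>.build.sh / .build.bat scripts next to the Dockerfile, and
+    # export the command via env['CM_DOCKER_BUILD_CMD'].
+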
+    image_name = get_image_name(env)
+
+    if build_dockerfile:
+        dockerfile_path = r"\${CM_DOCKERFILE_WITH_PATH}"
+
+    # Write .dockerignore
+    with open('.dockerignore', 'w') as f:
+        f.write('.git\n')
+
+    # Prepare CMD to build image
+    XCMD = [
+        'docker build ' + env.get('CM_DOCKER_CACHE_ARG', ''),
+        ' ' + build_args,
+        ' -f "' + dockerfile_path + '"',
+        ' -t "' + image_name + '"',
+        ' .'
+    ]
+
+    with open(dockerfile_path + '.build.sh', 'w') as f:
+        f.write(' \\\n'.join(XCMD) + '\n')
+
+    with open(dockerfile_path + '.build.bat', 'w') as f:
+        f.write(' ^\n'.join(XCMD) + '\n')
+
+    CMD = ''.join(XCMD)
+
+    print('================================================')
+    print('CM generated the following Docker build command:')
+    print('')
+    print(CMD)
+
+    print('')
+
+    env['CM_DOCKER_BUILD_CMD'] = CMD
+
+    return {'return': 0}
+
+
+def get_image_name(env):
+
+    image_name = env.get('CM_DOCKER_IMAGE_REPO', '') + '/' + \
+        env.get('CM_DOCKER_IMAGE_NAME', '') + ':' + \
+        env.get('CM_DOCKER_IMAGE_TAG', '')
+
+    return image_name
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    # Check if we need to push the docker image to Docker Hub
+    if env.get('CM_DOCKER_PUSH_IMAGE', '') in ['True', True, 'yes']:
+        image_name = get_image_name(env)
+
+        # Prepare CMD to push image
+        PCMD = 'docker image push ' + image_name
+
+        dockerfile_path = env.get('CM_DOCKERFILE_WITH_PATH', '')
+        if dockerfile_path != '' and os.path.isfile(dockerfile_path):
+            with open(dockerfile_path + '.push.sh', 'w') as f:
+                f.write(PCMD + '\n')
+
+            with open(dockerfile_path + '.push.bat', 'w') as f:
+                f.write(PCMD + '\n')
+
+        print('================================================')
+        print('CM generated the following Docker push command:')
+        print('')
+        print(PCMD)
+
+        print('')
+
+        r = os.system(PCMD)
+        print('')
+
+        if r > 0:
+            return {'return': 1, 'error': 'pushing to Docker Hub failed'}
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/0-common.bat b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/0-common.bat
new file mode 100644
index 000000000..721cc1b5d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/0-common.bat
@@ -0,0 +1,21 @@
+set DOCKER_IMAGE_REPO=cknowledge
+
+set DOCKER_OS=ubuntu
+
+rem set DOCKER_OS_VER=22.04
+set DOCKER_OS_VER=23.04
+set DOCKER_PIP_EXTRA_FLAGS=--break-system-packages
+
+rem set DOCKER_IMAGE_NAME=cm-base
+set DOCKER_IMAGE_NAME=cm-script-app-image-classification-onnx-py
+set DOCKER_IMAGE_POST_FILE=%CD%\extra-cmd.cm-script-app-image-classification-onnx-py
+
+rem set DOCKER_IMAGE_TAG=%DOCKER_OS%-%DOCKER_OS_VER%-20230804
+
+set DOCKER_IMAGE_TAG=%DOCKER_OS%-%DOCKER_OS_VER%-latest
+set DOCKERFILE_EXT=%DOCKER_IMAGE_NAME%-%DOCKER_IMAGE_TAG%
+
+set DOCKER_PACKAGE_MANAGER_UPDATE_CMD="apt-get update -y && apt-get upgrade -y"
+
+set DOCKER_CM_MLOPS_REPO="ctuning@mlcommons-ck"
+rem set DOCKER_CM_MLOPS_REPO="mlcommons@ck"
diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/0-generate.bat b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/0-generate.bat
new file mode 100644
index 000000000..443d029ae
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/0-generate.bat
@@ -0,0 +1,9 @@
+call 0-common.bat
+
+cmr "build dockerfile" --file_path=%CD%\Dockerfile.%DOCKERFILE_EXT% ^
+    --docker_os=%DOCKER_OS% ^
+    --docker_os_version=%DOCKER_OS_VER% ^
+    --package_manager_update_cmd=%DOCKER_PACKAGE_MANAGER_UPDATE_CMD% ^
+    --pip_extra_flags=%DOCKER_PIP_EXTRA_FLAGS% ^
+
--post_file=%DOCKER_IMAGE_POST_FILE% ^ + --cm_repo=%DOCKER_CM_MLOPS_REPO% diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/1-build.bat b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/1-build.bat new file mode 100644 index 000000000..2356eb032 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/1-build.bat @@ -0,0 +1,8 @@ +call 0-common.bat + +cmr "build docker image" --dockerfile=%CD%\Dockerfile.%DOCKERFILE_EXT% ^ + --docker_os=%DOCKER_OS% ^ + --docker_os_version=%DOCKER_OS_VER% ^ + --image_repo=%DOCKER_IMAGE_REPO% ^ + --image_name=%DOCKER_IMAGE_NAME% ^ + --image_tag=%DOCKER_IMAGE_TAG% diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command1.bat b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command1.bat new file mode 100644 index 000000000..eeeadd311 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command1.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +docker run -it %DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG% "cmr 'detect os' -j" diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command2.bat b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command2.bat new file mode 100644 index 000000000..ac1c8a3a6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command2.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_name=%DOCKER_IMAGE_NAME% --image_tag=%DOCKER_IMAGE_TAG% --run_cmd="cmr 'detect os' -j" diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command3.bat b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command3.bat new file mode 100644 index 000000000..e690f093c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command3.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_tag=%DOCKER_IMAGE_TAG% --script_tags=detect,os diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command4.bat b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command4.bat new file mode 100644 index 000000000..c2e6f801c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command4.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_tag=%DOCKER_IMAGE_TAG% --script_tags=detect,os --it diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command5.bat b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command5.bat new file mode 100644 index 000000000..d153437f1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-cm-command5.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +cm docker script --tags=detect,os -j diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-interactive1.bat b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-interactive1.bat new file mode 100644 index 000000000..917dda930 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-interactive1.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +docker run -it %DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG% -c bash diff --git 
a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-interactive2.bat b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-interactive2.bat new file mode 100644 index 000000000..67dd22650 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/2-run-interactive2.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +cmr "run docker container" --image_repo=%DOCKER_IMAGE_REPO% --image_name=%DOCKER_IMAGE_NAME% --image_tag=%DOCKER_IMAGE_TAG% --it diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/3-push-to-docker-hub.bat b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/3-push-to-docker-hub.bat new file mode 100644 index 000000000..2c9eb634d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/3-push-to-docker-hub.bat @@ -0,0 +1,3 @@ +call 0-common.bat + +docker push %DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG% diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-22.04-20230804 b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-22.04-20230804 new file mode 100644 index 000000000..418e73363 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-22.04-20230804 @@ -0,0 +1,38 @@ +FROM ubuntu:22.04 + +# Maintained by the MLCommons taskforce on automation and reproducibility +LABEL github="https://github.com/mlcommons/ck" +LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" + +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y && apt-get upgrade -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ="US/Pacific" +ENV PATH="${PATH}:/home/cmuser/.local/bin" +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck --dummy + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +RUN cm version diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-20230804 b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-20230804 new file mode 100644 index 000000000..478e155f6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-20230804 @@ -0,0 +1,38 @@ +FROM ubuntu:23.04 + +# Maintained by the MLCommons taskforce on automation and reproducibility +LABEL github="https://github.com/mlcommons/ck" +LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" + +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y && apt-get upgrade -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests --break-system-packages + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ="US/Pacific" +ENV PATH="${PATH}:/home/cmuser/.local/bin" +RUN ln -snf 
/usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo mlcommons@ck --dummy + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +RUN cm version diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-latest b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-latest new file mode 100644 index 000000000..832a37669 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-base-ubuntu-23.04-latest @@ -0,0 +1,38 @@ +FROM ubuntu:23.04 + +# Maintained by the MLCommons taskforce on automation and reproducibility +LABEL github="https://github.com/mlcommons/ck" +LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" + +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y && apt-get upgrade -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests --break-system-packages + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ="US/Pacific" +ENV PATH="${PATH}:/home/cmuser/.local/bin" +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo ctuning@mlcommons-ck + +# Install all system dependencies +RUN cm run script --quiet --tags=get,sys-utils-cm + +# Run commands +RUN cm version diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-script-app-image-classification-onnx-py-ubuntu-23.04-latest b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-script-app-image-classification-onnx-py-ubuntu-23.04-latest new file mode 100644 index 000000000..7ce0af2fb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/Dockerfile.cm-script-app-image-classification-onnx-py-ubuntu-23.04-latest @@ -0,0 +1,45 @@ +FROM ubuntu:23.04 + +# Maintained by the MLCommons taskforce on automation and reproducibility +LABEL github="https://github.com/mlcommons/ck" +LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" + +SHELL ["/bin/bash", "-c"] +ARG CM_GH_TOKEN + +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +# Install system dependencies +RUN apt-get update -y && apt-get upgrade -y +RUN apt-get install -y python3 python3-pip git sudo wget + +# Install python packages +RUN python3 -m pip install cmind requests --break-system-packages + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ="US/Pacific" +ENV PATH="${PATH}:/home/cmuser/.local/bin" +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +RUN groupadd cm +RUN useradd -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +USER cmuser:cm +WORKDIR /home/cmuser + +# Download CM repo for scripts +RUN cm pull repo ctuning@mlcommons-ck + +# Install all system 
dependencies
+RUN cm run script --quiet --tags=get,sys-utils-cm
+
+# Run commands
+RUN cm version
+
+# Create virtual python environment
+RUN cmr "install python-venv" --name=cm --quiet
+
+# Run image classification and install all related CM components automatically
+RUN cmr "python app image-classification onnx" --adr.python.name=cm --quiet
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/README.md b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/README.md
new file mode 100644
index 000000000..8035bc429
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/README.md
@@ -0,0 +1 @@
+https://hub.docker.com/r/cknowledge/cm-base/tags
diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/computer_mouse.jpg b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/computer_mouse.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e7f8abb6fe93d18af393ea036b24b907cc48e786
GIT binary patch
literal 41154
[41154 bytes of base85-encoded binary JPEG data omitted]
literal 0
HcmV?d00001
diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/extra-cmd.cm-script-app-image-classification-onnx-py b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/extra-cmd.cm-script-app-image-classification-onnx-py
new file mode 100644
index 000000000..981f9b94d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/extra-cmd.cm-script-app-image-classification-onnx-py
@@ -0,0 +1,6 @@
+
+# Create virtual python environment
+RUN cmr "install python-venv" --name=cm --quiet
+
+# Run image classification and install all related CM components automatically
+RUN cmr "python app image-classification onnx" --adr.python.name=cm --quiet
diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.bat
b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.bat new file mode 100644 index 000000000..c4f8e2204 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.bat @@ -0,0 +1,6 @@ +rem call this script with computer_mouse.jpg as input + +call 0-common.bat + +rem docker run -v %CD%:/tmp/host -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm --input=/tmp/host/%1" +docker run -v %CD%:/tmp/host -it --rm %DOCKER_IMAGE_REPO%/%DOCKER_IMAGE_NAME%:%DOCKER_IMAGE_TAG% -c "time cmr 'python app image-classification onnx' --adr.python.name=cm --input=/tmp/host/%1" \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.sh b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.sh new file mode 100644 index 000000000..55314e9e4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx-with-file.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +# call this script with computer_mouse.jpg as input + +docker run -v $PWD:/tmp/host -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm --input=/tmp/host/$1" diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx.bat b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx.bat new file mode 100644 index 000000000..762ed99fd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx.bat @@ -0,0 +1 @@ +docker run -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm" diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx.sh b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx.sh new file mode 100644 index 000000000..a24a06ed9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/examples/run-cm-image-classification-python-onnx.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +docker run -it --rm cknowledge/cm-script-app-image-classification-onnx-py:ubuntu-23.04-latest -c "time cmr 'python app image-classification onnx' --adr.python.name=cm" diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/run.bat b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/run.bat new file mode 100644 index 000000000..d3a1b061d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/run.bat @@ -0,0 +1,14 @@ +if exist %CM_DOCKERFILE_WITH_PATH% ( +rem echo .git > .dockerignore + +rem echo. +rem echo docker build %CM_DOCKER_CACHE_ARG% %CM_DOCKER_BUILD_ARGS% -f %CM_DOCKERFILE_WITH_PATH% -t %CM_DOCKER_IMAGE_REPO%/%CM_DOCKER_IMAGE_NAME%:%CM_DOCKER_IMAGE_TAG% . + +rem echo. +rem docker build %CM_DOCKER_CACHE_ARG% %CM_DOCKER_BUILD_ARGS% -f "%CM_DOCKERFILE_WITH_PATH%" -t "%CM_DOCKER_IMAGE_REPO%/%CM_DOCKER_IMAGE_NAME%:%CM_DOCKER_IMAGE_TAG%" . + + %CM_DOCKER_BUILD_CMD% + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + + echo. 
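+  rem Note: CM_DOCKER_BUILD_CMD (executed above) is assembled by customize.py
+  rem of this script; see the build-docker-image customize.py diff earlier in this patch.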
+)
diff --git a/cmx4mlops/cmx4mlops/repo/script/build-docker-image/run.sh b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/run.sh
new file mode 100644
index 000000000..6aa2390c0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/build-docker-image/run.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+if [ -f "${CM_DOCKERFILE_WITH_PATH}" ]; then
+#  echo ".git" > .dockerignore
+
+#  echo ""
+#  echo "docker build ${CM_DOCKER_CACHE_ARG} ${CM_DOCKER_BUILD_ARGS} -f ${CM_DOCKERFILE_WITH_PATH} -t ${CM_DOCKER_IMAGE_REPO}/${CM_DOCKER_IMAGE_NAME}:${CM_DOCKER_IMAGE_TAG} ."
+
+#  docker build ${CM_DOCKER_CACHE_ARG} ${CM_DOCKER_BUILD_ARGS} -f "${CM_DOCKERFILE_WITH_PATH}" -t "${CM_DOCKER_IMAGE_REPO}/${CM_DOCKER_IMAGE_NAME}:${CM_DOCKER_IMAGE_TAG}" .
+
+  eval "${CM_DOCKER_BUILD_CMD}"
+  test $? -eq 0 || exit 1
+
+  echo ""
+fi
diff --git a/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/README-extra.md
new file mode 100644
index 000000000..992fee4b3
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/README-extra.md
@@ -0,0 +1,27 @@
+# Build CM Dockerfile
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) builds a Dockerfile for using CM.
+
+## How to use
+```bash
+cm run script --tags=build,dockerfile --docker_os=[DOCKER_OS] --docker_os_version=[DOCKER_OS_VERSION] --build --image_repo=[IMAGE_REPO] --image_tag=[IMAGE_TAG] --gh_token=[GITHUB_AUTH_TOKEN] --script_tags=[CM_SCRIPT_TAGS]
+```
+where
+* `[DOCKER_OS]` is one of `ubuntu` or `rhel`. Default is `ubuntu`.
+* `[DOCKER_OS_VERSION]` is one of `18.04`, `20.04`, `22.04` for `ubuntu` and `9` for `rhel`. Default is `20.04`.
+* `--build` calls the [CM docker image build script](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/build-docker-image) to build a docker image from the generated Dockerfile. Default is off.
+* `[GITHUB_AUTH_TOKEN]`: GitHub auth token passed to `docker build` as a build argument. Optional.
+* `[CM_SCRIPT_TAGS]`: tags of the CM script to run as the last command inside the Dockerfile. This script will do a fake run and set up all its dependencies in the docker image once built.
+* `[IMAGE_REPO]`: repo name for the docker image. Default is `local`.
+* `[IMAGE_TAG]`: tag for the docker image. Default is `latest`.
+
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. RHEL 9
+
+## Sample dockerfiles
+1. [Ubuntu 18.04](dockerfiles/ubuntu_18.04.Dockerfile)
+2. [Ubuntu 20.04](dockerfiles/ubuntu_20.04.Dockerfile)
+3. [Ubuntu 22.04](dockerfiles/ubuntu_22.04.Dockerfile)
+4.
[rhel9](dockerfiles/rhel_9.Dockerfile) + diff --git a/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/README.md b/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/README.md new file mode 100644 index 000000000..c92f4ef69 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Docker-automation/build-dockerfile](https://docs.mlcommons.org/cm4mlops/scripts/Docker-automation/build-dockerfile) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/_cm.yaml new file mode 100644 index 000000000..7535311ea --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/_cm.yaml @@ -0,0 +1,71 @@ +alias: build-dockerfile +uid: e66a7483230d4641 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +tags: +- build +- dockerfile + +cache: false + +category: Docker automation + +default_env: + CM_DOCKER_BUILD_SLIM: 'no' + CM_DOCKER_IMAGE_EOL: ' + + ' + CM_DOCKER_OS: ubuntu + CM_DOCKER_NOT_PULL_UPDATE: False + CM_MLOPS_REPO_BRANCH: mlperf-inference + +input_mapping: + build: CM_BUILD_DOCKER_IMAGE + cache: CM_DOCKER_CACHE + cm_repo: CM_MLOPS_REPO + cm_repo_flags: CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO + cm_repos: CM_DOCKER_EXTRA_CM_REPOS + cm_repo_branch: CM_MLOPS_REPO_BRANCH + comments: CM_DOCKER_RUN_COMMENTS + copy_files: CM_DOCKER_COPY_FILES + docker_base_image: CM_DOCKER_IMAGE_BASE + docker_os: CM_DOCKER_OS + docker_os_version: CM_DOCKER_OS_VERSION + dockerfile_env: CM_DOCKERFILE_ENV + extra_sys_deps: CM_DOCKER_EXTRA_SYS_DEPS + fake_docker_deps: CM_DOCKER_FAKE_DEPS + fake_run_option: CM_DOCKER_FAKE_RUN_OPTION + file_path: CM_DOCKERFILE_WITH_PATH + gh_token: CM_GH_TOKEN + image_repo: CM_DOCKER_IMAGE_REPO + image_tag: CM_DOCKER_IMAGE_TAG + package_manager_update_cmd: CM_PACKAGE_MANAGER_UPDATE_CMD + pip_extra_flags: CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS + post_file: DOCKER_IMAGE_POST_FILE + post_run_cmds: CM_DOCKER_POST_RUN_COMMANDS + pre_run_cmds: CM_DOCKER_PRE_RUN_COMMANDS + real_run: CM_REAL_RUN + run_cmd: CM_DOCKER_RUN_CMD + run_cmd_extra: CM_DOCKER_RUN_CMD_EXTRA + script_tags: CM_DOCKER_RUN_SCRIPT_TAGS + skip_cm_sys_upgrade: CM_DOCKER_SKIP_CM_SYS_UPGRADE + push_image: CM_DOCKER_PUSH_IMAGE + docker_not_pull_update: CM_DOCKER_NOT_PULL_UPDATE + +new_env_keys: +- CM_DOCKERFILE_* + +post_deps: +- enable_if_env: + CM_BUILD_DOCKER_IMAGE: + - 'yes' + names: + - build-docker-image + tags: build,docker,image + +variations: + slim: + env: + CM_DOCKER_BUILD_SLIM: 'yes' diff --git a/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/customize.py b/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/customize.py new file mode 100644 index 000000000..3fdd1613e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/customize.py @@ -0,0 +1,452 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import cmind as cm +import os +import json +import re +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if env["CM_DOCKER_OS"] not in 
["ubuntu", "rhel", "arch"]: + return { + 'return': 1, 'error': f"Specified docker OS: {env['CM_DOCKER_OS']}. Currently only ubuntu, rhel and arch are supported in CM docker"} + + path = i['run_script_input']['path'] + + with open(os.path.join(path, "dockerinfo.json")) as f: + config = json.load(f) + + build_args = [] + build_args_default = {} + input_args = [] + copy_files = [] + + if env.get('CM_DOCKER_RUN_SCRIPT_TAGS', '') != '': + script_tags = env['CM_DOCKER_RUN_SCRIPT_TAGS'] + found_scripts = cm.access( + {'action': 'search', 'automation': 'script', 'tags': script_tags}) + scripts_list = found_scripts['list'] + + if not scripts_list: + return {'return': 1, + 'error': 'No CM script found for tags ' + script_tags} + + if len(scripts_list) > 1: + return { + 'return': 1, 'error': 'More than one scripts found for tags ' + script_tags} + + script = scripts_list[0] + input_mapping = script.meta.get('input_mapping', {}) + default_env = script.meta.get('default_env', {}) + + for input_, env_ in input_mapping.items(): + if input_ == "docker": + continue + arg = env_ + if env_ in default_env: # other inputs to be done later + arg = arg + "=" + str(default_env[env_]) + # build_args.append(arg) + # input_args.append("--"+input_+"="+"$"+env_) + + if "CM_DOCKER_OS_VERSION" not in env: + env["CM_DOCKER_OS_VERSION"] = "20.04" + + docker_image_base = get_value(env, config, 'FROM', 'CM_DOCKER_IMAGE_BASE') + if not docker_image_base: + return { + 'return': 1, 'error': f"Version \"{env['CM_DOCKER_OS_VERSION']}\" is not supported yet for \"{env['CM_DOCKER_OS']}\" "} + + # Handle cm_mlops Repository + if env.get("CM_REPO_PATH", "") != "": + use_copy_repo = True + cm_repo_path = os.path.abspath(env["CM_REPO_PATH"]) + + if not os.path.exists(cm_repo_path): + return { + 'return': 1, 'error': f"Specified CM_REPO_PATH does not exist: {cm_repo_path}"} + + cmr_yml_path = os.path.join(cm_repo_path, "cmr.yaml") + if not os.path.isfile(cmr_yml_path): + return { + 'return': 1, 'error': f"cmr.yaml not found in CM_REPO_PATH: {cm_repo_path}"} + + # Define the build context directory (where the Dockerfile will be) + build_context_dir = os.path.dirname( + env.get( + 'CM_DOCKERFILE_WITH_PATH', + os.path.join( + os.getcwd(), + "Dockerfile"))) + os.makedirs(build_context_dir, exist_ok=True) + + # Create cm_repo directory relative to the build context + repo_build_context_path = os.path.join(build_context_dir, "cm_repo") + + # Remove existing directory if it exists + if os.path.exists(repo_build_context_path): + shutil.rmtree(repo_build_context_path) + + try: + print( + f"Copying repository from {cm_repo_path} to {repo_build_context_path}") + shutil.copytree(cm_repo_path, repo_build_context_path) + except Exception as e: + return { + 'return': 1, 'error': f"Failed to copy repository to build context: {str(e)}"} + + if not os.path.isdir(repo_build_context_path): + return { + 'return': 1, 'error': f"Repository was not successfully copied to {repo_build_context_path}"} + + # (Optional) Verify the copy + if not os.path.isdir(repo_build_context_path): + return { + 'return': 1, 'error': f"cm_repo was not successfully copied to the build context at {repo_build_context_path}"} + else: + print( + f"cm_repo is present in the build context at {repo_build_context_path}") + + relative_repo_path = os.path.relpath( + repo_build_context_path, build_context_dir) + else: + # CM_REPO_PATH is not set; use cm pull repo as before + use_copy_repo = False + + if env.get("CM_MLOPS_REPO", "") != "": + cm_mlops_repo = env["CM_MLOPS_REPO"] + # the below 
pattern matches both the HTTPS and SSH git link formats + git_link_pattern = r'^(https?://github\.com/([^/]+)/([^/]+)(?:\.git)?|git@github\.com:([^/]+)/([^/]+)(?:\.git)?)$' + if match := re.match(git_link_pattern, cm_mlops_repo): + if match.group(2) and match.group(3): + repo_owner = match.group(2) + repo_name = match.group(3) + elif match.group(4) and match.group(5): + repo_owner = match.group(4) + repo_name = match.group(5) + cm_mlops_repo = f"{repo_owner}@{repo_name}" + print( + f"Converted repo format from {env['CM_MLOPS_REPO']} to {cm_mlops_repo}") + else: + cm_mlops_repo = "mlcommons@cm4mlops" + + cm_mlops_repo_branch_string = f" --branch={env['CM_MLOPS_REPO_BRANCH']}" + + if env.get('CM_DOCKERFILE_WITH_PATH', '') == '': + env['CM_DOCKERFILE_WITH_PATH'] = os.path.join( + os.getcwd(), "Dockerfile") + + dockerfile_with_path = env['CM_DOCKERFILE_WITH_PATH'] + dockerfile_dir = os.path.dirname(dockerfile_with_path) + + extra_dir = os.path.dirname(dockerfile_with_path) + + if extra_dir != '': + os.makedirs(extra_dir, exist_ok=True) + + f = open(dockerfile_with_path, "w") + EOL = env['CM_DOCKER_IMAGE_EOL'] + f.write('FROM ' + docker_image_base + EOL) + + # Maintainers + f.write(EOL) + f.write( + '# Automatically generated by the CM workflow automation meta-framework' + + EOL) + f.write('# https://github.com/mlcommons/ck' + EOL) + f.write(EOL) + + f.write('LABEL github=""' + EOL) + f.write('LABEL maintainer=""' + EOL) + f.write('LABEL license=""' + EOL) + + f.write(EOL) + + image_label = get_value(env, config, 'LABEL', 'CM_DOCKER_IMAGE_LABEL') + if image_label: + f.write('LABEL ' + image_label + EOL) + f.write(EOL) + + shell = get_value(env, config, 'SHELL', 'CM_DOCKER_IMAGE_SHELL') + if shell: + f.write('SHELL ' + shell + EOL) + f.write(EOL) + + for arg in config['ARGS_DEFAULT']: + arg_value = config['ARGS_DEFAULT'][arg] + f.write('ARG ' + f"{arg}={arg_value}" + EOL) + + for arg in config['ARGS']: + f.write('ARG ' + arg + EOL) + + for build_arg in build_args: + f.write('ARG ' + build_arg + EOL) + + for build_arg in sorted(build_args_default): + v = build_args_default[build_arg] + f.write('ARG ' + build_arg + '="' + str(v) + '"' + EOL) + + f.write(EOL) + copy_cmds = [] + if 'CM_DOCKER_COPY_FILES' in env: + for copy_file in env['CM_DOCKER_COPY_FILES']: + copy_split = copy_file.split(":") + if len(copy_split) != 2: + return { + 'return': 1, 'error': 'Invalid docker copy input {} given'.format(copy_file)} + filename = os.path.basename(copy_split[0]) + if not os.path.exists(os.path.join(dockerfile_dir, filename)): + shutil.copytree( + copy_split[0], os.path.join( + dockerfile_dir, filename)) + f.write('COPY ' + filename + " " + copy_split[1] + EOL) + + f.write( + EOL + + '# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes' + + EOL + + '# Install system dependencies' + + EOL) + f.write( + 'RUN ' + + get_value( + env, + config, + 'package-manager-update-cmd', + 'CM_PACKAGE_MANAGER_UPDATE_CMD') + + EOL) + f.write('RUN ' + get_value(env, config, 'package-manager-get-cmd') + " " + " ".join(get_value(env, config, + 'packages')) + EOL) + + if env.get('CM_DOCKER_EXTRA_SYS_DEPS', '') != '': + f.write('RUN ' + env['CM_DOCKER_EXTRA_SYS_DEPS'] + EOL) + + if env['CM_DOCKER_OS'] == "ubuntu": + if int(env['CM_DOCKER_OS_VERSION'].split('.')[0]) >= 23: + if "--break-system-packages" not in env.get( + 'CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS', ''): + env['CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS'] = " --break-system-packages" + pip_extra_flags = env.get('CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS', '') + + f.write(EOL + 
'# Setup docker environment' + EOL) + + entry_point = get_value( + env, + config, + 'ENTRYPOINT', + 'CM_DOCKER_IMAGE_ENTRYPOINT') + if entry_point: + f.write('ENTRYPOINT ' + entry_point + EOL) + + for key, value in config['ENV'].items(): + f.write('ENV ' + key + "=\"" + value + "\"" + EOL) + for cmd in config['RUN_CMDS']: + f.write('RUN ' + cmd + EOL) + + f.write(EOL + '# Setup docker user' + EOL) + docker_user = get_value(env, config, 'USER', 'CM_DOCKER_USER') + docker_group = get_value(env, config, 'GROUP', 'CM_DOCKER_GROUP') + + if docker_user: + + f.write('RUN groupadd -g $GID -o ' + docker_group + EOL) + + DOCKER_USER_ID = "-m -u $UID " + DOCKER_GROUP = "-g $GID -o" + + user_shell = json.loads(shell) + f.write('RUN useradd ' + DOCKER_USER_ID + DOCKER_GROUP + ' --create-home --shell ' + user_shell[0] + ' ' + + docker_user + EOL) + f.write( + 'RUN echo "' + + docker_user + + ' ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers' + + EOL) + f.write('USER ' + docker_user + ":" + docker_group + EOL) + + dockerfile_env = env.get('CM_DOCKERFILE_ENV', {}) + dockerfile_env_input_string = "" + for docker_env_key in dockerfile_env: + dockerfile_env_input_string = dockerfile_env_input_string + " --env." + \ + docker_env_key + "=" + str(dockerfile_env[docker_env_key]) + workdir = get_value(env, config, 'WORKDIR', 'CM_DOCKER_WORKDIR') + if workdir: + f.write('WORKDIR ' + workdir + EOL) + + f.write(EOL + '# Install python packages' + EOL) + python = get_value(env, config, 'PYTHON', 'CM_DOCKERFILE_PYTHON') + + docker_use_virtual_python = env.get('CM_DOCKER_USE_VIRTUAL_PYTHON', "yes") + if str(docker_use_virtual_python).lower() not in ["no", "0", "false"]: + f.write('RUN {} -m venv /home/cmuser/venv/cm'.format(python) + " " + EOL) + f.write('ENV PATH="/home/cmuser/venv/cm/bin:$PATH"' + EOL) + # f.write('RUN . 
/opt/venv/cm/bin/activate' + EOL) + f.write( + 'RUN {} -m pip install '.format(python) + + " ".join( + get_value( + env, + config, + 'python-packages')) + + ' ' + + pip_extra_flags + + ' ' + + EOL) + + f.write(EOL + '# Download CM repo for scripts' + EOL) + + if use_copy_repo: + docker_repo_dest = "/home/cmuser/CM/repos/mlcommons@cm4mlops" + f.write( + f'COPY --chown=cmuser:cm {relative_repo_path} {docker_repo_dest}' + + EOL) + + f.write(EOL + '# Register CM repository' + EOL) + f.write('RUN cm pull repo --url={} --quiet'.format(docker_repo_dest) + EOL) + f.write(EOL) + + else: + # Use cm pull repo as before + x = env.get('CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO', '') + if x != '': + x = ' ' + x + + f.write( + 'RUN cm pull repo ' + + cm_mlops_repo + + cm_mlops_repo_branch_string + + x + + EOL) + + # Check extra repositories + x = env.get('CM_DOCKER_EXTRA_CM_REPOS', '') + if x != '': + for y in x.split(','): + f.write('RUN ' + y + EOL) + + if str(env.get('CM_DOCKER_SKIP_CM_SYS_UPGRADE', False) + ).lower() not in ["true", "1", "yes"]: + f.write(EOL + '# Install all system dependencies' + EOL) + f.write('RUN cm run script --tags=get,sys-utils-cm --quiet' + EOL) + + if 'CM_DOCKER_PRE_RUN_COMMANDS' in env: + for pre_run_cmd in env['CM_DOCKER_PRE_RUN_COMMANDS']: + f.write('RUN ' + pre_run_cmd + EOL) + + run_cmd_extra = " " + \ + env.get('CM_DOCKER_RUN_CMD_EXTRA', '').replace(":", "=") + gh_token = get_value(env, config, "GH_TOKEN", "CM_GH_TOKEN") + if gh_token: + run_cmd_extra = " --env.CM_GH_TOKEN=$CM_GH_TOKEN" + + f.write(EOL + '# Run commands' + EOL) + for comment in env.get('CM_DOCKER_RUN_COMMENTS', []): + f.write(comment + EOL) + + skip_extra = False + if 'CM_DOCKER_RUN_CMD' not in env: + env['CM_DOCKER_RUN_CMD'] = "" + if 'CM_DOCKER_RUN_SCRIPT_TAGS' not in env: + env['CM_DOCKER_RUN_CMD'] += "cm version" + skip_extra = True + else: + if str(env.get('CM_DOCKER_NOT_PULL_UPDATE', 'False') + ).lower() not in ["yes", "1", "true"]: + env['CM_DOCKER_RUN_CMD'] += "cm pull repo && " + env['CM_DOCKER_RUN_CMD'] += "cm run script --tags=" + \ + env['CM_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet' + else: + if str(env.get('CM_DOCKER_NOT_PULL_UPDATE', 'False') + ).lower() not in ["yes", "1", "true"]: + env['CM_DOCKER_RUN_CMD'] = "cm pull repo && " + \ + env['CM_DOCKER_RUN_CMD'] + + print(env['CM_DOCKER_RUN_CMD']) + fake_run = env.get("CM_DOCKER_FAKE_RUN_OPTION", + " --fake_run") + dockerfile_env_input_string + fake_run = fake_run + \ + " --fake_deps" if env.get('CM_DOCKER_FAKE_DEPS') else fake_run + + x = 'RUN ' + env['CM_DOCKER_RUN_CMD'] + + if not skip_extra: + x += fake_run + if '--quiet' not in x: + x += ' --quiet' + if run_cmd_extra != '': + x += ' ' + run_cmd_extra + + f.write(x + EOL) + + # fake_run to install the dependent scripts and caching them + if not "run" in env['CM_DOCKER_RUN_CMD'] and str( + env.get('CM_REAL_RUN', False)).lower() in ["false", "0", "no"]: + fake_run = dockerfile_env_input_string + + x = 'RUN ' + env['CM_DOCKER_RUN_CMD'] + fake_run + run_cmd_extra + if '--quiet' not in x: + x += ' --quiet ' + x += EOL + + f.write(x) + + if 'CM_DOCKER_POST_RUN_COMMANDS' in env: + for post_run_cmd in env['CM_DOCKER_POST_RUN_COMMANDS']: + f.write('RUN ' + post_run_cmd + EOL) + + post_file = env.get('DOCKER_IMAGE_POST_FILE', '') + if post_file != '': + r = utils.load_txt(post_file) + if r['return'] > 0: + return r + + s = r['string'] + f.write(s + EOL) + + print(f"""Dockerfile written at {dockerfile_with_path}""") + + f.close() + + # f = open(env['CM_DOCKERFILE_WITH_PATH'], "r") + # print(f.read()) + + 
return {'return': 0} + + +def get_value(env, config, key, env_key=None): + if not env_key: + env_key = key + + if env.get(env_key, None) is not None: + return env[env_key] + + docker_os = env['CM_DOCKER_OS'] + docker_os_version = env['CM_DOCKER_OS_VERSION'] + + version_meta = config['distros'][docker_os]['versions'].get( + docker_os_version, '') + if key in version_meta: + return version_meta[key] + + distro_meta = config['distros'][docker_os] + if key in distro_meta: + return distro_meta[key] + + if key in config: + return config[key] + + return None diff --git a/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/dockerinfo.json b/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/dockerinfo.json new file mode 100644 index 000000000..df9c6c90a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-dockerfile/dockerinfo.json @@ -0,0 +1,87 @@ +{ + "python-packages": [ + "wheel", "cmind", "requests", "giturlparse", "tabulate" + ], + "ARGS": [ + "CM_GH_TOKEN" + ], + "ARGS_DEFAULT": { + "UID" : "1000", + "GID" : "1000" + }, + "ENTRYPOINT": "[\"/bin/bash\", \"-c\"]", + "ENV": { + "TZ": "US/Pacific", + "PATH": "${PATH}:/home/cmuser/.local/bin" + }, + "RUN_CMDS": [ + "ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone" + ], + "PYTHON": "python3", + "UID": "", + "USER": "cmuser", + "GID": "", + "GROUP": "cm", + "SHELL": "[\"/bin/bash\", \"-c\"]", + "WORKDIR": "/home/cmuser", + "distros": { + "ubuntu": { + "package-manager-update-cmd": "apt-get update -y", + "package-manager-get-cmd": "apt-get install -y", + "packages": [ + "python3", "python3-pip", "git", "sudo", "wget", "python3-venv" + ], + "versions": { + "18.04": { + "FROM": "ubuntu:18.04" + }, + "20.04": { + "FROM": "ubuntu:20.04" + }, + "22.04": { + "FROM": "ubuntu:22.04" + }, + "23.04": { + "FROM": "ubuntu:23.04" + }, + "24.04": { + "FROM": "ubuntu:24.04" + } + } + }, + "rhel": { + "FROM": "registry.access.redhat.com/ubi9", + "package-manager-update-cmd": "dnf update -y", + "package-manager-get-cmd": "dnf install -y", + "packages": [ + "python3", "python-pip", "git", "wget", "sudo", "binutils" + ], + "versions": { + "9": { + }, + "8": { + "FROM": "registry.access.redhat.com/ubi8", + "packages": [ + "python39", "python39-pip", "git", "wget", "sudo", "binutils" + ], + "python-packages": [ + "cmind", "requests", "giturlparse", "tabulate" + ], + "PYTHON": "python3.9" + } + } + }, + "arch": { + "FROM": "archlinux", + "package-manager-update-cmd": "pacman -Syu --noconfirm", + "package-manager-get-cmd": "pacman -Sy --noconfirm", + "packages": [ + "python", "python-pip", "git", "wget", "sudo", "binutils" + ], + "versions": { + "latest": { + } + } + } + } +} diff --git a/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
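The `get_value()` helper that closes `build-dockerfile/customize.py` above encodes the whole configuration-resolution policy for `dockerinfo.json`: an explicit environment override wins, then the per-version distro metadata, then distro-level metadata, and finally the top-level config. A minimal, self-contained sketch of that precedence follows; the `cfg` dict is a trimmed, hypothetical stand-in for the shipped `dockerinfo.json`, not the real file:

```python
# Sketch of the get_value() precedence used by build-dockerfile/customize.py.
# 'cfg' is a trimmed, hypothetical stand-in for dockerinfo.json.
cfg = {
    "PYTHON": "python3",
    "distros": {
        "rhel": {
            "FROM": "registry.access.redhat.com/ubi9",
            "versions": {
                "8": {"FROM": "registry.access.redhat.com/ubi8", "PYTHON": "python3.9"},
                "9": {},
            },
        }
    },
}

def get_value(env, config, key, env_key=None):
    env_key = env_key or key
    if env.get(env_key) is not None:          # 1) explicit env override wins
        return env[env_key]
    distro = config['distros'][env['CM_DOCKER_OS']]
    version_meta = distro['versions'].get(env['CM_DOCKER_OS_VERSION'], {})
    if key in version_meta:                   # 2) per-version distro metadata
        return version_meta[key]
    if key in distro:                         # 3) distro-level metadata
        return distro[key]
    return config.get(key)                    # 4) global default (or None)

env = {'CM_DOCKER_OS': 'rhel', 'CM_DOCKER_OS_VERSION': '8'}
print(get_value(env, cfg, 'FROM'))    # registry.access.redhat.com/ubi8
print(get_value(env, cfg, 'PYTHON'))  # python3.9
env['CM_DOCKER_OS_VERSION'] = '9'
print(get_value(env, cfg, 'PYTHON'))  # python3 (falls through to the global default)
```

This precedence is why the `rhel` `"8"` block can override both the base image and the Python binary while `"9"` inherits everything from the distro and global defaults.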
diff --git a/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/README-extra.md new file mode 100644 index 000000000..f05fd8322 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/README-extra.md @@ -0,0 +1,2 @@ +# About +This CM script builds the Nvidia C++ implementation of MLPerf Inference diff --git a/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/README.md b/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/README.md new file mode 100644 index 000000000..2d0724327 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/build-mlperf-inference-server-nvidia](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/build-mlperf-inference-server-nvidia) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/_cm.yaml new file mode 100644 index 000000000..c5003f67c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/_cm.yaml @@ -0,0 +1,398 @@ +# Identification of this CM script +alias: build-mlperf-inference-server-nvidia +uid: f37403af5e9f4541 +cache: true +automation_alias: script +automation_uid: 5b4e0237da074764 +default_version: r3.1 + +category: "MLPerf benchmark support" + + +# User-friendly tags to find this CM script +tags: + - build + - mlcommons + - mlperf + - inference + - inference-server + - server + - nvidia-harness + - nvidia + + +new_env_keys: + - CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH + +default_env: + CM_MAKE_BUILD_COMMAND: build + CM_MAKE_CLEAN: "no" + CM_CUSTOM_SYSTEM_NVIDIA: "yes" + +input_mapping: + custom_system: CM_CUSTOM_SYSTEM_NVIDIA + clean: CM_MAKE_CLEAN + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect python3 + - tags: get,python3 + names: + - python + - python3 + + # Detect CUDA + - tags: get,cuda,_cudnn + names: + - cuda + enable_if_env: + CM_MLPERF_DEVICE: + - cuda + - inferentia + + # Detect Tensorrt + - tags: get,tensorrt,_dev + names: + - tensorrt + enable_if_env: + CM_MLPERF_DEVICE: + - cuda + - inferentia + skip_if_env: + CM_TENSORRT_SYSTEM_DETECT: + - yes + + # Detect gcc + - tags: get,gcc + + # Detect CMake + - tags: get,cmake + version: "3.25.1" + + # Detect Google Logger + - tags: get,generic,sys-util,_glog-dev + + # Detect GFlags + - tags: get,generic,sys-util,_gflags-dev + + # Detect libgmock-dev + - tags: get,generic,sys-util,_libgmock-dev + + # Detect libre2-dev + - tags: get,generic,sys-util,_libre2-dev + + # Detect libnuma-dev + - tags: get,generic,sys-util,_libnuma-dev + + # Detect libboost-all-dev + - tags: get,generic,sys-util,_libboost-all-dev + + # Detect rapidjson-dev + - tags: get,generic,sys-util,_rapidjson-dev + + + # Download Nvidia Submission Code + - tags: get,nvidia,mlperf,inference,common-code + names: + - nvidia-inference-common-code + + - tags: get,generic-python-lib,_package.pybind11 + + # Detect pycuda + - tags: get,generic-python-lib,_pycuda + version: "2022.2.2" + skip_if_env: + CM_RUN_STATE_DOCKER: + - 'yes' + - True + - 'True' + 
+ # Detect opencv-python + - tags: get,generic-python-lib,_opencv-python + + # Detect nvidia-dali + - tags: get,generic-python-lib,_nvidia-dali + + # Get Nvidia scratch space where data and models get downloaded + - tags: get,mlperf,inference,nvidia,scratch,space + names: + - nvidia-scratch-space + + +post_deps: + # Detect nvidia system + - tags: add,custom,system,nvidia + names: + - custom-system-nvidia + - nvidia-inference-common-code + skip_if_env: + CM_CUSTOM_SYSTEM_NVIDIA: + - "no" + - False + - "False" + +variations: + # Target devices + cpu: + group: device + env: + CM_MLPERF_DEVICE: cpu + inferentia: + group: device + env: + CM_MLPERF_DEVICE: inferentia + cuda: + group: device + default: true + env: + CM_MLPERF_DEVICE: cuda + CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + + ctuning: + group: code + default: true + add_deps_recursive: + nvidia-inference-common-code: + tags: _ctuning + go: + group: code + add_deps_recursive: + nvidia-inference-common-code: + tags: _go + nvidia-only: + group: code + add_deps_recursive: + nvidia-inference-common-code: + tags: _nvidia-only + custom: + group: code + add_deps_recursive: + nvidia-inference-common-code: + tags: _custom + mlcommons: + group: code + add_deps_recursive: + nvidia-inference-common-code: + tags: _mlcommons + + r4.0: + group: version + add_deps_recursive: + nvidia-inference-common-code: + version: r4.0 + nvidia-scratch-space: + tags: _version.4_1 + deps: + - tags: get,generic,sys-util,_git-lfs + - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v4.0 + names: + - pytorch + - torch + skip_if_env: + CM_HOST_PLATFORM_FLAVOR: + - x86_64 + CM_PYTHON_MINOR_VERSION: + - 8 + - tags: get,generic-python-lib,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torch-2.1.0a0+git32f93b1-cp38-cp38-linux_x86_64.whl + enable_if_env: + CM_HOST_PLATFORM_FLAVOR: + - x86_64 + CM_PYTHON_MINOR_VERSION: + - 8 + + - tags: install,torchvision,from.src,_for-nvidia-mlperf-inference-v4.0 + names: + - pytorchvision + - torchvision + skip_if_env: + CM_HOST_PLATFORM_FLAVOR: + - x86_64 + CM_PYTHON_MINOR_VERSION: + - 8 + - tags: get,generic-python-lib,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torchvision-0.16.0a0+657027f-cp38-cp38-linux_x86_64.whl + enable_if_env: + CM_HOST_PLATFORM_FLAVOR: + - x86_64 + CM_PYTHON_MINOR_VERSION: + - 8 + +versions: + r2.1: + add_deps_recursive: + nvidia-inference-common-code: + version: r2.1 + nvidia-scratch-space: + tags: _version.2_1 + + r3.0: + add_deps_recursive: + nvidia-inference-common-code: + version: r3.0 + nvidia-scratch-space: + tags: _version.3_0 + r3.1: + add_deps_recursive: + nvidia-inference-common-code: + version: r3.1 + nvidia-scratch-space: + tags: _version.4_0 + deps: + - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1 + names: + - pytorch + - torch + - tags: install,torchvision,from.src,_for-nvidia-mlperf-inference-v3.1 + names: + - pytorchvision + - torchvision + - tags: install,nccl,libs,_cuda + + r4.0: + add_deps_recursive: + nvidia-inference-common-code: + version: r4.0 + nvidia-scratch-space: + tags: _version.4_0 + default_env: + BUILD_TRTLLM: 1 + deps: + - tags: get,generic,sys-util,_nlohmann-json3-dev + - tags: get,generic,sys-util,_git-lfs + - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v4.0 + names: + - pytorch + - torch + skip_if_env: + CM_HOST_PLATFORM_FLAVOR: + - x86_64 + CM_PYTHON_MINOR_VERSION: + - 8 + - tags: 
get,generic-python-lib,_package.torch,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torch-2.1.0a0+git32f93b1-cp38-cp38-linux_x86_64.whl + enable_if_env: + CM_HOST_PLATFORM_FLAVOR: + - x86_64 + CM_PYTHON_MINOR_VERSION: + - 8 + - tags: install,torchvision,from.src,_for-nvidia-mlperf-inference-v4.0 + names: + - pytorchvision + - torchvision + skip_if_env: + CM_HOST_PLATFORM_FLAVOR: + - x86_64 + CM_PYTHON_MINOR_VERSION: + - 8 + - tags: get,generic-python-lib,_package.torchvision,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torchvision-0.16.0a0+657027f-cp38-cp38-linux_x86_64.whl + enable_if_env: + CM_HOST_PLATFORM_FLAVOR: + - x86_64 + CM_PYTHON_MINOR_VERSION: + - 8 + + r4.1-dev: + add_deps_recursive: + nvidia-inference-common-code: + version: r4.0 + nvidia-scratch-space: + tags: _version.4_1 + default_env: + BUILD_TRTLLM: 1 + deps: + - tags: get,generic,sys-util,_nlohmann-json3-dev + - tags: get,generic,sys-util,_git-lfs + - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v4.0 + names: + - pytorch + - torch + skip_if_env: + CM_HOST_PLATFORM_FLAVOR: + - x86_64 + CM_PYTHON_MINOR_VERSION: + - 8 + - tags: get,generic-python-lib,_package.torch,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torch-2.1.0a0+git32f93b1-cp38-cp38-linux_x86_64.whl + enable_if_env: + CM_HOST_PLATFORM_FLAVOR: + - x86_64 + CM_PYTHON_MINOR_VERSION: + - 8 + - tags: install,torchvision,from.src,_for-nvidia-mlperf-inference-v4.0 + names: + - pytorchvision + - torchvision + skip_if_env: + CM_HOST_PLATFORM_FLAVOR: + - x86_64 + CM_PYTHON_MINOR_VERSION: + - 8 + - tags: get,generic-python-lib,_package.torchvision,_whl-url.https://github.com/mlcommons/cm4mlperf-inference/releases/download/mlperf-inference-v4.0/torchvision-0.16.0a0+657027f-cp38-cp38-linux_x86_64.whl + enable_if_env: + CM_HOST_PLATFORM_FLAVOR: + - x86_64 + CM_PYTHON_MINOR_VERSION: + - 8 + + r4.1: + add_deps_recursive: + nvidia-inference-common-code: + version: r4.1 + nvidia-scratch-space: + tags: _version.4_1 + default_env: + BUILD_TRTLLM: 1 + +docker: + skip_run_cmd: 'no' + all_gpus: 'yes' + shm_size: '32gb' + extra_run_args: ' --ulimit memlock=-1 --cap-add SYS_ADMIN --cap-add SYS_TIME --security-opt apparmor=unconfined --security-opt seccomp=unconfined' + os: ubuntu + cm_repo_flags1: ' --branch=mlperf-inference' + real_run: False + interactive: True + os_version: '20.04' + base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v3.1-cuda12.2-cudnn8.9-x86_64-ubuntu20.04-l4-public + docker_input_mapping: + imagenet_path: IMAGENET_PATH + gptj_checkpoint_path: GPTJ_CHECKPOINT_PATH + criteo_preprocessed_path: CRITEO_PREPROCESSED_PATH + results_dir: RESULTS_DIR + submission_dir: SUBMISSION_DIR + cudnn_tar_file_path: CM_CUDNN_TAR_FILE_PATH + tensorrt_tar_file_path: CM_TENSORRT_TAR_FILE_PATH + cuda_run_file_path: CUDA_RUN_FILE_LOCAL_PATH + dlrm_data_path: DLRM_DATA_PATH + scratch_path: MLPERF_SCRATCH_PATH + deps: + - tags: get,mlperf,inference,nvidia,scratch,space + - tags: get,mlperf,inference,results,dir,local + - tags: get,mlperf,inference,submission,dir,local + - tags: get,nvidia-docker + skip_if_env: + CM_SKIP_GET_NVIDIA_DOCKER: + - yes + + pre_run_cmds: + - cm pull repo + run_cmd_prefix: sudo apt remove -y cmake + mounts: + - "${{ IMAGENET_PATH }}:/data/imagenet-val" + - "${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}:${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}" + - "${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ 
CM_MLPERF_INFERENCE_SUBMISSION_DIR }}" + - "${{ RESULTS_DIR }}:/home/cmuser/results_dir" + - "${{ SUBMISSION_DIR }}:/home/cmuser/submission_dir" + - "${{ CM_CUDNN_TAR_FILE_PATH }}:${{ CM_CUDNN_TAR_FILE_PATH }}" + - "${{ CM_TENSORRT_TAR_FILE_PATH }}:${{ CM_TENSORRT_TAR_FILE_PATH }}" + - "${{ CUDA_RUN_FILE_LOCAL_PATH }}:${{ CUDA_RUN_FILE_LOCAL_PATH }}" + - "${{ MLPERF_SCRATCH_PATH }}:${{ MLPERF_SCRATCH_PATH }}" + - "${{ DLRM_DATA_PATH }}:/home/mlperf_inf_dlrmv2" diff --git a/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/customize.py b/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/customize.py new file mode 100644 index 000000000..727cd4387 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/customize.py @@ -0,0 +1,59 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + env = i['env'] + + if '+LIBRARY_PATH' not in env: + env['+LIBRARY_PATH'] = [] + + if 'CM_TENSORRT_INSTALL_PATH' in env: + env['+LIBRARY_PATH'].append(os.path.join( + env['CM_TENSORRT_INSTALL_PATH'], "lib")) + + cxxflags = [ + "-Wno-error=switch", + "-DDALI_1_15=1", + "-Wno-error=maybe-uninitialized"] + + if env.get('CM_GCC_VERSION', '') != '': + gcc_major_version = env['CM_GCC_VERSION'].split(".")[0] + if int(gcc_major_version) > 10: + if env.get('CM_MLPERF_INFERENCE_VERSION', '') != "4.1": + cxxflags.append("-Wno-error=range-loop-construct") + + if env.get('CM_MLPERF_DEVICE', '') == "inferentia": + env['USE_INFERENTIA'] = "1" + env['USE_NIGHTLY'] = "0" + env['CM_MAKE_BUILD_COMMAND'] = "build" + + if '+ CXXFLAGS' not in env: + env['+ CXXFLAGS'] = [] + + env['+ CXXFLAGS'] += cxxflags + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/run.sh b/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/run.sh new file mode 100644 index 000000000..e03aaa72b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/build-mlperf-inference-server-nvidia/run.sh @@ -0,0 +1,16 @@ +#!/bin/bash +CUR=$PWD + +cd ${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH} + +if [[ ${CM_MAKE_CLEAN} == "yes" ]]; then + make clean +fi + +if [[ ${CM_MLPERF_DEVICE} == "inferentia" ]]; then + make prebuild +fi + +SKIP_DRIVER_CHECK=1 make ${CM_MAKE_BUILD_COMMAND} + +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
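The `preprocess()` hook in `build-mlperf-inference-server-nvidia/customize.py` above is mostly flag plumbing, but the GCC-version gate is easy to misread. Below is a condensed restatement as a standalone, runnable sketch; the function name and return type are illustrative, and only the flag values and conditions come from the script itself:

```python
# Condensed restatement of the CXXFLAGS gating in
# build-mlperf-inference-server-nvidia/customize.py (illustrative only).
def nvidia_harness_cxxflags(gcc_version: str, mlperf_version: str) -> list:
    flags = ["-Wno-error=switch", "-DDALI_1_15=1", "-Wno-error=maybe-uninitialized"]
    if gcc_version:
        gcc_major = int(gcc_version.split(".")[0])
        # GCC 11+ needs range-loop-construct demoted from an error,
        # except in the v4.1 tree, which the original script exempts.
        if gcc_major > 10 and mlperf_version != "4.1":
            flags.append("-Wno-error=range-loop-construct")
    return flags

assert "-Wno-error=range-loop-construct" in nvidia_harness_cxxflags("12.2", "4.0")
assert "-Wno-error=range-loop-construct" not in nvidia_harness_cxxflags("12.2", "4.1")
assert len(nvidia_harness_cxxflags("9.4", "4.0")) == 3
```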
diff --git a/cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/README.md b/cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/README.md new file mode 100644 index 000000000..01c451c51 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-optimization/calibrate-model-for.qaic](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-optimization/calibrate-model-for.qaic) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/_cm.yaml new file mode 100644 index 000000000..958d9f846 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/_cm.yaml @@ -0,0 +1,146 @@ +alias: calibrate-model-for.qaic +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML optimization +deps: +- tags: detect,cpu +- names: + - qaic-apps-sdk + tags: get,qaic,apps,sdk +- enable_if_env: + CM_CALIBRATE_OPENIMAGES: + - 'yes' + names: + - openimages-cal + - preprocessed-dataset + tags: get,preprocessed,dataset,_calibration,openimages,_for.retinanet.onnx,_NCHW,_fp32,_custom-annotations +- enable_if_env: + CM_CALIBRATE_IMAGENET: + - 'yes' + names: + - imagenet-cal + - preprocessed-calibration-dataset + tags: get,dataset,imagenet,preprocessed,_calibration,_for.resnet50,_float32,_rgb32 +- enable_if_env: + CM_CALIBRATE_SQUAD: + - 'on' + names: + - squad-cal + - preprocessed-dataset + tags: get,dataset,preprocessed,_calib1,squad,_pickle,_seq-length.384,_packed +- names: + - model-src + tags: get,ml-model +new_env_keys: +- CM_QAIC_MODEL_PROFILE_* +tags: +- qaic +- calibrate +- profile +- qaic-profile +- qaic-calibrate +uid: 817bad70df2f4e45 +variations: + bert-99: + base: + - bert_ + env: + CM_CALIBRATE_SQUAD: 'yes' + CM_QAIC_COMPILER_ARGS: '' + CM_QAIC_COMPILER_PARAMS: -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,<<>> + -input-list-file=<<>> -num-histogram-bins=512 + -profiling-threads=<<>> + CM_QAIC_MODEL_TO_CONVERT: calibrate_bert_mlperf + group: model + bert_: + adr: + model-src: + tags: bert-large,_onnx,_packed + default-variations: + seq-length: seq.384 + env: + CM_CREATE_INPUT_BATCH: 'no' + CM_QAIC_MODEL_NAME: bert-large + bs.#: + env: + CM_CREATE_INPUT_BATCH: 'yes' + CM_QAIC_MODEL_BATCH_SIZE: '#' + group: batch-size + bs.1: + env: + CM_CREATE_INPUT_BATCH: 'yes' + CM_QAIC_MODEL_BATCH_SIZE: '1' + group: batch-size + filter-size.#: + ad: + preprocessed-dataset: + tags: _filter-size.#,_filter,_size.# + group: calib-dataset-filter-size + first.#: + adr: + preprocessed-dataset: + tags: first.# + mlperf.option1: + adr: + preprocessed-dataset: + tags: _mlperf.option1 + group: calibration-option + mlperf.option2: + adr: + preprocessed-dataset: + tags: _mlperf.option2 + group: calibration-option + resnet50: + adr: + model-src: + tags: resnet50,_tf + default_variations: + calibration-option: mlperf.option1 + model-framework: tf + env: + CM_CALIBRATE_IMAGENET: 'yes' + CM_QAIC_COMPILER_ARGS: '' + CM_QAIC_COMPILER_PARAMS: -output-node-name=ArgMax -profiling-threads=<<>> + CM_QAIC_MODEL_NAME: resnet50 + CM_QAIC_MODEL_TO_CONVERT: calibrate_resnet50_tf + CM_QAIC_OUTPUT_NODE_NAME: -output-node-name=ArgMax + group: model + resnet50,tf: + adr: + model-src: + tags: _fix-input-shape + preprocessed-dataset: + tags: _NHWC + env: + CM_QAIC_MODEL_TO_CONVERT: calibrate_resnet50_tf + retinanet: + adr: + model-src: + tags: 
retinanet,_no-nms,_onnx + env: + CM_CALIBRATE_OPENIMAGES: 'yes' + CM_QAIC_COMPILER_ARGS: '' + CM_QAIC_COMPILER_PARAMS: -enable-channelwise -profiling-threads=<<>> + -onnx-define-symbol=batch_size,<<>> -node-precision-info=<<>> + CM_QAIC_MODEL_NAME: retinanet + CM_QAIC_MODEL_TO_CONVERT: calibrate_retinanet_no_nms_mlperf + group: model + new_env_keys: + - CM_QAIC_MODEL_RETINANET_* + seq.#: + ad: + squad-preprocessed: + tags: _seq.# + env: + CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: '#' + group: seq-length + seq.384: + ad: + squad-preprocessed: + tags: _seq.384 + env: + CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: '#' + group: seq-length + tf: + group: model-framework diff --git a/cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/customize.py b/cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/customize.py new file mode 100644 index 000000000..26845e3e6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/customize.py @@ -0,0 +1,231 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import sys +import yaml + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_CREATE_INPUT_BATCH', '') == 'yes': + r = create_batched_inputs(env) + if r['return'] > 0: + return r + + r = construct_calibration_cmd(env) + if r['return'] > 0: + return r + cmd = r['cmd'] + + print("Profiling from " + os.getcwd()) + + env['CM_RUN_CMD'] = cmd + + return {'return': 0} + + +def create_batched_inputs(env): + original_images_file = env['CM_DATASET_PREPROCESSED_IMAGES_LIST'] + batchsize = env['CM_QAIC_MODEL_BATCH_SIZE'] + + file_paths = [] + with open(original_images_file) as f: + file_paths = f.read().splitlines() + + i = 0 + outfile = None + lastfile = None + outfiles = [] + os.makedirs(os.path.join(os.getcwd(), "raw"), exist_ok=True) + for file in file_paths: + if i % int(batchsize) == 0: + filename = os.path.basename(file).replace(".rgb32", ".raw") + outfile = os.path.join(os.getcwd(), "raw", filename) + outfiles.append(outfile) + with open(outfile, "wb") as f: + pass + with open(outfile, "ab") as f: + with open(file, "rb") as infile: + f.write(infile.read()) + i = i + 1 + lastfile = file + + while i % int(batchsize) != 0: + with open(outfile, "ab") as f: + with open(lastfile, "rb") as infile: + f.write(infile.read()) + i = i + 1 + with open("batched_input_files", "w") as f: + f.write("\n".join(outfiles)) + + return {'return': 0} + + +def construct_calibration_cmd(env): + compiler_params = env['CM_QAIC_COMPILER_PARAMS'] + batchsize = env.get('CM_QAIC_MODEL_BATCH_SIZE', "1") + cmd = env['CM_QAIC_EXEC_PATH'] + " " + if env.get('CM_CREATE_INPUT_BATCH', '') == 'yes': + cmd += " -input-list-file=batched_input_files -batchsize=" + batchsize + " " + cmd += compiler_params + " -dump-profile=profile.yaml -model=" + \ + env['CM_ML_MODEL_FILE_WITH_PATH'] + + return {'return': 0, 'cmd': cmd} + + +def postprocess(i): + + env = i['env'] + profile_file_path = os.path.join(os.getcwd(), "profile.yaml") + 
env['CM_QAIC_MODEL_PROFILE_WITH_PATH'] = profile_file_path + + if env.get('CM_ML_MODEL_INPUT_LAYER_NAME', '') != '': + input_layer_names = [env.get('CM_ML_MODEL_INPUT_LAYER_NAME')] + else: + input_layer_names = ["images:0", "images/:0"] + + output_layer_names_conf = [[], []] + output_layer_names_loc = [[], []] + + output_layer_names_loc[0] = [ + "/GatherElements/:0", + "/GatherElements_1/:0", + "/GatherElements_2/:0", + "/GatherElements_3/:0", + "/GatherElements_4/:0" + ] + + output_layer_names_conf[0] = [ + "/TopK/:0", + "/TopK_1/:0", + "/TopK_2/:0", + "/TopK_3/:0", + "/TopK_4/:0" + ] + + output_layer_names_loc[1] = [ + "GatherElements_588/:0", + "GatherElements_598/:0", + "GatherElements_608/:0", + "GatherElements_618/:0", + "GatherElements_628/:0" + ] + + output_layer_names_conf[1] = [ + "TopK_570/:0", + "TopK_572/:0", + "TopK_574/:0", + "TopK_576/:0", + "TopK_578/:0" + ] + + if env.get('CM_QAIC_MODEL_NAME', '') == "retinanet": + with open(profile_file_path, "r") as stream: + try: + output_min_val_loc = sys.maxsize + output_max_val_loc = -sys.maxsize + output_min_val_conf = sys.maxsize + output_max_val_conf = -sys.maxsize + docs = yaml.load_all(stream, yaml.FullLoader) + for doc in docs: + if isinstance(doc, list): + node_names = [k['NodeOutputName'] for k in doc] + oindex = None + + for output in output_layer_names_loc: + if output[0] in node_names: + oindex = output_layer_names_loc.index(output) + break + + if oindex is None: + return { + 'return': 1, 'error': 'Output node names not found for the given retinanet model'} + + for k in doc: + if k["NodeOutputName"] in input_layer_names: + min_val = k['Min'] + max_val = k['Max'] + scale, offset = get_scale_offset( + min_val, max_val) + env['CM_QAIC_MODEL_RETINANET_IMAGE_SCALE'] = scale + env['CM_QAIC_MODEL_RETINANET_IMAGE_OFFSET'] = offset + + if k["NodeOutputName"] in output_layer_names_loc[oindex]: + min_val = k['Min'] + max_val = k['Max'] + if min_val < output_min_val_loc: + output_min_val_loc = min_val + if max_val > output_max_val_loc: + output_max_val_loc = max_val + loc_scale, loc_offset = get_scale_offset( + min_val, max_val) + index = output_layer_names_loc[oindex].index( + k["NodeOutputName"]) + env[f'CM_QAIC_MODEL_RETINANET_LOC_SCALE{index}'] = loc_scale + # to uint8 is done in NMS code + env[f'CM_QAIC_MODEL_RETINANET_LOC_OFFSET{index}'] = loc_offset - 128 + + total_range = max_val - min_val + scale = total_range / 256.0 + offset = round(-min_val / scale) + + if k["NodeOutputName"] in output_layer_names_conf[oindex]: + min_val = k['Min'] + max_val = k['Max'] + if min_val < output_min_val_conf: + output_min_val_conf = min_val + if max_val > output_max_val_conf: + output_max_val_conf = max_val + conf_scale, conf_offset = get_scale_offset( + min_val, max_val) + index = output_layer_names_conf[oindex].index( + k["NodeOutputName"]) + env[f'CM_QAIC_MODEL_RETINANET_CONF_SCALE{index}'] = conf_scale + # to uint8 is done in NMS code + env[f'CM_QAIC_MODEL_RETINANET_CONF_OFFSET{index}'] = conf_offset - 128 + total_range = max_val - min_val + scale = total_range / 256.0 + offset = round(-min_val / scale) + + loc_scale, loc_offset = get_scale_offset( + output_min_val_loc, output_max_val_loc) + conf_scale, conf_offset = get_scale_offset( + output_min_val_conf, output_max_val_conf) + env['CM_QAIC_MODEL_RETINANET_LOC_SCALE'] = loc_scale + env['CM_QAIC_MODEL_RETINANET_LOC_OFFSET'] = loc_offset - 128 # to uint8 is done in NMS code + env['CM_QAIC_MODEL_RETINANET_CONF_SCALE'] = conf_scale + env['CM_QAIC_MODEL_RETINANET_CONF_OFFSET'] = conf_offset
- 128 # to uint8 is done in NMS code + + except yaml.YAMLError as exc: + return {'return': 1, 'error': exc} + + return {'return': 0} + + +def get_scale_offset(min_val, max_val): + total_range = max_val - min_val + scale = total_range /256.0 + offset = round(-min_val / scale) + return scale, offset diff --git a/cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/run.sh b/cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/run.sh new file mode 100644 index 000000000..59b1aed3d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/calibrate-model-for.qaic/run.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +run "$CM_RUN_CMD" + diff --git a/cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/_cm.yaml new file mode 100644 index 000000000..079fe309d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/_cm.yaml @@ -0,0 +1,45 @@ +alias: clean-nvidia-mlperf-inference-scratch-space +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +tags: +- clean +- nvidia +- scratch +- space +- mlperf +- inference +uid: bb41f6e3608e4e8a +input_mapping: + extra_cache_rm_tags: CM_CLEAN_EXTRA_CACHE_RM_TAGS +deps: + # Get Nvidia scratch space where data and models get downloaded + - tags: get,mlperf,inference,nvidia,scratch,space + names: + - nvidia-scratch-space + +variations: + sdxl: + group: model + env: + CM_MODEL: sdxl + downloaded-data: + group: artifact + env: + CM_CLEAN_ARTIFACT_NAME: downloaded_data + preprocessed-data: + group: artifact + env: + CM_CLEAN_ARTIFACT_NAME: preprocessed_data + downloaded-model: + group: artifact + env: + CM_CLEAN_ARTIFACT_NAME: downloaded_model + v4.1: + group: version + env: + CM_NVIDIA_MLPERF_INFERENCE_CODE_VERSION: v4.1 + v4.0: + group: version + env: + CM_NVIDIA_MLPERF_INFERENCE_CODE_VERSION: v4.0 diff --git a/cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/customize.py b/cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/customize.py new file mode 100644 index 000000000..f2b1fa4fc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/customize.py @@ -0,0 +1,66 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: 
https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import cmind as cm + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + clean_cmd = '' + cache_rm_tags = '' + extra_cache_rm_tags = env.get('CM_CLEAN_EXTRA_CACHE_RM_TAGS', '') + + if env.get('CM_MODEL', '') == 'sdxl': + if env.get('CM_CLEAN_ARTIFACT_NAME', '') == 'downloaded_data': + clean_cmd = f"""rm -rf {os.path.join(env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], "data", "coco", "SDXL")} """ + cache_rm_tags = "nvidia-harness,_preprocess_data,_sdxl" + if env.get('CM_CLEAN_ARTIFACT_NAME', '') == 'preprocessed_data': + clean_cmd = f"""rm -rf {os.path.join(env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], "preprocessed_data", "coco2014-tokenized-sdxl")} """ + cache_rm_tags = "nvidia-harness,_preprocess_data,_sdxl" + if env.get('CM_CLEAN_ARTIFACT_NAME', '') == 'downloaded_model': + clean_cmd = f"""rm -rf {os.path.join(env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], "models", "SDXL")} """ + cache_rm_tags = "nvidia-harness,_download_model,_sdxl" + + cache_rm_tags = cache_rm_tags + extra_cache_rm_tags + + if cache_rm_tags: + r = cm.access({'action': 'rm', 'automation': 'cache', + 'tags': cache_rm_tags, 'f': True}) + print(r) + if r['return'] != 0 and r['return'] != 16: # ignore missing ones + return r + if r['return'] == 0: # cache entry found + if clean_cmd != '': + env['CM_RUN_CMD'] = clean_cmd + else: + if clean_cmd != '': + env['CM_RUN_CMD'] = clean_cmd + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/run.bat b/cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/run.bat new file mode 100644 index 000000000..648302ca7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/run.sh b/cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/run.sh new file mode 100644 index 000000000..4c23c380e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/clean-nvidia-mlperf-inference-scratch-space/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + +echo "Running: " +echo "${CM_RUN_CMD}" +echo "" + +if [[ ${CM_FAKE_RUN} != "yes" ]]; then + eval "${CM_RUN_CMD}" + test $? -eq 0 || exit 1 +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
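Stepping back to `get_scale_offset()` at the end of `calibrate-model-for.qaic/customize.py` above: it compresses the whole uint8 quantization recipe into two lines, so a worked check may help. The dequantization mapping in the final asserts is my reading of the scale/offset convention implied by that helper, not something the patch states explicitly:

```python
# get_scale_offset() from calibrate-model-for.qaic/customize.py, with a worked check.
def get_scale_offset(min_val, max_val):
    total_range = max_val - min_val
    scale = total_range / 256.0        # one uint8 step covers range/256
    offset = round(-min_val / scale)   # zero-point: the step on which real 0.0 lands
    return scale, offset

scale, offset = get_scale_offset(-2.0, 6.0)
assert scale == 0.03125 and offset == 64
# Assumed dequantization: a uint8 code q maps back to roughly (q - offset) * scale.
assert (0 - offset) * scale == -2.0        # code 0 recovers the calibrated minimum
assert (255 - offset) * scale == 5.96875   # code 255 sits one step below the maximum
```

This also explains the recurring `- 128` in the retinanet post-processing above: the stored offsets are shifted so the NMS code can treat the values as signed int8, per the "to uint8 is done in NMS code" comments.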
diff --git a/cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/README.md b/cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/README.md new file mode 100644 index 000000000..770f808d4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-optimization/compile-model-for.qaic](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-optimization/compile-model-for.qaic) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/_cm.yaml new file mode 100644 index 000000000..634bb948b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/_cm.yaml @@ -0,0 +1,257 @@ +alias: compile-model-for.qaic +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML optimization +deps: +- tags: detect,cpu +- names: + - qaic-apps-sdk + skip_if_env: + CM_REGISTER_CACHE: + - 'on' + tags: get,qaic,apps,sdk +- enable_if_env: + CM_COMPILE_RETINANET: + - 'yes' + names: + - retinanet-profile + - qaic-profile + tags: qaic,calibrate,_retinanet +- enable_if_env: + CM_COMPILE_RESNET: + - 'on' + names: + - resnet-profile + - qaic-profile + skip_if_env: + CM_REGISTER_CACHE: + - 'on' + tags: qaic,calibrate,_resnet50 +- names: + - model-src + tags: get,ml-model +input_mapping: + register: CM_REGISTER_CACHE +new_env_keys: +- CM_QAIC_MODEL* +- CM_ML_MODEL_FILE_WITH_PATH +tags: +- qaic +- compile +- model +- model-compile +- qaic-compile +uid: 3f0f43b5d0304d1c +variations: + bert-99: + adr: + model-src: + tags: bert-large,_onnx,_packed + deps: + - names: + - bert-profile + - qaic-profile + tags: calibrate,qaic,_bert-99 + env: + CM_COMPILE_BERT: 'on' + CM_QAIC_MODEL_COMPILER_ARGS: '' + CM_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -execute-nodes-in-fp16=Add,Div,Erf,Softmax + -quantization-schema=symmetric_with_uint8 -quantization-precision=Int8 -quantization-precision-bias=Int32 + -vvv -compile-only -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,384 + -multicast-weights -combine-inputs=false -combine-outputs=false + CM_QAIC_MODEL_TO_CONVERT: calibrate_bert_mlperf + bert-99,offline: + env: + CM_QAIC_MODEL_COMPILER_ARGS: -allocator-dealloc-delay=2 -size-split-granularity=1536 + -vtcm-working-set-limit-ratio=1 + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=2 + bert-99,offline,nsp.14: + env: + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=3 + bert-99,offline,nsp.16: + env: + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=2 + bert-99,server: + env: + CM_QAIC_MODEL_COMPILER_ARGS: -allocator-dealloc-delay=2 -size-split-granularity=1536 + -vtcm-working-set-limit-ratio=1 + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=3 + bert-99,server,nsp.14: + env: + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=3 + bert-99,singlestream: + env: + CM_QAIC_MODEL_COMPILER_ARGS: '' + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=8 -ols=1 + bert-99,singlestream,nsp.14: + env: + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=8 -ols=1 + bert-99.9: + adr: + model-src: + tags: bert-large,_onnx,_packed + base: + - no-quantized + env: + CM_COMPILE_BERT: 'on' + CM_QAIC_MODEL_COMPILER_ARGS: '' + CM_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -convert-to-fp16 + -vvv -compile-only -onnx-define-symbol=batch_size,1 
-onnx-define-symbol=seg_length,384 + -combine-inputs=false -combine-outputs=false + CM_QAIC_MODEL_TO_CONVERT: bert_mlperf + bert-99.9,offline: + env: + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 -mos=1 -ols=2 + bert-99.9,offline,nsp.14: + env: + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 -mos=1 -ols=2 + bert-99.9,offline,nsp.16: + env: + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 -mos=1 -ols=2 + bert-99.9,server: + env: + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 + bert-99.9,server,nsp.14: + env: + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=2 + bs.#: + adr: + qaic-profile: + tags: _bs.# + env: + CM_QAIC_MODEL_BATCH_SIZE: '#' + group: batch-size + bs.1: + adr: + qaic-profile: + tags: _bs.1 + env: + CM_QAIC_MODEL_BATCH_SIZE: '1' + group: batch-size + filter-size.#: + ad: + qaic-profile: + tags: _filter-size.# + group: calib-dataset-filter-size + multistream: + group: mlperf-scenario + no-quantized: + env: + CM_QAIC_MODEL_QUANTIZATION: 'no' + group: quantization + nsp.14: + group: nsp + nsp.16: + group: nsp + nsp.8: + group: nsp + nsp.9: + group: nsp + offline: + group: mlperf-scenario + pc.#: + env: + CM_QAIC_MODEL_COMPILER_PERCENTILE_CALIBRATION_VALUE: '#' + CM_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS: -quantization-calibration=Percentile -percentile-calibration-value=<<>> + group: percentile-calibration + quantized: + default: true + env: + CM_QAIC_MODEL_QUANTIZATION: 'yes' + group: quantization + resnet50: + adr: + model-src: + tags: resnet50,_tf + default_variations: + model-framework: tf + env: + CM_COMPILE_RESNET: 'on' + CM_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -quantization-schema=symmetric_with_uint8 + -quantization-precision=Int8 -output-node-name=ArgMax -vvv -compile-only -use-producer-dma=1 + CM_QAIC_MODEL_TO_CONVERT: compile_resnet50_tf + resnet50,multistream: + env: + CM_QAIC_MODEL_COMPILER_ARGS: '' + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -mos=1 -ols=1 + resnet50,multistream,nsp.14: + default_variations: + batch-size: bs.1 + env: + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 + resnet50,offline: + env: + CM_QAIC_MODEL_COMPILER_ARGS: -sdp-cluster-sizes=2,2 -multicast-weights + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -mos=1,2 -ols=4 + resnet50,offline,nsp.14: + default_variations: + batch-size: bs.8 + env: + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -mos=1,2 -ols=4 + resnet50,server: + env: {} + resnet50,server,nsp.14: + default_variations: + batch-size: bs.8 + env: + CM_QAIC_MODEL_COMPILER_ARGS: -sdp-cluster-sizes=2,2 -mos=1,2 -multicast-weights + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -ols=4 + resnet50,server,nsp.16: + default_variations: + batch-size: bs.8 + env: + CM_QAIC_MODEL_COMPILER_ARGS: -sdp-cluster-sizes=4,4 -mos=1,4 + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=4 -ols=4 + resnet50,singlestream: + env: + CM_QAIC_MODEL_COMPILER_ARGS: -aic-num-of-instances=1 + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1 + resnet50,singlestream,nsp.14: + default_variations: + batch-size: bs.1 + env: + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1 + resnet50,tf: + ad: + model-src: + tags: _fix-input-shape + env: + CM_QAIC_MODEL_TO_CONVERT: calibrate_resnet50_tf + retinanet: + adr: + model-src: + tags: retinanet,_no-nms + env: + CM_COMPILE_RETINANET: 'on' + CM_QAIC_MODEL_COMPILER_ARGS: -aic-enable-depth-first + CM_QAIC_MODEL_COMPILER_PARAMS_BASE: -aic-hw -aic-hw-version=2.0 -compile-only + -enable-channelwise -onnx-define-symbol=batch_size,1 
-node-precision-info=<<>> + -quantization-schema-constants=symmetric_with_uint8 -quantization-schema-activations=asymmetric + -quantization-calibration=None + CM_QAIC_MODEL_TO_CONVERT: calibrate_retinanet_no_nms_mlperf + new_env_keys: + - CM_QAIC_MODEL_RETINANET_* + retinanet,multistream: {} + retinanet,nsp.14: + env: {} + retinanet,offline: + env: + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=1 -mos=1 -ols=1 + retinanet,offline,nsp.14: {} + retinanet,server: {} + retinanet,server,nsp.14: {} + retinanet,singlestream: + env: + CM_QAIC_MODEL_COMPILER_ARGS: '' + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1 + retinanet,singlestream,nsp.14: + env: + CM_QAIC_MODEL_COMPILER_ARGS_SUT: -aic-num-cores=8 -mos=1 -ols=1 + server: + group: mlperf-scenario + singlestream: + default: true + group: mlperf-scenario + tf: + group: model-framework diff --git a/cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/customize.py b/cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/customize.py new file mode 100644 index 000000000..b23476f8c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/customize.py @@ -0,0 +1,100 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_REGISTER_CACHE', '') == '': + + r = construct_compilation_cmd(env) + if r['return'] > 0: + return r + cmd = r['cmd'] + + print("Compiling from " + os.getcwd()) + + env['CM_QAIC_MODEL_FINAL_COMPILATION_CMD'] = cmd + + env['CM_RUN_CMD'] = cmd + else: + import shutil + print( + "Creating cache entry from " + + env['CM_REGISTER_CACHE'] + + " to " + + os.getcwd()) + r = shutil.copytree( + env['CM_REGISTER_CACHE'], + os.path.join( + os.getcwd(), + "elfs")) + print(r) + + return {'return': 0} + + +def construct_compilation_cmd(env): + compiler_params_base = env['CM_QAIC_MODEL_COMPILER_PARAMS_BASE'] + compiler_args = env['CM_QAIC_MODEL_COMPILER_ARGS'] + \ + ' ' + env.get('CM_QAIC_MODEL_COMPILER_ARGS_SUT', '') + batchsize = env.get('CM_QAIC_MODEL_BATCH_SIZE') + + if env.get('CM_QAIC_MODEL_QUANTIZATION', '') == 'yes': + profile_string = " -load-profile=" + \ + env['CM_QAIC_MODEL_PROFILE_WITH_PATH'] + else: + profile_string = '' + + compiler_params = compiler_params_base + ' ' + compiler_args + + if batchsize: + compiler_params += " -batchsize=" + batchsize + + percentile_calibration_params = env.get( + 'CM_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS') + if percentile_calibration_params: + compiler_params += " " + percentile_calibration_params + + aic_binary_dir = os.path.join(os.getcwd(), "elfs") + + cmd = env['CM_QAIC_EXEC_PATH'] + \ + " -model=" + env['CM_ML_MODEL_FILE_WITH_PATH'] + \ + profile_string + ' -aic-binary-dir=' + aic_binary_dir + ' ' \ + + compiler_params + + return {'return': 0, 'cmd': cmd} + + +def postprocess(i): + + env = i['env'] + env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH'] = os.path.join( + os.getcwd(), "elfs", "programqpc.bin") + if not 
os.path.isdir(os.path.join(os.getcwd(), "elfs")): + return { + 'return': 1, 'error': 'elfs directory not found inside the compiled directory'} + + env['CM_ML_MODEL_FILE_WITH_PATH'] = env['CM_QAIC_MODEL_COMPILED_BINARY_WITH_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/run.sh b/cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/run.sh new file mode 100644 index 000000000..c5c3c04cb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/compile-model-for.qaic/run.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +run "rm -rf elfs" +run "$CM_RUN_CMD" diff --git a/cmx4mlops/cmx4mlops/repo/script/compile-program/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/compile-program/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/compile-program/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/compile-program/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/compile-program/README-extra.md new file mode 100644 index 000000000..87d157282 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/compile-program/README-extra.md @@ -0,0 +1,3 @@ +This script compiles C and C++ programs. + +It is a part of our universal benchmarking and optimization roadmap: https://github.com/mlcommons/cm4mlops/issues/23 diff --git a/cmx4mlops/cmx4mlops/repo/script/compile-program/README.md b/cmx4mlops/cmx4mlops/repo/script/compile-program/README.md new file mode 100644 index 000000000..f8b29a0bb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/compile-program/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/compile-program](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/compile-program) for the documentation of this CM script. 
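The `compile-model-for.qaic` variations defined above are composed on the command line as `_tag` entries, with groups such as `mlperf-scenario`, `nsp` and `batch-size` keeping mutually exclusive options apart. A minimal sketch, assuming the standard `cmr` front-end (tag and variation names are taken from the `_cm.yaml` metadata; the cache path is hypothetical):

```
# Compile ResNet50 for the offline scenario on a 14-NSP QAIC device
cmr "qaic compile model model-compile qaic-compile _resnet50 _offline _nsp.14"

# Reuse an existing compiled binary instead of recompiling:
# --register maps to CM_REGISTER_CACHE via input_mapping and makes
# customize.py copy the given directory into a new cache entry
cmr "qaic compile model model-compile qaic-compile _bert-99 _singlestream" --register=/path/to/precompiled/elfs
```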
diff --git a/cmx4mlops/cmx4mlops/repo/script/compile-program/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/compile-program/_cm.yaml new file mode 100644 index 000000000..2e5c9d0fd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/compile-program/_cm.yaml @@ -0,0 +1,23 @@ +alias: compile-program +automation_alias: script +automation_uid: 5b4e0237da074764 +category: DevOps automation +clean_files: +- tmp-run.out +default_env: + SKIP_RECOMPILE: 'no' +deps: +- tags: detect,cpu +- names: + - compiler + tags: get,compiler +- tags: get,compiler-flags +tags: +- compile +- program +- c-program +- cpp-program +- compile-program +- compile-c-program +- compile-cpp-program +uid: c05042ba005a4bfa diff --git a/cmx4mlops/cmx4mlops/repo/script/compile-program/customize.py b/cmx4mlops/cmx4mlops/repo/script/compile-program/customize.py new file mode 100644 index 000000000..9630788d6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/compile-program/customize.py @@ -0,0 +1,77 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + os_info = i['os_info'] + + env = i['env'] + CPPFLAGS = env.get('+ CPPFLAGS', []) + env['CM_C_COMPILER_FLAGS'] = " ".join(env.get('+ CFLAGS', []) + CPPFLAGS) + env['CM_CXX_COMPILER_FLAGS'] = " ".join( + env.get('+ CXXFLAGS', []) + CPPFLAGS) + env['CM_F_COMPILER_FLAGS'] = " ".join(env.get('+ FFLAGS', [])) + + CPATH = env.get('+CPATH', []) + env['CM_C_INCLUDE_PATH'] = " -I".join([" "] + + env.get('+C_INCLUDE_PATH', []) + + CPATH) + env['CM_CPLUS_INCLUDE_PATH'] = " -I".join( + [" "] + env.get('+CPLUS_INCLUDE_PATH', []) + CPATH) + env['CM_F_INCLUDE_PATH'] = " -I".join([" "] + + env.get('+F_INCLUDE_PATH', []) + + CPATH) + + # If windows, need to extend it more ... 
+ if os_info['platform'] == 'windows' and env.get( + 'CM_COMPILER_FAMILY', '') != 'LLVM': + print("WARNING: compile-program script should be extended to support flags for non-LLVM compilers on Windows") + return {'return': 0} + + LDFLAGS = env.get('+ LDFLAGS', []) + + env['CM_C_LINKER_FLAGS'] = " ".join(env.get('+ LDCFLAGS', []) + LDFLAGS) + env['CM_CXX_LINKER_FLAGS'] = " ".join( + env.get('+ LDCXXFLAGS', []) + LDFLAGS) + env['CM_F_LINKER_FLAGS'] = " ".join(env.get('+ LDFFLAGS', []) + LDFLAGS) + + if env.get('CM_LINKER_LANG', 'C') == "C": + env['CM_LINKER_BIN'] = env['CM_C_COMPILER_BIN'] + env['CM_LINKER_WITH_PATH'] = env['CM_C_COMPILER_WITH_PATH'] + env['CM_LINKER_COMPILE_FLAGS'] = env['CM_C_COMPILER_FLAGS'] + env['CM_LINKER_FLAGS'] = env['CM_C_LINKER_FLAGS'] + + elif env.get('CM_LINKER_LANG', 'C') == "CXX": + env['CM_LINKER_BIN'] = env['CM_CXX_COMPILER_BIN'] + env['CM_LINKER_WITH_PATH'] = env['CM_CXX_COMPILER_WITH_PATH'] + env['CM_LINKER_COMPILE_FLAGS'] = env['CM_CXX_COMPILER_FLAGS'] + env['CM_LINKER_FLAGS'] = env['CM_CXX_LINKER_FLAGS'] + + elif env.get('CM_LINKER_LANG', 'C') == "F": + env['CM_LINKER_BIN'] = env['CM_F_COMPILER_BIN'] + env['CM_LINKER_WITH_PATH'] = env['CM_F_COMPILER_WITH_PATH'] + env['CM_LINKER_COMPILE_FLAGS'] = env['CM_F_COMPILER_FLAGS'] + env['CM_LINKER_FLAGS'] = env['CM_F_LINKER_FLAGS'] + + env['CM_LD_LIBRARY_PATH'] = " -L".join([" "] + + env.get('+LD_LIBRARY_PATH', [])) + env['CM_SOURCE_FOLDER_PATH'] = env['CM_SOURCE_FOLDER_PATH'] if 'CM_SOURCE_FOLDER_PATH' in env else env[ + 'CM_TMP_CURRENT_SCRIPT_PATH'] if 'CM_TMP_CURRENT_SCRIPT_PATH' in env else '' + + return {'return': 0} + + +def postprocess(i): + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/compile-program/run.bat b/cmx4mlops/cmx4mlops/repo/script/compile-program/run.bat new file mode 100644 index 000000000..ece5d9e9c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/compile-program/run.bat @@ -0,0 +1,35 @@ +rem Compile program + +set BIN_NAME=%CM_BIN_NAME% +IF NOT DEFINED CM_BIN_NAME SET BIN_NAME=run.exe + +set RUN_DIR=%CM_RUN_DIR% +IF NOT DEFINED CM_RUN_DIR SET RUN_DIR=. + +echo. +echo Checking compiler version ... +echo. + +"%CM_C_COMPILER_WITH_PATH%" %CM_C_COMPILER_FLAG_VERSION% + +echo. +echo Compiling source files ... +echo. 
+
+if not exist %RUN_DIR% mkdir %RUN_DIR%
+
+cd %CM_SOURCE_FOLDER_PATH%
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+if not "%CM_C_SOURCE_FILES%" == "" (
+  echo %CM_C_COMPILER_WITH_PATH% %CM_C_COMPILER_FLAGS% %CM_C_INCLUDE_PATH% %CM_C_SOURCE_FILES% %CM_LD_LIBRARY_PATH% %LDCFLAGS% %CM_C_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
+  "%CM_C_COMPILER_WITH_PATH%" %CM_C_COMPILER_FLAGS% %CM_C_INCLUDE_PATH% %CM_C_SOURCE_FILES% %CM_LD_LIBRARY_PATH% %LDCFLAGS% %CM_C_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
+  IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+)
+
+if not "%CM_CXX_SOURCE_FILES%" == "" (
+  echo %CM_CXX_COMPILER_WITH_PATH% %CM_CXX_SOURCE_FILES% %CM_CXX_COMPILER_FLAGS% %CM_CPLUS_INCLUDE_PATH% %CM_LD_LIBRARY_PATH% %LDCXXFLAGS% %CM_CXX_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
+  "%CM_CXX_COMPILER_WITH_PATH%" %CM_CXX_SOURCE_FILES% %CM_CXX_COMPILER_FLAGS% %CM_CPLUS_INCLUDE_PATH% %CM_LD_LIBRARY_PATH% %LDCXXFLAGS% %CM_CXX_COMPILER_FLAG_OUTPUT%"%RUN_DIR%\%BIN_NAME%"
+  IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+)
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/compile-program/run.sh b/cmx4mlops/cmx4mlops/repo/script/compile-program/run.sh
new file mode 100644
index 000000000..7e98bc47d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/compile-program/run.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+# Compile
+
+BIN_NAME=${CM_BIN_NAME:-run.out}
+RUN_DIR=${CM_RUN_DIR:-.}
+echo "RUN_DIR=$RUN_DIR"
+
+if [[ ${CM_SKIP_RECOMPILE} == "yes" ]]; then
+  if [ -f ${RUN_DIR}/${BIN_NAME} ]; then
+    exit 0
+  fi
+fi
+
+rm -f ${RUN_DIR}/${BIN_NAME}
+
+if [ -z "${CM_SOURCE_FOLDER_PATH}" ]; then
+  echo "No source directory (CM_SOURCE_FOLDER_PATH) specified"
+  exit 1
+fi
+
+if [[ -z "${CM_C_SOURCE_FILES}" && -z "${CM_CXX_SOURCE_FILES}" && -z "${CM_F_SOURCE_FILES}" ]]; then
+  echo "No source files (CM_C_SOURCE_FILES or CM_CXX_SOURCE_FILES or CM_F_SOURCE_FILES) specified"
+  exit 1
+fi
+
+echo ""
+echo "Checking compiler version ..."
+echo ""
+
+${CM_C_COMPILER_WITH_PATH} ${CM_C_COMPILER_FLAG_VERSION}
+
+echo ""
+echo "Compiling source files ..."
+echo ""
+
+cd ${CM_SOURCE_FOLDER_PATH}
+test $? -eq 0 || exit 1
+
+IFS=';' read -ra FILES <<< "${CM_C_SOURCE_FILES}"
+for file in "${FILES[@]}"; do
+  base="$(basename -- $file)"
+  base_name=${base%.*}
+  echo $base
+  echo $base_name
+  CMD="${CM_C_COMPILER_WITH_PATH} -c ${CM_C_COMPILER_FLAGS} ${CM_C_INCLUDE_PATH} $file ${CM_C_COMPILER_FLAG_OUTPUT}$base_name.o"
+  echo $CMD
+  eval $CMD
+  test $? -eq 0 || exit 1
+done
+
+IFS=';' read -ra FILES <<< "${CM_CXX_SOURCE_FILES}"
+for file in "${FILES[@]}"; do
+  base="$(basename -- $file)"
+  base_name=${base%.*}
+  echo $base
+  echo $base_name
+  CMD="${CM_CXX_COMPILER_WITH_PATH} -c ${CM_CXX_COMPILER_FLAGS} ${CM_CPLUS_INCLUDE_PATH} $file ${CM_CXX_COMPILER_FLAG_OUTPUT}$base_name.o"
+  echo $CMD
+  eval $CMD
+  test $? -eq 0 || exit 1
+done
+
+
+echo ""
+echo "Linking ..."
+echo ""
+CMD="${CM_LINKER_WITH_PATH} ${CM_LINKER_COMPILE_FLAGS} *.o -o ${RUN_DIR}/${BIN_NAME} ${CM_LD_LIBRARY_PATH} ${CM_LINKER_FLAGS}"
+echo $CMD
+eval $CMD
+
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
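The `compile-program` run scripts above compile each source file to an object file and then link everything with the `CM_LINKER_*` variables prepared in `customize.py`. A usage sketch, assuming the `cmr` front-end and its `--env.*` pass-through (the folder and file names are hypothetical):

```
cmr "compile program c-program" \
    --env.CM_SOURCE_FOLDER_PATH=$HOME/src/hello \
    --env.CM_C_SOURCE_FILES=hello.c \
    --env.CM_BIN_NAME=hello.out
```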
diff --git a/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/README.md b/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/README.md new file mode 100644 index 000000000..64d800238 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/convert-csv-to-md](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/convert-csv-to-md) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/_cm.yaml new file mode 100644 index 000000000..962601431 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/_cm.yaml @@ -0,0 +1,34 @@ +alias: convert-csv-to-md +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: DevOps automation +deps: +- names: + - python, python3 + tags: get,python3 +- names: + - pandas + tags: get,generic-python-lib,_pandas + version_min: '1.0' +- names: + - tabulate + tags: get,generic-python-lib,_package.tabulate +docker_input_mapping: {} +input_description: {} +input_mapping: + csv_file: CM_CSV_FILE + md_file: CM_MD_FILE +new_env_keys: [] +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- csv-to-md +- convert +- to-md +- from-csv +uid: 200a95b80bee4a25 +variations: {} +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/customize.py b/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/customize.py new file mode 100644 index 000000000..52dd88927 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/customize.py @@ -0,0 +1,42 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + csv_file = env.get('CM_CSV_FILE', '') + md_file = env.get('CM_MD_FILE', '') + process_file = os.path.join(i['run_script_input']['path'], "process.py") + + env['CM_RUN_CMD'] = '{} {} {} {} '.format( + env["CM_PYTHON_BIN_WITH_PATH"], process_file, csv_file, md_file) + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/process.py b/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/process.py new file mode 100644 index 000000000..df1637c67 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/process.py @@ -0,0 +1,10 @@ +import pandas as pd +import sys + +csv_file = sys.argv[1] if len(sys.argv) > 1 else "summary.csv" +md_file = sys.argv[2] if len(sys.argv) > 2 else "converted.md" + +df = pd.read_csv(csv_file, engine='python') + +with open(md_file, "w") as md: + df.to_markdown(buf=md) diff --git a/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/run.bat b/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/run.bat new file mode 100644 index 000000000..648302ca7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/run.bat @@ -0,0 
+1 @@ +rem native script diff --git a/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/run.sh b/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/run.sh new file mode 100644 index 000000000..59b1aed3d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/convert-csv-to-md/run.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +run "$CM_RUN_CMD" + diff --git a/cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/README.md b/cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/README.md new file mode 100644 index 000000000..ae5444ad0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/convert-ml-model-huggingface-to-onnx](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/convert-ml-model-huggingface-to-onnx) for the documentation of this CM script. 
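`convert-csv-to-md` is a thin wrapper around `pandas.DataFrame.to_markdown()`, which is why `tabulate` appears as a dependency. A sketch using the two mapped inputs from its `_cm.yaml` (`csv_file` and `md_file` map to `CM_CSV_FILE` and `CM_MD_FILE`; the file names are examples, and `process.py` falls back to `summary.csv`/`converted.md` when they are omitted):

```
cmr "csv-to-md convert to-md from-csv" --csv_file=summary.csv --md_file=summary.md
```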
diff --git a/cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/_cm.yaml new file mode 100644 index 000000000..54f54b0e5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/_cm.yaml @@ -0,0 +1,29 @@ +alias: convert-ml-model-huggingface-to-onnx +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +deps: +- names: + - python3 + - python + tags: get,python3 +- tags: get,generic-python-lib,_transformers +- tags: get,generic-python-lib,_onnxruntime +env: {} +new_env_keys: +- CM_ML_MODEL* +- CM_MODEL_HUGG_PATH +- HUGGINGFACE_ONNX_FILE_PATH +tags: +- ml-model +- model +- huggingface-to-onnx +- onnx +- huggingface +- convert +uid: eacb01655d7e49ac +variations: + model-path.#: + env: + CM_MODEL_HUGG_PATH: '#' diff --git a/cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/customize.py b/cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/customize.py new file mode 100644 index 000000000..5aedebf40 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/customize.py @@ -0,0 +1,39 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if env.get("CM_MODEL_HUGG_PATH", "") == "": + return {'return': 1, 'error': 'CM_MODEL_HUGG_PATH is not set'} + + automation = i['automation'] + + cm = automation.cmind + + path = os.getcwd() + + return {'return': 0} + + +def postprocess(i): + os_info = i['os_info'] + + env = i['env'] + env['HUGGINGFACE_ONNX_FILE_PATH'] = os.path.join(os.getcwd(), "model.onnx") + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/run.sh b/cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/run.sh new file mode 100644 index 000000000..56be76db9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/convert-ml-model-huggingface-to-onnx/run.sh @@ -0,0 +1,2 @@ +#!/bin/bash +python -m transformers.onnx --model=${CM_MODEL_HUGG_PATH} ${PWD} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
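In `convert-ml-model-huggingface-to-onnx`, the `_model-path.#` variation injects the Hugging Face model id into `CM_MODEL_HUGG_PATH`, `run.sh` forwards it to `python -m transformers.onnx`, and `customize.py` exports the result as `HUGGINGFACE_ONNX_FILE_PATH`. A sketch (the model id is a hypothetical example):

```
cmr "ml-model huggingface-to-onnx _model-path.distilbert-base-uncased"
```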
diff --git a/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/README.md b/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/README.md new file mode 100644 index 000000000..6932b1b7d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/copy-to-clipboard](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/copy-to-clipboard) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/_cm.yaml new file mode 100644 index 000000000..de631040b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/_cm.yaml @@ -0,0 +1,32 @@ +alias: copy-to-clipboard +uid: 8b3aaa97ce58474d + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: false + +category: "DevOps automation" + +tags: +- copy +- to +- clipboard +- copy-to-clipboard + +deps: + + # Get Python + - tags: get,python3 + names: + - python + - python3 + + # Extra package + - tags: get,generic-python-lib,_package.pyperclip + +input_mapping: + text: CM_COPY_TO_CLIPBOARD_TEXT + t: CM_COPY_TO_CLIPBOARD_TEXT + add_quotes: CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES + q: CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES diff --git a/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/code.py b/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/code.py new file mode 100644 index 000000000..0a1aa014a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/code.py @@ -0,0 +1,13 @@ +import os +import pyperclip as pc + +text = os.environ.get('CM_COPY_TO_CLIPBOARD_TEXT', '') + +add_quotes = os.environ.get( + 'CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES', '') in [ + True, 'True', 'yes'] + +if add_quotes: + text = '"' + text + '"' + +pc.copy(text) diff --git a/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/run.bat b/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/run.bat new file mode 100644 index 000000000..545178f20 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/run.bat @@ -0,0 +1,4 @@ +rem native script + +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/run.sh b/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/run.sh new file mode 100644 index 000000000..fa6f579f7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/copy-to-clipboard/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/create-conda-env/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/create-conda-env/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/create-conda-env/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
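`copy-to-clipboard` forwards `CM_COPY_TO_CLIPBOARD_TEXT` to `pyperclip.copy()`. A sketch using the long and short input aliases from its `input_mapping` (treat the exact boolean flag syntax as an assumption about the CM CLI):

```
cmr "copy to clipboard" --text="hello world"

# same, but wrap the copied text in double quotes
cmr "copy to clipboard" --t="hello world" --q=yes
```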
diff --git a/cmx4mlops/cmx4mlops/repo/script/create-conda-env/README.md b/cmx4mlops/cmx4mlops/repo/script/create-conda-env/README.md new file mode 100644 index 000000000..f771df5db --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/create-conda-env/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/create-conda-env](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/create-conda-env) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/create-conda-env/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/create-conda-env/_cm.yaml new file mode 100644 index 000000000..56a61f0f1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/create-conda-env/_cm.yaml @@ -0,0 +1,30 @@ +alias: create-conda-env +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: DevOps automation +clean_files: [] +deps: +- tags: detect,os +- names: + - conda + tags: get,conda +new_env_keys: +- +PATH +- +LD_LIBRARY_PATH +- CM_CONDA_PREFIX +- CONDA_PREFIX +- CM_CONDA_BIN_PATH +- CM_CONDA_LIB_PATH +tags: +- create +- get +- env +- conda-env +- conda-environment +- create-conda-environment +uid: e39e0b04c86a40f2 +variations: + name.#: + env: + CM_CONDA_ENV_NAME: '#' diff --git a/cmx4mlops/cmx4mlops/repo/script/create-conda-env/customize.py b/cmx4mlops/cmx4mlops/repo/script/create-conda-env/customize.py new file mode 100644 index 000000000..af8a82fcf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/create-conda-env/customize.py @@ -0,0 +1,44 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + automation = i['automation'] + run_script_input = i['run_script_input'] + + recursion_spaces = i['recursion_spaces'] + + if env.get('CM_CONDA_ENV_NAME', '') == '': + return {'return': 1, 'error': 'Please use "_name." variation'} + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + + conda_prefix = os.getcwd() + env['CONDA_PREFIX'] = conda_prefix + env['CM_CONDA_PREFIX'] = conda_prefix + env['CM_CONDA_BIN_PATH'] = os.path.join(conda_prefix, "bin") + env['CM_CONDA_LIB_PATH'] = os.path.join(conda_prefix, "lib") + + env['+PATH'] = [env['CM_CONDA_BIN_PATH']] + env['+LD_LIBRARY_PATH'] = [env['CM_CONDA_LIB_PATH']] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/create-conda-env/run.sh b/cmx4mlops/cmx4mlops/repo/script/create-conda-env/run.sh new file mode 100644 index 000000000..540dde9b1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/create-conda-env/run.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +cmd="${CM_CONDA_BIN_WITH_PATH} create -p ${PWD}" +echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit $? + diff --git a/cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. 
+ +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/README.md b/cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/README.md new file mode 100644 index 000000000..e6020d866 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/CM-automation/create-custom-cache-entry](https://docs.mlcommons.org/cm4mlops/scripts/CM-automation/create-custom-cache-entry) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/_cm.yaml new file mode 100644 index 000000000..7272bb99a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/_cm.yaml @@ -0,0 +1,27 @@ +alias: create-custom-cache-entry +uid: 485741440fbe4236 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +tags: +- create +- custom +- cache +- entry + +category: CM automation + +cache: true + +input_mapping: + env_key: CM_CUSTOM_CACHE_ENTRY_ENV_KEY + env_key2: CM_CUSTOM_CACHE_ENTRY_ENV_KEY2 + path: CM_CUSTOM_CACHE_ENTRY_PATH + to: CM_CUSTOM_CACHE_ENTRY_PATH + +new_env_keys: +- CM_CUSTOM_CACHE_ENTRY* + +print_env_at_the_end: + CM_CUSTOM_CACHE_ENTRY_PATH: "Path to custom cache entry" diff --git a/cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/customize.py b/cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/customize.py new file mode 100644 index 000000000..c6eb8dd35 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/create-custom-cache-entry/customize.py @@ -0,0 +1,59 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + # CM script internal variables + env = i['env'] + + extra_cache_tags = [] + if env.get('CM_EXTRA_CACHE_TAGS', '').strip() == '': + print('') + extra_cache_tags_str = input( + 'Enter extra tags for the custom CACHE entry separated by comma: ') + + extra_cache_tags = extra_cache_tags_str.strip().split(',') + + return {'return': 0, 'add_extra_cache_tags': extra_cache_tags} + + +def postprocess(i): + + env = i['env'] + + path = env.get('CM_CUSTOM_CACHE_ENTRY_PATH', '').strip() + + if path != '': + if not os.path.isdir(path): + os.makedirs(path) + else: + path = os.getcwd() + + x = '' + env_key = env.get('CM_CUSTOM_CACHE_ENTRY_ENV_KEY', '') + if env_key != '': + x = env_key + '_' + + env['CM_CUSTOM_CACHE_ENTRY_{}PATH'.format(x)] = path + env['CM_CUSTOM_CACHE_ENTRY_PATH'] = path + + env_key2 = env.get('CM_CUSTOM_CACHE_ENTRY_ENV_KEY2', '') + v = env.get(env_key2, '') + real_path = v if v != '' else path + + env['CM_CUSTOM_CACHE_ENTRY_{}REAL_PATH'.format(x)] = real_path + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ 
b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/README.md b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/README.md
new file mode 100644
index 000000000..3f462acee
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/TinyML-automation/create-fpgaconvnet-app-tinyml](https://docs.mlcommons.org/cm4mlops/scripts/TinyML-automation/create-fpgaconvnet-app-tinyml) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/_cm.yaml
new file mode 100644
index 000000000..3ad1cdc9b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/_cm.yaml
@@ -0,0 +1,44 @@
+alias: create-fpgaconvnet-app-tinyml
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: false
+category: TinyML automation
+deps:
+- names:
+  - config-generator
+  tags: create,fpgaconvnet,config
+- names:
+  - xilinx-sdk
+  tags: get,xilinx,sdk
+  version: '2019.1'
+- names:
+  - tensorflow
+  tags: get,tensorflow
+input_description: {}
+input_mapping: {}
+new_env_keys: []
+new_state_keys: []
+post_deps: []
+posthook_deps: []
+prehook_deps: []
+tags:
+- create
+- app
+- fpgaconvnet
+uid: 618f3520e98e4728
+variations:
+  ic:
+    add_deps:
+      config-generator:
+        tags: _ic
+    default: true
+    group: benchmark
+  zc706:
+    add_deps:
+      config-generator:
+        tags: _zc706
+    default: true
+    env:
+      CM_TINY_BOARD: zc706
+    group: board
+versions: {}
diff --git a/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/customize.py b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/customize.py
new file mode 100644
index 000000000..fd9016782
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/customize.py
@@ -0,0 +1,53 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    meta = i['meta']
+
+    automation = i['automation']
+
+    quiet = (env.get('CM_QUIET', False) == 'yes')
+
+    network_env_name = env['CM_TINY_FPGACONVNET_NETWORK_ENV_NAME']
+    run_dir = env['CM_TINY_FPGACONVNET_' + network_env_name + '_RUN_DIR']
+
+    run_cmd = "cd " + run_dir + " && xsct create_boot_image.tcl"
+
+    env['CM_RUN_CMD'] = run_cmd
+    env['CM_RUN_DIR'] = run_dir
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    network = env['CM_TINY_NETWORK_NAME']
+    json_location = os.path.join(
+        env['CM_RUN_DIR'],
+        env['CM_TINY_NETWORK_NAME'] + ".json")
+    if os.path.exists(json_location):
+        print(
+            f"JSON configuration file for {network} created at {json_location}")
+    else:
+        return {'return': 1, 'error': "JSON configuration file generation failed"}
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/run.sh b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/run.sh
new file mode 100644
index 000000000..fe67c233c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-app-tinyml/run.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+#To export any variable
+#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
+
+#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+
+function exit_if_error() {
+  test $? -eq 0 || exit $?
+}
+
+function run() {
+  echo "Running: "
+  echo "$1"
+  echo ""
+  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+    eval "$1"
+  fi
+  exit_if_error
+}
+
+#Add your run commands here...
+run "${CM_RUN_CMD}"
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/README.md b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/README.md
new file mode 100644
index 000000000..08441f209
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/TinyML-automation/create-fpgaconvnet-config-tinyml](https://docs.mlcommons.org/cm4mlops/scripts/TinyML-automation/create-fpgaconvnet-config-tinyml) for the documentation of this CM script.
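Since `zc706` and `ic` are the defaults of their respective variation groups, building the fpgaConvNet boot image with `create-fpgaconvnet-app-tinyml` above reduces to a single call once the Xilinx SDK 2019.1 dependency resolves on the host. A sketch:

```
cmr "create app fpgaconvnet _zc706 _ic"
```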
diff --git a/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/_cm.yaml new file mode 100644 index 000000000..f74a3165d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/_cm.yaml @@ -0,0 +1,40 @@ +alias: create-fpgaconvnet-config-tinyml +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: TinyML automation +deps: +- tags: get,python3 +- names: + - ml-model + tags: get,ml-model,tiny +- tags: get,git,repo,_repo.https://github.com/mlcommons/submissions_tiny_v1.1 +input_description: {} +input_mapping: {} +new_env_keys: +- CM_TINY_FPGACONVNET* +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- create +- config +- fpgaconvnet +uid: f6cdad166cfa47bc +variations: + ic: + add_deps: + ml-model: + tags: resnet,_onnx + default: true + group: benchmark + zc706: + default: true + env: + CM_TINY_BOARD: zc706 + group: board + zc706,ic: + env: + CM_TINY_NETWORK_NAME: zc706-resnet +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/customize.py b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/customize.py new file mode 100644 index 000000000..9b44d6358 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/customize.py @@ -0,0 +1,75 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + code_path = os.path.join( + env['CM_GIT_REPO_CHECKOUT_PATH'], + "closed", + "fpgaconvnet", + "code") + network_env_name = env['CM_TINY_NETWORK_NAME'].replace("-", "_").upper() + env['CM_TINY_FPGACONVNET_NETWORK_ENV_NAME'] = network_env_name + env['CM_TINY_FPGACONVNET_' + network_env_name + '_CODE_PATH'] = code_path + + board = env.get('CM_TINY_BOARD', 'zc706') + + benchmark = env.get('CM_TINY_BENCHMARK', 'ic') + + run_dir = os.path.join(code_path, board, benchmark) + env['CM_TINY_FPGACONVNET_' + network_env_name + '_RUN_DIR'] = run_dir + + run_cmd = "cd " + run_dir + " && " + \ + env['CM_PYTHON_BIN_WITH_PATH'] + " " + "create_config.py" + + env['ML_MODEL_FILE_WITH_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + env['CM_RUN_CMD'] = run_cmd + env['CM_RUN_DIR'] = run_dir + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + network = env['CM_TINY_NETWORK_NAME'] + env['CM_TINY_FPGACONVNET_NETWORK_NAME'] = network + network_env_name = env['CM_TINY_FPGACONVNET_NETWORK_ENV_NAME'] + + json_location = os.path.join( + env['CM_RUN_DIR'], + env['CM_TINY_NETWORK_NAME'] + ".json") + if os.path.exists(json_location): + print( + f"JSON configuration file for {network} created at {json_location}") + else: + return {'return': 1, 'error': "JSON configuration file generation failed"} + + env['CM_TINY_FPGACONVNET_CONFIG_FILE_' + + network_env_name + '_PATH'] = json_location + env['CM_GET_DEPENDENT_CACHED_PATH'] = json_location + + return {'return': 
0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/run.sh b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/run.sh
new file mode 100644
index 000000000..fe67c233c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/create-fpgaconvnet-config-tinyml/run.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+#To export any variable
+#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
+
+#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+
+function exit_if_error() {
+  test $? -eq 0 || exit $?
+}
+
+function run() {
+  echo "Running: "
+  echo "$1"
+  echo ""
+  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+    eval "$1"
+  fi
+  exit_if_error
+}
+
+#Add your run commands here...
+run "${CM_RUN_CMD}"
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/create-patch/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/create-patch/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/create-patch/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/create-patch/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/create-patch/README-extra.md
new file mode 100644
index 000000000..de783504d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/create-patch/README-extra.md
@@ -0,0 +1,5 @@
+# Examples
+
+```
+cmr "create patch" --new=new --old=old --exclude=.git,__pycache__
+```
diff --git a/cmx4mlops/cmx4mlops/repo/script/create-patch/README.md b/cmx4mlops/cmx4mlops/repo/script/create-patch/README.md
new file mode 100644
index 000000000..ed7f38ac8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/create-patch/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/create-patch](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/create-patch) for the documentation of this CM script.
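The configuration generator pairs with the application script shown earlier; for the `zc706,ic` combination above the network is named `zc706-resnet`, so a successful run leaves `zc706-resnet.json` in the run directory and exports its location via the `CM_TINY_FPGACONVNET*` keys. A sketch:

```
cmr "create config fpgaconvnet _zc706 _ic"
# customize.py then reports something like:
#   JSON configuration file for zc706-resnet created at .../zc706-resnet.json
```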
diff --git a/cmx4mlops/cmx4mlops/repo/script/create-patch/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/create-patch/_cm.yaml new file mode 100644 index 000000000..cbcedb648 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/create-patch/_cm.yaml @@ -0,0 +1,22 @@ +uid: 0659dc1f75664c65 +alias: create-patch + +category: "DevOps automation" + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: false + +deps: +- tags: detect,os + +input_mapping: + new: CM_CREATE_PATCH_NEW + old: CM_CREATE_PATCH_OLD + exclude: CM_CREATE_PATCH_EXCLUDE + +tags: +- create +- patch + diff --git a/cmx4mlops/cmx4mlops/repo/script/create-patch/customize.py b/cmx4mlops/cmx4mlops/repo/script/create-patch/customize.py new file mode 100644 index 000000000..3aa82abfd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/create-patch/customize.py @@ -0,0 +1,68 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + new_dir = env.get('CM_CREATE_PATCH_NEW', '') + if new_dir == '': + return {'return': 1, 'error': 'specify NEW directory using --new'} + if not os.path.isdir(new_dir): + return {'return': 1, + 'error': 'NEW directory doesn\'t exist {}'.format(new_dir)} + + old_dir = env.get('CM_CREATE_PATCH_OLD', '') + if old_dir == '': + return {'return': 1, 'error': 'specify OLD directory using --old'} + if not os.path.isdir(old_dir): + return {'return': 1, + 'error': 'OLD directory doesn\'t exist {}'.format(old_dir)} + + exclude = env.get('CM_CREATE_PATCH_EXCLUDE', '').strip() + x_exclude = '' + + if exclude != '': + for e in exclude.split(','): + x_exclude += ' --exclude={}'.format(e) + + cmd = 'diff -Naur {} {} {} > patch.patch'.format( + x_exclude, old_dir, new_dir) + + if not quiet: + print('') + print('Running command:') + print('') + print(cmd) + print('') + + os.system(cmd) + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/README-extra.md new file mode 100644 index 000000000..8768e0fc7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/README-extra.md @@ -0,0 +1 @@ +This CM script is automatically called from run-terraform script when `--destroy` option is given. 
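For reference, with the inputs from the README-extra example above, the command that `create-patch/customize.py` assembles and executes via `os.system` is equivalent to:

```
diff -Naur --exclude=.git --exclude=__pycache__ old new > patch.patch
```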
diff --git a/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/README.md b/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/README.md new file mode 100644 index 000000000..91c937248 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Cloud-automation/destroy-terraform](https://docs.mlcommons.org/cm4mlops/scripts/Cloud-automation/destroy-terraform) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/_cm.yaml new file mode 100644 index 000000000..5935f6d7a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/_cm.yaml @@ -0,0 +1,13 @@ +alias: destroy-terraform +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Cloud automation +deps: +- names: + - terraform + tags: get,terraform +tags: +- destroy +- terraform +- cmd +uid: 3463458d03054856 diff --git a/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/customize.py b/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/customize.py new file mode 100644 index 000000000..f42f5d9f2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/customize.py @@ -0,0 +1,29 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + return {'return': 0} + + +def postprocess(i): + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/run.bat b/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/run.bat new file mode 100644 index 000000000..648302ca7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/run.sh b/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/run.sh new file mode 100644 index 000000000..9e0ae31ac --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/destroy-terraform/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash +source ${CM_TERRAFORM_CONFIG_DIR}/credentials.sh +source ${CM_TERRAFORM_CONFIG_DIR}/apply_credentials.sh +cd ${CM_TERRAFORM_RUN_DIR} +terraform destroy --auto-approve +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-cpu/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/detect-cpu/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/detect-cpu/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
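As its README-extra notes, `destroy-terraform` is normally reached indirectly. A sketch of both entry points (the `run terraform` tag string is an assumption about the companion script, which is not part of this hunk):

```
# usual path: let run-terraform trigger the teardown
cmr "run terraform" --destroy

# direct call, assuming CM_TERRAFORM_CONFIG_DIR and CM_TERRAFORM_RUN_DIR are already set
cmr "destroy terraform cmd"
```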
diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-cpu/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/detect-cpu/README-extra.md new file mode 100644 index 000000000..c2326c281 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/detect-cpu/README-extra.md @@ -0,0 +1,17 @@ +# Detect CPU +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the host CPU details and exports them in a unified list of environment variables to be reused across the supported operating systems. + +## Exported Variables +* `CM_HOST_CPU_L1I_CACHE_SIZE` +* `CM_HOST_CPU_L2_CACHE_SIZE` +* `CM_HOST_CPU_MEMSIZE` +* `CM_HOST_CPU_SOCKETS` +* `CM_HOST_CPU_THREADS_PER_CORE` +* `CM_HOST_CPU_TOTAL_CORES` +* `CM_HOST_CPU_TOTAL_LOGICAL_CORES` +* `CM_HOST_CPU_TOTAL_PHYSICAL_CORES` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 +3. macOS 12.6 diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-cpu/README.md b/cmx4mlops/cmx4mlops/repo/script/detect-cpu/README.md new file mode 100644 index 000000000..85059677d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/detect-cpu/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Platform-information/detect-cpu](https://docs.mlcommons.org/cm4mlops/scripts/Platform-information/detect-cpu) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-cpu/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/detect-cpu/_cm.yaml new file mode 100644 index 000000000..1da7a920e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/detect-cpu/_cm.yaml @@ -0,0 +1,22 @@ +alias: detect-cpu +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Platform information +clean_files: +- tmp-lscpu.out +- tmp-systeminfo.csv +- tmp-wmic-cpu.csv +deps: +- tags: detect,os +new_env_keys: +- CM_HOST_CPU_* +- CM_HOST_MEMORY_CAPACITY +- CM_HOST_DISK_CAPACITY +new_state_keys: +- host_device_raw_info +tags: +- detect +- cpu +- detect-cpu +- info +uid: 586c8a43320142f7 diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-cpu/customize.py b/cmx4mlops/cmx4mlops/repo/script/detect-cpu/customize.py new file mode 100644 index 000000000..f54829b20 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/detect-cpu/customize.py @@ -0,0 +1,203 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + +lscpu_out = 'tmp-lscpu.out' + + +def preprocess(i): + + if os.path.isfile(lscpu_out): + os.remove(lscpu_out) + + return {'return': 0} + + +def postprocess(i): + + state = i['state'] + + env = i['env'] + + os_info = i['os_info'] + + automation = i['automation'] + logger = automation.cmind.logger + + if os_info['platform'] == 'windows': + sys = [] + sys1 = [] + cpu = [] + cpu1 = [] + + import csv + + try: + f = 'tmp-systeminfo.csv' + + if not os.path.isfile(f): + print('WARNING: {} file was not generated!'.format(f)) + else: + keys = {} + j = 0 + with open(f, 'r') as csvf: + for s in csv.reader(csvf): + if j == 0: + keys = s + else: + x = {} + for k in range(0, len(s)): + x[keys[k]] = s[k] + + sys.append(x) + + if j == 1: + sys1 = x + + j += 1 + + except Exception as e: 
+ logger.warning( + 'WARNING: problem processing file {} ({})!'.format( + f, format(e))) + pass + + try: + f = 'tmp-wmic-cpu.csv' + if not os.path.isfile(f): + logger.warning('WARNING: {} file was not generated!'.format(f)) + else: + + keys = {} + j = 0 + + with open(f, 'r', encoding='utf16') as csvf: + for s in csv.reader(csvf): + if j == 1: + keys = s + elif j > 1: + x = {} + for k in range(0, len(s)): + x[keys[k]] = s[k] + + cpu.append(x) + + if j == 2: + cpu1 = x + + j += 1 + + except Exception as e: + logger.warning( + 'WARNING: problem processing file {} ({})!'.format( + f, format(e))) + pass + + state['host_device_raw_info'] = { + 'sys': sys, 'sys1': sys1, 'cpu': cpu, 'cpu1': cpu1} + + logger.warning( + 'WARNING: need to unify system and cpu output on Windows') + + return {'return': 0} + + ########################################################################## + # Linux + if not os.path.isfile(lscpu_out): + print('WARNING: lscpu.out file was not generated!') + + # Currently ignore this error though probably should fail? + # But need to check that is supported on all platforms. + return {'return': 0} + + r = utils.load_txt(file_name=lscpu_out) + if r['return'] > 0: + return r + + ss = r['string'] + + # state['cpu_info_raw'] = ss + + # Unifying some CPU info across different platforms + unified_env = { + 'CM_CPUINFO_CPUs': 'CM_HOST_CPU_TOTAL_CORES', + 'CM_CPUINFO_L1d_cache': 'CM_HOST_CPU_L1D_CACHE_SIZE', + 'CM_CPUINFO_L1i_cache': 'CM_HOST_CPU_L1I_CACHE_SIZE', + 'CM_CPUINFO_L2_cache': 'CM_HOST_CPU_L2_CACHE_SIZE', + 'CM_CPUINFO_L3_cache': 'CM_HOST_CPU_L3_CACHE_SIZE', + 'CM_CPUINFO_Sockets': 'CM_HOST_CPU_SOCKETS', + 'CM_CPUINFO_NUMA_nodes': 'CM_HOST_CPU_NUMA_NODES', + 'CM_CPUINFO_Cores_per_socket': 'CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET', + 'CM_CPUINFO_Cores_per_cluster': 'CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET', + 'CM_CPUINFO_Threads_per_core': 'CM_HOST_CPU_THREADS_PER_CORE', + 'CM_CPUINFO_Architecture': 'CM_HOST_CPU_ARCHITECTURE', + 'CM_CPUINFO_CPU_family': 'CM_HOST_CPU_FAMILY', + 'CM_CPUINFO_CPU_max_MHz': 'CM_HOST_CPU_MAX_MHZ', + 'CM_CPUINFO_Model_name': 'CM_HOST_CPU_MODEL_NAME', + 'CM_CPUINFO_On_line_CPUs_list': 'CM_HOST_CPU_ON_LINE_CPUS_LIST', + 'CM_CPUINFO_Vendor_ID': 'CM_HOST_CPU_VENDOR_ID', + 'CM_CPUINFO_hw_physicalcpu': 'CM_HOST_CPU_TOTAL_PHYSICAL_CORES', + 'CM_CPUINFO_hw_logicalcpu': 'CM_HOST_CPU_TOTAL_CORES', + 'CM_CPUINFO_hw_packages': 'CM_HOST_CPU_SOCKETS', + 'CM_CPUINFO_hw_memsize': 'CM_HOST_CPU_MEMSIZE', + 'CM_CPUINFO_hw_l1icachesize': 'CM_HOST_CPU_L1I_CACHE_SIZE', + 'CM_CPUINFO_hw_l1dcachesize': 'CM_HOST_CPU_L1D_CACHE_SIZE', + 'CM_CPUINFO_hw_l2cachesize': 'CM_HOST_CPU_L2_CACHE_SIZE' + } + + if env['CM_HOST_OS_TYPE'] == 'linux': + vkeys = ['Architecture', 'Model name', 'Vendor ID', 'CPU family', 'NUMA node(s)', 'CPU(s)', + 'On-line CPU(s) list', 'Socket(s)', 'Core(s) per socket', 'Core(s) per cluster', 'Thread(s) per core', 'L1d cache', 'L1i cache', 'L2 cache', + 'L3 cache', 'CPU max MHz'] + elif env['CM_HOST_OS_FLAVOR'] == 'macos': + vkeys = ['hw.physicalcpu', 'hw.logicalcpu', 'hw.packages', 'hw.ncpu', 'hw.memsize', 'hw.l1icachesize', + 'hw.l2cachesize'] + if vkeys: + for s in ss.split('\n'): + v = s.split(':') + key = v[0] + if key in vkeys: + env_key = 'CM_CPUINFO_' + key.replace( + " ", + "_").replace( + '(', + '').replace( + ')', + '').replace( + '-', + '_').replace( + '.', + '_') + if env_key in unified_env: + env[unified_env[env_key]] = v[1].strip() + else: + env[env_key] = v[1].strip() + + if env.get('CM_HOST_CPU_SOCKETS', '') == '-': # assume as 1 + 
env['CM_HOST_CPU_SOCKETS'] = '1' + + if env.get('CM_HOST_CPU_TOTAL_CORES', '') != '' and env.get( + 'CM_HOST_CPU_TOTAL_LOGICAL_CORES', '') == '': + env['CM_HOST_CPU_TOTAL_LOGICAL_CORES'] = env['CM_HOST_CPU_TOTAL_CORES'] + + if env.get('CM_HOST_CPU_TOTAL_LOGICAL_CORES', '') != '' and env.get( + 'CM_HOST_CPU_TOTAL_PHYSICAL_CORES', '') != '' and env.get('CM_HOST_CPU_THREADS_PER_CORE', '') == '': + env['CM_HOST_CPU_THREADS_PER_CORE'] = str(int(int(env['CM_HOST_CPU_TOTAL_LOGICAL_CORES']) // + int(env['CM_HOST_CPU_TOTAL_PHYSICAL_CORES']))) + + if env.get('CM_HOST_CPU_SOCKETS', '') != '' and env.get('CM_HOST_CPU_TOTAL_PHYSICAL_CORES', + '') != '' and env.get('CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET', '') == '': + env['CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET'] = str( + int(env['CM_HOST_CPU_TOTAL_PHYSICAL_CORES']) // int(env['CM_HOST_CPU_SOCKETS'])) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-cpu/run.bat b/cmx4mlops/cmx4mlops/repo/script/detect-cpu/run.bat new file mode 100644 index 000000000..32347c87d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/detect-cpu/run.bat @@ -0,0 +1,2 @@ +rem systeminfo /fo csv > tmp-systeminfo.csv +wmic cpu get /FORMAT:csv > tmp-wmic-cpu.csv diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-cpu/run.sh b/cmx4mlops/cmx4mlops/repo/script/detect-cpu/run.sh new file mode 100644 index 000000000..2ca2fcc9b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/detect-cpu/run.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +# Function to extract a field from /proc/cpuinfo +extract_field() { + local key="$1" + local default="$2" + # Use awk to find the first occurrence and extract the value + local value=$(awk -F: -v key="$key" '$1 ~ key {print $2; exit}' /proc/cpuinfo | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') + + # Check if value is empty and assign default if needed + echo "${value:-$default}" +} + +if [[ ${CM_HOST_OS_FLAVOR} == "macos" ]]; then + sysctl -a | grep hw > tmp-lscpu.out +else + lscpu > tmp-lscpu.out + memory_capacity=`free -h --si | grep Mem: | tr -s ' ' | cut -d' ' -f2` + echo "CM_HOST_MEMORY_CAPACITY=$memory_capacity">>tmp-run-env.out + disk_capacity=`df -h --total -l |grep total |tr -s ' '|cut -d' ' -f2` + echo "CM_HOST_DISK_CAPACITY=$disk_capacity">>tmp-run-env.out + + # extract cpu information which are not there in lscpu + CM_HOST_CPU_WRITE_PROTECT_SUPPORT=$(extract_field "wp" "Not Found") + CM_HOST_CPU_MICROCODE=$(extract_field "microcode" "Not Found") + CM_HOST_CPU_FPU_SUPPORT=$(extract_field "fpu" "Not Found") + CM_HOST_CPU_FPU_EXCEPTION_SUPPORT=$(extract_field "fpu_exception" "Not Found") + CM_HOST_CPU_BUGS=$(extract_field "bugs" "Not Found") + CM_HOST_CPU_TLB_SIZE=$(extract_field "TLB size" "Not Found") + CM_HOST_CPU_CFLUSH_SIZE=$(extract_field "clflush size" "Not Found") + CM_HOST_CACHE_ALIGNMENT_SIZE=$(extract_field "cache_alignment" "Not Found") + CM_HOST_POWER_MANAGEMENT=$(extract_field "power management" "Not Found") + + # Write results to a file + { + echo "CM_HOST_CPU_WRITE_PROTECT_SUPPORT=$CM_HOST_CPU_WRITE_PROTECT_SUPPORT" + echo "CM_HOST_CPU_MICROCODE=$CM_HOST_CPU_MICROCODE" + echo "CM_HOST_CPU_FPU_SUPPORT=$CM_HOST_CPU_FPU_SUPPORT" + echo "CM_HOST_CPU_FPU_EXCEPTION_SUPPORT=$CM_HOST_CPU_FPU_EXCEPTION_SUPPORT" + echo "CM_HOST_CPU_BUGS=$CM_HOST_CPU_BUGS" + echo "CM_HOST_CPU_TLB_SIZE=$CM_HOST_CPU_TLB_SIZE" + echo "CM_HOST_CPU_CFLUSH_SIZE=$CM_HOST_CPU_CFLUSH_SIZE" + echo "CM_HOST_CACHE_ALIGNMENT_SIZE=$CM_HOST_CACHE_ALIGNMENT_SIZE" + echo "CM_HOST_POWER_MANAGEMENT=$CM_HOST_POWER_MANAGEMENT" + } >> tmp-run-env.out +fi + + 
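Worth noting before moving on: `run.sh` above hands results back to CM by appending plain `KEY=VALUE` lines to `tmp-run-env.out`, which the script runtime folds into the unified environment. A minimal sketch of reading that format, assuming only the simple `KEY=VALUE` layout produced above (`load_run_env` is a hypothetical helper, not a CM API):

```python
# Minimal sketch: parse the KEY=VALUE pairs that run.sh appends to
# tmp-run-env.out (hypothetical helper, not part of the CM runtime).
def load_run_env(path="tmp-run-env.out"):
    env = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or "=" not in line:
                continue  # skip blank or malformed lines
            key, _, value = line.partition("=")
            env[key] = value
    return env


# Example: values written by the detect-cpu run.sh above
host = load_run_env()
print(host.get("CM_HOST_MEMORY_CAPACITY", "unknown"))
print(host.get("CM_HOST_DISK_CAPACITY", "unknown"))
```

The other scripts in this patch, such as `detect-os` below, use the same `tmp-run-env.out` convention.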
diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-os/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/detect-os/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/detect-os/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-os/README.md b/cmx4mlops/cmx4mlops/repo/script/detect-os/README.md new file mode 100644 index 000000000..a52b374f7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/detect-os/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Platform-information/detect-os](https://docs.mlcommons.org/cm4mlops/scripts/Platform-information/detect-os) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-os/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/detect-os/_cm.yaml new file mode 100644 index 000000000..12266a02a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/detect-os/_cm.yaml @@ -0,0 +1,30 @@ +alias: detect-os +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Platform information +clean_files: +- tmp-run.out +new_env_keys: +- CM_HOST_OS_* +- +CM_HOST_OS_* +- CM_HOST_PLATFORM_* +- CM_HOST_PYTHON_* +- CM_HOST_SYSTEM_NAME +- CM_RUN_STATE_DOCKER +- +PATH +new_state_keys: +- os_uname_* +post_deps: +- enable_if_env: + CM_HOST_OS_TYPE: + - windows + skip_if_env: + CM_WINDOWS_SYS_UTILS_MIN_INSTALL: + - 'yes' + tags: get,sys-utils-min +tags: +- detect-os +- detect +- os +- info +uid: 863735b7db8c44fc diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-os/customize.py b/cmx4mlops/cmx4mlops/repo/script/detect-os/customize.py new file mode 100644 index 000000000..4d2d9f6fd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/detect-os/customize.py @@ -0,0 +1,120 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import subprocess + + +def preprocess(i): + + env = i['env'] + state = i['state'] + + os_info = i['os_info'] + + # Update env variables + env['CM_HOST_OS_TYPE'] = os_info['platform'] + env['CM_HOST_OS_BITS'] = os_info['bits'] + env['CM_HOST_PYTHON_BITS'] = os_info['python_bits'] + + # Update state (demo) + # state['os_info'] = os_info + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + os_info = i['os_info'] + + if os_info['platform'] != 'windows': + if os_info['platform'] == 'linux': + sys_cmd = "ld --verbose | grep SEARCH_DIR " + result = subprocess.check_output( + sys_cmd, shell=True).decode("utf-8") + result = result.replace("SEARCH_DIR(\"=", "") + result = result.replace("SEARCH_DIR(\"", "") + result = result.replace("\")", "") + result = result.replace(" ", "") + result = result.replace("\n", "") + dirs = result.split(';') + lib_dir = [] + for _dir in dirs: + if _dir != '' and _dir not in lib_dir: + lib_dir.append(_dir) + env['+CM_HOST_OS_DEFAULT_LIBRARY_PATH'] = lib_dir + + r = utils.load_txt(file_name='tmp-run.out', + check_if_exists=True, + split=True) + 
+        if r['return'] > 0:
+            return r
+
+        s = r['list']
+
+        state['os_uname_machine'] = s[0]
+        state['os_uname_all'] = s[1]
+
+        env['CM_HOST_OS_MACHINE'] = state['os_uname_machine']
+
+    else:
+        env['CM_HOST_OS_PACKAGE_MANAGER'] = "choco"
+
+    import platform
+
+    env['CM_HOST_SYSTEM_NAME'] = platform.node()
+
+    if 'CM_HOST_OS_PACKAGE_MANAGER' not in env:
+        if env.get('CM_HOST_OS_FLAVOR', '') == "ubuntu" or \
+                "debian" in env.get('CM_HOST_OS_FLAVOR_LIKE', '') or \
+                env.get('CM_HOST_OS_FLAVOR', '') == "debian":
+            env['CM_HOST_OS_PACKAGE_MANAGER'] = "apt"
+        if env.get('CM_HOST_OS_FLAVOR', '') == "rhel" or \
+                "rhel" in env.get('CM_HOST_OS_FLAVOR_LIKE', ''):
+            env['CM_HOST_OS_PACKAGE_MANAGER'] = "dnf"
+        if env.get('CM_HOST_OS_FLAVOR', '') == "amzn":
+            env['CM_HOST_OS_PACKAGE_MANAGER'] = "yum"
+        if env.get('CM_HOST_OS_FLAVOR', '') == "arch" or \
+                env.get('CM_HOST_OS_FLAVOR_LIKE', '') == "arch":
+            # Arch-based systems use pacman; the value must match the
+            # install/update command mapping below
+            env['CM_HOST_OS_PACKAGE_MANAGER'] = "pacman"
+        if env.get('CM_HOST_OS_FLAVOR', '') == "macos":
+            env['CM_HOST_OS_PACKAGE_MANAGER'] = "brew"
+        if env.get('CM_HOST_OS_FLAVOR', '') == "sles":
+            env['CM_HOST_OS_PACKAGE_MANAGER'] = "zypper"
+    if env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "apt":
+        env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "DEBIAN_FRONTEND=noninteractive apt-get install -y"
+        env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "apt-get update -y"
+    elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "dnf":
+        env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "dnf install -y"
+        env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "dnf update -y"
+    elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "pacman":
+        env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "pacman -Sy --noconfirm"
+        env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "pacman -Syu"
+    elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "brew":
+        env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "brew install"
+        env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "brew update"
+    elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "yum":
+        env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "yum install -y --skip-broken"
+        env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "yum update -y"
+    elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "zypper":
+        env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "zypper install -y"
+        env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "zypper update -y"
+    elif env.get('CM_HOST_OS_PACKAGE_MANAGER', '') == "choco":
+        env['CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD'] = "choco install -y"
+        env['CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD'] = "choco upgrade -y"
+
+    if os.path.exists("/.dockerenv"):
+        env['CM_RUN_INSIDE_DOCKER'] = "yes"
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-os/run.bat b/cmx4mlops/cmx4mlops/repo/script/detect-os/run.bat
new file mode 100644
index 000000000..89b468ecc
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/detect-os/run.bat
@@ -0,0 +1 @@
+echo {"detect-os-test":"win"} > tmp-run-state.json
diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-os/run.sh b/cmx4mlops/cmx4mlops/repo/script/detect-os/run.sh
new file mode 100644
index 000000000..9e3c56cd9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/detect-os/run.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+uname -m > tmp-run.out
+uname -a >> tmp-run.out
+if test -f "/etc/os-release"; then
+  echo "CM_HOST_OS_FLAVOR=`cat /etc/os-release | grep '^ID=' | cut -d'=' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out
+  echo "CM_HOST_OS_FLAVOR_LIKE=`cat /etc/os-release | grep '^ID_LIKE=' | cut -d'=' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out
+  echo "CM_HOST_OS_VERSION=`cat /etc/os-release | grep '^VERSION_ID=' | cut -d'=' -f2 | cut -d'"' -f2 | tr '[:upper:]' '[:lower:]'`" >> tmp-run-env.out
+  echo "CM_HOST_OS_KERNEL_VERSION=`uname -r`" >> tmp-run-env.out
+  echo "CM_HOST_PLATFORM_FLAVOR=`uname -m`" >> tmp-run-env.out
+  echo "CM_HOST_OS_GLIBC_VERSION=`ldd --version | head -1 | cut -d')' -f2 | cut -d' ' -f2`" >> tmp-run-env.out
+else
+  CM_HOST_OS_FLAVOR=`sw_vers | grep '^ProductName:' | cut -f2 | tr '[:upper:]' '[:lower:]'`
+  if [ -z "${CM_HOST_OS_FLAVOR}" ]; then
+    CM_HOST_OS_FLAVOR=`sw_vers | grep '^ProductName:' | cut -f3 | tr '[:upper:]' '[:lower:]' `
+  fi
+  echo "CM_HOST_OS_FLAVOR=${CM_HOST_OS_FLAVOR}" >> tmp-run-env.out
+  echo "CM_HOST_OS_VERSION=`sw_vers | grep '^ProductVersion:' | cut -f2 | tr '[:upper:]' '[:lower:]' `" >> tmp-run-env.out
+  echo "CM_HOST_OS_KERNEL_VERSION=`uname -r`" >> tmp-run-env.out
+  echo "CM_HOST_PLATFORM_FLAVOR=`uname -m `" >> tmp-run-env.out
+fi
diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-os/run_config.yml b/cmx4mlops/cmx4mlops/repo/script/detect-os/run_config.yml
new file mode 100644
index 000000000..938e3b641
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/detect-os/run_config.yml
@@ -0,0 +1,6 @@
+docker:
+  build: true
+  docker_os: ubuntu
+  docker_os_version: "22.04"
+
+run_with_default_inputs: true # if false, the script won't run automatic tests
diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-sudo/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/detect-sudo/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/detect-sudo/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-sudo/README.md b/cmx4mlops/cmx4mlops/repo/script/detect-sudo/README.md
new file mode 100644
index 000000000..20583b96c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/detect-sudo/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/detect-sudo](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/detect-sudo) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-sudo/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/detect-sudo/_cm.yaml new file mode 100644 index 000000000..64b60a5f6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/detect-sudo/_cm.yaml @@ -0,0 +1,17 @@ +uid: 1d47ffc556e248dc +alias: detect-sudo + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: false + +category: DevOps automation + +new_env_keys: + - CM_SUDO* + +tags: +- detect +- sudo +- access diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-sudo/customize.py b/cmx4mlops/cmx4mlops/repo/script/detect-sudo/customize.py new file mode 100644 index 000000000..56cedd4fd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/detect-sudo/customize.py @@ -0,0 +1,172 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import subprocess +import select +import sys +import grp + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if os.geteuid() == 0: + env['CM_SUDO'] = '' # root user does not need sudo + env['CM_SUDO_USER'] = "yes" + else: + if can_execute_sudo_without_password() or prompt_sudo() == 0: + env['CM_SUDO_USER'] = "yes" + env['CM_SUDO'] = 'sudo' + + else: + env['CM_SUDO_USER'] = "no" + env['CM_SUDO'] = '' + + return {'return': 0} + + +def can_execute_sudo_without_password(): + try: + # Run a harmless command using sudo + result = subprocess.run( + # -n prevents sudo from prompting for a password + ['sudo', '-n', 'true'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + + # Check the return code; if it's 0, sudo executed without needing a + # password + if result.returncode == 0: + return True + else: + return False + except Exception as e: + print(f"An error occurred: {e}") + return False + + +def reset_terminal(): + """Reset terminal to default settings.""" + subprocess.run(['stty', 'sane']) + + +def prompt_retry(timeout=10, default_retry=False): + """Prompt the user with a yes/no question to retry the command, with a 10-second timeout.""" + + # Check if we're in an interactive terminal + if not sys.stdin.isatty(): + if default_retry: + print(f"Non-interactive environment detected. Automatically retrying.") + else: + print(f"Non-interactive environment detected. Skipping retry.") + return default_retry # Automatically use the default in non-interactive terminals + + print( + f"Timeout occurred. Do you want to try again? (y/n): ", + end='', + flush=True) + + # Use select to wait for user input with a timeout + ready, _, _ = select.select([sys.stdin], [], [], timeout) + + if ready: + answer = sys.stdin.readline().strip().lower() + if answer in ['y', 'n']: + return answer == 'y' # Return True if 'y', False if 'n' + print("\nInvalid input. Please enter 'y' or 'n'.") + return prompt_retry(timeout) # Re-prompt on invalid input + else: + print("\nNo input received in 10 seconds. 
Exiting.")
+        return False  # No input within the timeout, so don't retry
+
+
+def is_user_in_sudo_group():
+    """Check if the current user is in the 'sudo' group."""
+    try:
+        sudo_group = grp.getgrnam('sudo').gr_mem
+        return os.getlogin() in sudo_group
+    except KeyError:
+        # 'sudo' group doesn't exist (might be different on some systems)
+        return False
+    except Exception as e:
+        print(f"Error checking sudo group: {str(e)}")
+        return False
+
+
+def prompt_sudo():
+    if os.geteuid() != 0 and not is_user_in_sudo_group():  # not root and not in the sudo group - prompt for a password
+
+        # Prompt for the password
+        import getpass
+
+        if not os.isatty(sys.stdin.fileno()):
+            print("Skipping password prompt - non-interactive terminal detected!")
+            password = None
+        else:
+            password = getpass.getpass("Enter password (-1 to skip): ")
+
+            # Check if the input is -1
+            if password == "-1":
+                print("Skipping sudo command.")
+                return -1
+
+        # Run the command with sudo, passing the password
+        try:
+            if password is None:
+                r = subprocess.check_output(
+                    ['sudo', '-S', 'echo'],
+                    text=True,
+                    stderr=subprocess.STDOUT,
+                    timeout=15  # Capture the command output
+                )
+            else:
+                r = subprocess.check_output(
+                    ['sudo', '-S', 'echo'],
+                    input=password + "\n",  # Pass the password to stdin
+                    text=True,
+                    stderr=subprocess.STDOUT,
+                    timeout=15  # Capture the command output
+                )
+            return 0
+        except subprocess.TimeoutExpired:
+            print("Timed out")
+            reset_terminal()  # Reset terminal to sane state
+            if not prompt_retry():  # If the user chooses not to retry or times out
+                return -1
+            return prompt_sudo()  # retry the whole prompt on user request
+        except subprocess.CalledProcessError as e:
+            # text=True means e.output is already a str
+            print(f"Command failed: {e.output}")
+            reset_terminal()  # Reset terminal in case of failure
+            return -1
+        except Exception as e:
+            print(f"An error occurred: {str(e)}")
+            reset_terminal()  # Always reset terminal after error
+            return -1
+
+    return 0
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/detect-sudo/run.sh b/cmx4mlops/cmx4mlops/repo/script/detect-sudo/run.sh
new file mode 100644
index 000000000..3a584c10c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/detect-sudo/run.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+#To export any variable
+#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
+
+#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+
+
+
+function exit_if_error() {
+  # capture the caller's exit code before `test` overwrites $?
+  err=$?
+  test $err -eq 0 || exit $err
+}
+
+function run() {
+  echo "Running: "
+  echo "$1"
+  echo ""
+  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+    eval "$1"
+    exit_if_error
+  fi
+}
+
+#Add your run commands here...
+# run "$CM_RUN_CMD"
diff --git a/cmx4mlops/cmx4mlops/repo/script/download-and-extract/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/download-and-extract/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/download-and-extract/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
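The `detect-sudo` logic above leaves two environment keys for later scripts: `CM_SUDO_USER` (`yes`/`no`) and `CM_SUDO` (`sudo`, or an empty string when already running as root). A short sketch of the intended consumption pattern (hypothetical, assuming only the keys set above):

```python
import os
import subprocess


# Hypothetical consumer of the detect-sudo outputs: run a privileged
# command only when sudo access was actually detected.
def run_privileged(cmd, env=os.environ):
    if env.get("CM_SUDO_USER", "no") != "yes":
        raise PermissionError("sudo access not available for: " + cmd)
    sudo = env.get("CM_SUDO", "")  # '' when already running as root
    return subprocess.run((sudo + " " + cmd).strip(), shell=True).returncode


# e.g. run_privileged("apt-get update -y")
```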
diff --git a/cmx4mlops/cmx4mlops/repo/script/download-and-extract/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/download-and-extract/README-extra.md
new file mode 100644
index 000000000..6573ab848
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/download-and-extract/README-extra.md
@@ -0,0 +1,109 @@
+# CM interface to download and extract files in a unified way on any system
+
+## Download and extract file without CM caching
+
+### Use internal CM download function
+
+This script uses the [internal CM download function](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/utils/module.py#L157)
+to download and extract a given file to the current directory:
+
+```bash
+cmr "download-and-extract file _extract" --url=https://cKnowledge.org/test/coco-2017-val-annotations.zip
+```
+or
+
+```bash
+cmr "dae file _extract _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip"
+```
+
+#### Output environment variables
+
+You can inspect the environment variables produced by this CM script by adding the `-j` flag:
+
+```bash
+cmr "dae file _extract _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip" -j
+```
+
+```json
+  "new_env": {
+    "CM_DOWNLOAD_DOWNLOADED_PATH": "D:\\Work\\coco-2017-val-annotations.zip",
+    "CM_EXTRACT_EXTRACTED_PATH": "D:\\Work",
+    "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Work"
+  },
+```
+
+#### Input flags and equivalent environment variables
+
+* `--url` or `--env.CM_DAE_URL` - URL of the file to download
+* `--verify` or `--env.CM_VERIFY_SSL` - set to `no` to skip SSL certificate verification
+* `--download_path` or `--store` or `--env.CM_DOWNLOAD_PATH` - where to download the file
+* `--local_path` or `--from` or `--env.CM_DOWNLOAD_LOCAL_FILE_PATH` - where to take the file from instead of downloading it
+* `--extract_path` or `--to` or `--env.CM_EXTRACT_PATH` - where to extract files (--input should then be a full path)
+* `--extra_folder` or `--env.CM_EXTRACT_TO_FOLDER` - extra directory to extract into (to avoid cluttering the current directory)
+
+
+#### Variations
+
+* `_keep` or `_no-remove-extracted` or `--env.CM_EXTRACT_REMOVE_EXTRACTED=no` - keep the archive file (it is deleted by default)
+
+
+
+### Use wget without SSL certificate verification
+
+```bash
+cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no
+```
+
+### Use curl without SSL certificate verification
+
+```bash
+cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _curl" --verify=no
+```
+
+### Check MD5SUM
+
+```bash
+cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
+```
+
+### Save to another file
+
+```bash
+cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_FILENAME=xyz --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
+```
+
+### Save to another place
+
+```bash
+cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --download_path=D:\Work --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
+```
+
+### Reuse local file instead of downloading a file
+
+```bash
+cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --local_path="D:\Work\coco-2017-val-annotations.zip" --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -j
+```
+
+
+### Simplified 
language to download, store and extract file
+
+
+```bash
+cmr "dae file _extract _keep _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --store=$HOME/dir1 --to=$HOME/dir2
+```
+
+
+
+## Download and extract files with CM caching
+
+You can use all of the above commands with the `--force_cache` and `--extra_cache_tags` flags.
+In that case, a given file will be downloaded to the CM cache and can be reused by other CM scripts and workflows:
+
+```bash
+cmr "dae file _extract _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --force_cache --extra_cache_tags=coco,2017,val,annotations
+```
+
+You can then find it in the CM cache using the extra cache tags as follows:
+```bash
+cm show cache "dae file annotations coco 2017 val"
+```
diff --git a/cmx4mlops/cmx4mlops/repo/script/download-and-extract/README.md b/cmx4mlops/cmx4mlops/repo/script/download-and-extract/README.md
new file mode 100644
index 000000000..d75afd05f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/download-and-extract/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/download-and-extract](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/download-and-extract) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/download-and-extract/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/download-and-extract/_cm.yaml
new file mode 100644
index 000000000..bd4003b91
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/download-and-extract/_cm.yaml
@@ -0,0 +1,117 @@
+alias: download-and-extract
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: false
+can_force_cache: true
+category: DevOps automation
+deps: []
+input_description: {}
+input_mapping:
+  download_path: CM_DOWNLOAD_PATH
+  extra_folder: CM_EXTRACT_TO_FOLDER
+  extract_path: CM_EXTRACT_PATH
+  from: CM_DOWNLOAD_LOCAL_FILE_PATH
+  local_path: CM_DOWNLOAD_LOCAL_FILE_PATH
+  store: CM_DOWNLOAD_PATH
+  to: CM_EXTRACT_PATH
+  url: CM_DAE_URL
+  verify: CM_VERIFY_SSL
+new_env_keys:
+- CM_DOWNLOAD_DOWNLOADED_PATH*
+- CM_EXTRACT_EXTRACTED_PATH
+- <<<CM_DOWNLOAD_FINAL_ENV_NAME>>>
+- <<<CM_EXTRACT_FINAL_ENV_NAME>>>
+- <<<CM_DAE_FINAL_ENV_NAME>>>
+- CM_GET_DEPENDENT_CACHED_PATH
+new_state_keys: []
+post_deps: []
+posthook_deps:
+- enable_if_env:
+    CM_DAE_EXTRACT_DOWNLOADED:
+    - 'yes'
+    - 'True'
+  names:
+  - extract-script
+  tags: extract,file
+  force_env_keys:
+  - CM_OUTDIRNAME
+  update_tags_from_env_with_prefix:
+    _path.:
+    - CM_DOWNLOAD_DOWNLOADED_PATH
+    - CM_TORRENT_DOWNLOADED_PATH
+prehook_deps:
+- names:
+  - download-script
+  skip_if_env:
+    CM_DAE_DOWNLOAD_USING_TORRENT:
+    - 'yes'
+    - 'True'
+  tags: download,file
+  force_env_keys:
+  - CM_OUTDIRNAME
+  update_tags_from_env_with_prefix:
+    _url.:
+    - CM_DAE_URL
+tags:
+- dae
+- file
+- download-and-extract
+tags_help: download-and-extract file
+uid: c67e81a4ce2649f5
+variations:
+  cmutil:
+    add_deps:
+      download-script:
+        tags: _cmutil
+    default: true
+    group: download-tool
+  curl:
+    add_deps:
+      download-script:
+        tags: _curl
+    group: download-tool
+  extract:
+    env:
+      CM_DAE_EXTRACT_DOWNLOADED: 'yes'
+  gdown:
+    add_deps:
+      download-script:
+        tags: _gdown
+    group: download-tool
+  keep:
+    default: 'true'
+    env:
+      CM_EXTRACT_REMOVE_EXTRACTED: 'no'
+    group: keep
+  no-remove-extracted:
+    env:
+      CM_EXTRACT_REMOVE_EXTRACTED: 'no'
+    group: keep
+  rclone:
+    add_deps:
+      download-script:
+        tags: _rclone
+    group: download-tool
+  torrent:
+    env:
CM_DAE_DOWNLOAD_USING_TORRENT: 'yes' + CM_TORRENT_DOWNLOADED_FILE_NAME: <<>> + CM_TORRENT_DOWNLOADED_PATH_ENV_KEY: CM_DAE_FILEPATH + CM_TORRENT_WAIT_UNTIL_COMPLETED: 'yes' + group: download-tool + new_env_keys: + - CM_TORRENT_DOWNLOADED_PATH + prehook_deps: + - tags: download,torrent + update_tags_from_env_with_prefix: + _torrent.: + - CM_DAE_TORRENT_PATH + url.#: + env: + CM_DAE_URL: '#' + wget: + add_deps: + download-script: + tags: _wget + group: download-tool +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/download-and-extract/customize.py b/cmx4mlops/cmx4mlops/repo/script/download-and-extract/customize.py new file mode 100644 index 000000000..0dde6a37e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/download-and-extract/customize.py @@ -0,0 +1,84 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import hashlib + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if i['input'].get('force_cache'): + extra_cache_tags = i['input'].get('extra_cache_tags', '') + r = automation.update_deps({ + 'deps': meta['prehook_deps'] + meta['posthook_deps'], + 'update_deps': { + 'download-script': { + 'extra_cache_tags': extra_cache_tags, + 'force_cache': True + }, + 'extract-script': { + 'extra_cache_tags': extra_cache_tags, + 'force_cache': True + } + } + }) + if r['return'] > 0: + return r + + if env.get('CM_DOWNLOAD_LOCAL_FILE_PATH'): + filepath = env['CM_DOWNLOAD_LOCAL_FILE_PATH'] + + if not os.path.exists(filepath): + return {'return': 1, + 'error': 'Local file {} doesn\'t exist'.format(filepath)} + + env['CM_EXTRACT_REMOVE_EXTRACTED'] = 'no' + + if str(env.get('CM_DAE_EXTRACT_DOWNLOADED') + ).lower() in ["yes", "1", "true"]: + if (env.get('CM_EXTRACT_FINAL_ENV_NAME', '') == '') and ( + env.get('CM_DAE_FINAL_ENV_NAME', '') != ''): + env['CM_EXTRACT_FINAL_ENV_NAME'] = env['CM_DAE_FINAL_ENV_NAME'] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + filepath = env.get('CM_EXTRACT_EXTRACTED_PATH', '') + if filepath == '': + filepath = env.get('CM_DOWNLOAD_DOWNLOADED_PATH', '') + + if filepath == '': + return {'return': 1, + 'error': 'No extracted path set in "CM_EXTRACT_EXTRACTED_PATH"'} + if not os.path.exists(filepath): + return {'return': 1, + 'error': 'Extracted path doesn\'t exist: {}'.format(filepath)} + + if env.get('CM_DAE_FINAL_ENV_NAME'): + env[env['CM_DAE_FINAL_ENV_NAME']] = filepath + + env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/download-and-extract/tests/download-and-extract-file.bat b/cmx4mlops/cmx4mlops/repo/script/download-and-extract/tests/download-and-extract-file.bat new file mode 100644 index 000000000..0688461de --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/download-and-extract/tests/download-and-extract-file.bat @@ -0,0 +1 @@ +cmr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract _no-remove-extracted" -j 
--env.CM_VERIFY_SSL=False --env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1
diff --git a/cmx4mlops/cmx4mlops/repo/script/download-and-extract/tests/download-and-extract-file2.bat b/cmx4mlops/cmx4mlops/repo/script/download-and-extract/tests/download-and-extract-file2.bat
new file mode 100644
index 000000000..af344b927
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/download-and-extract/tests/download-and-extract-file2.bat
@@ -0,0 +1 @@
+cmr "download-and-extract file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget _extract" -j --env.CM_VERIFY_SSL=False --env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1
diff --git a/cmx4mlops/cmx4mlops/repo/script/download-file/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/download-file/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/download-file/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/download-file/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/download-file/README-extra.md
new file mode 100644
index 000000000..b645b0419
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/download-file/README-extra.md
@@ -0,0 +1,98 @@
+# CM interface to download files in a unified way on any system
+
+## Download file without CM caching
+
+### Use internal CM download function
+
+This script uses the [internal CM download function](https://github.com/mlcommons/ck/blob/master/cm-mlops/automation/utils/module.py#L157)
+to download a given file to the current directory:
+
+```bash
+cmr "download file" --url=https://cKnowledge.org/test/coco-2017-val-annotations.zip
+```
+or
+
+```bash
+cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip"
+```
+
+#### Output environment variables
+
+You can inspect the environment variables produced by this CM script by adding the `-j` flag:
+
+```bash
+cmr "download file" _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip -j
+```
+
+```json
+  "new_env": {
+    "CM_DOWNLOAD_DOWNLOADED_PATH": "D:\\Downloads\\coco-2017-val-annotations.zip",
+    "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Downloads\\coco-2017-val-annotations.zip"
+  },
+```
+
+#### Input flags and equivalent environment variables
+
+* `--url` or `--env.CM_DOWNLOAD_URL` - URL of the file to download
+* `--download_path` or `--to` or `--env.CM_DOWNLOAD_PATH` - where to download the file
+* `--local_path` or `--from` or `--env.CM_DOWNLOAD_LOCAL_FILE_PATH` - where to take the file from instead of downloading it
+* `--verify` or `--env.CM_VERIFY_SSL` - set to `no` to skip SSL certificate verification
+
+
+### Use wget without SSL certificate verification
+
+```bash
+cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no
+```
+
+### Use curl without SSL certificate verification
+
+```bash
+cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _curl" --verify=no
+```
+
+### Check MD5SUM
+
+```bash
+cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
+```
+
+### Save to another file
+
+```bash
+cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_FILENAME=xyz 
--env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
+```
+
+### Save to another place
+
+```bash
+cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --download_path=D:\Work --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56
+```
+
+### Reuse local file instead of downloading a file
+
+```bash
+cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --local_path="D:\Work\coco-2017-val-annotations.zip" --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 -j
+```
+
+Output environment variables produced by this CM script:
+```json
+  "new_env": {
+    "CM_DOWNLOAD_DOWNLOADED_PATH": "D:\\Work\\coco-2017-val-annotations.zip",
+    "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Work\\coco-2017-val-annotations.zip"
+  }
+```
+
+## Download file with CM caching
+
+You can use all of the above commands with the `--force_cache` and `--extra_cache_tags` flags.
+In that case, a given file will be downloaded to the CM cache and can be reused by other CM scripts and workflows:
+
+```bash
+cmr "download file _url.https://cKnowledge.org/test/coco-2017-val-annotations.zip _wget" --verify=no --env.CM_DOWNLOAD_CHECKSUM=bbe2f8874ee9e33cf5d6906338027a56 --force_cache --extra_cache_tags=coco,2017,val,annotations
+```
+
+You can then find it in the CM cache using the extra cache tags as follows:
+```bash
+cm show cache "download file annotations coco 2017 val"
+```
diff --git a/cmx4mlops/cmx4mlops/repo/script/download-file/README.md b/cmx4mlops/cmx4mlops/repo/script/download-file/README.md
new file mode 100644
index 000000000..bf3208f9b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/download-file/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/download-file](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/download-file) for the documentation of this CM script.
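For intuition, the checksum options above (`--md5sum`, `--env.CM_DOWNLOAD_CHECKSUM`) boil down to comparing an MD5 digest against the downloaded file. The script itself shells out to `md5sum -c`, but an equivalent check in pure Python would look roughly like this (an illustrative sketch, not the code path the script actually uses):

```python
import hashlib


# Sketch of what the `md5sum -c` invocations in this script verify:
# hash the file in chunks and compare against the expected digest.
def md5_matches(path, expected_md5, chunk_size=1 << 20):
    h = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest() == expected_md5


# e.g. md5_matches("coco-2017-val-annotations.zip",
#                  "bbe2f8874ee9e33cf5d6906338027a56")
```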
diff --git a/cmx4mlops/cmx4mlops/repo/script/download-file/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/download-file/_cm.yaml
new file mode 100644
index 000000000..aedf0ab58
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/download-file/_cm.yaml
@@ -0,0 +1,79 @@
+alias: download-file
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: false
+can_force_cache: true
+category: DevOps automation
+default_env:
+  CM_RCLONE_COPY_USING: sync
+deps:
+- tags: detect,os
+- enable_if_env:
+    CM_DOWNLOAD_CHECKSUM:
+    - 'on'
+    CM_HOST_OS_FLAVOR:
+    - macos
+  tags: get,generic-sys-util,_md5sha1sum
+input_description: {}
+input_mapping:
+  download_path: CM_DOWNLOAD_PATH
+  from: CM_DOWNLOAD_LOCAL_FILE_PATH
+  local_path: CM_DOWNLOAD_LOCAL_FILE_PATH
+  md5sum: CM_DOWNLOAD_CHECKSUM
+  output_file: CM_DOWNLOAD_FILENAME
+  store: CM_DOWNLOAD_PATH
+  url: CM_DOWNLOAD_URL
+  verify: CM_VERIFY_SSL
+  verify_ssl: CM_VERIFY_SSL
+new_env_keys:
+- CM_DOWNLOAD_DOWNLOADED_PATH
+- <<<CM_DOWNLOAD_FINAL_ENV_NAME>>>
+- CM_GET_DEPENDENT_CACHED_PATH
+new_state_keys: []
+post_deps: []
+prehook_deps: []
+tags:
+- download
+- file
+- download-file
+tags_help: download file
+uid: 9cdc8dc41aae437e
+variations:
+  cmutil:
+    default: true
+    env:
+      CM_DOWNLOAD_TOOL: cmutil
+    group: download-tool
+  curl:
+    default_env:
+      CM_DOWNLOAD_CURL_EMULATE_BROWSER: 'no'
+    env:
+      CM_DOWNLOAD_TOOL: curl
+    group: download-tool
+  gdown:
+    deps:
+    - tags: get,generic-python-lib,_package.gdown
+    env:
+      CM_DOWNLOAD_TOOL: gdown
+    group: download-tool
+  rclone:
+    deps:
+    - tags: get,rclone
+    - enable_if_env:
+        CM_RCLONE_CONFIG_NAME:
+        - 'on'
+      tags: get,rclone-config
+      update_tags_from_env_with_prefix:
+        _:
+        - CM_RCLONE_CONFIG_NAME
+    env:
+      CM_DOWNLOAD_TOOL: rclone
+    group: download-tool
+  url.#:
+    env:
+      CM_DOWNLOAD_URL: '#'
+  wget:
+    env:
+      CM_DOWNLOAD_TOOL: wget
+    group: download-tool
+versions: {}
diff --git a/cmx4mlops/cmx4mlops/repo/script/download-file/customize.py b/cmx4mlops/cmx4mlops/repo/script/download-file/customize.py
new file mode 100644
index 000000000..ac84544cb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/download-file/customize.py
@@ -0,0 +1,346 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+import subprocess
+
+
+def escape_special_chars(text, tool=None):
+    special_chars = [
+        '&', '|', '(', ')'
+    ]
+
+    for char in special_chars:
+        text = text.replace(char, f'^{char}')
+
+    # handle URL special cases
+    if tool != "rclone":
+        text = text.replace('%', "%%")
+
+    return text
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+    env = i['env']
+
+    # env to be passed to the subprocess
+    subprocess_env = os.environ.copy()
+    subprocess_env['PATH'] += os.pathsep + \
+        os.pathsep.join(env.get('+PATH', ''))
+
+    meta = i['meta']
+
+    automation = i['automation']
+
+    quiet = (env.get('CM_QUIET', False) == 'yes')
+
+    tool = env.get('CM_DOWNLOAD_TOOL', '')
+    pre_clean = env.get('CM_PRE_DOWNLOAD_CLEAN', False)
+
+    # xsep = '^&^&' if windows else '&&'
+    xsep = '&&'
+
+    q = '"' if os_info['platform'] == 'windows' else "'"
+
+    x = '*' if os_info['platform'] == 'windows' else ''
+    x_c = '-s' if os_info['platform'] == 
'darwin_off' else '' + + # command for deleting file in windows and linux is different + if os_info['platform'] == 'windows': + del_cmd = "del /f" + else: + del_cmd = "rm -f" + + if env.get('CM_DOWNLOAD_LOCAL_FILE_PATH'): + filepath = env['CM_DOWNLOAD_LOCAL_FILE_PATH'] + + if not os.path.exists(filepath): + return {'return': 1, + 'error': 'Local file {} doesn\'t exist'.format(filepath)} + + env['CM_DOWNLOAD_CMD'] = "" + + env['CM_DOWNLOAD_FILENAME'] = filepath + + if not quiet: + print('') + print('Using local file: {}'.format(filepath)) + else: + url = env.get('CM_DOWNLOAD_URL', '') + + if url == '': + return { + 'return': 1, 'error': 'please specify URL using --url={URL} or --env.CM_DOWNLOAD_URL={URL}'} + + print('') + print('Downloading from {}'.format(url)) + + if '&' in url and tool != "cmutil": + if os_info['platform'] == 'windows': + url = '"' + url + '"' + else: + url = url.replace('&', '\\&') + + extra_download_options = env.get('CM_DOWNLOAD_EXTRA_OPTIONS', '') + + verify_ssl = env.get('CM_VERIFY_SSL', "True") + if str(verify_ssl).lower() in [ + "no", "false"] or os_info['platform'] == 'windows': + verify_ssl = False + else: + verify_ssl = True + + if env.get('CM_DOWNLOAD_PATH', '') != '': + download_path = env['CM_DOWNLOAD_PATH'] + if not os.path.exists(download_path): + os.makedirs(download_path, exist_ok=True) + os.chdir(download_path) + + if env.get('CM_DOWNLOAD_FILENAME', '') == '': + urltail = os.path.basename(env['CM_DOWNLOAD_URL']) + urlhead = os.path.dirname(env['CM_DOWNLOAD_URL']) + if "." in urltail and "/" in urlhead: + # Check if ? after filename + j = urltail.find('?') + if j > 0: + urltail = urltail[:j] + env['CM_DOWNLOAD_FILENAME'] = urltail + elif env.get('CM_DOWNLOAD_TOOL', '') == "rclone": + env['CM_DOWNLOAD_FILENAME'] = urltail + else: + env['CM_DOWNLOAD_FILENAME'] = "index.html" + + if tool == "cmutil": + cmutil_require_download = 0 + if env.get('CM_DOWNLOAD_CHECKSUM_FILE', '') != '': + if os_info['platform'] == 'windows': + checksum_cmd = f"cd {q}{filepath}{q} {xsep} md5sum -c{x_c} {x}{escape_special_chars(env['CM_DOWNLOAD_CHECKSUM_FILE'])}" + else: + checksum_cmd = f"cd {q}{filepath}{q} {xsep} md5sum -c{x_c} {x}{q}{env['CM_DOWNLOAD_CHECKSUM_FILE']}{q}" + checksum_result = subprocess.run( + checksum_cmd, + cwd=f'{q}{filepath}{q}', + capture_output=True, + text=True, + shell=True, + env=subprocess_env) + elif env.get('CM_DOWNLOAD_CHECKSUM', '') != '': + if os_info['platform'] == 'windows': + checksum_cmd = f"echo {env.get('CM_DOWNLOAD_CHECKSUM')} {x}{escape_special_chars(env['CM_DOWNLOAD_FILENAME'])} | md5sum -c{x_c} -" + else: + checksum_cmd = f"echo {env.get('CM_DOWNLOAD_CHECKSUM')} {x}{q}{env['CM_DOWNLOAD_FILENAME']}{q} | md5sum -c{x_c} -" + checksum_result = subprocess.run( + checksum_cmd, + capture_output=True, + text=True, + shell=True, + env=subprocess_env) + if env.get('CM_DOWNLOAD_CHECKSUM_FILE', '') != '' or env.get( + 'CM_DOWNLOAD_CHECKSUM', '') != '': + # print(checksum_result) #for debugging + if "checksum did not match" in checksum_result.stderr.lower(): + computed_checksum = subprocess.run( + f"md5sum {env['CM_DOWNLOAD_FILENAME']}", + capture_output=True, + text=True, + shell=True).stdout.split(" ")[0] + print( + f"WARNING: File already present, mismatch between original checksum({env.get('CM_DOWNLOAD_CHECKSUM')}) and computed checksum({computed_checksum}). 
Deleting the already present file and downloading new.") + try: + os.remove(env['CM_DOWNLOAD_FILENAME']) + print( + f"File {env['CM_DOWNLOAD_FILENAME']} deleted successfully.") + except PermissionError: + return { + "return": 1, "error": f"Permission denied to delete file {env['CM_DOWNLOAD_FILENAME']}."} + cmutil_require_download = 1 + elif "no such file" in checksum_result.stderr.lower(): + # print(f"No file {env['CM_DOWNLOAD_FILENAME']}. Downloading through cmutil.") + cmutil_require_download = 1 + elif checksum_result.returncode > 0: + return { + "return": 1, "error": f"Error while checking checksum: {checksum_result.stderr}"} + else: + print( + f"File {env['CM_DOWNLOAD_FILENAME']} already present, original checksum and computed checksum matches! Skipping Download..") + else: + cmutil_require_download = 1 + + if cmutil_require_download == 1: + cm = automation.cmind + for i in range(1, 5): + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': url, + 'verify': verify_ssl}) + if r['return'] == 0: + break + oldurl = url + url = env.get('CM_DOWNLOAD_URL' + str(i), '') + if url == '': + break + print(f"Download from {oldurl} failed, trying from {url}") + + if r['return'] > 0: + return r + + env['CM_DOWNLOAD_CMD'] = "" + env['CM_DOWNLOAD_FILENAME'] = r['filename'] + + elif tool == "wget": + if env.get('CM_DOWNLOAD_FILENAME', '') != '': + extra_download_options += f" --tries=3 -O {q}{env['CM_DOWNLOAD_FILENAME']}{q} " + if not verify_ssl: + extra_download_options += "--no-check-certificate " + env['CM_DOWNLOAD_CMD'] = f"wget -nc {extra_download_options} {url}" + for i in range(1, 5): + url = env.get('CM_DOWNLOAD_URL' + str(i), '') + if url == '': + break + env['CM_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['CM_DOWNLOAD_FILENAME']} || true) && wget -nc {extra_download_options} {url})" + print(env['CM_DOWNLOAD_CMD']) + + elif tool == "curl": + if env.get('CM_DOWNLOAD_FILENAME', '') != '': + extra_download_options += f" --output {q}{env['CM_DOWNLOAD_FILENAME']}{q} " + + env['CM_DOWNLOAD_CMD'] = f"curl {extra_download_options} {url}" + for i in range(1, 5): + url = env.get('CM_DOWNLOAD_URL' + str(i), '') + if url == '': + break + env['CM_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['CM_DOWNLOAD_FILENAME']} || true) && curl {extra_download_options} {url})" + + elif tool == "gdown": + if not verify_ssl: + extra_download_options += "--no-check-certificate " + env['CM_DOWNLOAD_CMD'] = f"gdown {extra_download_options} {url}" + for i in range(1, 5): + url = env.get('CM_DOWNLOAD_URL' + str(i), '') + if url == '': + break + env['CM_DOWNLOAD_CMD'] += f" || (({del_cmd} {env['CM_DOWNLOAD_FILENAME']} || true) && gdown {extra_download_options} {url})" + + elif tool == "rclone": + # keeping this for backward compatibility. 
Ideally should be done + # via get,rclone-config script + if env.get('CM_RCLONE_CONFIG_CMD', '') != '': + env['CM_DOWNLOAD_CONFIG_CMD'] = env['CM_RCLONE_CONFIG_CMD'] + rclone_copy_using = env.get('CM_RCLONE_COPY_USING', 'sync') + if rclone_copy_using == "sync": + pre_clean = False + if env["CM_HOST_OS_TYPE"] == "windows": + # have to modify the variable from url to temp_url if it is + # going to be used anywhere after this point + url = url.replace("%", "%%") + temp_download_file = env['CM_DOWNLOAD_FILENAME'].replace( + "%", "%%") + env['CM_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), temp_download_file)}{q} -P --error-on-no-transfer" + else: + env['CM_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), env['CM_DOWNLOAD_FILENAME'])}{q} -P --error-on-no-transfer" + + filename = env['CM_DOWNLOAD_FILENAME'] + env['CM_DOWNLOAD_DOWNLOADED_FILENAME'] = filename + + filename = os.path.basename(env['CM_DOWNLOAD_FILENAME']) + filepath = os.path.join(os.getcwd(), filename) + + env['CM_DOWNLOAD_DOWNLOADED_PATH'] = filepath + + # verify checksum if file already present + if env.get('CM_DOWNLOAD_CHECKSUM_FILE', '') != '': + env['CM_DOWNLOAD_CHECKSUM_CMD'] = f"cd {q}{filepath}{q} {xsep} md5sum -c {x_c} {x}{q}{env['CM_DOWNLOAD_CHECKSUM_FILE']}{q}" + elif env.get('CM_DOWNLOAD_CHECKSUM', '') != '': + if os_info['platform'] == 'windows': + env['CM_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{} | md5sum {} -c -".format( + env.get('CM_DOWNLOAD_CHECKSUM'), x, escape_special_chars( + env['CM_DOWNLOAD_FILENAME']), x_c) + else: + env['CM_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{}{}{} | md5sum {} -c -".format( + env.get('CM_DOWNLOAD_CHECKSUM'), x, q, env['CM_DOWNLOAD_FILENAME'], q, x_c) + for i in range(1, 5): + if env.get('CM_DOWNLOAD_CHECKSUM' + str(i), '') == '': + break + if os_info['platform'] == 'windows': + env['CM_DOWNLOAD_CHECKSUM_CMD'] += " || echo {} {}{} | md5sum {} -c -".format( + env.get( + 'CM_DOWNLOAD_CHECKSUM' + + str(i)), + x, + escape_special_chars( + env['CM_DOWNLOAD_FILENAME']), + x_c) + else: + env['CM_DOWNLOAD_CHECKSUM_CMD'] += " || echo {} {}{}{}{} | md5sum {} -c -".format( + env.get( + 'CM_DOWNLOAD_CHECKSUM' + + str(i)), + x, + q, + env['CM_DOWNLOAD_FILENAME'].replace( + "%", + "%%"), + q, + x_c) + # print(env['CM_DOWNLOAD_CHECKSUM_CMD']) + else: + env['CM_DOWNLOAD_CHECKSUM_CMD'] = "" + + if not pre_clean: + env['CM_PRE_DOWNLOAD_CMD'] = '' + + if os_info['platform'] == 'windows' and env.get( + 'CM_DOWNLOAD_CMD', '') != '': + env['CM_DOWNLOAD_CMD'] = escape_special_chars( + env['CM_DOWNLOAD_CMD'], tool) + if pre_clean: + env['CM_PRE_DOWNLOAD_CLEAN_CMD'] = "del /Q %CM_DOWNLOAD_FILENAME%" + # Check that if empty CMD, should add "" + for x in ['CM_DOWNLOAD_CMD', 'CM_DOWNLOAD_CHECKSUM_CMD']: + env[x + '_USED'] = 'YES' if env.get(x, '') != '' else 'NO' + else: + env['CM_PRE_DOWNLOAD_CLEAN_CMD'] = "rm -f {}".format( + env['CM_DOWNLOAD_FILENAME']) + + return {'return': 0} + + +def postprocess(i): + + automation = i['automation'] + + env = i['env'] + + filepath = env['CM_DOWNLOAD_DOWNLOADED_PATH'] + + if not os.path.exists(filepath): + return { + 'return': 1, 'error': 'Downloaded path {} does not exist. 
Probably CM_DOWNLOAD_FILENAME is not set and CM_DOWNLOAD_URL given is not pointing to a file'.format(filepath)} + + if env.get('CM_DOWNLOAD_RENAME_FILE', '') != '': + file_dir = os.path.dirname(filepath) + new_file_name = env['CM_DOWNLOAD_RENAME_FILE'] + new_file_path = os.path.join(file_dir, new_file_name) + os.rename(filepath, new_file_path) + filepath = new_file_path + + if env.get('CM_DOWNLOAD_FINAL_ENV_NAME', '') != '': + env[env['CM_DOWNLOAD_FINAL_ENV_NAME']] = filepath + + env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath + + # Since may change directory, check if need to clean some temporal files + automation.clean_some_tmp_files({'env': env}) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/download-file/run.bat b/cmx4mlops/cmx4mlops/repo/script/download-file/run.bat new file mode 100644 index 000000000..5449c9ecf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/download-file/run.bat @@ -0,0 +1,56 @@ +rem Download file + +rem If MD5 is wrong, download again! + +rem Next line allows ERRORLEVEL inside if statements! +setlocal enabledelayedexpansion + +if NOT "%CM_DOWNLOAD_CONFIG_CMD%" == "" ( + echo. + echo %CM_DOWNLOAD_CONFIG_CMD% + echo. + %CM_DOWNLOAD_CONFIG_CMD% + IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL! +) + +set require_download=1 + +if not "%CM_DOWNLOAD_LOCAL_FILE_PATH%" == "" ( + set require_download=0 +) + +if "%CM_DOWNLOAD_TOOL%" == "cmutil" ( + set require_download=0 +) + + +if exist "%CM_DOWNLOAD_DOWNLOADED_PATH%" ( + if "%CM_DOWNLOAD_CHECKSUM_CMD_USED%" == "YES" ( + echo. + echo %CM_DOWNLOAD_CHECKSUM_CMD% + cmd /c %CM_DOWNLOAD_CHECKSUM_CMD% + IF !ERRORLEVEL! NEQ 0 ( + if NOT "%CM_DOWNLOAD_LOCAL_FILE_PATH%" == "" exit 1 + if "%CM_DOWNLOAD_CMD_USED%" == "NO" exit 1 + ) else ( + set require_download=0 + ) + ) +) + +if "!require_download!" == "1" ( + echo. + cmd /c %CM_PRE_DOWNLOAD_CLEAN_CMD% + + echo. + echo %CM_DOWNLOAD_CMD% + cmd /c %CM_DOWNLOAD_CMD% + IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL! + + if "%CM_DOWNLOAD_CHECKSUM_CMD_USED%" == "YES" ( + echo. + echo %CM_DOWNLOAD_CHECKSUM_CMD% + cmd /c %CM_DOWNLOAD_CHECKSUM_CMD% + IF !ERRORLEVEL! NEQ 0 EXIT 1 + ) +) diff --git a/cmx4mlops/cmx4mlops/repo/script/download-file/run.sh b/cmx4mlops/cmx4mlops/repo/script/download-file/run.sh new file mode 100644 index 000000000..c02e44f00 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/download-file/run.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +# Execute config command if it exists +if [[ -n ${CM_DOWNLOAD_CONFIG_CMD} ]]; then + echo -e "\nExecuting: ${CM_DOWNLOAD_CONFIG_CMD}" + eval "${CM_DOWNLOAD_CONFIG_CMD}" || exit $? +fi + +# Assume download is required by default +require_download=1 + +# No download needed if a local file path is specified or the tool is 'cmutil' +if [[ -n "${CM_DOWNLOAD_LOCAL_FILE_PATH}" || ${CM_DOWNLOAD_TOOL} == "cmutil" ]]; then + require_download=0 +fi + +# If the file exists, check the checksum if necessary +if [[ -e "${CM_DOWNLOAD_DOWNLOADED_PATH}" && -n "${CM_DOWNLOAD_CHECKSUM_CMD}" ]]; then + echo -e "\nChecking checksum: ${CM_DOWNLOAD_CHECKSUM_CMD}" + eval "${CM_DOWNLOAD_CHECKSUM_CMD}" + + if [[ $? -ne 0 ]]; then + # If the checksum fails, handle errors based on whether the file is local + if [[ -n "${CM_DOWNLOAD_LOCAL_FILE_PATH}" ]]; then + echo "Checksum failed for local file. Exiting." + exit 1 + else + echo "Checksum failed. Marking for re-download." 
+ CM_PRE_DOWNLOAD_CLEAN=true + fi + else + # If checksum succeeds, no download is required + require_download=0 + fi +fi + +# Perform download if required +if [[ ${require_download} == 1 ]]; then + echo "" + + # If a pre-download clean command is specified and needed, execute it + if [[ -n "${CM_PRE_DOWNLOAD_CLEAN}" && "${CM_PRE_DOWNLOAD_CLEAN,,}" != "false" ]]; then + echo "Executing pre-download clean: ${CM_PRE_DOWNLOAD_CLEAN_CMD}" + eval "${CM_PRE_DOWNLOAD_CLEAN_CMD}" || exit $? + fi + + # Execute the download command + echo "Downloading: ${CM_DOWNLOAD_CMD}" + eval "${CM_DOWNLOAD_CMD}" || exit $? +fi + +# Verify checksum again if necessary +if [[ ${CM_DOWNLOAD_TOOL} == "cmutil" || ${require_download} == 1 ]]; then + if [[ -n "${CM_DOWNLOAD_CHECKSUM_CMD}" ]]; then + echo -e "\nVerifying checksum after download: ${CM_DOWNLOAD_CHECKSUM_CMD}" + eval "${CM_DOWNLOAD_CHECKSUM_CMD}" || exit $? + fi +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/download-file/tests/download-file.bat b/cmx4mlops/cmx4mlops/repo/script/download-file/tests/download-file.bat new file mode 100644 index 000000000..442150282 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/download-file/tests/download-file.bat @@ -0,0 +1,2 @@ +cmr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _wget" -j --env.CM_VERIFY_SSL=False --env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 + diff --git a/cmx4mlops/cmx4mlops/repo/script/download-file/tests/download-file2.bat b/cmx4mlops/cmx4mlops/repo/script/download-file/tests/download-file2.bat new file mode 100644 index 000000000..2032bc177 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/download-file/tests/download-file2.bat @@ -0,0 +1 @@ +cmr "download file _url.https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 _cmutil" -j --env.CM_VERIFY_SSL=False --env.CM_DOWNLOAD_CHECKSUM=af3f9525965b2c1acc348fb882a5bfd1 diff --git a/cmx4mlops/cmx4mlops/repo/script/download-torrent/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/download-torrent/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/download-torrent/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/download-torrent/README.md b/cmx4mlops/cmx4mlops/repo/script/download-torrent/README.md new file mode 100644 index 000000000..ac2991e6d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/download-torrent/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/download-torrent](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/download-torrent) for the documentation of this CM script. 
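One detail of `customize.py` above worth calling out: every download tool gets mirror fallbacks. After `CM_DOWNLOAD_URL`, the code probes `CM_DOWNLOAD_URL1` through `CM_DOWNLOAD_URL4` and chains a retry for each mirror found. A condensed sketch of that lookup (illustrative; the real code interleaves it with each tool's command builder):

```python
import os


# Sketch of the mirror-fallback lookup used in customize.py above:
# CM_DOWNLOAD_URL first, then CM_DOWNLOAD_URL1..CM_DOWNLOAD_URL4.
def candidate_urls(env=os.environ):
    urls = []
    url = env.get("CM_DOWNLOAD_URL", "")
    if url:
        urls.append(url)
    for i in range(1, 5):
        mirror = env.get("CM_DOWNLOAD_URL" + str(i), "")
        if mirror == "":
            break  # mirrors are expected to be numbered contiguously
        urls.append(mirror)
    return urls


# A downloader would then try each URL in order, deleting any partial
# file between attempts, exactly as the generated shell commands do.
```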
diff --git a/cmx4mlops/cmx4mlops/repo/script/download-torrent/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/download-torrent/_cm.yaml
new file mode 100644
index 000000000..d2e83b8fe
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/download-torrent/_cm.yaml
@@ -0,0 +1,29 @@
+alias: download-torrent
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: DevOps automation
+default_env:
+  CM_TORRENT_WAIT_UNTIL_COMPLETED: 'no'
+deps:
+- tags: get,generic-sys-util,_transmission
+input_description: {}
+input_mapping:
+  wait: CM_TORRENT_WAIT_UNTIL_COMPLETED
+new_env_keys:
+- CM_TORRENT_DOWNLOADED_PATH
+- <<<CM_TORRENT_DOWNLOADED_PATH_ENV_KEY>>>
+new_state_keys: []
+post_deps: []
+posthook_deps: []
+prehook_deps: []
+tags:
+- download
+- torrent
+- download-torrent
+uid: 69b752c5618e45bb
+variations:
+  torrent.#:
+    env:
+      CM_TORRENT_FILE: '#'
+versions: {}
diff --git a/cmx4mlops/cmx4mlops/repo/script/download-torrent/customize.py b/cmx4mlops/cmx4mlops/repo/script/download-torrent/customize.py
new file mode 100644
index 000000000..0b2dda284
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/download-torrent/customize.py
@@ -0,0 +1,48 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    meta = i['meta']
+
+    automation = i['automation']
+
+    quiet = (env.get('CM_QUIET', False) == 'yes')
+
+    if not env.get('CM_TORRENT_DOWNLOADED_FILE_NAME'):
+        return {'return': 1, 'error': 'CM_TORRENT_DOWNLOADED_FILE_NAME is not set'}
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+    torrent_downloaded_path = os.path.join(
+        env['CM_TORRENT_DOWNLOADED_DIR'],
+        env['CM_TORRENT_DOWNLOADED_NAME'])
+    env['CM_TORRENT_DOWNLOADED_PATH'] = torrent_downloaded_path
+
+    if 'CM_TORRENT_DOWNLOADED_PATH_ENV_KEY' in env:
+        key = env['CM_TORRENT_DOWNLOADED_PATH_ENV_KEY']
+        env[key] = torrent_downloaded_path
+
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = torrent_downloaded_path
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/download-torrent/run.sh b/cmx4mlops/cmx4mlops/repo/script/download-torrent/run.sh
new file mode 100644
index 000000000..c3d639ff1
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/download-torrent/run.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+chmod 777 ${PWD}
+#transmission-remote --no-auth --download-dir ${PWD} -a ${CM_TORRENT_FILE}
+cmd="transmission-remote --download-dir ${PWD} -a ${CM_TORRENT_FILE}"
+echo $cmd
+eval $cmd
+test $? -eq 0 || exit $?
+
+cmd="transmission-remote -l"
+echo $cmd
+eval $cmd
+test $? -eq 0 || exit $?
+
+if [[ ${CM_TORRENT_WAIT_UNTIL_COMPLETED} == "yes" ]]; then
+  while true;
+  do
+    out=`transmission-remote -l |grep ${CM_TORRENT_DOWNLOADED_FILE_NAME} | grep "100%"`
+    if [[ -z $out ]]; then
+      transmission-remote -l
+      sleep 6
+    else
+      break
+    fi
+  done
+fi
+
+id=`transmission-remote -l |grep ${CM_TORRENT_DOWNLOADED_FILE_NAME} |tr -s ' ' | cut -d' ' -f2`
+test $? -eq 0 || exit $?
+location=`transmission-remote -t${id} -i |grep Location |cut -d':' -f2 |tr -d ' '`
+test $? -eq 0 || exit $?
+echo "CM_TORRENT_DOWNLOADED_DIR=$location">> tmp-run-env.out +name=`transmission-remote -t${id} -i |grep Name |cut -d':' -f2 |tr -d ' '` +test $? -eq 0 || exit $? +echo "CM_TORRENT_DOWNLOADED_NAME=$name">> tmp-run-env.out diff --git a/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/_cm.yaml new file mode 100644 index 000000000..4cea12c42 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/_cm.yaml @@ -0,0 +1,21 @@ +alias: draw-graph-from-json-data +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +tags: +- draw +- graph +- from-json +- from-json-data +uid: 2ed1ebcb6be548fd +input_mapping: + input: CM_JSON_INPUT_FILE + json_input_file: CM_JSON_INPUT_FILE + output_image_path: CM_OUTPUT_IMAGE_PATH +deps: + - tags: get,python3 + names: + - python + - python3 + - tags: get,generic-python-lib,_package.networkx + - tags: get,generic-python-lib,_package.matplotlib diff --git a/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/customize.py b/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/customize.py new file mode 100644 index 000000000..ac7d293bb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/customize.py @@ -0,0 +1,43 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + env['CM_RUN_CMD'] = f"""{env['CM_PYTHON_BIN_WITH_PATH']} {os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'],"process-cm-deps.py")} {env['CM_JSON_INPUT_FILE']}""" + + if env.get('CM_OUTPUT_IMAGE_PATH', '') != '': + env['CM_RUN_CMD'] += f""" --output_image {env['CM_OUTPUT_IMAGE_PATH']}""" + + if env.get('CM_OUTPUT_MERMAID_PATH', '') != '': + env['CM_RUN_CMD'] += f""" --output_mermaid {env['CM_OUTPUT_MERMAID_PATH']}""" + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/process-cm-deps.py b/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/process-cm-deps.py new file mode 100644 index 000000000..01c427e97 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/process-cm-deps.py @@ -0,0 +1,122 @@ +import argparse +import networkx as nx +import matplotlib.pyplot as plt +import json + +# Function to parse the nested JSON structure + + +def parse_json_to_edges(json_data): + edges = [] + for root_key, nodes in json_data.items(): + for node in 
nodes: + for node_key, node_details in node.items(): + edges.append((node_details["parent"], node_key)) + return edges + + +def generate_mermaid_output(json_data, mermaid_file="graph.mmd"): + edges = parse_json_to_edges(json_data) + + # Start the Mermaid graph definition + mermaid_lines = ["graph TD"] # Use "TD" for top-down; "LR" for left-right + + # Add each edge in Mermaid syntax + for parent, child in edges: + mermaid_lines.append( + f""" {parent.replace(" ", "_")} --> {child.replace(" ", "_")}""") + + # Write to a Mermaid file + with open(mermaid_file, "w") as f: + f.write("\n".join(mermaid_lines)) + + print(f"Mermaid syntax saved to {mermaid_file}") + + +# Function to generate and visualize the graph +def generate_graph_from_nested_json(json_data, output_image="graph.png"): + # Parse the JSON to extract edges + edges = parse_json_to_edges(json_data) + + # Initialize a directed graph + G = nx.DiGraph() + + # Add edges to the graph + G.add_edges_from(edges) + + # Draw the graph using a spring layout for better visualization + plt.figure(figsize=(30, 25)) + # pos = nx.spectral_layout(G, seed=42) # Seed for consistent layout + pos = nx.shell_layout(G) # Seed for consistent layout + nx.draw( + G, + pos, + with_labels=True, + node_size=7000, + node_color="skyblue", + font_size=9, + font_color="darkblue", + edge_color="gray", + arrowsize=20, + linewidths=1.5 + ) + plt.title("Parent-Child Graph from Nested JSON", fontsize=16) + + # Save the visualization + plt.savefig(output_image, format="png", dpi=300) + print(f"Graph visualization saved as {output_image}") + # plt.show() + + return G + +# Function to export the graph data + + +def export_graph_data(graph, filename="graph.graphml"): + nx.write_graphml(graph, filename) + print(f"Graph data saved as {filename}") + +# Main function to handle argument parsing and processing + + +def main(): + parser = argparse.ArgumentParser( + description="Generate a graph from nested JSON input.") + parser.add_argument( + "json_file", + type=str, + help="Path to the JSON input file.") + parser.add_argument( + "--output_image", + type=str, + default="graph.png", + help="Output image file for the graph visualization.") + parser.add_argument( + "--output_mermaid", + type=str, + default="graph.mmd", + help="Output mermaid file for the graph data.") + parser.add_argument( + "--output_graphml", + type=str, + default="graph.graphml", + help="Output GraphML file for the graph data.") + + args = parser.parse_args() + + # Load the JSON input file + with open(args.json_file, "r") as f: + json_data = json.load(f) + + # Generate the graph + G = generate_graph_from_nested_json( + json_data, output_image=args.output_image) + + generate_mermaid_output(json_data, mermaid_file=args.output_mermaid) + + # Export the graph data + export_graph_data(G, filename=args.output_graphml) + + +if __name__ == "__main__": + main() diff --git a/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/run.bat b/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/run.bat new file mode 100644 index 000000000..648302ca7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/run.sh b/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/run.sh new file mode 100644 index 000000000..4c23c380e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/draw-graph-from-json-data/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +#CM Script location: 
${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + +echo "Running: " +echo "${CM_RUN_CMD}" +echo "" + +if [[ ${CM_FAKE_RUN} != "yes" ]]; then + eval "${CM_RUN_CMD}" + test $? -eq 0 || exit 1 +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/README.md b/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/README.md new file mode 100644 index 000000000..3205675ef --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts//dump-pip-freeze](https://docs.mlcommons.org/cm4mlops/scripts//dump-pip-freeze) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/_cm.yaml new file mode 100644 index 000000000..39acd5eee --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/_cm.yaml @@ -0,0 +1,16 @@ +alias: dump-pip-freeze +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +tags: +- dump +- pip +- freeze +new_state_keys: + - pip_freeze +deps: + - tags: get,python + names: + - python + - python3 +uid: 33eb0a8006664cae diff --git a/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/customize.py b/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/customize.py new file mode 100644 index 000000000..92275ea2a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/customize.py @@ -0,0 +1,65 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + if env.get('CM_DUMP_RAW_PIP_FREEZE_FILE_PATH', '') == '': + env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] = os.path.join( + os.getcwd(), "tmp-pip-freeze") + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + os_info = i['os_info'] + + automation = i['automation'] + + pip_freeze = {} + pip_freeze_file = env['CM_DUMP_RAW_PIP_FREEZE_FILE_PATH'] + if not os.path.isfile(pip_freeze_file): + # If was not created, sometimes issues on Windows + # There is another workaround + if os_info['platform'] == 'windows': + r = automation.cmind.access({'action': 'system', + 'automation': 'utils', + 'cmd': 'py -m pip freeze', + 'stdout': pip_freeze_file}) + # skip output + + if os.path.isfile(pip_freeze_file): + with open(pip_freeze_file, "r") as f: + for line in f.readlines(): + if "==" in 
line:
+                    split = line.split("==")
+                    pip_freeze[split[0]] = split[1].strip()
+
+    state['pip_freeze'] = pip_freeze
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/dump.py b/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/dump.py
new file mode 100644
index 000000000..c6d4dc2ea
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/dump.py
@@ -0,0 +1,23 @@
+import os
+from pip._internal.operations import freeze
+
+pip_freeze_out = os.environ.get(
+    'CM_DUMP_RAW_PIP_FREEZE_FILE_PATH',
+    'tmp-pip-freeze')
+
+if os.path.isfile(pip_freeze_out):
+    os.remove(pip_freeze_out)
+
+pkgs = freeze.freeze()
+
+x = ''
+
+try:
+    for pkg in pkgs:
+        x += pkg + '\n'
+except BaseException:
+    pass
+
+if len(x) > 0:
+    with open(pip_freeze_out, "w") as f:
+        f.write(x)
diff --git a/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/run.bat b/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/run.bat
new file mode 100644
index 000000000..b323ddc22
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/run.bat
@@ -0,0 +1,4 @@
+if not "%CM_FAKE_RUN%" == "yes" (
+   %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\dump.py
+   IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+)
diff --git a/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/run.sh b/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/run.sh
new file mode 100644
index 000000000..a1cdb52eb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/dump-pip-freeze/run.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+#To export any variable
+#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
+
+#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+
+
+
+function exit_if_error() {
+  test $? -eq 0 || exit $?
+}
+
+function run() {
+  echo "Running: "
+  echo "$1"
+  echo ""
+  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+    eval "$1"
+    exit_if_error
+  fi
+}
+
+#Add your run commands here...
+# run "$CM_RUN_CMD"
+run "${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/dump.py"
diff --git a/cmx4mlops/cmx4mlops/repo/script/extract-file/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/extract-file/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/extract-file/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/extract-file/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/extract-file/README-extra.md
new file mode 100644
index 000000000..fbd8ccaf4
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/extract-file/README-extra.md
@@ -0,0 +1,115 @@
+# CM interface to extract files in a unified way on any system
+
+## Extract files without CM caching
+
+You can use this script to extract `.tar`, `.gz`, `.zip`, `.bz2`, `.tar.gz` and `.tgz` files.
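+
+Besides the `cmr` command line shown below, the same interface can be driven from Python via the `cmind` API. A minimal sketch (assuming the `cmind` package is installed; `my-archive.zip` is a hypothetical local file):
+
+```python
+import cmind
+
+# Run the "extract file" script with the _keep variation,
+# passing the archive path via its input environment variable
+r = cmind.access({'action': 'run',
+                  'automation': 'script',
+                  'tags': 'extract,file,_keep',
+                  'env': {'CM_EXTRACT_FILEPATH': 'my-archive.zip'},
+                  'out': 'con'})
+if r['return'] > 0:
+    cmind.error(r)
+```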
+
+Before using further examples, you can download `coco-2017-val-annotations.zip` using CM:
+```bash
+cmr "download file" --url=https://cKnowledge.org/test/coco-2017-val-annotations.zip
+```
+
+Extract this archive in the current path while keeping the archive file:
+
+```bash
+cmr "extract file _keep" --input=coco-2017-val-annotations.zip
+```
+
+or
+
+```bash
+cmr "extract file _keep _path.coco-2017-val-annotations.zip"
+```
+
+You can remove `_keep` to delete the archive after extracting files:
+
+```bash
+cmr "extract file" --input=coco-2017-val-annotations.zip
+```
+
+#### Output environment variables
+
+You can check the environment variables produced by this CM script by adding the `-j` flag:
+
+```bash
+cmr "extract file _keep" --input=coco-2017-val-annotations.zip -j
+```
+
+```json
+  "new_env": {
+    "CM_EXTRACT_EXTRACTED_PATH": "D:\\Work99.3 readme\\xyz",
+    "CM_GET_DEPENDENT_CACHED_PATH": "D:\\Work99.3 readme\\xyz"
+  },
+```
+
+#### Input flags and equivalent environment variables
+
+* `--input` or `--env.CM_EXTRACT_FILEPATH` - input file
+* `--extract_path` or `--to` or `--env.CM_EXTRACT_PATH` - where to extract files (`--input` should then be a full path)
+* `--extra_folder` or `--env.CM_EXTRACT_TO_FOLDER` - extra directory to extract into (to avoid cluttering the current directory)
+
+#### Variations
+
+* `_keep` or `_no-remove-extracted` or `--env.CM_EXTRACT_REMOVE_EXTRACTED=no` - keep the archive file (by default it is deleted after extraction)
+
+
+
+### Extract to a specific folder
+
+Note that you need to provide a full path to the archive file if you want to extract it to some directory:
+
+```bash
+cmr "extract file _keep" --input="$PWD/coco-2017-val-annotations.zip" --extract_path="$HOME/cm-test"
+```
+
+### Add extra folder to extracted files
+
+You can add an extra folder when extracting files to avoid cluttering the current directory:
+
+```bash
+cmr "extract file _keep" --input=coco-2017-val-annotations.zip --extra_folder=xyz
+```
+
+
+
+
+## Extract one file and test MD5SUM without CM caching
+
+You can use this script to extract a single archived file (a model, captions, etc.) and verify its MD5 checksum.
+
+To test this CM script, download `captions_val2017.json.gz`:
+```bash
+cmr "download file _url.https://cKnowledge.org/test/captions_val2017.json.gz"
+```
+
+Then extract it and test MD5SUM as follows:
+
+```bash
+cmr "extract file _keep _path.captions_val2017.json.gz" --env.CM_EXTRACT_EXTRACTED_CHECKSUM=b7bec29ab7bd8971ae4cafc2390a658f -j
+```
+
+
+### Force another filename during extract
+
+Some workflows may need to use a different filename than the original. You can change it as follows:
+```bash
+cmr "extract file _keep _path.captions_val2017.json.gz" --env.CM_EXTRACT_EXTRACTED_FILENAME=new-file.json --env.CM_EXTRACT_EXTRACTED_CHECKSUM=b7bec29ab7bd8971ae4cafc2390a658f
+```
+
+
+
+
+## Extract file(s) to CM cache
+
+You can use all of the above commands with the `--force_cache` and `--extra_cache_tags` flags.
+In this case, file(s) will be extracted to the CM cache and can be reused by other CM scripts and workflows.
+Note that you need to provide the full path to the archive file.
+
+```bash
+cmr "extract file _keep" --input=$HOME/coco-2017-val-annotations.zip --force_cache --extra_cache_tags=coco,2017,val,annotations
+```
+
+You can find it in the CM cache using the extra cache tags as follows:
+```bash
+cm show cache "extract file annotations coco 2017 val"
+```
diff --git a/cmx4mlops/cmx4mlops/repo/script/extract-file/README.md b/cmx4mlops/cmx4mlops/repo/script/extract-file/README.md
new file mode 100644
index 000000000..6ee544554
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/extract-file/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/extract-file](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/extract-file) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/extract-file/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/extract-file/_cm.yaml
new file mode 100644
index 000000000..3cee898a0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/extract-file/_cm.yaml
@@ -0,0 +1,47 @@
+alias: extract-file
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: false
+can_force_cache: true
+category: DevOps automation
+deps:
+- tags: detect,os
+- enable_if_env:
+    CM_HOST_OS_FLAVOR:
+    - macos
+  skip_if_any_env:
+    CM_EXTRACT_EXTRACTED_CHECKSUM:
+    - 'off'
+    CM_EXTRACT_EXTRACTED_CHECKSUM_FILE:
+    - 'off'
+  tags: get,generic-sys-util,_md5sha1sum
+input_description: {}
+input_mapping:
+  extra_folder: CM_EXTRACT_TO_FOLDER
+  extract_path: CM_EXTRACT_PATH
+  input: CM_EXTRACT_FILEPATH
+  to: CM_EXTRACT_PATH
+new_env_keys:
+- CM_EXTRACT_EXTRACTED_PATH
+- <<<CM_EXTRACT_FINAL_ENV_NAME>>>
+- CM_GET_DEPENDENT_CACHED_PATH
+new_state_keys: []
+post_deps: []
+posthook_deps: []
+prehook_deps: []
+tags:
+- extract
+- file
+tags_help: extract file
+uid: 3f0b76219d004817
+variations:
+  keep:
+    env:
+      CM_EXTRACT_REMOVE_EXTRACTED: 'no'
+  no-remove-extracted:
+    env:
+      CM_EXTRACT_REMOVE_EXTRACTED: 'no'
+  path.#:
+    env:
+      CM_EXTRACT_FILEPATH: '#'
+versions: {}
diff --git a/cmx4mlops/cmx4mlops/repo/script/extract-file/customize.py b/cmx4mlops/cmx4mlops/repo/script/extract-file/customize.py
new file mode 100644
index 000000000..b879967c9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/extract-file/customize.py
@@ -0,0 +1,228 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+import hashlib
+
+
+def preprocess(i):
+
+    variation_tags = i.get('variation_tags', [])
+
+    os_info = i['os_info']
+
+    windows = os_info['platform'] == 'windows'
+
+#    xsep = '^&^&' if windows else '&&'
+    xsep = '&&'
+    q = '"' if os_info['platform'] == 'windows' else "'"
+
+    env = i['env']
+
+    meta = i['meta']
+
+    automation = i['automation']
+
+    quiet = (env.get('CM_QUIET', False) == 'yes')
+
+    filename = env.get('CM_EXTRACT_FILEPATH', '')
+    if filename == '':
+        return {
+            'return': 1, 'error': 'Extract with no download requested and CM_EXTRACT_FILEPATH is not set'}
+
+    if windows:
+        filename = filename.replace("%", "%%")
+
+    env['CM_EXTRACT_FILENAME'] = filename
+
+    # Check if extract to some path outside CM cache (to reuse large files
+    # later if cache is cleaned)
+    extract_path = 
env.get('CM_EXTRACT_PATH', '') + if extract_path != '': + if not os.path.exists(extract_path): + os.makedirs(extract_path, exist_ok=True) + + os.chdir(extract_path) + + # By default remove archive after extraction + remove_extracted = False if env.get( + 'CM_EXTRACT_REMOVE_EXTRACTED', + '').lower() == 'no' else True + + if filename.endswith(".zip") or filename.endswith(".pth"): + env['CM_EXTRACT_TOOL'] = "unzip" + elif filename.endswith(".tar.gz"): + if windows: + x = '"' if ' ' in filename else '' + env['CM_EXTRACT_CMD0'] = 'gzip -d ' + x + filename + x + filename = filename[:-3] # leave only .tar + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf' + env['CM_EXTRACT_TOOL'] = 'tar ' + elif os_info['platform'] == 'darwin': + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvzf ' + env['CM_EXTRACT_TOOL'] = 'tar ' + else: + env['CM_EXTRACT_TOOL_OPTIONS'] = ' --skip-old-files -xvzf ' + env['CM_EXTRACT_TOOL'] = 'tar ' + elif filename.endswith(".tar.xz"): + if windows: + x = '"' if ' ' in filename else '' + env['CM_EXTRACT_CMD0'] = 'xz -d ' + x + filename + x + filename = filename[:-3] # leave only .tar + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf' + env['CM_EXTRACT_TOOL'] = 'tar ' + else: + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvJf' + env['CM_EXTRACT_TOOL'] = 'tar ' + elif filename.endswith(".tar"): + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf' + env['CM_EXTRACT_TOOL'] = 'tar ' + elif filename.endswith(".gz"): + # Check target filename + extracted_filename = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '') + if extracted_filename == '': + extracted_filename = os.path.basename(filename)[:-3] + env['CM_EXTRACT_EXTRACTED_FILENAME'] = extracted_filename + + x = '-c' if windows else '-k' + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d ' + \ + (x + ' ' if not remove_extracted else '') + \ + ' > ' + q + extracted_filename + q + ' < ' + + env['CM_EXTRACT_TOOL'] = 'gzip ' + elif env.get('CM_EXTRACT_UNZIP', '') == 'yes': + env['CM_EXTRACT_TOOL'] = 'unzip ' + elif env.get('CM_EXTRACT_UNTAR', '') == 'yes': + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf' + env['CM_EXTRACT_TOOL'] = 'tar ' + elif env.get('CM_EXTRACT_GZIP', '') == 'yes': + env['CM_EXTRACT_CMD'] = 'gzip ' + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d ' + \ + ('-k ' if not remove_extracted else '') + else: + return {'return': 1, + 'error': 'Neither CM_EXTRACT_UNZIP nor CM_EXTRACT_UNTAR is yes'} + + env['CM_EXTRACT_PRE_CMD'] = '' + + extract_to_folder = env.get('CM_EXTRACT_TO_FOLDER', '') + + # Check if extract to additional folder in the current directory (or external path) + # to avoid messing up other files and keep clean directory structure + # particularly if archive has many sub-directories and files + if extract_to_folder != '': + if 'tar ' in env['CM_EXTRACT_TOOL']: + x = '' if windows else '-p' + y = '"' if ' ' in extract_to_folder else '' + + # env['CM_EXTRACT_TOOL_OPTIONS'] = ' --one-top-level='+ env['CM_EXTRACT_TO_FOLDER'] + env.get('CM_EXTRACT_TOOL_OPTIONS', '') + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -C ' + y + extract_to_folder + \ + y + ' ' + env.get('CM_EXTRACT_TOOL_OPTIONS', '') + env['CM_EXTRACT_PRE_CMD'] = 'mkdir ' + x + ' ' + \ + y + extract_to_folder + y + ' ' + xsep + ' ' + env['CM_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder + + elif 'unzip' in env['CM_EXTRACT_TOOL']: + env['CM_EXTRACT_TOOL_OPTIONS'] = ' -d ' + q + extract_to_folder + q + env['CM_EXTRACT_EXTRACTED_FILENAME'] = extract_to_folder + + x = '"' if ' ' in filename else '' + env['CM_EXTRACT_CMD'] = env['CM_EXTRACT_PRE_CMD'] + env['CM_EXTRACT_TOOL'] + ' ' + \ + env.get('CM_EXTRACT_TOOL_EXTRA_OPTIONS', '') + \ + ' 
' + env.get('CM_EXTRACT_TOOL_OPTIONS', '') + ' ' + x + filename + x
+
+    print('')
+    print('Current directory: {}'.format(os.getcwd()))
+    print('Command line: "{}"'.format(env['CM_EXTRACT_CMD']))
+    print('')
+
+    final_file = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '')
+
+    if final_file != '':
+        if env.get('CM_EXTRACT_EXTRACTED_CHECKSUM_FILE', '') != '':
+            env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = f"cd {q}{final_file}{q} {xsep} md5sum -c {q}{env['CM_EXTRACT_EXTRACTED_CHECKSUM_FILE']}{q}"
+        elif env.get('CM_EXTRACT_EXTRACTED_CHECKSUM', '') != '':
+            x = '*' if os_info['platform'] == 'windows' else ''
+            env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = "echo {} {}{q}{}{q} | md5sum -c".format(
+                env.get('CM_EXTRACT_EXTRACTED_CHECKSUM'), x, env['CM_EXTRACT_EXTRACTED_FILENAME'], q=q)
+        else:
+            env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = ""
+    else:
+        env['CM_EXTRACT_EXTRACTED_CHECKSUM_CMD'] = ""
+
+# Not needed - can be simpler with cmd /c {empty}
+#    if os_info['platform'] == 'windows':
+#        # Check that if empty CMD, should add ""
+#        for x in ['CM_EXTRACT_CMD', 'CM_EXTRACT_EXTRACTED_CHECKSUM_CMD']:
+#            env[x+'_USED']='YES' if env.get(x,'')!='' else 'NO'
+
+    # If force cache, add filepath to tag unless _path is used ...
+    path_tag = 'path.' + filename
+
+    add_extra_cache_tags = []
+    if path_tag not in variation_tags:
+        add_extra_cache_tags.append(path_tag)
+
+    return {'return': 0, 'add_extra_cache_tags': add_extra_cache_tags}
+
+
+def postprocess(i):
+
+    automation = i['automation']
+
+    env = i['env']
+
+    extract_to_folder = env.get('CM_EXTRACT_TO_FOLDER', '')
+    extract_path = env.get('CM_EXTRACT_PATH', '')
+
+    extracted_file = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '')
+
+    # Preparing filepath
+    # Can be either full extracted filename (such as model) or folder
+
+    if extracted_file != '':
+        filename = os.path.basename(extracted_file)
+
+#        We do not use this env variable anymore
+#        folderpath = env.get('CM_EXTRACT_EXTRACT_TO_PATH', '')
+        folderpath = extract_path if extract_path != '' else os.getcwd()
+
+        filepath = os.path.join(folderpath, filename)
+    else:
+
+        filepath = os.getcwd()  # Extracted to the root cache folder
+
+    if not os.path.exists(filepath):
+        return {
+            'return': 1, 'error': 'Path {} was not created or doesn\'t exist'.format(filepath)}
+#        return {'return':1, 'error': 'CM_EXTRACT_EXTRACTED_FILENAME and
+#        CM_EXTRACT_TO_FOLDER are not set'}
+
+    env['CM_EXTRACT_EXTRACTED_PATH'] = filepath
+
+    # Set external environment variable with the final path
+    if env.get('CM_EXTRACT_FINAL_ENV_NAME', '') != '':
+        env[env['CM_EXTRACT_FINAL_ENV_NAME']] = filepath
+
+    # Detect if this file will be deleted or moved
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = filepath
+
+    # Check if need to remove archive after extraction
+    if env.get('CM_EXTRACT_REMOVE_EXTRACTED', '').lower() != 'no':
+        archive_filepath = env.get('CM_EXTRACT_FILEPATH', '')
+        if archive_filepath != '' and os.path.isfile(archive_filepath):
+            os.remove(archive_filepath)
+
+    # Since we may have changed directory, check if we need to clean some temporary files
+    automation.clean_some_tmp_files({'env': env})
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/extract-file/run.bat b/cmx4mlops/cmx4mlops/repo/script/extract-file/run.bat
new file mode 100644
index 000000000..530ebbd2c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/extract-file/run.bat
@@ -0,0 +1,39 @@
+rem Extract file
+
+rem If the MD5 checksum is wrong, extract again!
+
+rem Next line allows ERRORLEVEL inside if statements!
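+rem (without delayed expansion, ERRORLEVEL inside the parenthesized blocks
+rem  below would be expanded once at parse time instead of after each command)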
+setlocal enabledelayedexpansion + +set require_extract=1 + +if exist "%CM_EXTRACT_EXTRACTED_FILENAME%" ( + set require_extract=0 + + echo. + echo %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD% + cmd /c %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD% + IF !ERRORLEVEL! NEQ 0 ( + set require_extract=1 + del /Q %CM_EXTRACT_EXTRACTED_FILENAME% + ) +) + +if "!require_extract!" == "1" ( + if not "%CM_EXTRACT_CMD0%" == "" ( + echo. + echo %CM_EXTRACT_CMD0% + cmd /c %CM_EXTRACT_CMD0% + IF !ERRORLEVEL! NEQ 0 EXIT 1 + ) + + echo. + echo %CM_EXTRACT_CMD% + cmd /c %CM_EXTRACT_CMD% + IF !ERRORLEVEL! NEQ 0 EXIT 1 + + echo. + echo %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD% + cmd /c %CM_EXTRACT_EXTRACTED_CHECKSUM_CMD% + IF !ERRORLEVEL! NEQ 0 EXIT 1 +) diff --git a/cmx4mlops/cmx4mlops/repo/script/extract-file/run.sh b/cmx4mlops/cmx4mlops/repo/script/extract-file/run.sh new file mode 100644 index 000000000..4ee4f8512 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/extract-file/run.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +if [ -e "${CM_EXTRACT_EXTRACTED_FILENAME}" ] ; then + CMD=${CM_EXTRACT_EXTRACTED_CHECKSUM_CMD} + echo "" + echo "${CMD}" + eval "${CMD}" + test $? -eq 0 && exit 0 +fi + +CMD=${CM_EXTRACT_CMD} +echo "" +echo "${CMD}" +eval "${CMD}" +test $? -eq 0 || exit $? + +CMD=${CM_EXTRACT_EXTRACTED_CHECKSUM_CMD} +echo "" +echo "${CMD}" +eval "${CMD}" +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/fail/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/fail/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/fail/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/fail/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/fail/README-extra.md new file mode 100644 index 000000000..582991f6d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/fail/README-extra.md @@ -0,0 +1 @@ +# CM script diff --git a/cmx4mlops/cmx4mlops/repo/script/fail/README.md b/cmx4mlops/cmx4mlops/repo/script/fail/README.md new file mode 100644 index 000000000..dce61cb8b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/fail/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/fail](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/fail) for the documentation of this CM script. 
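+
+A quick sanity check (a sketch): the `_windows` variation sets `CM_FAIL_WINDOWS`, so the following command should return an error on Windows and pass elsewhere:
+
+```bash
+cmr "fail filter _windows"
+```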
diff --git a/cmx4mlops/cmx4mlops/repo/script/fail/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/fail/_cm.yaml
new file mode 100644
index 000000000..9c5d8fcfc
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/fail/_cm.yaml
@@ -0,0 +1,18 @@
+uid: 3aaee82e19d243cd
+alias: fail
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: "DevOps automation"
+
+cache: false
+
+tags:
+- fail
+- filter
+
+variations:
+  windows:
+    env:
+      CM_FAIL_WINDOWS: true
diff --git a/cmx4mlops/cmx4mlops/repo/script/fail/customize.py b/cmx4mlops/cmx4mlops/repo/script/fail/customize.py
new file mode 100644
index 000000000..c1cf26c4f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/fail/customize.py
@@ -0,0 +1,41 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    meta = i['meta']
+
+    automation = i['automation']
+
+    quiet = (env.get('CM_QUIET', False) == 'yes')
+
+    # Checking conditions
+    if env.get('CM_FAIL_WINDOWS', '').lower() == 'true':
+        if os_info['platform'] == 'windows':
+            return {'return': 1,
+                    'error': 'CM detected fail condition: running on Windows'}
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/README-extra.md
new file mode 100644
index 000000000..1c50fc8e7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/README-extra.md
@@ -0,0 +1,16 @@
+This script flashes the ELF binary using Zephyr.
+## Install
+```bash
+cm run script --tags=flash,tiny,_[VARIANT],_[MODEL]
+```
+where:
+* `[VARIANT]` is one of `cmsis_nn`, `native`
+* `[MODEL]` is one of `ad`, `ic`, `kws`, `vww`
+
+We can also pass a known build directory, as shown here:
+
+```bash
+cm run script --tags=flash,tiny --build_dir=[BUILD_DIR]
+```
+where:
+* `[BUILD_DIR]` is the build directory containing the `zephyr` folder, which in turn contains the built ELF binary.
diff --git a/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/README.md b/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/README.md
new file mode 100644
index 000000000..0d1b010ad
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/TinyML-automation/flash-tinyml-binary](https://docs.mlcommons.org/cm4mlops/scripts/TinyML-automation/flash-tinyml-binary) for the documentation of this CM script.
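+
+For example, to flash a CMSIS-NN keyword-spotting binary from an existing build directory (a sketch; the build path is hypothetical):
+
+```bash
+cm run script --tags=flash,tiny,_cmsis_nn,_kws --build_dir=$HOME/zephyr-build
+```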
diff --git a/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/_cm.yaml new file mode 100644 index 000000000..42ebb7ae7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/_cm.yaml @@ -0,0 +1,38 @@ +alias: flash-tinyml-binary +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: TinyML automation +default_version: r1.0 +deps: +- tags: detect,os +- names: + - zephyr + tags: get,zephyr +- names: + - zephyr-sdk + tags: get,zephyr-sdk +- inherit_variation_tags: 'True' + skip_if_env: + CM_TINY_BUILD_DIR: + - 'on' + tags: reproduce,tiny,mlperf +input_mapping: + build_dir: CM_TINY_BUILD_DIR +local_env_keys: +- CM_* +tags: +- flash +- tiny +- mlperf +- mlcommons +uid: 98913babb43f4fcb +variations: + NRF: {} + NUCLEO: {} + ad: {} + cmsis_nn: {} + ic: {} + kws: {} + native: {} + vww: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/customize.py b/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/customize.py new file mode 100644 index 000000000..c75c8a572 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/customize.py @@ -0,0 +1,33 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + if 'CM_TINY_BUILD_DIR' not in env: + return { + 'return': 1, 'error': 'Please set CM_TINY_BUILD_DIR to the build directory of the model'} + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/run.sh b/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/run.sh new file mode 100644 index 000000000..962dc74d5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/flash-tinyml-binary/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +build_dir=${CM_TINY_BUILD_DIR} +cmd="cd ${CM_ZEPHYR_DIR}" +echo $cmd +eval $cmd +cmd="west flash --build-dir ${build_dir}" +echo $cmd +eval $cmd +test $? -eq 0 || exit 1 + diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-docs-for-all-scripts.cmd b/cmx4mlops/cmx4mlops/repo/script/generate-docs-for-all-scripts.cmd new file mode 100644 index 000000000..c21a77b71 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-docs-for-all-scripts.cmd @@ -0,0 +1 @@ +cm doc script --repos=mlcommons@cm4mlops --output_dir=.. \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/README-extra.md
new file mode 100644
index 000000000..0510432d5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/README-extra.md
@@ -0,0 +1,12 @@
+# Generate MLPerf Inference Submission Folder
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) takes in an MLPerf Inference results folder (assuming the same folder structure as produced by the MLPerf inference reference implementation) and produces a valid submission folder as required by the [MLPerf Inference submission checker](https://github.com/mlcommons/inference/blob/master/tools/submission/submission-checker.py).
+
+## How To
+```bash
+cm run script --tags=generate,mlperf-inference-submission --results_dir=[MLPERF_RESULT_DIR] --submission_dir=[SUBMISSION_FOLDER]
+```
+
+### Additional Options
+* `[--run_checker]:` Runs the MLPerf Inference submission checker on the produced submission folder
+* `[--skip_truncation]:` If set, skips the truncation of the accuracy logs (useful for testing)
+* `[--run_style]:` If set to "valid", indicates that the results folder is from a full and valid MLPerf inference run; this triggers the accuracy truncation script unless the `--skip_truncation` flag is set
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/README.md b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/README.md
new file mode 100644
index 000000000..c5eabd812
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/generate-mlperf-inference-submission](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/generate-mlperf-inference-submission) for the documentation of this CM script.
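+
+A typical end-to-end invocation combining the options above (a sketch; the two directory paths are hypothetical, and the other flags follow the input mapping in this script's `_cm.yaml`):
+
+```bash
+cm run script --tags=generate,mlperf-inference-submission \
+     --results_dir=$HOME/mlperf_results \
+     --submission_dir=$HOME/mlperf_submission \
+     --division=open --category=edge \
+     --run_checker --run_style=valid
+```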
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/_cm.yaml new file mode 100644 index 000000000..32003a1b3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/_cm.yaml @@ -0,0 +1,119 @@ +alias: generate-mlperf-inference-submission +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: MLPerf benchmark support +default_env: + CM_MLPERF_RUN_STYLE: valid + CM_MLPERF_SUBMISSION_DIR_SHARED: 'yes' + CM_RUN_MLPERF_ACCURACY: 'on' +deps: +- names: + - python + - python3 + tags: get,python3 +- names: + - inference-src + tags: mlcommons,inference,src +- tags: get,sut,system-description +- tags: install,pip-package,for-cmind-python,_package.tabulate +- tags: get,mlperf,inference,utils +- names: + - get-mlperf-results-dir + skip_if_env: + CM_MLPERF_INFERENCE_RESULTS_DIR_: + - 'on' + tags: get,mlperf,results,dir,local +- names: + - get-mlperf-submission-dir + skip_if_env: + CM_MLPERF_INFERENCE_SUBMISSION_DIR: + - 'on' + tags: get,mlperf,submission,dir +docker: + cm_repo: mlcommons@cm4mlops + cm_repo_branch: mlperf-inference + deps: + - names: get-mlperf-inference-results-dir + skip_if_env: + CM_MLPERF_INFERENCE_RESULTS_DIR_: + - 'on' + tags: get,mlperf,inference,results,dir,local + - names: get-mlperf-inference-submission-dir + skip_if_any_env: + CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR: + - 'on' + tags: get,mlperf,inference,submission,dir,local + docker_input_mapping: + results_dir: CM_MLPERF_INFERENCE_RESULTS_DIR_ + submission_base_dir: CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR + extra_run_args: ' --cap-add SYS_ADMIN' + mounts: + - ${{ CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR }}:${{ CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR + }} + - ${{ CM_MLPERF_INFERENCE_RESULTS_DIR_ }}:${{ CM_MLPERF_INFERENCE_RESULTS_DIR_ }} + os: ubuntu + os_version: '22.04' + pre_run_cmds: + - cm pull repo + real_run: false + use_host_group_id: true + use_host_user_id: true +input_mapping: + analyzer_settings_file: CM_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH + category: CM_MLPERF_SUBMISSION_CATEGORY + clean: CM_MLPERF_CLEAN_SUBMISSION_DIR + dashboard: CM_MLPERF_DASHBOARD + dashboard_wb_project: CM_MLPERF_DASHBOARD_WANDB_PROJECT + device: CM_MLPERF_DEVICE + division: CM_MLPERF_SUBMISSION_DIVISION + duplicate: CM_MLPERF_DUPLICATE_SCENARIO_RESULTS + hw_name: CM_HW_NAME + hw_notes_extra: CM_MLPERF_SUT_HW_NOTES_EXTRA + infer_scenario_results: CM_MLPERF_DUPLICATE_SCENARIO_RESULTS + power_settings_file: CM_MLPERF_POWER_SETTINGS_FILE_PATH + preprocess: CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR + preprocess_submission: CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR + results_dir: CM_MLPERF_INFERENCE_RESULTS_DIR_ + run_checker: CM_RUN_SUBMISSION_CHECKER + run_style: CM_MLPERF_RUN_STYLE + skip_truncation: CM_SKIP_TRUNCATE_ACCURACY + submission_base_dir: CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR + submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR + submitter: CM_MLPERF_SUBMITTER + sw_notes_extra: CM_MLPERF_SUT_SW_NOTES_EXTRA + tar: CM_TAR_SUBMISSION_DIR + get_platform_details: CM_GET_PLATFORM_DETAILS +post_deps: +- enable_if_env: + CM_RUN_MLPERF_ACCURACY: + - 'on' + skip_if_env: + CM_SKIP_TRUNCATE_ACCURACY: + - 'yes' + tags: accuracy,truncate,mlc +- enable_if_env: + CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR: + - 'on' + - 'True' + - 'yes' + - true + tags: preprocess,mlperf,submission +- enable_if_env: + CM_RUN_SUBMISSION_CHECKER: + - 'yes' + names: + - mlperf-inference-submission-checker + 
- submission-checker
+  tags: submission,inference,checker,mlc
+tags:
+- generate
+- submission
+- mlperf
+- mlperf-inference
+- inference
+- mlcommons
+- inference-submission
+- mlperf-inference-submission
+- mlcommons-inference-submission
+uid: 5f8ab2d0b5874d53
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/customize.py b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/customize.py
new file mode 100644
index 000000000..e6e350728
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/customize.py
@@ -0,0 +1,744 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+import json
+import shutil
+import cmind
+import sys
+from tabulate import tabulate
+import mlperf_utils
+
+
+def preprocess(i):
+    return {'return': 0}
+
+# Helper function to fill a dictionary from a JSON file
+
+
+def fill_from_json(file_path, keys, sut_info):
+    with open(file_path, 'r') as f:
+        data = json.load(f)
+        for key in keys:
+            if key in data and (
+                    sut_info[key] is None or sut_info[key] == "default"):
+                sut_info[key] = data[key]
+            elif key in data and sut_info[key] != data[key]:
+                return -1  # signal a mismatch in the value of a key
+        return sut_info
+
+# Helper function to check whether all the keys (SUT information) are assigned
+
+
+def check_dict_filled(keys, sut_info):
+    for key in keys:
+        if key in sut_info and sut_info[key] is None:
+            return False
+    return True
+
+# Check whether the submitted model name belongs to the standard
+# model names for MLPerf Inference
+
+
+def model_in_valid_models(model, mlperf_version):
+    import submission_checker as checker
+    config = checker.MODEL_CONFIG
+
+    if model not in config[mlperf_version]['models']:
+        internal_model_name = config[mlperf_version]["model_mapping"].get(
+            model, '')  # resnet50 -> resnet
+        if internal_model_name == '':
+            # Indicate failure with no internal model name
+            return (False, None)
+        else:
+            # Indicate success with internal model name
+            return (True, internal_model_name)
+    else:
+        return (True, model)
+
+
+def generate_submission(env, state, inp, submission_division):
+
+    # Save current user directory
+    cur_dir = os.getcwd()
+
+    if env.get('CM_MLPERF_INFERENCE_RESULTS_DIR_', '') == '':
+        results_dir = os.path.join(
+            env['CM_MLPERF_INFERENCE_RESULTS_DIR'],
+            f"{env['CM_MLPERF_RUN_STYLE']}_results")
+    else:
+        results_dir = env['CM_MLPERF_INFERENCE_RESULTS_DIR_']
+
+    mlperf_path = env['CM_MLPERF_INFERENCE_SOURCE']
+    submission_checker_dir = os.path.join(mlperf_path, "tools", "submission")
+    sys.path.append(submission_checker_dir)
+
+    if env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '') == '':
+        from pathlib import Path
+        user_home = str(Path.home())
+        env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] = os.path.join(
+            user_home, "mlperf_submission")
+
+    submission_dir = env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '')
+    if submission_dir == '':
+        submission_base_dir = env.get(
+            'CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR', '')
+        if submission_base_dir == '':
+            return {'return': 1, 'error': f"Both 
CM_MLPERF_INFERENCE_SUBMISSION_DIR and CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR can not be empty!"} + else: + submission_dir = os.path.join( + submission_base_dir, "mlperf_inference_submission") + env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] = submission_dir + + if env.get('CM_MLPERF_CLEAN_SUBMISSION_DIR', '') != '': + print('=================================================') + print( + 'Cleaning {} ...'.format( + env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'])) + if os.path.exists(submission_dir): + shutil.rmtree(submission_dir) + print('=================================================') + + if not os.path.isdir(submission_dir): + os.makedirs(submission_dir) + + if str(env.get('CM_MLPERF_SUBMISSION_DIR_SHARED', '') + ).lower() in ["yes", "true", "1"]: + os.chmod(submission_dir, 0o2775) + + print('* MLPerf inference submission dir: {}'.format(submission_dir)) + print('* MLPerf inference results dir: {}'.format(results_dir)) + results = [ + f for f in os.listdir(results_dir) if not os.path.isfile( + os.path.join( + results_dir, + f))] + + system_meta_default = state['CM_SUT_META'] + + # set pytorch as the default framework + if system_meta_default['framework'] == '': + system_meta_default['framework'] = "pytorch" + + system_meta_tmp = {} + if 'CM_MLPERF_SUBMISSION_SYSTEM_TYPE' in env: + system_meta_tmp['system_type'] = env['CM_MLPERF_SUBMISSION_SYSTEM_TYPE'] + + if submission_division != "": + system_meta_tmp['division'] = submission_division + division = submission_division + else: + division = system_meta_default['division'] + + if 'CM_MLPERF_SUBMISSION_CATEGORY' in env: + system_meta_tmp['system_type'] = env['CM_MLPERF_SUBMISSION_CATEGORY'].replace( + "-", ",") + + duplicate = ( + env.get( + 'CM_MLPERF_DUPLICATE_SCENARIO_RESULTS', + 'no') in [ + "yes", + "True"]) + + if division not in ['open', 'closed']: + return {'return': 1, 'error': '"division" must be "open" or "closed"'} + + print('* MLPerf inference division: {}'.format(division)) + + path_submission_root = submission_dir + path_submission_division = os.path.join(path_submission_root, division) + if not os.path.isdir(path_submission_division): + os.makedirs(path_submission_division) + + # Check submitter + if env.get('CM_MLPERF_SUBMITTER'): + submitter = env['CM_MLPERF_SUBMITTER'] + system_meta_tmp['submitter'] = submitter + else: + submitter = system_meta_default['submitter'] + env['CM_MLPERF_SUBMITTER'] = submitter + + print('* MLPerf inference submitter: {}'.format(submitter)) + + if env.get('CM_MLPERF_SUT_SW_NOTES_EXTRA', '') != '': + sw_notes = f"{system_meta_tmp['sw_notes']} {env['CM_MLPERF_SUT_SW_NOTES_EXTRA']}" + system_meta_tmp['sw_notes'] = sw_notes + + if env.get('CM_MLPERF_SUT_HW_NOTES_EXTRA', '') != '': + hw_notes = f"{system_meta_tmp['hw_notes']} {env['CM_MLPERF_SUT_HW_NOTES_EXTRA']}" + system_meta_tmp['hw_notes'] = hw_notes + + path_submission = os.path.join(path_submission_division, submitter) + if not os.path.isdir(path_submission): + os.makedirs(path_submission) + + # SUT base + system = env.get('CM_HW_NAME', 'default').replace(' ', '_') + + code_path = os.path.join(path_submission, "code") + + for res in results: + system_meta = {} + system_meta.update(system_meta_tmp) + result_path = os.path.join(results_dir, res) + # variable to check whether the sut_meta.json is present in the root + # folder + saved_system_meta_file_path = os.path.join( + result_path, 'system_meta.json') + # checks for json file containing system meta + sut_info = { + "hardware_name": None, + "implementation": None, + "device": None, + "framework": 
None, + "framework_version": "default", + "run_config": "default" + } # variable to store the system meta + + model_mapping_combined = {} # to store all the model mapping related to an SUT + + # check whether the root folder contains the sut infos + # if yes then there is no need to check for meta files inside + # individual model folders + if "cm-sut-info.json" in os.listdir(result_path): + sut_info = fill_from_json( + os.path.join( + result_path, + "cm-sut-info.json"), + sut_info.keys(), + sut_info) + if sut_info == -1: + return { + 'return': 1, 'error': f"key value mismatch. Refer the populating dictionary:\n{sut_info}\n and file {os.path.join(result_path, 'cm-sut-info.json')}"} + if check_dict_filled(sut_info.keys(), sut_info): + print( + f"sut info completely filled from {os.path.join(result_path, 'cm-sut-info.json')}!") + + # Check whether the root folder contains the model mapping file + # expects json file in the format: + # { + # custom_model1:official_model(could be any official model), + # custom_model2:official_model(could be any official model) + # } + if "model_mapping.json" in os.listdir(result_path): + with open(os.path.join(result_path, "model_mapping.json"), 'r') as f: + model_mapping_combined = json.load(f) + + # Preprocessing part. + # Even the model mapping json file is present in root directory, the folders are traversed + # and the data is updated provided not duplicated. + models = [ + f for f in os.listdir(result_path) if not os.path.isfile( + os.path.join( + result_path, f))] + if division == "open" and len(model_mapping_combined) == 0: + for model in models: + is_valid, returned_model_name = model_in_valid_models( + model, env.get('CM_MLPERF_LAST_RELEASE', 'v4.1')) + if not is_valid: + result_model_path = os.path.join(result_path, model) + scenarios = [ + f for f in os.listdir(result_model_path) if not os.path.isfile( + os.path.join( + result_model_path, f))] + for scenario in scenarios: + result_scenario_path = os.path.join( + result_model_path, scenario) + modes = [ + f for f in os.listdir(result_scenario_path) if not os.path.isfile( + os.path.join( + result_scenario_path, f))] + for mode in modes: + result_mode_path = os.path.join( + result_scenario_path, mode) + if mode == "performance": + compliance_performance_run_path = os.path.join( + result_mode_path, "run_1") + # model mapping part + tmp_model_mapping_file_path = os.path.join( + compliance_performance_run_path, "model_mapping.json") + if os.path.exists(tmp_model_mapping_file_path): + with open(tmp_model_mapping_file_path, 'r') as f: + new_model_mapping = json.load(f) + for new_custom_model in new_model_mapping: + if new_custom_model not in model_mapping_combined: + model_mapping_combined.update( + {new_custom_model: new_model_mapping[new_custom_model]}) + else: + return { + "return": 1, "error": f"model_mapping.json not found in {compliance_performance_run_path}"} + else: + if returned_model_name != model: + model_mapping_combined.update( + {model: returned_model_name}) + + if check_dict_filled(sut_info.keys(), sut_info): + system = sut_info["hardware_name"] + implementation = sut_info["implementation"] + device = sut_info["device"] + framework = sut_info["framework"].replace(" ", "_") + framework_version = sut_info["framework_version"] + run_config = sut_info["run_config"] + new_res = f"{system}-{implementation}-{device}-{framework}-{run_config}" + else: + new_res = res + + print(f"The SUT folder name for submission generation is: {new_res}") + + platform_prefix = inp.get('platform_prefix', '') + if 
platform_prefix: + sub_res = platform_prefix + "-" + new_res + else: + sub_res = new_res + + submission_path = os.path.join(path_submission, "results", sub_res) + measurement_path = os.path.join( + path_submission, "measurements", sub_res) + compliance_path = os.path.join(path_submission, "compliance", sub_res) + system_path = os.path.join(path_submission, "systems") + submission_system_path = system_path + + if not os.path.isdir(submission_system_path): + os.makedirs(submission_system_path) + system_file = os.path.join(submission_system_path, sub_res + ".json") + + # Save the model mapping json file + if model_mapping_combined: + with open(os.path.join(path_submission, "model_mapping.json"), "w") as fp: + json.dump(model_mapping_combined, fp, indent=2) + + models = [ + f for f in os.listdir(result_path) if not os.path.isfile( + os.path.join( + result_path, f))] + + results = {} + + model_platform_info_file = None + + for model in models: + results[model] = {} + platform_info_file = None + result_model_path = os.path.join(result_path, model) + submission_model_path = os.path.join(submission_path, model) + measurement_model_path = os.path.join(measurement_path, model) + compliance_model_path = os.path.join(compliance_path, model) + code_model_path = os.path.join(code_path, model) + scenarios = [ + f for f in os.listdir(result_model_path) if not os.path.isfile( + os.path.join( + result_model_path, f))] + submission_code_path = code_model_path + if not os.path.isdir(submission_code_path): + os.makedirs(submission_code_path) + if not os.path.exists(os.path.join( + submission_code_path, "README.md")): + with open(os.path.join(submission_code_path, "README.md"), mode='w') as f: + f.write("TBD") # create an empty README + + print('* MLPerf inference model: {}'.format(model)) + for scenario in scenarios: + # the system_info.txt is copied from the mode directory if + # found, else it would be looked under scenario directory + system_info_from_mode = False + results[model][scenario] = {} + result_scenario_path = os.path.join( + result_model_path, scenario) + submission_scenario_path = os.path.join( + submission_model_path, scenario) + measurement_scenario_path = os.path.join( + measurement_model_path, scenario) + compliance_scenario_path = os.path.join( + compliance_model_path, scenario) + + if duplicate and scenario == 'singlestream': + if not os.path.exists(os.path.join( + result_model_path, "offline")): + print( + 'Duplicating results from {} to offline:'.format(scenario)) + shutil.copytree( + result_scenario_path, os.path.join( + result_model_path, "offline")) + scenarios.append("offline") + if not os.path.exists(os.path.join( + result_model_path, "multistream")): + print( + 'Duplicating results from {} to multistream:'.format(scenario)) + shutil.copytree( + result_scenario_path, os.path.join( + result_model_path, "multistream")) + scenarios.append("multistream") + + modes = [ + f for f in os.listdir(result_scenario_path) if not os.path.isfile( + os.path.join( + result_scenario_path, f))] + power_run = False + + # we check for the existance of mlperf_log_summary.txt + # mlperf_log_detail.txt to consider a result folder as valid. 
+ # Rest of the checks are done later by the submission checker + files_to_check = [ + "mlperf_log_summary.txt", + "mlperf_log_detail.txt"] + if not all([os.path.exists(os.path.join( + result_scenario_path, "performance", "run_1", f)) for f in files_to_check]): + continue + + if not os.path.isdir(measurement_scenario_path): + os.makedirs(measurement_scenario_path) + + for mode in modes: + result_mode_path = os.path.join(result_scenario_path, mode) + submission_mode_path = os.path.join( + submission_scenario_path, mode) + submission_measurement_path = measurement_scenario_path + submission_compliance_path = os.path.join( + compliance_scenario_path, mode) + if mode.startswith("TEST"): + submission_results_path = submission_compliance_path + else: + submission_results_path = submission_mode_path + if os.path.exists(submission_results_path): + shutil.rmtree(submission_results_path) + + if mode == 'performance': + + if os.path.exists(os.path.join( + result_mode_path, "power")): + power_run = True + result_power_path = os.path.join( + result_mode_path, 'power') + submission_power_path = os.path.join( + submission_mode_path, 'power') + os.makedirs(submission_power_path) + power_files = [] + for f in os.listdir(result_power_path): + # Todo add required check from + # submission_checker + power_files.append(f) + for f in power_files: + shutil.copy( + os.path.join( + result_power_path, f), os.path.join( + submission_power_path, f)) + + analyzer_settings_file = env.get( + 'CM_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH', os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], "default_files", "analyzer_table.md")) + power_settings_file = env.get( + 'CM_MLPERF_POWER_SETTINGS_FILE_PATH', os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], "default_files", "power_settings.md")) + + shutil.copy( + analyzer_settings_file, os.path.join( + submission_measurement_path, "analyzer_table.md")) + shutil.copy( + power_settings_file, os.path.join( + submission_measurement_path, "power_settings.md")) + + result_ranging_path = os.path.join( + result_mode_path, 'ranging') + submission_ranging_path = os.path.join( + submission_mode_path, 'ranging') + os.makedirs(submission_ranging_path) + ranging_files = [] + for f in os.listdir(result_ranging_path): + # Todo add required check from + # submission_checker + ranging_files.append(f) + for f in ranging_files: + shutil.copy( + os.path.join( + result_ranging_path, f), os.path.join( + submission_ranging_path, f)) + + result_mode_path = os.path.join( + result_mode_path, 'run_1') + submission_results_path = os.path.join( + submission_mode_path, 'run_1') + + if not os.path.exists(saved_system_meta_file_path): + if os.path.exists(os.path.join( + result_mode_path, "system_meta.json")): + saved_system_meta_file_path = os.path.join( + result_mode_path, "system_meta.json") + else: + print("WARNING: system_meta.json was not found in the SUT root or mode directory inside the results folder. CM is automatically creating one using the system defaults. 
Please modify them as required.")
+                if os.path.exists(saved_system_meta_file_path):
+                    with open(saved_system_meta_file_path, "r") as f:
+                        saved_system_meta = json.load(f)
+                        for key in list(saved_system_meta):
+                            if saved_system_meta[key] is None or str(
+                                    saved_system_meta[key]).strip() == '':
+                                del (saved_system_meta[key])
+                        if saved_system_meta["division"] != "" and submission_division == "":
+                            system_meta["division"] = saved_system_meta["division"]
+                    # override the saved meta with the user inputs
+                    system_meta = {
+                        **saved_system_meta, **system_meta}
+                # add any missing fields from the defaults; if
+                # system_meta.json is not detected, the default one will be
+                # written
+                system_meta = {**system_meta_default, **system_meta}
+                print(system_meta)
+                # check if the framework version is in system_meta;
+                # if not, try to fill it from sut_info
+                if system_meta['framework'] == "":
+                    system_meta['framework'] = sut_info.get(
+                        'framework', '') + sut_info.get('framework_version', '')
+                if system_meta['framework'] == "":
+                    print(
+                        "WARNING: framework field could not be filled from system_meta.json or sut_info.json. This will trigger an error in the submission checker")
+
+                if not os.path.isdir(submission_results_path):
+                    os.makedirs(submission_results_path)
+
+                # if division == "closed" and not os.path.isdir(submission_compliance_path):
+                #     os.makedirs(submission_compliance_path)
+
+                user_conf_path = os.path.join(
+                    result_scenario_path, "user.conf")
+                if os.path.exists(user_conf_path):
+                    shutil.copy(
+                        user_conf_path, os.path.join(
+                            measurement_scenario_path, 'user.conf'))
+                else:
+                    user_conf_path = os.path.join(
+                        result_mode_path, "user.conf")
+                    if os.path.exists(user_conf_path):
+                        shutil.copy(
+                            user_conf_path, os.path.join(
+                                submission_measurement_path, 'user.conf'))
+                    else:
+                        if mode.lower() == "performance":
+                            return {
+                                "return": 1, "error": f"user.conf missing in both paths: {user_conf_path} and {os.path.join(result_scenario_path, 'user.conf')}"}
+
+                measurements_json_path = os.path.join(
+                    result_scenario_path, "measurements.json")
+                target_measurement_json_path = measurement_scenario_path
+                if not os.path.exists(measurements_json_path):
+                    measurements_json_path = os.path.join(
+                        result_mode_path, "measurements.json")
+                    target_measurement_json_path = submission_measurement_path
+
+                if os.path.exists(measurements_json_path):
+                    with open(measurements_json_path, "r") as f:
+                        measurements_json = json.load(f)
+                        model_precision = measurements_json.get(
+                            "weight_data_types", "fp32")
+                    shutil.copy(
+                        measurements_json_path,
+                        os.path.join(
+                            target_measurement_json_path,
+                            sub_res + '.json'))
+                    shutil.copy(
+                        measurements_json_path,
+                        os.path.join(
+                            target_measurement_json_path,
+                            'model-info.json'))
+                else:
+                    if mode.lower() == "performance":
+                        return {
+                            "return": 1, "error": f"measurements.json missing in both paths: {measurements_json_path} and {os.path.join(result_scenario_path, 'measurements.json')}"}
+
+                files = []
+                readme = False
+
+                for f in os.listdir(result_mode_path):
+                    if mode.startswith("TEST"):
+                        if f.startswith('verify_'):
+                            files.append(f)
+                        elif f == "performance":
+                            compliance_performance_run_path = os.path.join(
+                                result_mode_path, f, "run_1")
+                            if os.path.exists(
+                                    compliance_performance_run_path):
+                                target = os.path.join(
+                                    submission_results_path, "performance", "run_1")
+                                os.makedirs(target)
+                                for log_file in os.listdir(
+                                        compliance_performance_run_path):
+                                    if log_file.startswith("mlperf_"):
+                                        shutil.copy(
+                                            os.path.join(
+                                                compliance_performance_run_path, log_file),
+                                            os.path.join(
+                                                target, log_file))
+                        elif f == "accuracy":
+                            compliance_accuracy_run_path = os.path.join(
+                                result_mode_path, f)
+                            if os.path.exists(
+                                    compliance_accuracy_run_path):
+                                target = os.path.join(
+                                    submission_results_path, "accuracy")
+                                os.makedirs(target)
+                                for log_file in os.listdir(
+                                        compliance_accuracy_run_path):
+                                    if log_file.startswith(
+                                            "mlperf_log_accuracy.json") or log_file.endswith("accuracy.txt"):
+                                        shutil.copy(
+                                            os.path.join(
+                                                compliance_accuracy_run_path, log_file), os.path.join(
+                                                target, log_file))
+                    else:
+                        if f.startswith('mlperf_') and not f.endswith(
+                                'trace.json'):
+                            files.append(f)
+                        elif f == "spl.txt":
+                            files.append(f)
+                        elif f in ["README.md", "README-extra.md", "cm-version-info.json", "os_info.json", "cpu_info.json", "pip_freeze.json", "system_info.txt", "cm-deps.png", "cm-deps.mmd"] and mode == "performance":
+                            shutil.copy(
+                                os.path.join(
+                                    result_mode_path, f), os.path.join(
+                                    submission_measurement_path, f))
+                            if f == "system_info.txt" and not platform_info_file:
+                                # the first system_info.txt found is taken as
+                                # the platform info file for this model and is
+                                # placed in the measurements folder for the
+                                # model when generating the final submission
+                                platform_info_file = os.path.join(
+                                    result_mode_path, f)
+                        elif f in ["console.out"]:
+                            shutil.copy(
+                                os.path.join(
+                                    result_mode_path, f), os.path.join(
+                                    submission_measurement_path, mode + "_" + f))
+
+                if mode == "accuracy":
+                    if os.path.exists(os.path.join(
+                            result_mode_path, "accuracy.txt")):
+                        files.append("accuracy.txt")
+                    if model == "stable-diffusion-xl" and os.path.exists(
+                            os.path.join(result_mode_path, "images")):
+                        shutil.copytree(
+                            os.path.join(
+                                result_mode_path, "images"), os.path.join(
+                                submission_results_path, "images"))
+
+                for f in files:
+                    print(' * ' + f)
+                    p_target = os.path.join(submission_results_path, f)
+                    shutil.copy(
+                        os.path.join(
+                            result_mode_path,
+                            f),
+                        p_target)
+
+                if os.path.exists(os.path.join(
+                        result_scenario_path, "system_info.txt")):
+                    shutil.copy(
+                        os.path.join(
+                            result_scenario_path, "system_info.txt"), os.path.join(
+                            submission_measurement_path, "system_info.txt"))
+                    platform_info_file = os.path.join(
+                        result_scenario_path, "system_info.txt")
+
+                readme_file = os.path.join(
+                    submission_measurement_path, "README.md")
+                if not os.path.exists(readme_file):
+                    with open(readme_file, mode='w') as f:
+                        f.write("TBD")  # create an empty README
+
+                readme_suffix = ""
+                result_string, result = mlperf_utils.get_result_string(
+                    env['CM_MLPERF_LAST_RELEASE'], model, scenario, result_scenario_path, power_run, sub_res, division, system_file, model_precision, env.get('CM_MLPERF_INFERENCE_SOURCE_VERSION'))
+
+                for key in result:
+                    results[model][scenario][key] = result[key]
+                with open(readme_file, mode='a') as f:
+                    f.write(result_string)
+
+            # Copy system_info.txt to the submission measurements model folder
+            # if any scenario performance run has it
+            sys_info_file = None
+
+            if os.path.exists(os.path.join(
+                    result_model_path, "system_info.txt")):
+                sys_info_file = os.path.join(
+                    result_model_path, "system_info.txt")
+            elif platform_info_file:
+                sys_info_file = platform_info_file
+
+            if sys_info_file:
+                model_platform_info_file = sys_info_file
+                shutil.copy(
+                    sys_info_file,
+                    os.path.join(
+                        measurement_model_path,
+                        "system_info.txt"))
+
+        # Copy system_info.txt to the submission measurements folder if any
+        # model performance run has it
+        sys_info_file = None
+
+        if os.path.exists(os.path.join(result_path, "system_info.txt")):
"system_info.txt") + elif model_platform_info_file: + sys_info_file = model_platform_info_file + + if sys_info_file: + shutil.copy( + sys_info_file, + os.path.join( + measurement_path, + "system_info.txt")) + else: + if env.get('CM_GET_PLATFORM_DETAILS', '') == "yes": + cm_input = {'action': 'run', + 'automation': 'script', + 'tags': 'get,platform,details', + 'env': {'CM_PLATFORM_DETAILS_FILE_PATH': os.path.join(measurement_path, "system_info.txt")}, + 'quiet': True + } + r = cmind.access(cm_input) + if r['return'] > 0: + return r + + with open(system_file, "w") as fp: + json.dump(system_meta, fp, indent=2) + + result_table, headers = mlperf_utils.get_result_table(results) + + print(tabulate(result_table, headers=headers, tablefmt="pretty")) + + sut_readme_file = os.path.join(measurement_path, "README.md") + with open(sut_readme_file, mode='w') as f: + f.write(tabulate(result_table, headers=headers, tablefmt="github")) + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + state = i['state'] + inp = i['input'] + + submission_divisions = [] + + if env.get('CM_MLPERF_SUBMISSION_DIVISION', '') in [ + "open-closed", "closed-open"]: + submission_divisions = ["open", "closed"] + elif env.get('CM_MLPERF_SUBMISSION_DIVISION', '') != '': + submission_divisions.append(env['CM_MLPERF_SUBMISSION_DIVISION']) + + # if submission division is not assigned, default value would be taken in + # submission_generation function + if env.get('CM_MLPERF_SUBMISSION_DIVISION', '') == '': + r = generate_submission(env, state, inp, submission_division="") + else: + for submission_division in submission_divisions: + r = generate_submission(env, state, inp, submission_division) + if r['return'] > 0: + return r + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/default_files/analyzer_table.md b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/default_files/analyzer_table.md new file mode 100644 index 000000000..fee88895c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/default_files/analyzer_table.md @@ -0,0 +1,3 @@ +| Vendor | Model | Firmware | Config | Interface | Wiring/topology | Number of channels used | Which channel(s) | +|----------|--------|----------|-----------------|-----------|-----------------|-------------------------|------------------| +| Yokogawa | WT310E | 1.04 | Single channel | USB | 1P2W | 1 | 1 | diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/default_files/power_settings.md b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/default_files/power_settings.md new file mode 100644 index 000000000..a00bcf992 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/default_files/power_settings.md @@ -0,0 +1 @@ +No special setting for power management is done. Out-of-the-box OS settings are used. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/sample-cm-sut-info.json b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/sample-cm-sut-info.json new file mode 100644 index 000000000..3cc137038 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-submission/sample-cm-sut-info.json @@ -0,0 +1,7 @@ +{ + "hardware_name": "VivoBook-ASUSLaptop-X515UA-M515UA", + "implementation": "reference", + "device": "cpu", + "framework": "pytorch_v2.4.0", + "run_config": "default" +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/README.md b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/README.md new file mode 100644 index 000000000..1e1ada047 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/generate-mlperf-inference-user-conf](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/generate-mlperf-inference-user-conf) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/_cm.yaml new file mode 100644 index 000000000..99fc35995 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/_cm.yaml @@ -0,0 +1,96 @@ +# Identification of this CM script +alias: generate-mlperf-inference-user-conf +uid: 3af4475745964b93 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "MLPerf benchmark support" + +developers: "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin)" + +# User-friendly tags to find this CM script +tags: + - generate + - mlperf + - inference + - user-conf + - inference-user-conf + +# Default environment +default_env: + CM_MLPERF_LOADGEN_MODE: accuracy + CM_MLPERF_LOADGEN_SCENARIO: Offline + CM_OUTPUT_FOLDER_NAME: test_results + CM_MLPERF_RUN_STYLE: test + CM_TEST_QUERY_COUNT: '10' + CM_FAST_FACTOR: '5' + CM_MLPERF_QUANTIZATION: off + CM_MLPERF_RESULTS_DIR_SHARED: yes + +docker: + real_run: False + +# Map script inputs to environment variables +input_mapping: + count: CM_MLPERF_LOADGEN_QUERY_COUNT + hw_name: CM_HW_NAME + mode: CM_MLPERF_LOADGEN_MODE + num_threads: CM_NUM_THREADS + output_dir: OUTPUT_BASE_DIR + power: CM_MLPERF_POWER + regenerate_files: CM_REGENERATE_MEASURE_FILES + rerun: CM_RERUN + scenario: CM_MLPERF_LOADGEN_SCENARIO + test_query_count: CM_TEST_QUERY_COUNT + target_qps: CM_MLPERF_LOADGEN_TARGET_QPS + target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY + offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + server_target_qps: CM_MLPERF_LOADGEN_SERVER_TARGET_QPS + singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + 
multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + performance_sample_count: CM_MLPERF_PERFORMANCE_SAMPLE_COUNT + +# Env keys which are exposed to higher level scripts +new_env_keys: + - CM_MLPERF_* + - CM_LOGS_DIR + - CM_HW_* + - CM_SUT_* + - CM_MAX_EXAMPLES + +new_state_keys: + - CM_SUT_* + +# Dependencies on other CM scripts +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Detect/install python + - tags: get,python + names: + - python + - python3 + + - tags: get,mlperf,results,dir,local + names: + - get-mlperf-results-dir + skip_if_env: + OUTPUT_BASE_DIR: + - "on" + + ######################################################################## + # Install MLPerf inference dependencies + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + + # Get SUT configs (System Under Test) + - tags: get,sut,configs diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/customize.py b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/customize.py new file mode 100644 index 000000000..6fc9a3bb4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-inference-user-conf/customize.py @@ -0,0 +1,587 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import json +import shutil +import subprocess +import cmind as cm +import sys + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + script_path = i['run_script_input']['path'] + + rerun = True if env.get("CM_RERUN", "") != '' else False + + env['CM_MLPERF_SKIP_RUN'] = env.get('CM_MLPERF_SKIP_RUN', "no") + + mlperf_path = env['CM_MLPERF_INFERENCE_SOURCE'] + submission_checker_dir = os.path.join(mlperf_path, "tools", "submission") + sys.path.append(submission_checker_dir) + + version = env.get('CM_MLPERF_INFERENCE_VERSION', "4.1") + + required_files = [] + required_files = get_checker_files() + + if 'CM_MLPERF_LOADGEN_SCENARIO' not in env: + env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline" + + if 'CM_MLPERF_LOADGEN_MODE' not in env: + print("\nNo mode given. 
Using accuracy as default\n")
+        env['CM_MLPERF_LOADGEN_MODE'] = "accuracy"
+
+    if env.get('OUTPUT_BASE_DIR', '') == '':
+        env['OUTPUT_BASE_DIR'] = env.get(
+            'CM_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd())
+
+    if 'CM_NUM_THREADS' not in env:
+        if 'CM_MINIMIZE_THREADS' in env:
+            env['CM_NUM_THREADS'] = str(int(env['CM_HOST_CPU_TOTAL_CORES']) //
+                                        (int(env.get('CM_HOST_CPU_SOCKETS', '1')) * int(env.get('CM_HOST_CPU_TOTAL_CORES', '1'))))
+        else:
+            env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1')
+
+    print("Using MLCommons Inference source from '" +
+          env['CM_MLPERF_INFERENCE_SOURCE'] + "'")
+
+    if 'CM_MLPERF_CONF' not in env:
+        env['CM_MLPERF_CONF'] = os.path.join(
+            env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf")
+
+    RUN_CMD = ""
+    state['RUN'] = {}
+
+    scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
+    state['RUN'][scenario] = {}
+
+    model_full_name = env.get('CM_ML_MODEL_FULL_NAME', env['CM_MODEL'])
+
+    if model_full_name != env['CM_MODEL']:
+        if 'model_mappings' not in state['CM_SUT_CONFIG']:
+            state['CM_SUT_CONFIG']['model_mappings'] = {}
+        state['CM_SUT_CONFIG']['model_mappings'][model_full_name] = env['CM_MODEL']
+
+    if model_full_name not in i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']]:
+        i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']][model_full_name] = {}
+
+    if scenario not in i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']
+                                                   ][model_full_name]:
+        i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']
+                                    ][model_full_name][scenario] = {}
+
+    conf = i['state']['CM_SUT_CONFIG'][env['CM_SUT_NAME']
+                                       ][model_full_name][scenario]
+
+    mode = env['CM_MLPERF_LOADGEN_MODE']
+
+    user_conf = ''
+    if env['CM_MLPERF_RUN_STYLE'] == "fast":
+        fast_factor = int(env['CM_FAST_FACTOR'])
+    else:
+        fast_factor = 1
+
+    ml_model_name = env['CM_MODEL']
+    if 'bert' in ml_model_name:
+        ml_model_name = "bert"
+    if 'dlrm' in ml_model_name:
+        ml_model_name = "dlrm-v2"
+    if '3d-unet' in ml_model_name:
+        ml_model_name = "3d-unet"
+    if 'gptj' in ml_model_name:
+        ml_model_name = "gptj"
+    if 'llama2-70b' in ml_model_name:
+        ml_model_name = "llama2-70b"
+
+    query_count = None
+
+    value = None
+    if scenario in ['Offline', 'Server']:
+        metric = "target_qps"
+        tolerance = 1.01
+        # value = env.get('CM_MLPERF_LOADGEN_SERVER_TARGET_QPS') if scenario == "Server" else env.get('CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS')
+        value = env.get('CM_MLPERF_LOADGEN_TARGET_QPS')
+    elif scenario in ['SingleStream', 'MultiStream']:
+        metric = "target_latency"
+        value = env.get('CM_MLPERF_LOADGEN_TARGET_LATENCY')
+        if value:
+            if scenario == "SingleStream" and (
+                    1000 / float(value) * 660 < 100):
+                env['CM_MLPERF_USE_MAX_DURATION'] = 'no'
+            elif scenario == "MultiStream" and (1000 / float(value) * 660 < 662):
+                env['CM_MLPERF_USE_MAX_DURATION'] = 'no'
+        if env.get('CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"] and env.get(
+                'CM_MLPERF_USE_MAX_DURATION', "yes").lower() not in ["no", "false", "0"]:
+            tolerance = 0.4  # much lower because we have max_duration
+        else:
+            tolerance = 0.9
+    else:
+        return {'return': 1, 'error': 'Invalid scenario: {}'.format(scenario)}
+
+    if value:
+        metric_value = value
+        conf[metric] = value
+    else:
+        if metric in conf:
+            print(
+                "Original configuration value {} {}".format(
+                    conf[metric], metric))
+            metric_value = str(
+                float(
+                    conf[metric]) *
+                tolerance)  # some tolerance
+            print(
+                "Adjusted configuration value {} {}".format(
+                    metric_value, metric))
+        else:
+            # if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes":
+            if metric == "target_qps":
+                if 
env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": + print("In find performance mode: using 1 as target_qps") + else: + print("No target_qps specified. Using 1 as target_qps") + conf[metric] = 1 + if metric == "target_latency": + if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": + print("In find performance mode: using 0.5ms as target_latency") + else: + print("No target_latency specified. Using default") + if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() in ["no", "false", "0"] or env.get( + 'CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() in ["yes", "1", "true"]: + # Total number of queries needed is a multiple of dataset + # size. So we dont use max_duration and so we need to be + # careful with the input latency + if '3d-unet' in env['CM_MODEL']: + conf[metric] = 400 + elif 'gptj' in env['CM_MODEL']: + conf[metric] = 1000 + else: + conf[metric] = 100 + else: + conf[metric] = 0.5 + metric_value = conf[metric] + # else: + # return {'return': 1, 'error': f"Config details missing for + # SUT:{env['CM_SUT_NAME']}, Model:{env['CM_MODEL']}, Scenario: + # {scenario}. Please input {metric} value"} + + # Pass the modified performance metrics to the implementation + if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": + if metric == "target_latency" and env.get( + 'CM_MLPERF_LOADGEN_TARGET_LATENCY', '') == '': + env['CM_MLPERF_LOADGEN_TARGET_LATENCY'] = conf[metric] + elif metric == "target_qps" and env.get('CM_MLPERF_LOADGEN_TARGET_QPS', '') == '': + env['CM_MLPERF_LOADGEN_TARGET_QPS'] = conf[metric] + + if env['CM_MLPERF_RUN_STYLE'] == "fast": + if scenario == "Offline": + metric_value = float(metric_value) / fast_factor + if scenario in ["SingleStream", "MultiStream"]: + metric_value = float(metric_value) * fast_factor + + elif env['CM_MLPERF_RUN_STYLE'] == "test": + if scenario == "Offline": + metric_value = float(env.get('CM_MLPERF_INFERENCE_TEST_QPS', 1)) + if scenario in ["SingleStream"]: + metric_value = 1000 + + elif env['CM_MLPERF_RUN_STYLE'] == "valid": + if scenario == "Offline": + required_min_queries_offline = {} + required_min_queries_offline = get_required_min_queries_offline( + env['CM_MODEL'], version) + + if mode == "compliance" and scenario == "Server": # Adjust the server_target_qps + test = env.get("CM_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01") + if test == "TEST01": + metric_value = str( + float(metric_value) * + float( + env.get( + "CM_MLPERF_TEST01_SERVER_ADJUST_FACTOR", + 0.96))) + # if test == "TEST05": + # metric_value = str(float(metric_value) * float(env.get("CM_MLPERF_TEST05_SERVER_ADJUST_FACTOR", 0.97))) + if test == "TEST04": + metric_value = str( + float(metric_value) * + float( + env.get( + "CM_MLPERF_TEST04_SERVER_ADJUST_FACTOR", + 0.97))) + + conf[metric] = metric_value + user_conf += ml_model_name + "." + scenario + \ + "." 
+ metric + " = " + str(metric_value) + "\n" + + if env.get('CM_MLPERF_PERFORMANCE_SAMPLE_COUNT', '') != '': + performance_sample_count = env['CM_MLPERF_PERFORMANCE_SAMPLE_COUNT'] + user_conf += ml_model_name + ".*.performance_sample_count_override = " + \ + performance_sample_count + "\n" + + log_mode = mode + if 'CM_MLPERF_POWER' in env and mode == "performance": + log_mode = "performance_power" + + env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'] = os.path.join( + env['OUTPUT_BASE_DIR'], env['CM_OUTPUT_FOLDER_NAME']) + + sut_name = env.get( + 'CM_SUT_NAME', + env['CM_MLPERF_BACKEND'] + + "-" + + env['CM_MLPERF_DEVICE']) + OUTPUT_DIR = os.path.join(env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name, + model_full_name, scenario.lower(), mode) + + env['CM_MLPERF_INFERENCE_RESULTS_SUT_PATH'] = os.path.join( + env['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'], sut_name) + + if 'CM_MLPERF_POWER' in env and mode == "performance": + env['CM_MLPERF_POWER_LOG_DIR'] = os.path.join(OUTPUT_DIR, "tmp_power") + + if mode == "accuracy": + pass + elif mode == "performance": + OUTPUT_DIR = os.path.join(OUTPUT_DIR, "run_1") + elif mode == "compliance": + test = env.get("CM_MLPERF_LOADGEN_COMPLIANCE_TEST", "TEST01") + OUTPUT_DIR = os.path.join( + env['OUTPUT_BASE_DIR'], + env['CM_OUTPUT_FOLDER_NAME'], + sut_name, + model_full_name, + scenario.lower(), + test) + if test == "TEST01": + audit_path = os.path.join(test, ml_model_name) + else: + audit_path = test + + audit_full_path = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "compliance", + "nvidia", + audit_path, + "audit.config") + env['CM_MLPERF_INFERENCE_AUDIT_PATH'] = audit_full_path + # copy the audit conf to the run directory incase the implementation is + # not supporting the audit-conf path + if not os.path.exists(OUTPUT_DIR): + os.makedirs(OUTPUT_DIR) + shutil.copyfile( + audit_full_path, + os.path.join( + OUTPUT_DIR, + "audit.config")) + + env['CM_MLPERF_OUTPUT_DIR'] = OUTPUT_DIR + env['CM_LOGS_DIR'] = OUTPUT_DIR + env['CM_MLPERF_LOADGEN_LOGS_DIR'] = OUTPUT_DIR + + if mode == "accuracy": + output_dir = env['CM_MLPERF_OUTPUT_DIR'] + env['CM_MLPERF_ACCURACY_RESULTS_DIR'] = output_dir + else: + env['CM_MLPERF_ACCURACY_RESULTS_DIR'] = '' + + run_exists = run_files_exist(log_mode, OUTPUT_DIR, required_files, env) + + if 'CM_MLPERF_POWER' in env and env.get( + 'CM_MLPERF_SHORT_RANGING_RUN', '') != 'no' and env['CM_MLPERF_RUN_STYLE'] == "valid" and mode == "performance": + short_ranging = True + else: + short_ranging = False + + if short_ranging: + import copy + ranging_user_conf = copy.deepcopy(user_conf) + ranging_user_conf += ml_model_name + "." + \ + scenario + ".min_duration = 300000" + "\n" + + if env['CM_MLPERF_RUN_STYLE'] == "test": + max_duration_test_s = int(env.get('CM_MLPERF_MAX_DURATION_TEST', 30)) + max_duration_test = str(max_duration_test_s * 1000) # in milliseconds + query_count = int(env.get('CM_TEST_QUERY_COUNT', 5)) + min_query_count = int( + env.get( + 'CM_MLPERF_INFERENCE_MIN_QUERY_COUNT', + query_count)) + max_query_count = max( + min_query_count, int( + env.get( + 'CM_MLPERF_INFERENCE_MAX_QUERY_COUNT', query_count))) + user_conf += ml_model_name + "." + scenario + \ + ".max_query_count = " + str(max_query_count) + "\n" + user_conf += ml_model_name + "." + scenario + \ + ".min_query_count = " + str(min_query_count) + "\n" + user_conf += ml_model_name + "." + scenario + ".min_duration = 0" + "\n" + user_conf += ml_model_name + "." 
+ scenario + \ + ".sample_concatenate_permutation = 0" + "\n" + env['CM_MLPERF_MAX_QUERY_COUNT'] = max_query_count + + # max_duration is effective for all scenarios except the Offline + if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() not in [ + "no", "false", "0"]: + if scenario != "Offline": + user_conf += ml_model_name + "." + scenario + \ + f".max_duration = {max_duration_test}" + "\n" + + elif env['CM_MLPERF_RUN_STYLE'] == "fast": + user_conf += ml_model_name + "." + scenario + \ + ".sample_concatenate_permutation = 0" + "\n" + max_duration_fast_s = int(env.get('CM_MLPERF_MAX_DURATION_FAST', 120)) + max_duration_fast = str(max_duration_fast_s * 1000) # in milliseconds + if scenario == "Server": + user_conf += ml_model_name + "." + scenario + \ + f".max_duration = {max_duration_fast}" + "\n" + target_qps = conf['target_qps'] + query_count = str(int((660 / fast_factor) * (float(target_qps)))) + user_conf += ml_model_name + "." + scenario + \ + ".max_query_count = " + query_count + "\n" + env['CM_MLPERF_MAX_QUERY_COUNT'] = query_count + + else: + max_duration_valid_s = int( + env.get('CM_MLPERF_MAX_DURATION_VALID', 660)) + max_duration_valid = str( + max_duration_valid_s * + 1000) # in milliseconds + max_duration_ranging_s = int( + env.get('CM_MLPERF_MAX_DURATION_RANGING', 300)) + max_duration_ranging = str( + max_duration_ranging_s * + 1000) # in milliseconds + if scenario == "MultiStream" or scenario == "SingleStream": + if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() not in ["no", "false", "0"] and env.get( + 'CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() not in ["yes", "1", "true"]: + user_conf += ml_model_name + "." + scenario + \ + f".max_duration = {max_duration_valid}" + "\n" + elif env.get('CM_MLPERF_INFERENCE_MIN_DURATION', '') != '': + user_conf += ml_model_name + "." + scenario + ".min_duration = " + \ + env['CM_MLPERF_INFERENCE_MIN_DURATION'] + " \n" + if scenario == "MultiStream": + user_conf += ml_model_name + "." + scenario + ".min_query_count = " + \ + env.get( + 'CM_MLPERF_INFERENCE_MULTISTREAM_MIN_QUERY_COUNT', + "662") + "\n" + if short_ranging: + ranging_user_conf += ml_model_name + "." + scenario + \ + f".max_duration = {max_duration_ranging} \n " + elif scenario == "Offline": + query_count = int(float(conf['target_qps']) * 660) + query_count = str(max(query_count, required_min_queries_offline)) + + # user_conf += ml_model_name + "." + scenario + ".max_query_count = " + str(int(query_count)+40) + "\n" + if short_ranging: + ranging_query_count = str(int(float(conf['target_qps']) * 300)) + ranging_user_conf += ml_model_name + "." + scenario + \ + ".max_query_count = " + str(ranging_query_count) + "\n" + ranging_user_conf += ml_model_name + "." 
+ scenario + ".min_query_count = 0 \n" + + if query_count: + # needed for squad accuracy checker + env['CM_MAX_EXAMPLES'] = str(query_count) + + import uuid + from pathlib import Path + key = uuid.uuid4().hex + user_conf_path = os.path.join(script_path, "tmp", key + ".conf") + user_conf_file = Path(user_conf_path) + user_conf_file.parent.mkdir(exist_ok=True, parents=True) + user_conf_file.write_text(user_conf) + + if short_ranging: + ranging_user_conf_path = os.path.join( + script_path, "tmp", "ranging_" + key + ".conf") + ranging_user_conf_file = Path(ranging_user_conf_path) + ranging_user_conf_file.write_text(ranging_user_conf) + + if (env.get('CM_MLPERF_LOADGEN_QUERY_COUNT', '') == '') and query_count and ( + (mode != "accuracy") or (env['CM_MLPERF_RUN_STYLE'] != "valid")): + env['CM_MLPERF_LOADGEN_QUERY_COUNT'] = str(query_count) + + if not run_exists or rerun: + + print("Output Dir: '" + OUTPUT_DIR + "'") + print(user_conf) + if env.get('CM_MLPERF_POWER', '') == "yes" and os.path.exists( + env.get('CM_MLPERF_POWER_LOG_DIR', '')): + shutil.rmtree(env['CM_MLPERF_POWER_LOG_DIR']) + else: + if not env.get('CM_MLPERF_COMPLIANCE_RUN_POSTPONED', False): + print("Run files exist, skipping run...\n") + env['CM_MLPERF_SKIP_RUN'] = "yes" + + if not run_exists or rerun or not measure_files_exist(OUTPUT_DIR, + required_files[4]) or env.get("CM_MLPERF_LOADGEN_COMPLIANCE", "") == "yes" or env.get("CM_REGENERATE_MEASURE_FILES", False): + + env['CM_MLPERF_TESTING_USER_CONF'] = os.path.join( + os.path.dirname(user_conf_path), key + ".conf") # user_conf_path + env['CM_MLPERF_RANGING_USER_CONF'] = os.path.join( + os.path.dirname(user_conf_path), + "ranging_" + key + ".conf") # ranging_user_conf_path for a shorter run + + if short_ranging: + env['CM_MLPERF_USER_CONF'] = r"\${CM_MLPERF_USER_CONF}" + else: + env['CM_MLPERF_USER_CONF'] = os.path.join( + os.path.dirname(user_conf_path), key + ".conf") # user_conf_path + else: + print( + f"Measure files exist at {OUTPUT_DIR}. 
Skipping regeneration...\n")
+        env['CM_MLPERF_USER_CONF'] = ''
+
+    os.makedirs(OUTPUT_DIR, exist_ok=True)
+
+    if str(env.get('CM_MLPERF_RESULTS_DIR_SHARED', '')
+           ).lower() in ["yes", "true", "1"]:
+        os.chmod(OUTPUT_DIR, 0o2775)
+
+    return {'return': 0}
+
+
+def run_files_exist(mode, OUTPUT_DIR, run_files, env):
+    import submission_checker as checker
+    from log_parser import MLPerfLog
+
+    is_valid = True
+
+    file_loc = {
+        "accuracy": 0,
+        "performance": 1,
+        "power": 2,
+        "performance_power": 3,
+        "measure": 4,
+        "compliance": 1}
+
+    required_files = run_files[file_loc[mode]]
+    if mode == "performance_power":
+        for file_ in run_files[2]:
+            file_path = os.path.join(
+                os.path.dirname(OUTPUT_DIR), "power", file_)
+            if (not os.path.exists(file_path)
+                    or os.stat(file_path).st_size == 0):
+                return False
+        # We need the performance files too in the run directory
+        required_files += run_files[1]
+
+    for file_ in required_files:
+        file_path = os.path.join(OUTPUT_DIR, file_)
+        if (not os.path.exists(file_path) or os.stat(
+                file_path).st_size == 0) and file_ != "accuracy.txt":
+            return False
+
+        if file_ == "mlperf_log_detail.txt" and "performance" in mode:
+            mlperf_log = MLPerfLog(file_path)
+            if (
+                "result_validity" not in mlperf_log.get_keys()
+                or mlperf_log["result_validity"] != "VALID"
+            ):
+                return False
+
+    if mode == "compliance":
+        # If a performance run followed the last compliance run, the
+        # compliance check needs to be redone
+        RESULT_DIR = os.path.split(OUTPUT_DIR)[0]
+        COMPLIANCE_DIR = OUTPUT_DIR
+        OUTPUT_DIR = os.path.dirname(COMPLIANCE_DIR)
+
+        # If the reference test result is invalid, don't do the compliance run
+        file_path = os.path.join(
+            RESULT_DIR,
+            "performance",
+            "run_1",
+            "mlperf_log_detail.txt")
+        mlperf_log = MLPerfLog(file_path)
+        if (
+            "result_validity" not in mlperf_log.get_keys()
+            or mlperf_log["result_validity"] != "VALID"
+        ):
+            env['CM_MLPERF_COMPLIANCE_RUN_POSTPONED'] = True
+            return True
+
+        test = env['CM_MLPERF_LOADGEN_COMPLIANCE_TEST']
+
+        SCRIPT_PATH = os.path.join(
+            env['CM_MLPERF_INFERENCE_SOURCE'],
+            "compliance",
+            "nvidia",
+            test,
+            "run_verification.py")
+        if test == "TEST06":
+            cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR} --scenario {env['CM_MLPERF_LOADGEN_SCENARIO']} --dtype int32"
+        else:
+            cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} {SCRIPT_PATH} -r {RESULT_DIR} -c {COMPLIANCE_DIR} -o {OUTPUT_DIR}"
+
+        print(cmd)
+        os.system(cmd)
+
+        is_valid = checker.check_compliance_perf_dir(COMPLIANCE_DIR)
+
+        if not is_valid and 'Stream' in env['CM_MLPERF_LOADGEN_SCENARIO']:
+            # We have the determined latency and the compliance test failed,
+            # so let's not use max_duration
+            env['CM_MLPERF_USE_MAX_DURATION'] = 'no'
+            env['CM_MLPERF_INFERENCE_MIN_DURATION'] = '990000'  # Try a longer run
+
+        return is_valid
+
+    if "power" in mode and env.get(
+            'CM_MLPERF_SKIP_POWER_CHECKS', 'no').lower() not in ["yes", "true", "on"]:
+        from power.power_checker import check as check_power_more
+        try:
+            is_valid = check_power_more(os.path.dirname(OUTPUT_DIR)) == 0
+        except BaseException:
+            is_valid = False
+        return is_valid
+
+    return is_valid
+
+
+def measure_files_exist(OUTPUT_DIR, run_files):
+    for file in run_files:
+        file_path = os.path.join(OUTPUT_DIR, file)
+        if not os.path.exists(file_path):
+            return False
+    return True
+
+
+def get_checker_files():
+    import submission_checker as checker
+
+    REQUIRED_ACC_FILES = checker.REQUIRED_ACC_FILES
+    REQUIRED_PERF_FILES = checker.REQUIRED_PERF_FILES
+    REQUIRED_POWER_FILES = checker.REQUIRED_POWER_FILES
+    REQUIRED_PERF_POWER_FILES = checker.REQUIRED_PERF_POWER_FILES
+    REQUIRED_MEASURE_FILES = checker.REQUIRED_MEASURE_FILES
+    return REQUIRED_ACC_FILES, REQUIRED_PERF_FILES, REQUIRED_POWER_FILES, REQUIRED_PERF_POWER_FILES, REQUIRED_MEASURE_FILES
+
+
+def get_required_min_queries_offline(model, version):
+
+    import submission_checker as checker
+
+    version_split = version.split(".")
+    if int(version_split[0]) < 4:
+        return 24756
+
+    REQUIRED_MIN_QUERIES = checker.OFFLINE_MIN_SPQ_SINCE_V4
+    mlperf_model = model
+    mlperf_model = mlperf_model.replace("resnet50", "resnet")
+
+    return REQUIRED_MIN_QUERIES[mlperf_model]
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/README-extra.md
new file mode 100644
index 000000000..36a0c58fc
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/README-extra.md
@@ -0,0 +1,55 @@
+# About
+
+This portable CM script runs the submission checker and generates a summary report for all Tiny MLPerf results
+using [these native scripts](https://github.com/mlcommons/submissions_tiny_v1.1/pull/51).
+
+## Usage
+
+We have tested this portable CM script on Ubuntu and Windows.
+
+Install the [MLCommons CM framework](https://github.com/mlcommons/ck/blob/master/docs/installation.md).
+
+Pull the MLCommons CK repository with automation recipes for interoperable MLOps:
+```bash
+cm pull repo mlcommons@cm4mlops --checkout=dev
+```
+
+Install repositories with raw MLPerf inference benchmark results:
+```bash
+cmr "get git repo _repo.https://github.com/mlcommons/tiny_results_v0.7" --extra_cache_tags=mlperf-tiny-results,version-0.7
+cmr "get git repo _repo.https://github.com/mlcommons/tiny_results_v1.0" --extra_cache_tags=mlperf-tiny-results,version-1.0
+```
+
+You can also add private results to compare submissions locally before they become public:
+```bash
+cmr "get git repo _repo.https://github.com/mlcommons/submissions_tiny_v1.1" --extra_cache_tags=mlperf-tiny-results,version-1.1-private
+```
+
+You can use a specific checkout/branch as follows:
+```bash
+cm run script "get git repo _repo.https://github.com/mlcommons/submissions_tiny_v1.1" \
+     --extra_cache_tags=mlperf-tiny-results,version-1.1-private,generate_final_report \
+     --depth="" \
+     --branch=generate_final_report
+```
+
+
+Now run this script:
+```bash
+cmr "generate mlperf-tiny report"
+```
+
+It will create a `summary-{TinyMLPerf version}.csv` report in your current directory.
+
+You can also specify a repository version here:
+
+```bash
+cmr "generate mlperf-tiny report" --repo_tags=1.1-private
+```
+
+These results are also available in the [public CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-tiny,all).
+
+# Contact us
+
+This project is maintained by the [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce).
+Join our [Discord server](https://discord.gg/JjWNWXKxwT) to ask questions, provide your feedback and participate in further developments.
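Stepping back to the generate-mlperf-inference-user-conf script above: all of the string concatenations in its `preprocess` function ultimately produce a plain LoadGen override file. Below is a hypothetical example of what that file could contain for a test-style ResNet50 Offline run, derived from the defaults in `_cm.yaml` (`CM_TEST_QUERY_COUNT=10`, test QPS of 1); the exact values of a real run depend on the environment.

```
resnet50.Offline.target_qps = 1.0
resnet50.Offline.max_query_count = 10
resnet50.Offline.min_query_count = 10
resnet50.Offline.min_duration = 0
resnet50.Offline.sample_concatenate_permutation = 0
```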
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/README.md b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/README.md new file mode 100644 index 000000000..0a25c3627 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-report](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-report) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/_cm.yaml new file mode 100644 index 000000000..3af0906f7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/_cm.yaml @@ -0,0 +1,42 @@ +# Identification of this CM script +alias: generate-mlperf-tiny-report +uid: 709c3f3f9b3e4783 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "MLPerf benchmark support" + +developers: "[Grigori Fursin](https://cKnowledge.org/gfursin)" + +default_env: + CM_IMPORT_TINYMLPERF_REPO_TAGS: "1.1-private" + +# User-friendly tags to find this CM script +tags: + - generate + - mlperf + - tiny + - mlperf-tiny + - report + +input_mapping: + repo_tags: CM_IMPORT_TINYMLPERF_REPO_TAGS + +# Dependencies on other CM scripts +deps: + + # Detect host OS features + - tags: detect,os + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect python3 + - tags: get,python3 + names: + - python + - python3 + + - tags: get,generic-python-lib,_xlsxwriter + - tags: get,generic-python-lib,_pandas diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/customize.py b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/customize.py new file mode 100644 index 000000000..b93ef11c4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/customize.py @@ -0,0 +1,97 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +import cmind as cm +from cmind import utils + +import os +import subprocess +import json +import shutil + + +def preprocess(i): + + env = i['env'] + + cur_dir = os.getcwd() + + # Query cache for results dirs + env_repo_tags = env.get('CM_IMPORT_TINYMLPERF_REPO_TAGS', '').strip() + xtags = '' if env_repo_tags == '' else ',version-' + env_repo_tags + + r = cm.access({'action': 'find', + 'automation': 'cache,541d6f712a6b464e', + 'tags': 'get,repo,mlperf-tiny-results' + xtags}) + if r['return'] > 0: + return r + + lst = r['list'] + + if len(lst) == 0: + return {'return': 1, 'error': 'no repository with TinyMLPerf results found'} + + for c in lst: + path = os.path.join(c.path, 'repo') + + if os.path.isdir(path): + meta = c.meta + + tags = meta['tags'] + + version = '' + for t in tags: + if t.startswith('version-'): + version = 'v' + t[8:] + break + + # Run local script + run_script_input = i['run_script_input'] + automation = i['automation'] + + env['CM_TINYMLPERF_REPO_PATH'] = path + env['CM_TINYMLPERF_CURRENT_DIR'] = cur_dir + 
env['CM_TINYMLPERF_REPO_VERSION'] = version
+
+            print('')
+            print('Repo path: {}'.format(path))
+
+            r = automation.run_native_script({'run_script_input': run_script_input,
+                                              'env': env,
+                                              'script_name': 'run_submission_checker'})
+            if r['return'] > 0:
+                return r
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    path = env['CM_TINYMLPERF_REPO_PATH']
+    cur_dir = env['CM_TINYMLPERF_CURRENT_DIR']
+    version = env['CM_TINYMLPERF_REPO_VERSION']
+
+    for ext in ['.csv', '.xlsx']:
+
+        p1 = os.path.join(path, 'summary' + ext)
+        p2 = os.path.join(cur_dir, 'summary-{}{}'.format(version, ext))
+
+        if not os.path.isfile(p1):
+            return {'return': 1, 'error': 'summary' + ext + ' file was not created'}
+
+        if os.path.isfile(p2):
+            os.remove(p2)
+
+        shutil.copy(p1, p2)
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/run_submission_checker.bat b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/run_submission_checker.bat
new file mode 100644
index 000000000..5d9a6fbaf
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/run_submission_checker.bat
@@ -0,0 +1,10 @@
+cd %CM_TINYMLPERF_REPO_PATH%
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+echo.
+%CM_PYTHON_BIN_WITH_PATH% submission_checker.py --input .
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+echo.
+%CM_PYTHON_BIN_WITH_PATH% generate_final_report.py --input summary.csv
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/run_submission_checker.sh b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/run_submission_checker.sh
new file mode 100644
index 000000000..d858c9b22
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-report/run_submission_checker.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+cd ${CM_TINYMLPERF_REPO_PATH}
+test $? -eq 0 || exit $?
+
+echo ""
+${CM_PYTHON_BIN_WITH_PATH} submission_checker.py --input .
+test $? -eq 0 || exit $?
+
+echo ""
+${CM_PYTHON_BIN_WITH_PATH} generate_final_report.py --input summary.csv
+test $? -eq 0 || exit $?
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/README-extra.md
new file mode 100644
index 000000000..6b3671619
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/README-extra.md
@@ -0,0 +1,3 @@
+# Generate MLPerf Tiny Submission Folder from a Results Directory
+
+This script is a work in progress.
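The cache lookup used in `preprocess` above is a generic CM pattern. A minimal standalone sketch under stated assumptions: the automation UID is the one used in the script itself, while the tag list is illustrative.

```python
import cmind as cm

# Illustration only: find cached artifacts by tags, as preprocess does above.
r = cm.access({'action': 'find',
               'automation': 'cache,541d6f712a6b464e',
               'tags': 'get,repo,mlperf-tiny-results,version-1.1-private'})
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'CM access failed'))

for entry in r['list']:
    print(entry.path)  # each hit is a cache entry holding a results repo
```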
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/README.md b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/README.md new file mode 100644 index 000000000..4f1a45aeb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-submission](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-submission) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/_cm.yaml new file mode 100644 index 000000000..e6f112c42 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/_cm.yaml @@ -0,0 +1,26 @@ +alias: generate-mlperf-tiny-submission +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: MLPerf benchmark support +deps: +- names: + - python + - python3 + tags: get,python3 +- tags: get,sut,system-description +post_deps: +- enable_if_env: + CM_MLPERF_RUN_STYLE: + - valid +tags: +- generate +- submission +- mlperf +- mlperf-tiny +- tiny +- mlcommons +- tiny-submission +- mlperf-tiny-submission +- mlcommons-tiny-submission +uid: 04289b9fc07b42b6 diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/customize.py b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/customize.py new file mode 100644 index 000000000..476c1e2ed --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-mlperf-tiny-submission/customize.py @@ -0,0 +1,214 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import json +import shutil + + +def preprocess(i): + return generate_submission(i) + + +############################################################################## + +def generate_submission(i): + + # Save current user directory + cur_dir = os.getcwd() + env = i['env'] + state = i['state'] + inp = i['input'] + results_dir = env['CM_MLPERF_RESULTS_DIR'] + + if 'CM_MLPERF_SUBMISSION_DIR' not in env: + env['CM_MLPERF_SUBMISSION_DIR'] = os.path.join(cur_dir, "results") + submission_dir = env['CM_MLPERF_SUBMISSION_DIR'] + if not os.path.isdir(submission_dir): + os.makedirs(submission_dir) + + print('* MLPerf tiny submission dir: {}'.format(submission_dir)) + print('* MLPerf tiny results dir: {}'.format(results_dir)) + results = [ + f for f in os.listdir(results_dir) if not os.path.isfile( + os.path.join( + results_dir, + f))] + + division = inp.get('division', 'open') + + if division not in ['open', 'closed']: + return {'return': 1, 'error': '"division" must be "open" or "closed"'} + system_meta = state['CM_SUT_META'] + division = system_meta['division'] + + print('* MLPerf tiny division: {}'.format(division)) + + path_submission_root = submission_dir + path_submission_division = os.path.join(path_submission_root, division) + if not os.path.isdir(path_submission_division): + os.makedirs(path_submission_division) + + # Check 
submitter + submitter = system_meta['submitter'] + env['CM_MLPERF_SUBMITTER'] = submitter + + print('* MLPerf tiny submitter: {}'.format(submitter)) + + path_submission = os.path.join(path_submission_division, submitter) + if not os.path.isdir(path_submission): + os.makedirs(path_submission) + + # SUT base + system = i.get('system', 'default') + + code_path = os.path.join(path_submission, "code") + for res in results: + parts = res.split("-") + backend = parts[0] + target = parts[1] + framework = backend + + print('* Target: {}'.format(target)) + print('* Framework: {}'.format(framework)) + result_path = os.path.join(results_dir, res) + platform_prefix = inp.get('platform_prefix', '') + if platform_prefix: + sub_res = platform_prefix + "-" + res + else: + sub_res = res + submission_path = os.path.join(path_submission, "results", sub_res) + measurement_path = os.path.join( + path_submission, "measurements", sub_res) + compliance_path = os.path.join(path_submission, "compliance", sub_res) + system_path = os.path.join(path_submission, "systems") + submission_system_path = system_path + if not os.path.isdir(submission_system_path): + os.makedirs(submission_system_path) + system_file = os.path.join(submission_system_path, sub_res + ".json") + with open(system_file, "w") as fp: + json.dump(system_meta, fp, indent=2) + + models = [ + f for f in os.listdir(result_path) if not os.path.isfile( + os.path.join( + result_path, f))] + for model in models: + result_model_path = os.path.join(result_path, model) + submission_model_path = os.path.join(submission_path, model) + measurement_model_path = os.path.join(measurement_path, model) + compliance_model_path = os.path.join(compliance_path, model) + code_model_path = os.path.join(code_path, model) + scenarios = [ + f for f in os.listdir(result_model_path) if not os.path.isfile( + os.path.join( + result_model_path, f))] + submission_code_path = code_model_path + if not os.path.isdir(submission_code_path): + os.makedirs(submission_code_path) + if not os.path.exists(os.path.join( + submission_code_path, "README.md")): + with open(os.path.join(submission_code_path, "README.md"), mode='w'): + pass # create an empty README + + print('* MLPerf inference model: {}'.format(model)) + for scenario in scenarios: + result_scenario_path = os.path.join( + result_model_path, scenario) + submission_scenario_path = os.path.join( + submission_model_path, scenario) + measurement_scenario_path = os.path.join( + measurement_model_path, scenario) + compliance_scenario_path = os.path.join( + compliance_model_path, scenario) + + modes = [ + f for f in os.listdir(result_scenario_path) if not os.path.isfile( + os.path.join( + result_scenario_path, f))] + for mode in modes: + result_mode_path = os.path.join(result_scenario_path, mode) + submission_mode_path = os.path.join( + submission_scenario_path, mode) + submission_results_path = submission_mode_path + submission_measurement_path = measurement_scenario_path + submission_compliance_path = os.path.join( + compliance_scenario_path, mode) + if mode == 'performance': + result_mode_path = os.path.join( + result_mode_path, 'run_1') + submission_results_path = os.path.join( + submission_mode_path, 'run_1') + if not os.path.isdir(submission_results_path): + os.makedirs(submission_results_path) + if not os.path.isdir(submission_measurement_path): + os.makedirs(submission_measurement_path) + if not os.path.isdir(submission_compliance_path): + os.makedirs(submission_compliance_path) + mlperf_inference_conf_path = os.path.join( + 
                        result_mode_path, "mlperf.conf")
+                    if os.path.exists(mlperf_inference_conf_path):
+                        shutil.copy(
+                            mlperf_inference_conf_path, os.path.join(
+                                submission_measurement_path, 'mlperf.conf'))
+                    user_conf_path = os.path.join(
+                        result_mode_path, "user.conf")
+                    if os.path.exists(user_conf_path):
+                        shutil.copy(
+                            user_conf_path, os.path.join(
+                                submission_measurement_path, 'user.conf'))
+                    measurements_json_path = os.path.join(
+                        result_mode_path, "measurements.json")
+                    if os.path.exists(measurements_json_path):
+                        shutil.copy(
+                            measurements_json_path,
+                            os.path.join(
+                                submission_measurement_path,
+                                sub_res + '.json'))
+                    files = []
+                    readme = False
+                    for f in os.listdir(result_mode_path):
+                        if f.startswith('mlperf_'):
+                            files.append(f)
+                        if f == "README.md":
+                            shutil.copy(
+                                os.path.join(
+                                    result_mode_path, f), os.path.join(
+                                    submission_measurement_path, f))
+                            readme = True
+
+                    if mode == "accuracy":
+                        if os.path.exists(os.path.join(
+                                result_mode_path, "accuracy.txt")):
+                            files.append("accuracy.txt")
+
+                    for f in files:
+                        print(' * ' + f)
+                        p_target = os.path.join(submission_results_path, f)
+                        shutil.copy(
+                            os.path.join(
+                                result_mode_path,
+                                f),
+                            p_target)
+
+                    if not readme:
+                        with open(os.path.join(submission_measurement_path, "README.md"), mode='w'):
+                            pass  # create an empty README
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/README-about.md b/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/README-about.md
new file mode 100644
index 000000000..b8cb79e7a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/README-about.md
@@ -0,0 +1 @@
+This CM script is in the draft stage.
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/README.md b/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/README.md
new file mode 100644
index 000000000..eea00d600
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/generate-nvidia-engine](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/generate-nvidia-engine) for the documentation of this CM script.
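Note that the tiny-submission generator above assumes result folder names of the form `<backend>-<target>`, which it splits to recover metadata. A small sketch of that parsing with a made-up folder name:

```python
# Illustration only: metadata recovered from a results folder name,
# mirroring the res.split("-") logic in generate_submission above.
res = "tflite-microcontroller"  # hypothetical folder name
parts = res.split("-")
backend, target = parts[0], parts[1]
framework = backend
print(backend, target, framework)  # tflite microcontroller tflite
```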
diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/_cm.yaml new file mode 100644 index 000000000..7a6852447 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/_cm.yaml @@ -0,0 +1,152 @@ +# Identification of this CM script +alias: generate-nvidia-engine +uid: 0eef9f05b272401f + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "MLPerf benchmark support" + + +# User-friendly tags to find this CM script +tags: + - generate + - engine + - mlperf + - inference + - nvidia + +# Default environment +default_env: + CM_BATCH_COUNT: '1' + CM_BATCH_SIZE: '1' + CM_LOADGEN_SCENARIO: 'Offline' + CM_GPU_COPY_STREAMS: '1' + CM_TENSORRT_WORKSPACE_SIZE: '4194304' + +# Map script inputs to environment variables +input_mapping: + output_dir: CM_MLPERF_OUTPUT_DIR + +new_env_keys: + - CM_MLPERF_* + - CM_DATASET_* + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect CUDA + - tags: get,cuda,_cudnn + + # Detect Tensorrt + - tags: get,tensorrt + + # Detect numpy + - tags: get,generic-python-lib,_numpy + + # Detect numpy + - tags: get,generic-python-lib,_pycuda + + + ######################################################################## + # Install MLPerf inference dependencies + + # Install MLPerf loadgen + - tags: get,loadgen + names: + - loadgen + + # Download MLPerf inference source + - tags: get,mlcommons,inference,src + names: + - inference-src + + # Download Nvidia Submission Code + - tags: get,nvidia,mlperf,inference,common-code + names: + - nvidia-inference-common-code + + + ######################################################################## + # Install ResNet50 model (ONNX) and ImageNet + + - enable_if_env: + CM_MODEL: + - resnet50 + names: + - imagenet-preprocessed + tags: get,dataset,preprocessed,imagenet,_NCHW + + - enable_if_env: + CM_MODEL: + - resnet50 + names: + - ml-model + - resnet50-model + tags: get,ml-model,resnet50,_onnx + + + ######################################################################## + # Install RetinaNet model (ONNX) and OpenImages + + - enable_if_env: + CM_MODEL: + - retinanet + names: + - openimages-preprocessed + tags: get,dataset,preprocessed,openimages,_validation,_NCHW + + - enable_if_env: + CM_MODEL: + - retinanet + names: + - ml-model + - retinanet-model + tags: get,ml-model,retinanet,_onnx,_fp32 + + + + +# Variations to customize dependencies +variations: + # Target devices + cpu: + group: device + default: true + env: + CM_MLPERF_DEVICE: cpu + cuda: + env: + CM_MLPERF_DEVICE: gpu + CM_MLPERF_DEVICE_LIB_NAMESPEC: cudart + + + # Reference MLPerf models + resnet50: + group: model + default: true + env: + CM_MODEL: resnet50 + + retinanet: + group: model + env: + CM_MODEL: retinanet + + batch_size.#: + env: + CM_MODEL_BATCH_SIZE: # + + copy_streams.#: + env: + CM_GPU_COPY_STREAMS: # diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/customize.py b/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/customize.py new file mode 100644 index 000000000..ce62ba7bb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/customize.py @@ -0,0 +1,48 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: 
https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + if 'CM_MODEL' not in env: + return { + 'return': 1, 'error': 'Please select a variation specifying the model to run'} + if 'CM_MLPERF_DEVICE' not in env: + return { + 'return': 1, 'error': 'Please select a variation specifying the device to run on'} + + # will later extend to other scenarios + scenarios = env['CM_LOADGEN_SCENARIO'] + cmd = " --action generate_engines " +\ + " --benchmarks " + env['CM_MODEL'] + \ + " --scenarios " + scenarios + \ + " --gpu_batch_size=" + env['CM_MODEL_BATCH_SIZE'] +\ + " --gpu_copy_streams=" + env['CM_GPU_COPY_STREAMS'] +\ + " --workspace_size=" + env['CM_TENSORRT_WORKSPACE_SIZE'] + + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/run.sh b/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/run.sh new file mode 100644 index 000000000..c5dd2d9a4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/generate-nvidia-engine/run.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +nvidia_code_path=${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH} +cd ${nvidia_code_path} +scenarios=${CM_TMP_LOADGEN_SCENARIOS} +#batchsize=$ +python3 code/main.py --action generate_engines --benchmarks resnet50 --scenarios $scenarios --gpu_batch_size=256 --gpu_copy_streams=1 --workspace_size=4194304 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/README-extra.md new file mode 100644 index 000000000..c15c6df33 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/README-extra.md @@ -0,0 +1,3 @@ +# About + +https://developer.android.com/studio#command-line-tools-only diff --git a/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/README.md b/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/README.md new file mode 100644 index 000000000..fc32018d1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-android-sdk](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-android-sdk) for the documentation of this CM script.
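For orientation, here is one way the get-android-sdk script could be invoked from Python; a minimal sketch assuming the `cmind` package and its `access()` API, with input names taken from the input_mapping in the _cm.yaml that follows (the values shown are the script defaults):

```python
import cmind

# Run the get-android-sdk script via the CM Python API (assumed here);
# extra keys map to env vars through the script's input_mapping.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,android,sdk',
                  'android_version': '30',
                  'android_ndk_version': '21.3.6528147',
                  'quiet': True})
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'CM script failed'))
```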
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/_cm.yaml new file mode 100644 index 000000000..a4da9f94f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/_cm.yaml @@ -0,0 +1,32 @@ +alias: get-android-sdk +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_env: + CM_ANDROID_BUILD_TOOLS_VERSION: 29.0.3 + CM_ANDROID_CMAKE_VERSION: 3.6.4111459 + CM_ANDROID_CMDLINE_TOOLS_URL: https://dl.google.com/android/repository/commandlinetools-${CM_ANDROID_CMDLINE_TOOLS_OS}-${CM_ANDROID_CMDLINE_TOOLS_VERSION}_latest.zip + CM_ANDROID_CMDLINE_TOOLS_VERSION: '9123335' + CM_ANDROID_NDK_VERSION: 21.3.6528147 + CM_ANDROID_VERSION: '30' +deps: +- tags: detect,os +- tags: get,java +input_mapping: + android_cmake_version: CM_ANDROID_CMAKE_VERSION + android_ndk_version: CM_ANDROID_NDK_VERSION + android_version: CM_ANDROID_VERSION + build_tools_version: CM_ANDROID_BUILD_TOOLS_VERSION + cmdline_tools_version: CM_ANDROID_CMDLINE_TOOLS_VERSION +new_env_keys: +- CM_ANDROID_HOME +- ANDROID_HOME +- ANDROID_NDK_HOME +- +PATH +tags: +- get +- android +- sdk +- android-sdk +uid: 8c5b4b83d49c441a diff --git a/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/customize.py new file mode 100644 index 000000000..aafd6d96f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/customize.py @@ -0,0 +1,194 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + platform = os_info['platform'] + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + run_script_input = i['run_script_input'] + + # Check if ANDROID_HOME is already set + android_home = os.environ.get('ANDROID_HOME', '').strip() + + # We are inside CM cache entry + cur_dir = os.getcwd() + + if android_home == '': + android_home = cur_dir + + env['CM_ANDROID_HOME'] = android_home + env['ANDROID_HOME'] = android_home + + paths = [] + + # Check SDK manager + ext = '' + host_os_for_android = 'linux' + host_os_for_ndk = 'linux-x86_64' + if platform == "windows": + host_os_for_android = 'win' + host_os_for_ndk = 'windows-x86_64' + ext = '.bat' + elif platform == "darwin": + host_os_for_android = 'mac' + + sdk_manager_file = 'sdkmanager' + ext + + print('') + + found = False + + for x in ['cmdline-tools', 'cmdline-tools' + os.sep + 'tools', 'tools']: + sdk_manager_path = os.path.join( + android_home, x, 'bin', sdk_manager_file) + if os.path.isfile(sdk_manager_path): + found = True + break + + if not found: + # Some magic for cmdline tools (need specific directory) + new_path = os.path.join(android_home, 'cmdline-tools') + if not os.path.isdir(new_path): + os.makedirs(new_path) + + os.chdir(new_path) + + cmdline_tools_version = env.get('CM_ANDROID_CMDLINE_TOOLS_VERSION', '') + + env['CM_ANDROID_CMDLINE_TOOLS_VERSION'] = cmdline_tools_version + + package_url = env['CM_ANDROID_CMDLINE_TOOLS_URL'] + 
package_url = package_url.replace( + '${CM_ANDROID_CMDLINE_TOOLS_OS}', + host_os_for_android) + package_url = package_url.replace( + '${CM_ANDROID_CMDLINE_TOOLS_VERSION}', + cmdline_tools_version) + + env['CM_ANDROID_CMDLINE_TOOLS_URL'] = package_url + + print('') + print('Downloading from {} ...'.format(package_url)) + + cm = automation.cmind + + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': package_url}) + if r['return'] > 0: + return r + + filename = r['filename'] + + print('Unzipping file {}'.format(filename)) + + r = cm.access({'action': 'unzip_file', + 'automation': 'utils,dc2743f8450541e3', + 'filename': filename, + 'strip_folders': 0}) + if r['return'] > 0: + return r + +# if os.path.isfile(filename): +# print ('Removing file {}'.format(filename)) +# os.remove(filename) + + os.rename('cmdline-tools', 'tools') + + os.chdir(cur_dir) + + sdk_manager_path = os.path.join( + android_home, + 'cmdline-tools', + 'tools', + 'bin', + sdk_manager_file) + + sdk_manager_dir = os.path.dirname(sdk_manager_path) + + env['CM_ANDROID_SDK_MANAGER_BIN'] = sdk_manager_file + env['CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH'] = sdk_manager_path + + env['CM_GET_DEPENDENT_CACHED_PATH'] = cur_dir + + paths.append(sdk_manager_dir) + + # Prepare SDK + print('Preparing Android SDK manager ...') + + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'prepare-sdk-manager'}) + if r['return'] > 0: + return r + + build_tools_version = env['CM_ANDROID_BUILD_TOOLS_VERSION'] + + path_build_tools = os.path.join( + android_home, 'build-tools', build_tools_version) + env['CM_ANDROID_BUILD_TOOLS_PATH'] = path_build_tools + paths.append(path_build_tools) + + cmake_version = env['CM_ANDROID_CMAKE_VERSION'] + + path_cmake = os.path.join(android_home, 'cmake', cmake_version, 'bin') + env['CM_ANDROID_CMAKE_PATH'] = path_cmake + paths.append(path_cmake) + + path_emulator = os.path.join(android_home, 'emulator') + env['CM_ANDROID_EMULATOR_PATH'] = path_emulator + paths.append(path_emulator) + + path_platform_tools = os.path.join(android_home, 'platform-tools') + env['CM_ANDROID_PLATFORM_TOOLS_PATH'] = path_platform_tools + paths.append(path_platform_tools) + + android_version = env['CM_ANDROID_VERSION'] + + path_platforms = os.path.join(android_home, 'platforms', android_version) + env['CM_ANDROID_PLATFORMS_PATH'] = path_platforms + + path_tools = os.path.join(android_home, 'tools') + env['CM_ANDROID_TOOLS_PATH'] = path_tools + paths.append(path_tools) + + android_ndk_version = env['CM_ANDROID_NDK_VERSION'] + + # Check Android NDK + path_ndk = os.path.join(android_home, 'ndk', android_ndk_version) + env['CM_ANDROID_NDK_PATH'] = path_ndk + env['ANDROID_NDK_HOME'] = path_ndk + + path_ndk_compiler = os.path.join( + path_ndk, + 'toolchains', + 'llvm', + 'prebuilt', + host_os_for_ndk, + 'bin') + env['CM_ANDROID_LLVM_PATH'] = path_ndk_compiler + env['CM_ANDROID_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join( + path_ndk_compiler, 'clang.exe') + paths.append(path_ndk_compiler) + + env['+PATH'] = paths + + return {'return': 0} # , 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/prepare-sdk-manager.bat b/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/prepare-sdk-manager.bat new file mode 100644 index 000000000..5b1add122 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/prepare-sdk-manager.bat @@ -0,0 +1,27 @@ +echo %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% + +call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% 
--version > tmp-ver.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +more tmp-ver.out + +call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% --licenses +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% ^ + "tools" ^ + "platform-tools" ^ + "extras;android;m2repository" ^ + "extras;google;m2repository" ^ + "extras;google;google_play_services" ^ + "build-tools;%CM_ANDROID_BUILD_TOOLS_VERSION%" +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "platforms;android-%CM_ANDROID_VERSION%" +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "cmake;%CM_ANDROID_CMAKE_VERSION%" +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +call %CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH% "ndk;%CM_ANDROID_NDK_VERSION%" +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/prepare-sdk-manager.sh b/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/prepare-sdk-manager.sh new file mode 100644 index 000000000..8613a43b1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-android-sdk/prepare-sdk-manager.sh @@ -0,0 +1,26 @@ +echo ${JAVA_HOME} +echo ${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} + +${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} --version > tmp-ver.out +cat tmp-ver.out + +${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} --licenses +test $? -eq 0 || exit 1 + +${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} \ + "tools" \ + "platform-tools" \ + "extras;android;m2repository" \ + "extras;google;m2repository" \ + "extras;google;google_play_services" \ + "build-tools;${CM_ANDROID_BUILD_TOOLS_VERSION}" +test $? -eq 0 || exit 1 + +${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "platforms;android-${CM_ANDROID_VERSION}" +test $? -eq 0 || exit 1 + +${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "cmake;${CM_ANDROID_CMAKE_VERSION}" +test $? -eq 0 || exit 1 + +${CM_ANDROID_SDK_MANAGER_BIN_WITH_PATH} "ndk;${CM_ANDROID_NDK_VERSION}" +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aocl/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-aocl/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aocl/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aocl/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-aocl/README-extra.md new file mode 100644 index 000000000..e69de29bb diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aocl/README.md b/cmx4mlops/cmx4mlops/repo/script/get-aocl/README.md new file mode 100644 index 000000000..f1eefc1c6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aocl/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/get-aocl](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/get-aocl) for the documentation of this CM script. 
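As an illustration only, the get-aocl script documented just above (its _cm.yaml follows) could be pinned to the '4.0' version it defines; a sketch assuming the `cmind` Python API:

```python
import cmind

# Detect or build AOCL, pinning the '4.0' version from the _cm.yaml below.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,lib,aocl',
                  'version': '4.0',
                  'quiet': True})
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'CM script failed'))
# New environment keys such as CM_AOCL_LIB_PATH should be returned here:
print(r.get('new_env', {}).get('CM_AOCL_LIB_PATH', ''))
```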
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aocl/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-aocl/_cm.yaml new file mode 100644 index 000000000..061d6829e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aocl/_cm.yaml @@ -0,0 +1,33 @@ +alias: get-aocl +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: 'true' +category: Compiler automation +default_version: '4.0' +deps: +- tags: get,generic,sys-util,_libmpfr-dev +- tags: get,generic-python-lib,_scons +- force_env_keys: + - CM_GIT_CHECKOUT + tags: get,git,_repo.https://github.com/amd/aocl-libm-ose +new_env_keys: +- CM_AOCL_BUILD_PATH +- CM_AOCL_SRC_PATH +- CM_AOCL_LIB_PATH +- +LD_LIBRARY_PATH +- +LIBRARY_PATH +tags: +- get +- lib +- aocl +- amd-optimized +- amd +uid: a65d3088f57d413d +variations: {} +versions: + '4.0': + env: + CM_GIT_CHECKOUT: aocl-4.0 + master: + env: + CM_GIT_CHECKOUT: master diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aocl/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-aocl/customize.py new file mode 100644 index 000000000..58c280fa7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aocl/customize.py @@ -0,0 +1,47 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['CM_AOCL_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] + env['CM_AOCL_BUILD_PATH'] = os.path.join( + env['CM_GIT_REPO_CHECKOUT_PATH'], "build") + aocl_lib_path = os.path.join( + env['CM_GIT_REPO_CHECKOUT_PATH'], + "build", + "aocl-release", + "src") + env['CM_AOCL_LIB_PATH'] = aocl_lib_path + env['+LIBRARY_PATH'] = [aocl_lib_path] if '+LIBRARY_PATH' not in env else env['+LIBRARY_PATH'] + [aocl_lib_path] + env['+LD_LIBRARY_PATH'] = [aocl_lib_path] if '+LD_LIBRARY_PATH' not in env else env['+LD_LIBRARY_PATH'] + [aocl_lib_path] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aocl/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-aocl/run.sh new file mode 100644 index 000000000..1b00dd9fd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aocl/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash +if [[ -z ${CM_GIT_REPO_CHECKOUT_PATH} ]]; then + echo "Git repository not found!" + exit 1 +fi +cd ${CM_GIT_REPO_CHECKOUT_PATH} +scons +test $? -eq 0 || exit $? + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aria2/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-aria2/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aria2/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
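The get-aocl postprocess above appends the library directory to `+LD_LIBRARY_PATH`; in CM, env keys prefixed with `+` hold lists that are later merged and exported as path-like variables. A small sketch of that convention (the helper name is ours, not CM's):

```python
import os

def append_path_list(env, key, path):
    # CM convention (as used above): '+'-prefixed keys hold lists that
    # are eventually joined with os.pathsep and exported.
    env[key] = env.get(key, []) + [path]

env = {}
append_path_list(env, '+LD_LIBRARY_PATH', '/opt/aocl/build/aocl-release/src')
print(os.pathsep.join(env['+LD_LIBRARY_PATH']))
```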
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aria2/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-aria2/README-extra.md new file mode 100644 index 000000000..40539d77f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aria2/README-extra.md @@ -0,0 +1,9 @@ +# Some commands + +```bash +cmr "get aria2" --version=1.37.0 +cmr "get aria2" --install +cmr "get aria2" --path={path to the directory with aria2} +cmr "get aria2" --input={full path to aria2} +cmr "get aria2" --shell +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aria2/README.md b/cmx4mlops/cmx4mlops/repo/script/get-aria2/README.md new file mode 100644 index 000000000..b54019784 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aria2/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-aria2](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-aria2) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aria2/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-aria2/_cm.yaml new file mode 100644 index 000000000..6fdd8bb17 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aria2/_cm.yaml @@ -0,0 +1,37 @@ +alias: get-aria2 +uid: d83419a90a0c40d0 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: Detection or installation of tools and artifacts + +input_mapping: + install: CM_FORCE_INSTALL + src: CM_ARIA2_BUILD_FROM_SRC + +deps: + - tags: detect,cpu + - tags: detect,os + +#called after preprocess from customize.py +#prehook_deps: +# - tags: print,native,hello-world + +env: + CM_REQUIRE_INSTALL: no + CM_ARIA2_DEFAULT_INSTALL_VERSION: "1.37.0" + +new_env_keys: + - CM_ARIA2_* + - +PATH + +print_env_at_the_end: + CM_ARIA2_INSTALLED_PATH: Path to the tool + +tags: +- get +- aria2 +- get-aria2 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aria2/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-aria2/customize.py new file mode 100644 index 000000000..f9fd6e5ba --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aria2/customize.py @@ -0,0 +1,146 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + # Pre-set by CM + os_info = i['os_info'] + env = i['env'] + recursion_spaces = i['recursion_spaces'] + automation = i['automation'] + run_script_input = i['run_script_input'] + + # Check if a given tool is already installed + file_name_core = 'aria2c' + file_name = file_name_core + \ + '.exe' if os_info['platform'] == 'windows' else file_name_core + + force_install = env.get('CM_FORCE_INSTALL', False) == True + + if not force_install: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_ARIA2_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + if r['return'] == 16: + # Not found, try install + force_install = True + else: + return r + + # Force install + if 
force_install: + # Attempt to run installer + version = env.get('CM_VERSION', '') + if version == '': + version = env['CM_ARIA2_DEFAULT_INSTALL_VERSION'] + + if os_info['platform'] == 'windows': + archive = 'aria2-{}-win-64bit-build1' + ext = '.zip' + ext2 = '' + else: + archive = 'aria2-{}' + ext = '.tar.bz2' + ext2 = '.tar' + + archive = archive.format(version) + archive_with_ext = archive + ext + + env['CM_ARIA2_DOWNLOAD_DIR'] = archive + + env['CM_ARIA2_DOWNLOAD_FILE'] = archive_with_ext + if ext2 != '': + env['CM_ARIA2_DOWNLOAD_FILE2'] = archive + ext2 + + url = 'https://github.com/aria2/aria2/releases/download/release-{}/{}'.format( + version, archive_with_ext) + env['CM_ARIA2_DOWNLOAD_URL'] = url + + print('URL to download ARIA2: {}'.format(url)) + + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'install'}) + if r['return'] > 0: + return r + + if os_info['platform'] == 'windows' or env.get( + 'CM_ARIA2_BUILD_FROM_SRC', '').lower() == 'true': + install_path = os.path.join(os.getcwd(), archive) + + path_to_file = os.path.join(install_path, file_name) + if not os.path.isfile(path_to_file): + return {'return': 1, + 'error': 'file not found: {}'.format(path_to_file)} + + env['CM_ARIA2_BIN_WITH_PATH'] = path_to_file + env['CM_ARIA2_INSTALLED_TO_CACHE'] = 'yes' + else: + path_to_bin = r['env_tmp'].get('CM_ARIA2_BIN_WITH_PATH', '') + env['CM_ARIA2_BIN_WITH_PATH'] = path_to_bin + + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_ARIA2_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + return r + + return {'return': 0} + + +def detect_version(i): + env = i['env'] + + r = i['automation'].parse_version({'match_text': r'aria2 version\s*([\d.]+)', + 'group_number': 1, + 'env_key': 'CM_ARIA2_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + + version = r['version'] + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} + + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] > 0: + return r + + version = r['version'] + found_file_path = env['CM_ARIA2_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + + env['CM_ARIA2_INSTALLED_PATH'] = found_path + + if env.get('CM_ARIA2_INSTALLED_TO_CACHE', '') == 'yes': + env['+PATH'] = [env['CM_ARIA2_INSTALLED_PATH']] + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aria2/install.bat b/cmx4mlops/cmx4mlops/repo/script/get-aria2/install.bat new file mode 100644 index 000000000..6255f0caf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aria2/install.bat @@ -0,0 +1,9 @@ +echo. + +del /Q /S %CM_ARIA2_DOWNLOAD_FILE% + +wget --no-check-certificate %CM_ARIA2_DOWNLOAD_URL% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +unzip -o -q %CM_ARIA2_DOWNLOAD_FILE% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aria2/install.sh b/cmx4mlops/cmx4mlops/repo/script/get-aria2/install.sh new file mode 100644 index 000000000..d9424732d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aria2/install.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +echo "" + +if [[ "${CM_ARIA2_BUILD_FROM_SRC}" == "True" ]]; then + + echo "Building from sources ..." 
+ echo "" + + rm -rf ${CM_ARIA2_DOWNLOAD_FILE} + rm -rf ${CM_ARIA2_DOWNLOAD_FILE2} + + wget --no-check-certificate ${CM_ARIA2_DOWNLOAD_URL} + test $? -eq 0 || exit $? + + bzip2 -d ${CM_ARIA2_DOWNLOAD_FILE} + test $? -eq 0 || exit $? + + tar xvf ${CM_ARIA2_DOWNLOAD_FILE2} + test $? -eq 0 || exit $? + + cd ${CM_ARIA2_DOWNLOAD_DIR} + test $? -eq 0 || exit $? + + ./configure --prefix=$PWD/bin + test $? -eq 0 || exit $? + + make + test $? -eq 0 || exit $? + + make install + test $? -eq 0 || exit $? + +else + echo "Installing binary via sudo ..." + echo "" + + cmd="sudo ${CM_HOST_OS_PACKAGE_MANAGER} install aria2" + echo "$cmd" + + $cmd + test $? -eq 0 || exit $? + + path_to_bin=`which aria2c` + echo "CM_ARIA2_BIN_WITH_PATH=$path_to_bin" > tmp-run-env.out + +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aria2/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-aria2/run.bat new file mode 100644 index 000000000..625b7edc0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aria2/run.bat @@ -0,0 +1,4 @@ +rem Detect version + +%CM_ARIA2_BIN_WITH_PATH% --version > tmp-ver.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aria2/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-aria2/run.sh new file mode 100644 index 000000000..85ba9421a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aria2/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +# Detect version + +${CM_ARIA2_BIN_WITH_PATH} --version > tmp-ver.out +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/README-extra.md new file mode 100644 index 000000000..7c8475871 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/README-extra.md @@ -0,0 +1,9 @@ +# Get AWS CLI +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed aws-cli on the system and if not found calls the [install script for aws-cli](../script/install-aws-cli). + +## Exported Variables +* `CM_AWS_BIN_WITH_PATH` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/README.md b/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/README.md new file mode 100644 index 000000000..51fbe4536 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Cloud-automation/get-aws-cli](https://docs.mlcommons.org/cm4mlops/scripts/Cloud-automation/get-aws-cli) for the documentation of this CM script. 
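The detect_version routines in these scripts (get-aria2 above, get-aws-cli below) extract a version by matching a regex against the tool's `--version` output. A standalone sketch of the same parse, with the sample line hard-coded for illustration:

```python
import re

# First line of `aria2c --version` output (sample, for illustration):
sample = 'aria2 version 1.37.0'

m = re.search(r'aria2 version\s*([\d.]+)', sample)
version = m.group(1) if m else ''
print(version)  # -> 1.37.0
```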
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/_cm.yaml new file mode 100644 index 000000000..a8017278c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/_cm.yaml @@ -0,0 +1,20 @@ +alias: get-aws-cli +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Cloud automation +clean_files: [] +new_env_keys: +- CM_AWS_* +prehook_deps: +- enable_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + reuse_version: true + tags: install,aws-cli +tags: +- get +- aws-cli +- aws +- cli +uid: dad67944229942a3 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/customize.py new file mode 100644 index 000000000..60c7555f6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/customize.py @@ -0,0 +1,75 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'aws.exe' if os_info['platform'] == 'windows' else 'aws' + env['FILE_NAME'] = file_name + if 'CM_AWS_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_AWS_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + if r['return'] == 16: + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return': 0} + + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'aws-cli/([\d.]+)\s', + 'group_number': 1, + 'env_key': 'CM_AWS_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} + + +def postprocess(i): + env = i['env'] + + r = detect_version(i) + + if r['return'] > 0: + return r + + version = r['version'] + found_file_path = env['CM_AWS_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + env['CM_AWS_INSTALLED_PATH'] = found_path + + env['CM_AWS_CACHE_TAGS'] = 'version-' + version + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/run.sh new file mode 100644 index 000000000..3d65d9ae4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-aws-cli/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash +aws --version > tmp-ver.out +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-bazel/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-bazel/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-bazel/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
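get-aws-cli above and get-bazel below share one detect-or-install pattern: `find_artifact` returns code 16 when the binary is absent, preprocess then sets CM_REQUIRE_INSTALL=yes, and the prehook dependency in _cm.yaml runs the installer. A condensed sketch of that control flow, assuming the return-code convention shown in the surrounding code:

```python
def detect_or_schedule_install(find_artifact, env, file_name):
    # Return code 16 means "not found": schedule the install prehook
    # instead of failing, as in the customize.py files around this note.
    r = find_artifact({'file_name': file_name, 'env': env})
    if r['return'] == 16:
        env['CM_REQUIRE_INSTALL'] = 'yes'
        return {'return': 0}
    return r

# Tiny demo with a stub that always reports "not found":
env = {}
print(detect_or_schedule_install(lambda i: {'return': 16}, env, 'aws'))
print(env)  # {'CM_REQUIRE_INSTALL': 'yes'}
```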
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-bazel/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-bazel/README-extra.md new file mode 100644 index 000000000..8e11a61bc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-bazel/README-extra.md @@ -0,0 +1,9 @@ +# Get Bazel +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed bazel on the system and if not found calls the [install script for bazel](../script/install-bazel). + +## Exported Variables +* `CM_BAZEL_BIN_WITH_PATH` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-bazel/README.md b/cmx4mlops/cmx4mlops/repo/script/get-bazel/README.md new file mode 100644 index 000000000..de623278e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-bazel/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-bazel](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-bazel) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-bazel/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-bazel/_cm.yaml new file mode 100644 index 000000000..ee5b19581 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-bazel/_cm.yaml @@ -0,0 +1,19 @@ +alias: get-bazel +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +new_env_keys: +- CM_BAZEL_* +- +PATH +prehook_deps: +- enable_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + reuse_version: true + tags: install,bazel +tags: +- get +- bazel +- get-bazel +uid: eaef0be38bac493c diff --git a/cmx4mlops/cmx4mlops/repo/script/get-bazel/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-bazel/customize.py new file mode 100644 index 000000000..280c2484b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-bazel/customize.py @@ -0,0 +1,76 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'bazel.exe' if os_info['platform'] == 'windows' else 'bazel' + env['FILE_NAME'] = file_name + if 'CM_BAZEL_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_BAZEL_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + if r['return'] == 16: + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return': 0} + + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'bazel\s*([\d.]+)', + 'group_number': 1, + 'env_key': 'CM_BAZEL_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 
'version': version} + + +def postprocess(i): + env = i['env'] + + r = detect_version(i) + + if r['return'] > 0: + return r + + version = r['version'] + found_file_path = env['CM_BAZEL_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + env['CM_BAZEL_INSTALLED_PATH'] = found_path + env['+PATH'] = [found_path] + + env['CM_BAZEL_CACHE_TAGS'] = 'version-' + version + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-bazel/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-bazel/run.bat new file mode 100644 index 000000000..1e8da4b27 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-bazel/run.bat @@ -0,0 +1,2 @@ +%CM_BAZEL_BIN_WITH_PATH% --version > tmp-ver.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/get-bazel/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-bazel/run.sh new file mode 100644 index 000000000..e145f4638 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-bazel/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +bazel_bin=${CM_BAZEL_BIN_WITH_PATH} +if [[ ${CM_VERSION} == "0.26.1" ]]; then + ${bazel_bin} version |grep "Build label" |sed 's/Build label:/bazel/' > tmp-ver.out +else + ${bazel_bin} --version > tmp-ver.out +fi +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-blis/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-blis/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-blis/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-blis/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-blis/README-extra.md new file mode 100644 index 000000000..e69de29bb diff --git a/cmx4mlops/cmx4mlops/repo/script/get-blis/README.md b/cmx4mlops/cmx4mlops/repo/script/get-blis/README.md new file mode 100644 index 000000000..59cb4fe36 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-blis/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-blis](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-blis) for the documentation of this CM script. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-blis/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-blis/_cm.yaml new file mode 100644 index 000000000..8f90c9e9d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-blis/_cm.yaml @@ -0,0 +1,49 @@ +alias: get-blis +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_version: master +deps: +- force_env_keys: + - CM_GIT_CHECKOUT + names: + - blis-source-repo + tags: get,git +- tags: detect,cpu +input_description: {} +input_mapping: {} +new_env_keys: +- CM_BLIS_SRC_PATH +- +LD_LIBRARY_PATH +- CM_BLIS_INSTALL_PATH +- CM_BLIS_INC +- CM_BLIS_LIB +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- get +- lib +- blis +uid: ea6e1cf75242456c +variations: + amd: + add_deps_recursive: + blis-source-repo: + tags: _repo.https://github.com/amd/libflame.git + group: source + flame: + add_deps_recursive: + blis-source-repo: + tags: _repo.https://github.com/flame/blis.git + default: true + group: source +versions: + 0.9.0: + env: + CM_GIT_CHECKOUT: 0.9.0 + master: + env: + CM_GIT_CHECKOUT: master diff --git a/cmx4mlops/cmx4mlops/repo/script/get-blis/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-blis/customize.py new file mode 100644 index 000000000..afd22977e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-blis/customize.py @@ -0,0 +1,46 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + env['CM_BLIS_SRC_PATH'] = env['CM_GIT_CHECKOUT_PATH'] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + install_dir = os.path.join(env['CM_BLIS_SRC_PATH'], "install") + + env['CM_BLIS_INSTALL_PATH'] = install_dir + env['CM_BLIS_INC'] = os.path.join(install_dir, 'include', 'blis') + env['CM_BLIS_LIB'] = os.path.join(install_dir, 'lib', 'libblis.a') + + blis_lib_path = os.path.join(install_dir, 'lib') + + env['+LD_LIBRARY_PATH'] = [blis_lib_path] if '+LD_LIBRARY_PATH' not in env else env['+LD_LIBRARY_PATH'] + [blis_lib_path] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-blis/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-blis/run.bat new file mode 100644 index 000000000..648302ca7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-blis/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/cmx4mlops/cmx4mlops/repo/script/get-blis/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-blis/run.sh new file mode 100644 index 000000000..4c6d91d78 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-blis/run.sh @@ -0,0 +1,12 @@ +#!/bin/bash +CUR=$PWD +mkdir -p install +test $? -eq 0 || exit $? +INSTALL_DIR=$PWD/install +cd ${CM_BLIS_SRC_PATH} +./configure --prefix=$INSTALL_DIR auto +test $? -eq 0 || exit $? +make -j${CM_HOST_CPU_TOTAL_PHYSICAL_CORES} +test $? -eq 0 || exit $? +make install +test $? -eq 0 || exit $? 
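Illustration only: the `_amd` / `_flame` variations in the get-blis _cm.yaml above select which fork of BLIS is cloned; a sketch assuming the `cmind` Python API:

```python
import cmind

# Build BLIS from the AMD fork by selecting the `_amd` variation.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,lib,blis,_amd',
                  'quiet': True})
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'CM script failed'))
```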
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-brew/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-brew/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-brew/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-brew/README.md b/cmx4mlops/cmx4mlops/repo/script/get-brew/README.md new file mode 100644 index 000000000..3423fbb30 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-brew/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-brew](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-brew) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-brew/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-brew/_cm.yaml new file mode 100644 index 000000000..7da327f63 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-brew/_cm.yaml @@ -0,0 +1,19 @@ +alias: get-brew +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: Detection or installation of tools and artifacts +deps: [] +input_description: {} +input_mapping: {} +new_env_keys: [] +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- get +- brew +uid: 4a2c5eab1ccf484f +variations: {} +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-brew/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-brew/run.sh new file mode 100644 index 000000000..bdb3af4c4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-brew/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cache-dir/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-cache-dir/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cache-dir/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cache-dir/README.md b/cmx4mlops/cmx4mlops/repo/script/get-cache-dir/README.md new file mode 100644 index 000000000..7a39725e7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cache-dir/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/CM-Interface/get-cache-dir](https://docs.mlcommons.org/cm4mlops/scripts/CM-Interface/get-cache-dir) for the documentation of this CM script. 
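The get-cache-dir _cm.yaml that follows defines a wildcard variation `name.#` whose suffix lands in CM_CACHE_DIR_NAME, so distinct named cache entries can be requested. A sketch assuming the `cmind` Python API (the suffix `my-experiments` is made up):

```python
import cmind

# Request a named cache directory via the wildcard variation `_name.<suffix>`.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,cache,dir,_name.my-experiments',
                  'quiet': True})
if r['return'] == 0:
    print(r.get('new_env', {}).get('CM_CACHE_DIR', ''))
```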
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cache-dir/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-cache-dir/_cm.yaml new file mode 100644 index 000000000..ad9695f53 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cache-dir/_cm.yaml @@ -0,0 +1,27 @@ +alias: get-cache-dir +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: CM Interface +deps: [] +docker: + run: false +input_description: {} +new_env_keys: +- CM_CACHE_DIR +- <<<CM_CACHE_DIR_ENV_NAME>>> +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- get +- cache +- dir +- directory +uid: 48f4622e059b45ce +variations: + name.#: + env: + CM_CACHE_DIR_NAME: '#' +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cache-dir/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-cache-dir/customize.py new file mode 100644 index 000000000..1b42d4d31 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cache-dir/customize.py @@ -0,0 +1,42 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + cache_dir = os.getcwd() + if env.get('CM_CACHE_DIR_ENV_NAME', '') != '': + env[env['CM_CACHE_DIR_ENV_NAME']] = cache_dir + + env['CM_CACHE_DIR'] = cache_dir + env['CM_GET_DEPENDENT_CACHED_PATH'] = cache_dir + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/README.md new file mode 100644 index 000000000..60596c89e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Legacy-CK-support/get-ck-repo-mlops](https://docs.mlcommons.org/cm4mlops/scripts/Legacy-CK-support/get-ck-repo-mlops) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/_cm.yaml new file mode 100644 index 000000000..f33b2ed16 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/_cm.yaml @@ -0,0 +1,12 @@ +alias: get-ck-repo-mlops +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Legacy CK support +deps: +- tags: get,ck +tags: +- get +- ck-repo +- mlops +- ck-repo-mlops +uid: d3a619b8186e4f74 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/run.bat new file mode 100644 index 000000000..3e3239b8b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/run.bat @@ -0,0 +1 @@ +ck pull repo:mlcommons@ck-mlops diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/run.sh new file mode 100644 index 000000000..ec1267b5c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ck-repo-mlops/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +ck pull repo:mlcommons@ck-mlops + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ck/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ck/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ck/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ck/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ck/README.md new file mode 100644 index 000000000..0f8f829cf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ck/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Legacy-CK-support/get-ck](https://docs.mlcommons.org/cm4mlops/scripts/Legacy-CK-support/get-ck) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ck/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ck/_cm.yaml new file mode 100644 index 000000000..2dbb1fb66 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ck/_cm.yaml @@ -0,0 +1,10 @@ +alias: get-ck +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Legacy CK support +tags: +- get +- ck +- ck-framework +uid: 5575126797174cac diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ck/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-ck/run.bat new file mode 100644 index 000000000..75d92799e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ck/run.bat @@ -0,0 +1 @@ +pip install ck diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ck/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-ck/run.sh new file mode 100644 index 000000000..eae526fd3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ck/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +python3 -m pip install ck ${CM_CK_FRAMEWORK_INSTALL_CLI} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cl/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-cl/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cl/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cl/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-cl/README-extra.md new file mode 100644 index 000000000..796ec7113 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cl/README-extra.md @@ -0,0 +1,7 @@ +# Get Microsoft C compiler + +Example of detecting the Microsoft C compiler from Visual Studio: + +```bash +cm run script "get cl" --path="C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\bin\Hostx64\x64" +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cl/README.md b/cmx4mlops/cmx4mlops/repo/script/get-cl/README.md new file mode 100644 index 000000000..66ccfab7e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cl/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/get-cl](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/get-cl) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cl/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-cl/_cm.yaml new file mode 100644 index 000000000..2bc7741d7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cl/_cm.yaml @@ -0,0 +1,25 @@ +alias: get-cl +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +clean_files: [] +name: Detect or install Microsoft C compiler +new_env_keys: +- CM_CL_* +- CM_C_COMPILER_* +- CM_CXX_COMPILER_* +- CM_COMPILER_* +- CM_LINKER_* +- +PATH +new_state_keys: +- script_prefix +sort: 1000 +tags: +- get +- cl +- compiler +- c-compiler +- cpp-compiler +- get-cl +uid: 7dbb770faff947c0 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cl/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-cl/customize.py new file mode 100644 index 000000000..6a5620530 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cl/customize.py @@ -0,0 +1,158 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] != 'windows': + return {'return': 0} + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + automation = i['automation'] + + file_name = 'cl.exe' + + # Will check env['CM_TMP_PATH'] if it comes from an installation script + ii = {'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_CL_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces} + + rr = automation.find_artifact(ii) + if rr['return'] > 0: + # If not found in PATH, try a longer search + if rr['return'] != 16: + return rr + + if env.get('CM_INPUT', '').strip() == '' and env.get( + 'CM_TMP_PATH', '').strip() == '': + + print( + i['recursion_spaces'] + + ' Starting deep search for {} - it may take some time ...'.format(file_name)) + + paths = ['C:\\Program Files\\Microsoft Visual Studio', + 'C:\\Program Files (x86)\\Microsoft Visual Studio', + 'C:\\Program Files (x86)\\Microsoft Visual Studio 14'] + + restrict_paths = ['Hostx64\\x64'] + + r = automation.find_file_deep({'paths': 
paths, + 'file_name': file_name, + 'restrict_paths': restrict_paths}) + if r['return'] > 0: + return r + + found_paths = r['found_paths'] + + if len(found_paths) == 0: + return rr + + tmp_paths = ';'.join(found_paths) + + env['CM_TMP_PATH'] = tmp_paths + env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + ii['env'] = env + + rr = automation.find_artifact(ii) + if rr['return'] > 0: + return rr + + else: + return rr + + found_path = rr['found_path'] + + # Check vcvarsall.bat + state = i['state'] + script_prefix = state.get('script_prefix', []) + + # Attempt to find vcvars64.bat + bat_file_name = 'VC\\Auxiliary\\Build\\vcvars64.bat' + r = automation.find_file_back( + {'path': found_path, 'file_name': bat_file_name}) + if r['return'] > 0: + return r + + found_path_bat = r['found_path'] + + if found_path_bat != '': + path_to_vcvars = os.path.join(found_path_bat, bat_file_name) + + s = os_info['run_bat'].replace( + '${bat_file}', '"' + path_to_vcvars + '"') + + script_prefix.append(s) + + state['script_prefix'] = script_prefix + + env['CM_CL_BIN'] = file_name + env['CM_CL_BIN_WITH_PATH'] = os.path.join(found_path, file_name) + + # General compiler for general program compilation + env['CM_C_COMPILER_BIN'] = file_name + env['CM_C_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name) + env['CM_C_COMPILER_FLAG_OUTPUT'] = '/Fe:' + env['CM_C_COMPILER_FLAG_VERSION'] = '' + + env['CM_CXX_COMPILER_BIN'] = env['CM_C_COMPILER_BIN'] + env['CM_CXX_COMPILER_WITH_PATH'] = env['CM_C_COMPILER_WITH_PATH'] + env['CM_CXX_COMPILER_FLAG_OUTPUT'] = '/Fe:' + env['CM_CXX_COMPILER_FLAG_VERSION'] = '' + + return {'return': 0} + + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'Version\s*([\d.]+)', + 'group_number': 1, + 'env_key': 'CM_CL_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} + + +def postprocess(i): + + env = i['env'] + + r = detect_version(i) + + if r['return'] > 0: + return r + + version = r['version'] + + env['CM_CL_CACHE_TAGS'] = 'version-' + version + env['CM_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-msvc' + env['CM_COMPILER_FAMILY'] = 'MSVC' + env['CM_COMPILER_VERSION'] = env['CM_CL_VERSION'] + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cl/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-cl/run.bat new file mode 100644 index 000000000..2a5fc7c9b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cl/run.bat @@ -0,0 +1,3 @@ +"%CM_CL_BIN_WITH_PATH%" > tmp-ver.out 2>&1 +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cmake/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-cmake/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cmake/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
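get-cl's preprocess above locates vcvars64.bat by searching upward from the directory where cl.exe was found (`automation.find_file_back`). A standalone sketch of that upward search, under the assumption that it simply walks parent directories:

```python
import os

def find_file_back(path, file_name):
    # Walk up the directory tree from `path` until `file_name` is found,
    # mirroring the vcvars64.bat lookup in get-cl/customize.py above.
    while True:
        if os.path.isfile(os.path.join(path, file_name)):
            return path
        parent = os.path.dirname(path)
        if parent == path:  # reached the filesystem root
            return ''
        path = parent

print(find_file_back(os.getcwd(), 'setup.py'))  # '' if never found
```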
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cmake/README.md b/cmx4mlops/cmx4mlops/repo/script/get-cmake/README.md new file mode 100644 index 000000000..e03e1956b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cmake/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmake](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmake) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cmake/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-cmake/_cm.yaml new file mode 100644 index 000000000..ae051d22a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cmake/_cm.yaml @@ -0,0 +1,26 @@ +alias: get-cmake +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +deps: +- tags: detect,cpu +env: + CM_REQUIRE_INSTALL: 'no' +new_env_keys: +- CM_CMAKE_* +- CM_MAKE_CORES +- +PATH +prehook_deps: +- enable_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + reuse_version: true + tags: install,cmake,prebuilt +print_env_at_the_end: + CM_CMAKE_BIN_WITH_PATH: Path to the tool +tags: +- get +- cmake +- get-cmake +uid: 52bf974d791b4fc8 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cmake/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-cmake/customize.py new file mode 100644 index 000000000..c51ef5633 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cmake/customize.py @@ -0,0 +1,76 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'cmake.exe' if os_info['platform'] == 'windows' else 'cmake' + + if 'CM_CMAKE_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_CMAKE_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + if r['return'] == 16: + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return': 0} + + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'cmake version\s*([\d.]+)', + 'group_number': 1, + 'env_key': 'CM_CMAKE_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + + version = r['version'] + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} + + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] > 0: + return r + + version = r['version'] + found_file_path = env['CM_CMAKE_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + + env['CM_CMAKE_CACHE_TAGS'] = 'version-' + version + + if 'CM_HOST_CPU_TOTAL_CORES' in env: + env['CM_MAKE_CORES'] = env['CM_HOST_CPU_TOTAL_CORES'] + + return {'return': 0, 'version': version} diff --git 
a/cmx4mlops/cmx4mlops/repo/script/get-cmake/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-cmake/run.bat
new file mode 100644
index 000000000..0802ae828
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-cmake/run.bat
@@ -0,0 +1,2 @@
+%CM_CMAKE_BIN_WITH_PATH% --version > tmp-ver.out
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cmake/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-cmake/run.sh
new file mode 100644
index 000000000..6d2aeff97
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-cmake/run.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+cmake_bin=${CM_CMAKE_BIN_WITH_PATH}
+
+${cmake_bin} --version > tmp-ver.out
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/README-extra.md
new file mode 100644
index 000000000..1f052e7ea
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/README-extra.md
@@ -0,0 +1,5 @@
+# GET-CMSIS_5
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) clones the git repository of [CMSIS Version 5](https://github.com/ARM-software/CMSIS_5) and caches it in CM for reuse across other CM scripts.
+
+## Exported Variables
+1. [CMSIS_PATH](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-cmsis_5/customize.py#L23): Location in the CM cache where the CMSIS_5 git repository is cloned.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/README.md b/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/README.md
new file mode 100644
index 000000000..ddc5c99a2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmsis_5](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmsis_5) for the documentation of this CM script.
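The get-cmake customize.py above follows the detect-or-install pattern used throughout these scripts: find_artifact returning 16 means "not found", which flips CM_REQUIRE_INSTALL so the prehook dependency install,cmake,prebuilt fires; any other non-zero return is a real error and propagates. A self-contained sketch of that contract, with illustrative result dicts:

```python
# Sketch of get-cmake's handling of find_artifact results (return code 16 is
# assumed to mean "not found", as in the preprocess above).
def handle_find_result(r, env):
    if r['return'] > 0:
        if r['return'] == 16:
            env['CM_REQUIRE_INSTALL'] = 'yes'   # trigger prebuilt install prehook
            return {'return': 0}
        return r                                # real error -> propagate
    return {'return': 0}

env = {}
print(handle_find_result({'return': 16, 'error': 'cmake not found'}, env))
print(env)  # -> {'CM_REQUIRE_INSTALL': 'yes'}
```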
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/_cm.yaml new file mode 100644 index 000000000..e28a2d5aa --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/_cm.yaml @@ -0,0 +1,38 @@ +alias: get-cmsis_5 +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_env: + CM_GIT_DEPTH: '' + CM_GIT_PATCH: 'no' + CM_GIT_URL: https://github.com/ARM-software/CMSIS_5.git +default_version: custom +deps: +- tags: detect,os +new_env_keys: +- CMSIS* +tags: +- get +- cmsis +- cmsis_5 +- arm-software +uid: 2258c212b11443f5 +variations: + recurse-submodules: + env: + CM_GIT_RECURSE_SUBMODULES: --recurse-submodules + short-history: + env: + CM_GIT_DEPTH: --depth 10 +versions: + custom: + env: + CM_GIT_CHECKOUT: e5dc19182f6084de32d8dc5a22c84e01210f4995 + CM_GIT_SHA: 'yes' + develop: + env: + CM_GIT_CHECKOUT: develop + master: + env: + CM_GIT_CHECKOUT: master diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/customize.py new file mode 100644 index 000000000..732c9c26a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/customize.py @@ -0,0 +1,38 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + env = i['env'] + if 'CM_GIT_DEPTH' not in env: + env['CM_GIT_DEPTH'] = '' + if 'CM_GIT_RECURSE_SUBMODULES' not in env: + env['CM_GIT_RECURSE_SUBMODULES'] = '' + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + env['CMSIS_PATH'] = os.path.join(os.getcwd(), 'cmsis') + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/run.sh new file mode 100644 index 000000000..9093c093b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cmsis_5/run.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +CUR_DIR=$PWD +SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} + +echo "******************************************************" + +if [ ! -d "cmsis" ]; then + if [ -z ${CM_GIT_SHA} ]; then + echo "Cloning CMSIS_5 from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..." + git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} cmsis + if [ "${?}" != "0" ]; then exit 1; fi + else + echo "Cloning CMSIS_5 from ${CM_GIT_URL} with default branch and checkout ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..." 
+    git clone ${CM_GIT_RECURSE_SUBMODULES} ${CM_GIT_URL} ${CM_GIT_DEPTH} cmsis
+    if [ "${?}" != "0" ]; then exit 1; fi
+    cd cmsis
+    git checkout "${CM_GIT_CHECKOUT}"
+    if [ "${?}" != "0" ]; then exit 1; fi
+  fi
+fi
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/README.md b/cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/README.md
new file mode 100644
index 000000000..9e88a2e65
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/get-compiler-flags](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/get-compiler-flags) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/_cm.yaml
new file mode 100644
index 000000000..080020d0d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/_cm.yaml
@@ -0,0 +1,22 @@
+alias: get-compiler-flags
+automation_alias: script
+automation_uid: 5b4e0237da074764
+category: Compiler automation
+deps:
+- tags: detect,cpu
+- names:
+  - compiler
+  skip_if_env:
+    CM_C_COMPILER_BIN:
+    - 'on'
+  tags: get,compiler
+new_env_keys:
+- + CFLAGS
+- + CXXFLAGS
+- + FFLAGS
+- + LDFLAGS
+- +CM_HOST_OS_DEFAULT_INCLUDE_PATH
+tags:
+- get
+- compiler-flags
+uid: 31be8b74a69742f8
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/customize.py
new file mode 100644
index 000000000..7dc48300c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-compiler-flags/customize.py
@@ -0,0 +1,76 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+import subprocess
+
+
+def preprocess(i):
+    os_info = i['os_info']
+
+    env = i['env']
+    env['+ CFLAGS'] = []
+    env['+ CXXFLAGS'] = []
+    env['+ FFLAGS'] = []
+    env['+ LDFLAGS'] = []
+
+    # TBD: add unified flags for Windows
+    if os_info['platform'] == 'windows':
+        return {'return': 0}
+
+    if env.get("CM_FAST_COMPILATION") in ["yes", "on", "1"]:
+        DEFAULT_COMPILER_FLAGS = env.get("CM_COMPILER_FLAGS_FAST", "-O3")
+        # Note: '-flto' is deliberately not part of the fast flags since it is not always available
+        DEFAULT_LINKER_FLAGS = env.get("CM_LINKER_FLAGS_FAST", "-O3")
+    elif env.get("CM_DEBUG_COMPILATION") in ["yes", "on", "1"]:
+        DEFAULT_COMPILER_FLAGS = env.get("CM_COMPILER_FLAGS_DEBUG", "-O0")
+        DEFAULT_LINKER_FLAGS = env.get("CM_LINKER_FLAGS_DEBUG", "-O0")
+    else:
+        DEFAULT_COMPILER_FLAGS = env.get("CM_COMPILER_FLAGS_DEFAULT", "-O2")
+        DEFAULT_LINKER_FLAGS = env.get("CM_LINKER_FLAGS_DEFAULT", "-O2")
+
+    env['+ CFLAGS'] += DEFAULT_COMPILER_FLAGS.split(" ")
+ env['+ CXXFLAGS'] += DEFAULT_COMPILER_FLAGS.split(" ") + env['+ FFLAGS'] += DEFAULT_COMPILER_FLAGS.split(" ") + env['+ LDFLAGS'] += DEFAULT_LINKER_FLAGS.split(" ") + + env['+ CFLAGS'] = list(set(env['+ CFLAGS'])) + env['+ CXXFLAGS'] = list(set(env['+ CXXFLAGS'])) + env['+ FFLAGS'] = list(set(env['+ FFLAGS'])) + env['+ LDFLAGS'] = list(set(env['+ LDFLAGS'])) + + sys_cmd = "cpp -v /dev/null -o /dev/null 2>&1" + result = subprocess.check_output(sys_cmd, shell=True).decode("utf-8") + start = False + inc_dir = [] + for out in result.split("\n"): + if "> search starts here" not in out and not start: + continue + if not start: + start = True + continue + if "End of search list" in out: + break + if 'gcc' not in out: + inc_dir.append(out.strip()) + env['+CM_HOST_OS_DEFAULT_INCLUDE_PATH'] = inc_dir + +# if env['CM_C_COMPILER_BIN'] == 'icc': +# if env['CM_CPUINFO_Vendor_ID'] == 'GenuineIntel': +# if int(env['CM_CPUINFO_CPU_family']) >= 0: +# env['+ CFLAGS'] += ["-ipo"] +# if env['CM_C_COMPILER_BIN'] == 'gcc': +# if env['CM_HOST_CPU_VENDOR_ID'] == 'AMD': +# if int(env['CM_HOST_CPU_FAMILY']) >= 0: +# env['+ CFLAGS'] += ["-march=znver2", "-flto"] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/README.md b/cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/README.md new file mode 100644 index 000000000..fa0bc47db --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/get-compiler-rust](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/get-compiler-rust) for the documentation of this CM script. 
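The include-path scrape in get-compiler-flags above runs the C preprocessor in verbose mode and keeps the directory lines printed between "search starts here" and "End of search list". A standalone sketch of that parse, assuming a GNU-style cpp on PATH (gcc prints the search list to stderr):

```python
import subprocess

# Run the preprocessor verbosely; on gcc the include search list goes to stderr.
out = subprocess.run("cpp -v /dev/null -o /dev/null",
                     shell=True, capture_output=True, text=True)
text = out.stderr + out.stdout

inc_dirs, collecting = [], False
for line in text.splitlines():
    if "search starts here" in line:
        collecting = True           # start of the search-path block
        continue
    if "End of search list" in line:
        break                       # end of the block
    if collecting and 'gcc' not in line:   # same gcc-internal-dir filter as above
        inc_dirs.append(line.strip())

print(inc_dirs)
```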
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/_cm.yaml new file mode 100644 index 000000000..ca1a372d9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/_cm.yaml @@ -0,0 +1,19 @@ +uid: 97ffbd9e537b4b59 +alias: get-compiler-rust + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: Compiler automation + +deps: + - tags: get,python3 + +new_env_keys: +- "+PATH" + +tags: +- get +- rust-compiler diff --git a/cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/customize.py new file mode 100644 index 000000000..e1560d8f4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/customize.py @@ -0,0 +1,38 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + rust_path = os.path.join(os.path.expanduser('~'), ".cargo", "bin") + env['+PATH'] = [rust_path] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/run.sh new file mode 100644 index 000000000..4651e2fd0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-compiler-rust/run.sh @@ -0,0 +1,7 @@ +CM_PYTHON_BIN=${CM_PYTHON_BIN_WITH_PATH:-python3} + +${CM_PYTHON_BIN} -m pip install --upgrade pip ${CM_PYTHON_PIP_COMMON_EXTRA} +${CM_PYTHON_BIN} -m pip install setuptools testresources wheel h5py --user --upgrade --ignore-installed ${CM_PYTHON_PIP_COMMON_EXTRA} + +curl https://sh.rustup.rs -sSf -o tmp.sh +sh tmp.sh -y diff --git a/cmx4mlops/cmx4mlops/repo/script/get-conda/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-conda/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-conda/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-conda/README.md b/cmx4mlops/cmx4mlops/repo/script/get-conda/README.md new file mode 100644 index 000000000..43f929d35 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-conda/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/get-conda](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/get-conda) for the documentation of this CM script. 
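The get-compiler-rust postprocess above illustrates the '+PATH' convention: env keys that start with '+' hold lists which CM merges into (prepends to) the corresponding process variable when later scripts run. A simplified illustration of the effect after the rustup install:

```python
import os

# get-compiler-rust exports '+PATH': [~/.cargo/bin]; CM prepends it to PATH.
new_env = {'+PATH': [os.path.join(os.path.expanduser('~'), '.cargo', 'bin')]}

merged = os.pathsep.join(
    new_env['+PATH'] + os.environ.get('PATH', '').split(os.pathsep))
print(merged.split(os.pathsep)[0])  # the cargo bin dir now resolves first
```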
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-conda/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-conda/_cm.yaml new file mode 100644 index 000000000..8e34801fa --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-conda/_cm.yaml @@ -0,0 +1,37 @@ +alias: get-conda +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: DevOps automation +clean_files: [] +deps: +- tags: detect,os +new_env_keys: +- +PATH +- +LD_LIBRARY_PATH +- +LIBRARY_PATH +- CM_CONDA_PREFIX +- CONDA_PREFIX +- CM_CONDA_BIN_PATH +- CM_CONDA_BIN_WITH_PATH +- CM_CONDA_LIB_PATH +tags: +- get +- conda +- get-conda +uid: 6600115f41324c7b +variations: + name.#: + adr: + conda-package: + tags: _name.# + env: + CM_CONDA_PREFIX_NAME: '#' + python-3.#: + env: + CM_CONDA_PYTHON_VERSION: 3.# + group: conda-python + python-3.8: + env: + CM_CONDA_PYTHON_VERSION: '3.8' + group: conda-python diff --git a/cmx4mlops/cmx4mlops/repo/script/get-conda/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-conda/customize.py new file mode 100644 index 000000000..7316b5b06 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-conda/customize.py @@ -0,0 +1,120 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + automation = i['automation'] + run_script_input = i['run_script_input'] + + recursion_spaces = i['recursion_spaces'] + + conda_prefix_name = env.get('CM_CONDA_PREFIX_NAME', '') + r = None + file_name = 'conda.exe' if os_info['platform'] == 'windows' else 'conda' + if conda_prefix_name == '': + tmp_path = env.get('CM_CONDA_INSTALL_PATH', env.get('CM_TMP_PATH', '')) + if tmp_path: + x = ';' if os_info['platform'] == 'windows' else ':' + tmp_path += x + conda_path = os.path.join(os.path.expanduser("~"), "miniconda3", "bin") + if os.path.exists(conda_path): + tmp_path += os.path.join(os.path.expanduser("~"), + "miniconda3", "bin") + env['CM_TMP_PATH'] = tmp_path + + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_CONDA_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + else: + env['CM_CONDA_INSTALL_PATH'] = os.path.join(os.getcwd(), "miniconda3") + bin_dir = 'Scripts' if os_info['platform'] == 'windows' else 'bin' + env['CM_CONDA_BIN_WITH_PATH'] = os.path.join( + env['CM_CONDA_INSTALL_PATH'], bin_dir, file_name) + + if conda_prefix_name != '' or r['return'] > 0: + if conda_prefix_name != '' or r['return'] == 16: + if conda_prefix_name == '': + if env.get('CM_TMP_FAIL_IF_NOT_FOUND', '').lower() == 'yes': + return r + + print(recursion_spaces + ' # {}'.format(r['error'])) + + # Attempt to run installer + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'install'}) + if r['return'] > 0: + return r + + # Grigori: temporal fix - should be generalized/improved above + if os_info['platform'] == 'windows' and env.get( + 'CM_CONDA_BIN_WITH_PATH', '') == '': + 
env['CM_CONDA_INSTALL_PATH'] = os.path.join(
+                os.getcwd(), "miniconda3")
+            env['CM_CONDA_BIN_WITH_PATH'] = os.path.join(
+                env['CM_CONDA_INSTALL_PATH'], 'Scripts', file_name)
+
+    else:
+        found_path = r['found_path']
+        env['+PATH'] = [found_path]
+
+    return {'return': 0}
+
+
+def detect_version(i):
+    r = i['automation'].parse_version({'match_text': r'conda\s*([\d.]+)',
+                                       'group_number': 1,
+                                       'env_key': 'CM_CONDA_VERSION',
+                                       'which_env': i['env']})
+    if r['return'] > 0:
+        return r
+    return {'return': 0, 'version': r['version']}
+
+
+def postprocess(i):
+    env = i['env']
+
+    r = detect_version(i)
+    if r['return'] > 0:
+        return r
+
+    conda_bin_path = os.path.dirname(env['CM_CONDA_BIN_WITH_PATH'])
+    env['CM_CONDA_BIN_PATH'] = conda_bin_path
+
+    env['+PATH'] = [conda_bin_path]
+
+    conda_prefix = os.path.dirname(conda_bin_path)
+    env['CM_CONDA_PREFIX'] = conda_prefix
+    env['CONDA_PREFIX'] = conda_prefix
+
+    conda_lib_path = os.path.join(conda_prefix, "lib")
+
+    if os.path.exists(conda_lib_path):
+        env['CM_CONDA_LIB_PATH'] = conda_lib_path
+        env['+LD_LIBRARY_PATH'] = [conda_lib_path]
+        env['+LIBRARY_PATH'] = [conda_lib_path]
+
+    version = r['version']
+
+    print(i['recursion_spaces'] + '    Detected version: {}'.format(version))
+
+    return {'return': 0, 'version': version}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-conda/install.bat b/cmx4mlops/cmx4mlops/repo/script/get-conda/install.bat
new file mode 100644
index 000000000..2528840d9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-conda/install.bat
@@ -0,0 +1,9 @@
+if exist Miniconda3-latest-Windows-x86_64.exe (
+    del /Q /S Miniconda3-latest-Windows-x86_64.exe
+)
+
+wget --no-check-certificate https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+start /wait "" Miniconda3-latest-Windows-x86_64.exe /InstallationType=JustMe /RegisterPython=0 /S /D=%CD%\miniconda3
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-conda/install.sh b/cmx4mlops/cmx4mlops/repo/script/get-conda/install.sh
new file mode 100644
index 000000000..6d1888285
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-conda/install.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+curl -fsSL -v -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
+test $? -eq 0 || exit $?
+chmod +x ~/miniconda.sh
+
+if [ ! -z ${CM_CONDA_PREFIX_NAME} ]; then
+  CM_CONDA_INSTALL_PATH=$PWD/miniconda3
+  rm -rf ${CM_CONDA_INSTALL_PATH}
+fi
+
+
+if [ ! -z ${CM_CONDA_INSTALL_PATH} ]; then
+  ~/miniconda.sh -b -p ${CM_CONDA_INSTALL_PATH}
+else
+  ~/miniconda.sh -b
+fi
+test $? -eq 0 || exit $?
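A condensed sketch of the CM_CONDA_PREFIX_NAME branch in get-conda's preprocess above: when a named prefix is requested, detection is skipped entirely and CM_CONDA_BIN_WITH_PATH is pointed into a fresh miniconda3 tree under the current working directory (a CM cache entry at run time; the path below is illustrative):

```python
import os

def conda_paths(platform='linux', cwd='/tmp/cm-cache-entry'):
    # Same platform-dependent names as in the customize.py above
    file_name = 'conda.exe' if platform == 'windows' else 'conda'
    bin_dir = 'Scripts' if platform == 'windows' else 'bin'
    install_path = os.path.join(cwd, 'miniconda3')
    return {
        'CM_CONDA_INSTALL_PATH': install_path,
        'CM_CONDA_BIN_WITH_PATH': os.path.join(install_path, bin_dir, file_name),
    }

print(conda_paths())                    # linux: .../miniconda3/bin/conda
print(conda_paths(platform='windows'))  # windows: ...\miniconda3\Scripts\conda.exe
```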
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-conda/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-conda/run.bat new file mode 100644 index 000000000..99b9d97d2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-conda/run.bat @@ -0,0 +1 @@ +%CM_CONDA_BIN_WITH_PATH% --version > tmp-ver.out diff --git a/cmx4mlops/cmx4mlops/repo/script/get-conda/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-conda/run.sh new file mode 100644 index 000000000..5d61f106f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-conda/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +${CM_CONDA_BIN_WITH_PATH} --version > tmp-ver.out diff --git a/cmx4mlops/cmx4mlops/repo/script/get-croissant/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-croissant/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-croissant/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-croissant/README.md b/cmx4mlops/cmx4mlops/repo/script/get-croissant/README.md new file mode 100644 index 000000000..65edec955 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-croissant/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-croissant](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-croissant) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-croissant/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-croissant/_cm.yaml new file mode 100644 index 000000000..a024189d2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-croissant/_cm.yaml @@ -0,0 +1,30 @@ +alias: get-croissant +uid: 8fd653eac8da4c14 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: AI/ML datasets + +deps: + - tags: detect,os + + - names: + - python3 + - python + tags: get,python3 + version_min: '3.10' + + - env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLCOMMONS_CROISSANT_PATH + extra_cache_tags: mlcommons,croissant + names: + - git-mlcommons-croissant + tags: git,repo,_repo.https://github.com/mlcommons/croissant + +tags: + - get + - mlcommons + - croissant diff --git a/cmx4mlops/cmx4mlops/repo/script/get-croissant/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-croissant/customize.py new file mode 100644 index 000000000..93e0971bf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-croissant/customize.py @@ -0,0 +1,28 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + return {'return': 0} + + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-croissant/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-croissant/run.bat new file mode 100644 index 000000000..3177de9f6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-croissant/run.bat @@ -0,0 +1,20 @@ +@echo off + +echo 
======================================================= + +cd %CM_MLCOMMONS_CROISSANT_PATH%\python\mlcroissant +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo. +echo Running %CM_PYTHON_BIN_WITH_PATH% -m pip install -e .[git] + +%CM_PYTHON_BIN_WITH_PATH% -m pip install -e .[git] +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo. +echo Validating Croissant ... + +mlcroissant validate --file ../../datasets/titanic/metadata.json +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo ======================================================= diff --git a/cmx4mlops/cmx4mlops/repo/script/get-croissant/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-croissant/run.sh new file mode 100644 index 000000000..dd2c67bb2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-croissant/run.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +echo "=======================================================" + +cd ${CM_MLCOMMONS_CROISSANT_PATH}/python/mlcroissant +if [ "${?}" != "0" ]; then exit 1; fi + +echo "" +echo "Running ${CM_PYTHON_BIN_WITH_PATH} -m pip install -e .[git]" + +${CM_PYTHON_BIN_WITH_PATH} -m pip install -e .[git] +if [ "${?}" != "0" ]; then exit 1; fi + +echo "" +echo "Validating Croissant ..." + +mlcroissant validate --file ../../datasets/titanic/metadata.json +if [ "${?}" != "0" ]; then exit 1; fi + +echo "=======================================================" + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/README.md b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/README.md new file mode 100644 index 000000000..6c08604e0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/CUDA-automation/get-cuda-devices](https://docs.mlcommons.org/cm4mlops/scripts/CUDA-automation/get-cuda-devices) for the documentation of this CM script. 
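The get-croissant run scripts above install the mlcroissant package in editable mode and then smoke-test it against the bundled Titanic metadata. The same check can be driven from Python; this sketch assumes CM_MLCOMMONS_CROISSANT_PATH points at a checkout of github.com/mlcommons/croissant, as set up by the git,repo dependency in _cm.yaml:

```python
import os
import subprocess

# Same validation step as run.sh, using the mlcroissant CLI installed above.
repo = os.environ['CM_MLCOMMONS_CROISSANT_PATH']
metadata = os.path.join(repo, 'datasets', 'titanic', 'metadata.json')

subprocess.run(['mlcroissant', 'validate', '--file', metadata], check=True)
```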
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/_cm.yaml new file mode 100644 index 000000000..2d4869286 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/_cm.yaml @@ -0,0 +1,64 @@ +alias: get-cuda-devices +uid: 7a3ede4d3558427a + +automation_alias: script +automation_uid: 5b4e0237da074764 + +tags: +- get +- cuda-devices + +cache: false + +can_force_cache: true + +category: CUDA automation + +clean_files: +- tmp-run.out + +deps: +- names: + - cuda + tags: get,cuda,_toolkit + +docker: + run: false + all_gpus: 'yes' + skip_run_cmd: 'no' + skip_cm_sys_upgrade: 'yes' + cm_repo_flags: '--checkout=dev' + use_host_group_id: 'yes' + image_tag_extra: '-cm-dev' + +env: + CM_DETECT_USING_PYCUDA: 'no' + +new_env_keys: +- CM_CUDA_DEVICE_* +- CM_CUDA_NUM_DEVICES +- CM_CUDA_VERSION + +new_state_keys: +- cm_cuda_device_prop +- cm_cuda_devices_prop +- cm_cuda_num_devices + +print_files_if_script_error: +- tmp-run.out + +variations: + with-pycuda: + env: + CM_DETECT_USING_PYCUDA: 'yes' + deps: + - tags: get,python3 + names: + - python + - python3 + - tags: get,generic-python-lib,_package.pycuda + names: + - pycuda + - tags: get,generic-python-lib,_package.numpy + names: + - numpy diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/customize.py new file mode 100644 index 000000000..ac23ee7ef --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/customize.py @@ -0,0 +1,77 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import subprocess + + +def preprocess(i): + + env = i['env'] + + if str(env.get('CM_DETECT_USING_PYCUDA', '') + ).lower() in ["1", "yes", "true"]: + i['run_script_input']['script_name'] = 'detect' + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + os_info = i['os_info'] + + r = utils.load_txt(file_name='tmp-run.out', + check_if_exists=True, + split=True) + if r['return'] > 0: + return r + + lst = r['list'] + + # properties + p = {} + gpu = {} + + gpu_id = -1 + + for line in lst: + # print (line) + + j = line.find(':') + + if j >= 0: + key = line[:j].strip() + val = line[j + 1:].strip() + + if key == "GPU Device ID": + gpu_id += 1 + gpu[gpu_id] = {} + + if gpu_id < 0: + continue + + gpu[gpu_id][key] = val + p[key] = val + + key_env = 'CM_CUDA_DEVICE_PROP_' + key.upper().replace(' ', '_') + env[key_env] = val + + state['cm_cuda_num_devices'] = gpu_id + 1 + env['CM_CUDA_NUM_DEVICES'] = gpu_id + 1 + + state['cm_cuda_device_prop'] = p + state['cm_cuda_devices_prop'] = gpu + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/detect.py b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/detect.py new file mode 100644 index 000000000..6603cc9f4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/detect.py @@ -0,0 +1,47 @@ +import pycuda.driver as cuda +import pycuda.autoinit + + +def get_gpu_info(): + num_gpus = cuda.Device.count() + all_gpu_info = [] + + for i in range(num_gpus): + 
device = cuda.Device(i)
+        cuda_runtime_version = cuda.get_version()
+        cuda_runtime_version_str = f"{cuda_runtime_version[0]}.{cuda_runtime_version[1]}"
+
+        gpu_info = {
+            "GPU Device ID": device.pci_bus_id(),
+            "GPU Name": device.name(),
+            "GPU compute capability": f"{device.compute_capability()[0]}.{device.compute_capability()[1]}",
+            "CUDA driver version": f"{cuda.get_driver_version() // 1000}.{(cuda.get_driver_version() % 1000) // 10}",
+            "CUDA runtime version": cuda_runtime_version_str,
+            "Global memory": device.total_memory(),
+            "Max clock rate": f"{device.get_attribute(cuda.device_attribute.CLOCK_RATE) // 1000} MHz",
+            "Total amount of shared memory per block": device.get_attribute(cuda.device_attribute.MAX_SHARED_MEMORY_PER_BLOCK),
+            "Total number of registers available per block": device.get_attribute(cuda.device_attribute.MAX_REGISTERS_PER_BLOCK),
+            "Warp size": device.get_attribute(cuda.device_attribute.WARP_SIZE),
+            "Maximum number of threads per multiprocessor": device.get_attribute(cuda.device_attribute.MAX_THREADS_PER_MULTIPROCESSOR),
+            "Maximum number of threads per block": device.get_attribute(cuda.device_attribute.MAX_THREADS_PER_BLOCK),
+            "Max dimension size of a thread block X": device.get_attribute(cuda.device_attribute.MAX_BLOCK_DIM_X),
+            "Max dimension size of a thread block Y": device.get_attribute(cuda.device_attribute.MAX_BLOCK_DIM_Y),
+            "Max dimension size of a thread block Z": device.get_attribute(cuda.device_attribute.MAX_BLOCK_DIM_Z),
+            "Max dimension size of a grid size X": device.get_attribute(cuda.device_attribute.MAX_GRID_DIM_X),
+            "Max dimension size of a grid size Y": device.get_attribute(cuda.device_attribute.MAX_GRID_DIM_Y),
+            "Max dimension size of a grid size Z": device.get_attribute(cuda.device_attribute.MAX_GRID_DIM_Z),
+        }
+
+        all_gpu_info.append(gpu_info)
+
+    return all_gpu_info
+
+
+# Print the GPU information for all available GPUs
+if __name__ == "__main__":
+    gpu_info_list = get_gpu_info()
+    with open("tmp-run.out", "w") as f:
+        for idx, gpu_info in enumerate(gpu_info_list):
+            print(f"GPU {idx}:")
+            for key, value in gpu_info.items():
+                f.write(f"{key}: {value}\n")
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/detect.sh b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/detect.sh
new file mode 100644
index 000000000..8f6b93596
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/detect.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/detect.py
+test $? -eq 0 || exit $?
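The contract between detect.py and the customize.py postprocess above is plain text: detect.py writes "key: value" lines to tmp-run.out, and postprocess splits each line at the first ':' and opens a new device record whenever it sees the "GPU Device ID" key. A sketch of that parse, with illustrative sample lines:

```python
# Sample lines in the tmp-run.out format; values are illustrative.
lines = [
    "GPU Device ID: 0000:01:00.0",
    "GPU Name: Illustrative GPU",
    "Global memory: 8589934592",
]

gpus, gpu_id = {}, -1
for line in lines:
    key, _, val = line.partition(':')   # split at the first ':' only
    key, val = key.strip(), val.strip()
    if key == 'GPU Device ID':          # start of a new device record
        gpu_id += 1
        gpus[gpu_id] = {}
    if gpu_id >= 0:
        gpus[gpu_id][key] = val

print(gpus)
```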
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/print_cuda_devices.cu b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/print_cuda_devices.cu
new file mode 100644
index 000000000..d68a109ba
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/print_cuda_devices.cu
@@ -0,0 +1,74 @@
+#ifndef WINDOWS
+ #include <unistd.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char *argv[])
+{
+  int ndev=0;
+  int id=0;
+  cudaError_t error;
+  cudaDeviceProp features;
+
+  int rtver=0;
+  int dver=0;
+
+  /* Get number of devices */
+  error = cudaGetDeviceCount(&ndev);
+  if (error != cudaSuccess) {
+    printf("Error: problem obtaining number of CUDA devices: %d\n", error);
+    return 1;
+  }
+
+  /* Iterating over devices */
+  for (id=0; id<ndev; id++) {
+    error = cudaGetDeviceProperties(&features, id);
+    if (error != cudaSuccess) {
+      printf("Error: problem obtaining properties of CUDA device %d: %d\n", id, error);
+      return 1;
+    }
+
+    cudaRuntimeGetVersion(&rtver);
+    cudaDriverGetVersion(&dver);
+
+    printf("GPU Device ID: %d\n", id);
+    printf("GPU Name: %s\n", features.name);
+    printf("GPU compute capability: %d.%d\n", features.major, features.minor);
+    printf("CUDA driver version: %d.%d\n", dver/1000, (dver%100)/10);
+    printf("CUDA runtime version: %d.%d\n", rtver/1000, (rtver%100)/10);
+    printf("Global memory: %lu\n", (unsigned long) features.totalGlobalMem);
+    printf("Max clock rate: %d MHz\n", features.clockRate/1000);
+    printf("Total amount of shared memory per block: %lu\n", (unsigned long) features.sharedMemPerBlock);
+    printf("Total number of registers available per block: %d\n", features.regsPerBlock);
+    printf("Warp size: %d\n", features.warpSize);
+    printf("Maximum number of threads per multiprocessor: %d\n", features.maxThreadsPerMultiProcessor);
+    printf("Maximum number of threads per block: %d\n", features.maxThreadsPerBlock);
+    printf("Max dimension size of a thread block X: %d\n", features.maxThreadsDim[0]);
+    printf("Max dimension size of a thread block Y: %d\n", features.maxThreadsDim[1]);
+    printf("Max dimension size of a thread block Z: %d\n", features.maxThreadsDim[2]);
+    printf("Max dimension size of a grid size X: %d\n", features.maxGridSize[0]);
+    printf("Max dimension size of a grid size Y: %d\n", features.maxGridSize[1]);
+    printf("Max dimension size of a grid size Z: %d\n", features.maxGridSize[2]);
+  }
+
+  return 0;
+}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/run.bat
new file mode 100644
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/run.bat
@@ -0,0 +1,2 @@
+a.exe > tmp-run.out
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/run.sh
new file mode 100644
index 000000000..3d208dd6b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda-devices/run.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Compile
+
+rm a.out
+
+echo ""
+echo "NVCC path: ${CM_NVCC_BIN_WITH_PATH}"
+echo ""
+
+echo ""
+echo "Checking compiler version ..."
+echo ""
+
+${CM_NVCC_BIN_WITH_PATH} -V
+
+echo ""
+echo "Compiling program ..."
+echo ""
+
+cd ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+${CM_NVCC_BIN_WITH_PATH} -allow-unsupported-compiler print_cuda_devices.cu
+test $? -eq 0 || exit 1
+
+# Return to the original path obtained in CM
+
+echo ""
+echo "Running program ..."
+echo ""
+
+cd ${CM_TMP_CURRENT_PATH}
+
+${CM_TMP_CURRENT_SCRIPT_PATH}/a.out > tmp-run.out
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-cuda/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda/README-about.md b/cmx4mlops/cmx4mlops/repo/script/get-cuda/README-about.md
new file mode 100644
index 000000000..af8e24899
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda/README-about.md
@@ -0,0 +1,6 @@
+# System dependencies
+
+* Download [CUDA toolkit](https://developer.nvidia.com/cuda-toolkit).
+* Download [cuDNN](https://developer.nvidia.com/rdp/cudnn-download).
+* Download [TensorRT](https://developer.nvidia.com/nvidia-tensorrt-8x-download).
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-cuda/README-extra.md
new file mode 100644
index 000000000..c075711ff
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda/README-extra.md
@@ -0,0 +1,44 @@
+# Get CUDA
+
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects CUDA installed on the system
+and, if not found, calls the [install script for CUDA](../script/install-cuda-prebuilt).
+
+## Exported Variables
+* `CM_CUDA_INSTALLED_PATH`
+* `CM_CUDA_VERSION`
+* `CM_NVCC_BIN_WITH_PATH`
+* `CUDA_HOME`
+* `CUDA_PATH`
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. Windows
+
+# Examples
+
+## Detect CUDA on Windows
+
+You may want to install all system dependencies as described [here](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html).
+
+If Visual Studio and CUDA updated your PATH variable, you should just run the following:
+```bash
+cm run script "get cuda"
+```
+
+However, if the PATH variable was not updated, you need to provide the paths to cl.exe and nvcc.exe to help CM detect them:
+
+```bash
+cm run script "get cl" --path="C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\bin\Hostx64\x64"
+cm run script "get cuda _compiler" --path="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\bin"
+```
+
+# System dependencies
+
+* Download [CUDA toolkit](https://developer.nvidia.com/cuda-toolkit).
+* Download [cuDNN](https://developer.nvidia.com/rdp/cudnn-download).
+* (Download [TensorRT](https://developer.nvidia.com/nvidia-tensorrt-8x-download)).
+
+## Windows
+
+* ? Download [Microsoft Visual C++ Redistributable](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist)
+* Check [Nvidia installation guide](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html)
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda/README.md b/cmx4mlops/cmx4mlops/repo/script/get-cuda/README.md
new file mode 100644
index 000000000..ed8e4273f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/CUDA-automation/get-cuda](https://docs.mlcommons.org/cm4mlops/scripts/CUDA-automation/get-cuda) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-cuda/_cm.yaml
new file mode 100644
index 000000000..db5a30b0b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda/_cm.yaml
@@ -0,0 +1,110 @@
+alias: get-cuda
+uid: 46d133d9ef92422d
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+tags:
+- get
+- cuda
+- cuda-compiler
+- cuda-lib
+- toolkit
+- lib
+- nvcc
+- get-nvcc
+- get-cuda
+- 46d133d9ef92422d
+
+cache: true
+
+category: CUDA automation
+
+default_env:
+  CM_CUDA_PATH_LIB_CUDNN_EXISTS: 'no'
+  CM_REQUIRE_INSTALL: 'no'
+
+deps:
+- tags: detect,os
+- enable_if_env:
+    CM_CUDA_FULL_TOOLKIT_INSTALL:
+    - 'yes'
+    CM_HOST_OS_TYPE:
+    - windows
+  names:
+  - compiler
+  tags: get,cl
+
+input_mapping:
+  cudnn_tar_file: CM_CUDNN_TAR_FILE_PATH
+  cudnn_tar_path: CM_CUDNN_TAR_FILE_PATH
+  skip_sudo: CUDA_SKIP_SUDO
+  skip_cudnn_install: CM_CUDA_SKIP_CUDNN_INSTALL
+
+new_env_keys:
+- CUDA_HOME
+- CUDA_PATH
+- CM_CUDA_*
+- CM_NVCC_*
+- CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX5
+- +PATH
+- +C_INCLUDE_PATH
+- +CPLUS_INCLUDE_PATH
+- +LD_LIBRARY_PATH
+- +DYLD_FALLBACK_LIBRARY_PATH
+- + LDFLAGS
+
+prehook_deps:
+- enable_if_env:
+    CM_REQUIRE_INSTALL:
+    - 'yes'
+  names:
+  - install-cuda-prebuilt
+  reuse_version: true
+  tags: install,cuda,prebuilt
+- enable_if_env:
+    CM_CUDA_PACKAGE_MANAGER_INSTALL:
+    - 'yes'
+  tags: get,generic-sys-util,_nvidia-cuda-toolkit
+
+print_env_at_the_end:
+  CM_CUDA_PATH_LIB_CUDNN_EXISTS: ''
+  CM_CUDA_VERSION: ''
+  CM_CUDA_VERSION_STRING: ''
+  CM_NVCC_BIN_WITH_PATH: ''
+  CUDA_HOME: ''
+
+print_files_if_script_error:
+- tmp-ver.out
+
+variations:
+  cudnn:
+    env:
+      CM_CUDA_NEEDS_CUDNN: 'yes'
+    post_deps:
+    - names:
+      - cudnn
+      tags: get,nvidia,cudnn
+      skip_if_env:
+        CM_CUDA_SKIP_CUDNN_INSTALL:
+        - yes
+
+  lib-only:
+    env:
+      CM_CUDA_FULL_TOOLKIT_INSTALL: 'no'
+      CM_TMP_FILE_TO_CHECK_UNIX: libcudart.so
+      CM_TMP_FILE_TO_CHECK_WINDOWS: libcudart.dll
+    group: installation-mode
+  package-manager:
+    env:
+      CM_CUDA_PACKAGE_MANAGER_INSTALL: 'yes'
+  prebuilt:
+    env:
+
CM_REQUIRE_INSTALL: 'yes' + toolkit: + default: true + env: + CM_CUDA_FULL_TOOLKIT_INSTALL: 'yes' + CM_TMP_FILE_TO_CHECK_UNIX: nvcc + CM_TMP_FILE_TO_CHECK_WINDOWS: nvcc.exe + group: installation-mode diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-cuda/customize.py new file mode 100644 index 000000000..aa2df4494 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda/customize.py @@ -0,0 +1,242 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import json + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if str(env.get('CUDA_SKIP_SUDO', '')).lower() == 'true': + env['CM_SUDO'] = '' + + recursion_spaces = i['recursion_spaces'] + + if os_info['platform'] == 'windows': + file_name = env['CM_TMP_FILE_TO_CHECK_WINDOWS'] + + if env.get('CM_INPUT', '').strip() == '' and env.get( + 'CM_TMP_PATH', '').strip() == '': + # Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA" + paths = [] + for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", + "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]: + if os.path.isdir(path): + dirs = os.listdir(path) + for dr in dirs: + path2 = os.path.join(path, dr, 'bin') + if os.path.isdir(path2): + paths.append(path2) + + if len(paths) > 0: + tmp_paths = ';'.join(paths) + tmp_paths += ';' + os.environ.get('PATH', '') + + env['CM_TMP_PATH'] = tmp_paths + env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + else: + file_name = env['CM_TMP_FILE_TO_CHECK_UNIX'] + + # paths to cuda are not always in PATH - add a few typical locations to search for + # (unless forced by a user) + + if env.get('CM_INPUT', '').strip() == '' and env.get( + 'CM_TMP_PATH', '').strip() == '': + system_path = os.environ.get('PATH') + if system_path: + system_path = system_path + ":" + env['CM_TMP_PATH'] = system_path + \ + '/usr/local/cuda/bin:/usr/cuda/bin:/usr/local/cuda-11/bin:/usr/cuda-11/bin:/usr/local/cuda-12/bin:/usr/cuda-12/bin:/usr/local/packages/cuda' + env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + if env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": + env_key = 'CM_NVCC_BIN_WITH_PATH' + path_env_key = 'PATH' + else: + env_key = 'CM_CUDA_RT_WITH_PATH' + path_env_key = 'LD_LIBRARY_PATH' + env['CM_TMP_ENV_KEY'] = env_key + + if env_key not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': path_env_key, + 'detect_version': True, + 'env_path_key': env_key, + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + if os_info['platform'] == 'windows': + return r + + if r['return'] == 16 and env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return': 0} + + +def detect_version(i): + env = i['env'] + if env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes": + return detect_version_nvcc(i) + else: + return detect_version_cuda_lib(i) + + +def detect_version_nvcc(i): + r = i['automation'].parse_version({'match_text': 
r'release\s*([\d.]+)',
+                                       'group_number': 1,
+                                       'env_key': 'CM_CUDA_VERSION',
+                                       'which_env': i['env']})
+    if r['return'] > 0:
+        return r
+
+    version = r['version']
+
+    print(i['recursion_spaces'] + '    Detected version: {}'.format(version))
+
+    return {'return': 0, 'version': version}
+
+
+def detect_version_cuda_lib(i):
+
+    env = i['env']
+    cuda_rt_file_path = env['CM_CUDA_RT_WITH_PATH']
+    cuda_lib_path = os.path.dirname(cuda_rt_file_path)
+    cuda_path = os.path.abspath(os.path.join(cuda_lib_path, os.pardir))
+
+    cuda_version = "version-missing"
+
+    version_json = os.path.join(cuda_path, "version.json")
+    if os.path.exists(version_json):
+        with open(version_json) as f:
+            version_info = json.load(f)
+            cuda_version_info = version_info.get('cuda_cudart')
+            if cuda_version_info:
+                cuda_version = cuda_version_info.get('version')
+
+    env['CM_CUDA_VERSION'] = cuda_version
+    version = cuda_version
+
+    print(i['recursion_spaces'] + '    Detected version: {}'.format(version))
+
+    return {'return': 0, 'version': version}
+
+
+def postprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    r = detect_version(i)
+    if r['return'] > 0:
+        return r
+    version = r['version']
+
+    env['CM_CUDA_CACHE_TAGS'] = 'version-' + version
+
+    found_file_path = env[env['CM_TMP_ENV_KEY']]
+
+    if env['CM_CUDA_FULL_TOOLKIT_INSTALL'] == "yes":
+
+        cuda_path_bin = os.path.dirname(found_file_path)
+        env['CM_CUDA_PATH_BIN'] = cuda_path_bin
+
+        cuda_path = os.path.dirname(cuda_path_bin)
+        env['CM_CUDA_INSTALLED_PATH'] = cuda_path
+        env['CM_NVCC_BIN'] = os.path.basename(found_file_path)
+
+    else:
+        # We traverse backwards until we find a path with an include dir
+        parent_path = os.path.dirname(found_file_path)
+        env['CM_CUDA_PATH_LIB'] = parent_path
+        parent_path = os.path.dirname(parent_path)
+        while os.path.isdir(parent_path):
+            if os.path.exists(os.path.join(parent_path, "include")):
+                print("Path is " + parent_path)
+                found_path = parent_path
+                cuda_path = found_path
+                env['CM_CUDA_INSTALLED_PATH'] = cuda_path
+                break
+            else:
+                parent_path = os.path.dirname(parent_path)
+
+        if 'CM_CUDA_INSTALLED_PATH' not in env:
+            return {
+                'return': 1, 'error': "No CUDA installation path with an include directory was found"}
+
+    env['CUDA_HOME'] = cuda_path
+    env['CUDA_PATH'] = cuda_path
+
+    cuda_system_path_install = False
+    system_path = os.environ.get('PATH')
+    if os.path.join(cuda_path, "bin") in system_path.split(":"):
+        cuda_system_path_install = True
+
+    # Check extra paths
+    for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH',
+                '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']:
+        env[key] = []
+
+    # Include
+    cuda_path_include = os.path.join(cuda_path, 'include')
+    if os.path.isdir(cuda_path_include):
+        if os_info['platform'] != 'windows' and not cuda_system_path_install:
+            env['+C_INCLUDE_PATH'].append(cuda_path_include)
+            env['+CPLUS_INCLUDE_PATH'].append(cuda_path_include)
+
+        env['CM_CUDA_PATH_INCLUDE'] = cuda_path_include
+
+    # Lib
+    if os_info['platform'] == 'windows':
+        extra_dir = 'x64'
+    else:
+        extra_dir = ''
+
+    for d in ['lib64', 'lib']:
+        cuda_path_lib = os.path.join(cuda_path, d)
+
+        if extra_dir != '':
+            cuda_path_lib = os.path.join(cuda_path_lib, extra_dir)
+
+        if os.path.isdir(cuda_path_lib):
+            if not cuda_system_path_install:
+                env['+LD_LIBRARY_PATH'].append(cuda_path_lib)
+                env['+DYLD_FALLBACK_LIBRARY_PATH'].append(cuda_path_lib)
+
+            env['CM_CUDA_PATH_LIB'] = cuda_path_lib
+            break
+
+    if '+ LDFLAGS' not in env:
+        env['+ LDFLAGS'] = []
+    if 'CM_CUDA_PATH_LIB' in env and not cuda_system_path_install:
+        x
= env['CM_CUDA_PATH_LIB'] + if ' ' in x: + x = '"' + x + '"' + env['+ LDFLAGS'].append("-L" + x) + + env['CM_CUDA_VERSION_STRING'] = "cu" + \ + env['CM_CUDA_VERSION'].replace(".", "") + env['CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX5'] = env['CM_CUDA_VERSION_STRING'] + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-cuda/run.bat new file mode 100644 index 000000000..89af970ac --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda/run.bat @@ -0,0 +1,3 @@ +"%CM_NVCC_BIN_WITH_PATH%" -V > tmp-ver.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cuda/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-cuda/run.sh new file mode 100644 index 000000000..aac0fee36 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cuda/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash +if [[ ${CM_CUDA_FULL_TOOLKIT_INSTALL} == "no" ]]; then + exit 0 +fi +nvcc_bin=${CM_NVCC_BIN_WITH_PATH:-nvcc} + +${nvcc_bin} -V > tmp-ver.out +test $? -eq 0 || exit 1 + +if [[ ${nvcc_bin} == "nvcc" ]]; then + nvcc_path=`which nvcc` + echo "CM_NVCC_BIN_WITH_PATH=${nvcc_path}" >> tmp-run-env.out + test $? -eq 0 || exit 1 +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cudnn/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-cudnn/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cudnn/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cudnn/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-cudnn/README-extra.md new file mode 100644 index 000000000..374680813 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cudnn/README-extra.md @@ -0,0 +1,3 @@ +# TBD + +We need to add detection of cuDNN version on Windows, Linux and MacOS diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cudnn/README.md b/cmx4mlops/cmx4mlops/repo/script/get-cudnn/README.md new file mode 100644 index 000000000..b221283bf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cudnn/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/CUDA-automation/get-cudnn](https://docs.mlcommons.org/cm4mlops/scripts/CUDA-automation/get-cudnn) for the documentation of this CM script. 
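A condensed sketch of get-cuda's postprocess path derivation above, in the full-toolkit mode: CUDA_HOME is two directory levels above the detected nvcc, include/lib64 are exported from there, and the "cuXYZ" version string feeds the MLPerf SUT config suffix. The nvcc path and version below are illustrative:

```python
import os

nvcc = '/usr/local/cuda-12.2/bin/nvcc'   # illustrative detected binary
version = '12.2'                          # illustrative detected version

cuda_bin = os.path.dirname(nvcc)          # .../cuda-12.2/bin
cuda_home = os.path.dirname(cuda_bin)     # .../cuda-12.2

env = {
    'CUDA_HOME': cuda_home,
    'CM_CUDA_PATH_INCLUDE': os.path.join(cuda_home, 'include'),
    'CM_CUDA_PATH_LIB': os.path.join(cuda_home, 'lib64'),
    'CM_CUDA_VERSION_STRING': 'cu' + version.replace('.', ''),  # -> 'cu122'
}
print(env)
```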
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cudnn/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-cudnn/_cm.yaml new file mode 100644 index 000000000..b01506f6d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cudnn/_cm.yaml @@ -0,0 +1,55 @@ +alias: get-cudnn +uid: d73ee19baee14df8 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +tags: +- get +- cudnn +- nvidia + +cache: true + +category: CUDA automation + +default_env: + CM_INPUT: '' + CM_SUDO: sudo + +deps: +- tags: detect,os +- names: + - cuda + skip_if_env: + CM_CUDA_PATH_INCLUDE: + - 'on' + CM_CUDA_PATH_LIB: + - 'on' + tags: get,cuda + +input_description: + input: + desc: Full path to the installed cuDNN library + tar_file: + desc: Full path to the cuDNN Tar file downloaded from Nvidia website (https://developer.nvidia.com/cudnn) + +input_mapping: + input: CM_INPUT + tar_file: CM_CUDNN_TAR_FILE_PATH + +new_env_keys: +- CM_CUDNN_* +- CM_CUDA_PATH_LIB_CUDNN +- CM_CUDA_PATH_INCLUDE_CUDNN +- CM_CUDA_PATH_LIB_CUDNN_EXISTS +- +PATH +- +C_INCLUDE_PATH +- +CPLUS_INCLUDE_PATH +- +LD_LIBRARY_PATH +- +DYLD_FALLBACK_LIBRARY_PATH + +print_env_at_the_end: + CM_CUDA_PATH_LIB_CUDNN: '' + CM_CUDA_PATH_INCLUDE_CUDNN: '' + CM_CUDNN_VERSION: '' diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cudnn/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-cudnn/customize.py new file mode 100644 index 000000000..89043319c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-cudnn/customize.py @@ -0,0 +1,213 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import tarfile +import shutil + + +def preprocess(i): + + recursion_spaces = i['recursion_spaces'] + + os_info = i['os_info'] + + env = i['env'] + + env['CM_TMP_RUN_COPY_SCRIPT'] = "no" + + # If TAR file is not explicitly specified, search + if env.get('CM_CUDNN_TAR_FILE_PATH', '') == '': + + cuda_path_lib = env.get('CM_CUDA_PATH_LIB') + + if os_info['platform'] == 'windows': + extra_pre = '' + extra_ext = 'lib' + else: + extra_pre = 'lib' + extra_ext = 'so' + + libfilename = extra_pre + 'cudnn.' 
+ extra_ext
+    env['CM_CUDNN_VERSION'] = 'vdetected'
+
+    if os.path.exists(os.path.join(cuda_path_lib, libfilename)):
+        env['CM_CUDA_PATH_LIB_CUDNN'] = env['CM_CUDA_PATH_LIB']
+        return {'return': 0}
+
+    if env.get('CM_TMP_PATH', '').strip() != '':
+        path = env.get('CM_TMP_PATH')
+        if os.path.exists(os.path.join(path, libfilename)):
+            env['CM_CUDA_PATH_LIB_CUDNN'] = path
+            return {'return': 0}
+
+    if env.get('CM_INPUT', '').strip() == '':
+        if os_info['platform'] == 'windows':
+            if env.get('CM_TMP_PATH', '').strip() == '':
+                # Check in "C:\Program Files\NVIDIA GPU Computing
+                # Toolkit\CUDA"
+                paths = []
+                for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA",
+                             "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]:
+                    if os.path.isdir(path):
+                        dirs = os.listdir(path)
+                        for dr in dirs:
+                            path2 = os.path.join(path, dr, 'lib')
+                            if os.path.isdir(path2):
+                                paths.append(path2)
+
+                if len(paths) > 0:
+                    tmp_paths = ';'.join(paths)
+                    tmp_paths += ';' + os.environ.get('PATH', '')
+
+                    env['CM_TMP_PATH'] = tmp_paths
+                    env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
+
+        else:
+            # paths to cuda are not always in PATH - add a few typical locations to search for
+            # (unless forced by a user)
+
+            cm_tmp_path = env.get('CM_TMP_PATH', '').strip()
+            if cm_tmp_path != '':
+                cm_tmp_path += ':'
+            cm_tmp_path += '/usr/local/cuda/lib64:/usr/cuda/lib64:/usr/local/cuda/lib:/usr/cuda/lib:/usr/local/cuda-11/lib64:/usr/cuda-11/lib:/usr/local/cuda-12/lib:/usr/cuda-12/lib:/usr/local/packages/cuda/lib'
+            cm_tmp_path += os.path.expandvars(':$CUDNN_ROOT/lib')
+            env['CM_TMP_PATH'] = cm_tmp_path
+            env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes'
+
+            for lib_path in env.get(
+                    '+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []):
+                if (os.path.exists(lib_path)):
+                    env['CM_TMP_PATH'] += ':' + lib_path
+
+        r = i['automation'].find_artifact({'file_name': libfilename,
+                                           'env': env,
+                                           'os_info': os_info,
+                                           'default_path_env_key': 'LD_LIBRARY_PATH',
+                                           'detect_version': False,
+                                           'env_path_key': 'CM_CUDA_PATH_LIB_CUDNN',
+                                           'run_script_input': i['run_script_input'],
+                                           'recursion_spaces': recursion_spaces})
+        if r['return'] > 0:
+            if os_info['platform'] == 'windows':
+                return r
+
+            if r['return'] == 16:
+                env['CM_TMP_REQUIRE_INSTALL'] = "yes"
+            else:
+                return r
+        else:
+            # On Linux we may have detected the cuDNN library file itself
+            # instead of the directory that contains it
+            if os.path.isfile(env['CM_CUDA_PATH_LIB_CUDNN']):
+                env['CM_CUDA_PATH_LIB_CUDNN'] = os.path.dirname(
+                    env['CM_CUDA_PATH_LIB_CUDNN'])
+
+            return {'return': 0}
+
+    if env.get('CM_CUDNN_TAR_FILE_PATH', '') == '':
+        return {'return': 1, 'error': 'Please invoke cm run script "get cudnn" --tar_file={full path to the cuDNN tar file}'}
+
+    print('Untarring file - can take some time ...')
+
+    my_tar = tarfile.open(os.path.expanduser(env['CM_CUDNN_TAR_FILE_PATH']))
+    folder_name = my_tar.getnames()[0]
+    if not os.path.exists(os.path.join(os.getcwd(), folder_name)):
+        my_tar.extractall()
+    my_tar.close()
+
+    import re
+    version_match = re.match(r'cudnn-.*?-(\d+\.\d+\.\d+\.\d+)', folder_name)
+    if not version_match:
+        return {
+            'return': 1, 'error': 'Extracted cuDNN folder does not seem proper - version information missing'}
+    version = version_match.group(1)
+    env['CM_CUDNN_VERSION'] = version
+
+    inc_path = os.path.join(os.getcwd(), folder_name, "include")
+    lib_path = os.path.join(os.getcwd(), folder_name, "lib")
+    cuda_inc_path = env['CM_CUDA_PATH_INCLUDE']
+    cuda_lib_path = env['CM_CUDA_PATH_LIB']
+    env['CM_CUDA_PATH_LIB_CUDNN'] = env['CM_CUDA_PATH_LIB']
+    env['CM_CUDA_PATH_INCLUDE_CUDNN'] = env['CM_CUDA_PATH_INCLUDE']
+
+    try:
+        print(
+            "Copying cuDNN include files to {} (CUDA include path)".format(cuda_inc_path))
+        shutil.copytree(inc_path, cuda_inc_path, dirs_exist_ok=True)
+        print("Copying cuDNN lib files to {} (CUDA lib path)".format(cuda_lib_path))
+        shutil.copytree(lib_path, cuda_lib_path, dirs_exist_ok=True)
+    except BaseException:
+        # Need to copy to system path via run.sh
+        env['CM_TMP_RUN_COPY_SCRIPT'] = "yes"
+        env['CM_TMP_INC_PATH'] = inc_path
+        env['CM_TMP_LIB_PATH'] = lib_path
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    version = env['CM_CUDNN_VERSION']
+
+    if version == 'vdetected':
+        path_to_cudnn = env.get('CM_CUDA_PATH_LIB_CUDNN', '')
+        if os.path.isdir(path_to_cudnn):
+            path_to_include = path_to_cudnn
+            path_to_include_file = ''
+            for j in range(0, 2):
+                path_to_include = os.path.dirname(path_to_include)
+                x = os.path.join(path_to_include, 'include', 'cudnn_version.h')
+                if os.path.isfile(x):
+                    path_to_include_file = x
+                    break
+
+            if path_to_include_file == '' and path_to_cudnn.startswith('/lib'):
+                x = os.path.join('/usr', 'include', 'cudnn_version.h')
+                if os.path.isfile(x):
+                    path_to_include_file = x
+
+            if path_to_include_file != '':
+                env['CM_CUDA_PATH_INCLUDE_CUDNN'] = os.path.dirname(
+                    path_to_include_file)
+
+                r = utils.load_txt(path_to_include_file, split=True)
+                if r['return'] == 0:
+                    lst = r['list']
+
+                    xversion = ''
+
+                    for l in lst:
+                        l = l.strip()
+
+                        x = '#define CUDNN_MAJOR '
+                        if l.startswith(x):
+                            xversion = l[len(x):]
+
+                        x = '#define CUDNN_MINOR '
+                        if l.startswith(x):
+                            xversion += '.' + l[len(x):]
+
+                        x = '#define CUDNN_PATCHLEVEL '
+                        if l.startswith(x):
+                            xversion += '.' + l[len(x):]
+
+                    if xversion != '':
+                        version = xversion
+                        env['CM_CUDNN_VERSION'] = xversion
+
+    env['CM_CUDA_PATH_LIB_CUDNN_EXISTS'] = 'yes'
+
+    return {'return': 0, 'version': version}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-cudnn/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-cudnn/run.sh
new file mode 100644
index 000000000..e2cb00fb0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-cudnn/run.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+if [ ${CM_TMP_RUN_COPY_SCRIPT} == "yes" ]; then
+  cmd="${CM_SUDO} cp ${CM_TMP_INC_PATH}/*.h ${CM_CUDA_PATH_INCLUDE}/"
+  echo $cmd
+  eval $cmd
+  test $? -eq 0 || exit 1
+
+  cmd="${CM_SUDO} cp -P ${CM_TMP_LIB_PATH}/libcudnn* ${CM_CUDA_PATH_LIB}/"
+  echo $cmd
+  eval $cmd
+  test $? -eq 0 || exit 1
+fi
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/README.md
new file mode 100644
index 000000000..93670519f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-cifar10](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-cifar10) for the documentation of this CM script.
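The get-cudnn postprocess above recovers the version of a system-detected cuDNN by assembling the CUDNN_MAJOR / CUDNN_MINOR / CUDNN_PATCHLEVEL defines from cudnn_version.h. A standalone sketch of that parse, with illustrative header contents:

```python
# Illustrative excerpt of include/cudnn_version.h
header = """\
#define CUDNN_MAJOR 8
#define CUDNN_MINOR 9
#define CUDNN_PATCHLEVEL 7
"""

parts = {}
for line in header.splitlines():
    line = line.strip()
    for name in ('CUDNN_MAJOR', 'CUDNN_MINOR', 'CUDNN_PATCHLEVEL'):
        prefix = '#define {} '.format(name)
        if line.startswith(prefix):
            parts[name] = line[len(prefix):]

print('.'.join(parts[n] for n in ('CUDNN_MAJOR', 'CUDNN_MINOR', 'CUDNN_PATCHLEVEL')))
# -> 8.9.7
```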
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/_cm.yaml new file mode 100644 index 000000000..1be5ef644 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/_cm.yaml @@ -0,0 +1,36 @@ +alias: get-dataset-cifar10 +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +deps: +- tags: detect,os +new_env_keys: +- CM_DATASET_* +tags: +- get +- dataset +- cifar10 +- image-classification +- validation +- training +uid: 2f0c0bb3663b4ed7 +variations: + python: + default: true + env: + CM_DATASET: CIFAR10 + CM_DATASET_CIFAR10: https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz + CM_DATASET_FILENAME: cifar-10-python.tar.gz + CM_DATASET_FILENAME1: cifar-10-python.tar + group: data_format + tiny: + deps: + - names: + - python + - python3 + tags: get,python3 + - tags: get,tinymlperf,src + - tags: get,src,eembc,energy-runner + env: + CM_DATASET_CONVERT_TO_TINYMLPERF: 'yes' diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/customize.py new file mode 100644 index 000000000..a94b4e7c0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/customize.py @@ -0,0 +1,29 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + env = i['env'] + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + + variation_tags = i.get('variation_tags', []) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/requirements.txt b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/requirements.txt new file mode 100644 index 000000000..530995dd0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/requirements.txt @@ -0,0 +1,47 @@ +absl-py +astunparse +cachetools +certifi +chardet +cycler +flatbuffers +gast +google-auth +google-auth-oauthlib +google-pasta +grpcio +h5py +idna +imageio +joblib +Keras-Preprocessing +kiwisolver +Markdown +matplotlib +numpy +oauthlib +opencv-python +opt-einsum +Pillow +protobuf +pyasn1 +pyasn1-modules +pyparsing +python-dateutil +PyYAML +requests +requests-oauthlib +rsa +scikit-learn +scipy +six +tensorboard +tensorboard-plugin-wit +tensorflow +tensorflow-estimator +termcolor +threadpoolctl +typing-extensions +urllib3 +Werkzeug +wrapt diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/run.bat new file mode 100644 index 000000000..8f54fb86e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/run.bat @@ -0,0 +1,48 @@ +wget -nc %CM_DATASET_CIFAR10% --no-check-certificate +IF %ERRORLEVEL% NEQ 0 EXIT 1 + +del /Q /S %CM_DATASET_FILENAME1% + +gzip -d %CM_DATASET_FILENAME% +IF %ERRORLEVEL% NEQ 0 EXIT 1 + +tar -xvf %CM_DATASET_FILENAME1% +IF %ERRORLEVEL% NEQ 0 EXIT 1 + +del /Q /S %CM_DATASET_FILENAME1% + +echo CM_DATASET_PATH=%CD%\cifar-10-batches-py > tmp-run-env.out +echo CM_DATASET_CIFAR10_PATH=%CD%\cifar-10-batches-py >> 
tmp-run-env.out + +if "%CM_DATASET_CONVERT_TO_TINYMLPERF%" == "yes" ( + echo. + echo Copying TinyMLPerf convertor ... + echo. + + copy /B /Y %CM_MLPERF_TINY_TRAINING_IC%\* . + + echo. + echo Installing Python requirements ... + echo. + + %CM_PYTHON_BIN% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + IF %ERRORLEVEL% NEQ 0 EXIT 1 + + echo. + echo Converting ... + echo. + + %CM_PYTHON_BIN% perf_samples_loader.py + IF %ERRORLEVEL% NEQ 0 EXIT 1 + + copy /B /Y y_labels.csv perf_samples + + echo CM_DATASET_CIFAR10_TINYMLPERF_PATH=%CD%\perf_samples >> tmp-run-env.out + + echo. + echo Copying to EEMBC runner user space ... + echo. + + copy /B /Y perf_samples\* %CM_EEMBC_ENERGY_RUNNER_DATASETS%\ic01 +) + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/run.sh new file mode 100644 index 000000000..a113a2e4d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cifar10/run.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +wget -nc ${CM_DATASET_CIFAR10} --no-check-certificate +test $? -eq 0 || exit 1 + +rm -rf ${CM_DATASET_FILENAME1} + +gzip -d ${CM_DATASET_FILENAME} +test $? -eq 0 || exit 1 + +tar -xvf ${CM_DATASET_FILENAME1} +test $? -eq 0 || exit 1 + +rm -rf ${CM_DATASET_FILENAME} + +echo "CM_DATASET_PATH=$PWD/cifar-10-batches-py" > tmp-run-env.out +echo "CM_DATASET_CIFAR10_PATH=$PWD/cifar-10-batches-py" >> tmp-run-env.out + +if [ "${CM_DATASET_CONVERT_TO_TINYMLPERF}" == "yes" ]; then + echo "" + echo "Copying TinyMLPerf convertor ..." + echo "" + + cp -rf ${CM_MLPERF_TINY_TRAINING_IC}/* . + + echo "" + echo "Installing Python requirements ..." + echo "" + + ${CM_PYTHON_BIN} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + if [ "${?}" != "0" ]; then exit 1; fi + + echo "" + echo "Converting in $PWD ..." + echo "" + + ${CM_PYTHON_BIN} perf_samples_loader.py + if [ "${?}" != "0" ]; then exit 1; fi + + cp -rf y_labels.csv perf_samples + + echo "CM_DATASET_CIFAR10_TINYMLPERF_PATH=$PWD/perf_samples" >> tmp-run-env.out + + echo "" + echo "Copying to EEMBC runner user space ..." + echo "" + + cp -rf perf_samples/* ${CM_EEMBC_ENERGY_RUNNER_DATASETS}/ic01 +fi + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/README.md new file mode 100644 index 000000000..90260fb7b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-cnndm](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-cnndm) for the documentation of this CM script. 
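+ +After a `_validation` run, the generated evaluation file can be inspected from Python; a minimal sketch (the path variable is exported by `customize.py` below; the inner JSON structure is deliberately not assumed here): + +```python +import json +import os + +# Path to cnn_eval.json exported by customize.py +with open(os.environ['CM_DATASET_CNNDM_EVAL_PATH']) as f: + data = json.load(f) + +print(type(data), len(data)) # quick sanity check of the parsed content +``` 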
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/_cm.yaml new file mode 100644 index 000000000..91b2af381 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/_cm.yaml @@ -0,0 +1,55 @@ +alias: get-dataset-cnndm +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +default_env: + CM_DATASET_CALIBRATION: 'no' +deps: +- tags: get,sys-utils-cm +- names: + - python + - python3 + tags: get,python3 + version_max: 3.9.999 +- names: + - inference-src + skip_if_env: + CM_CNNDM_INTEL_VARIATION: + - 'yes' + tags: mlperf,inference,source +- tags: get,generic-python-lib,_package.simplejson +- tags: get,generic-python-lib,_datasets +- tags: get,generic-python-lib,_package.tokenizers +- tags: get,generic-python-lib,_numpy +env: + CM_DATASET: CNNDM +tags: +- get +- dataset +- gpt-j +- cnndm +- cnn-dailymail +- original +uid: aed298c156e24257 +variations: + calibration: + env: + CM_DATASET_CALIBRATION: 'yes' + group: dataset-type + new_env_keys: + - CM_CALIBRATION_DATASET_PATH + - CM_CALIBRATION_DATASET_CNNDM_PATH + intel: {} + intel,validation: + env: + CM_CNNDM_INTEL_VARIATION: 'yes' + validation: + default: true + env: + CM_DATASET_CALIBRATION: 'no' + group: dataset-type + new_env_keys: + - CM_DATASET_PATH + - CM_DATASET_EVAL_PATH + - CM_DATASET_CNNDM_EVAL_PATH diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/customize.py new file mode 100644 index 000000000..ea837309b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/customize.py @@ -0,0 +1,47 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + env = i['env'] + + if env.get('CM_CNNDM_INTEL_VARIATION', '') == 'yes': + i['run_script_input']['script_name'] = "run-intel" + else: + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + + if env.get('CM_DATASET_CALIBRATION', '') == "no": + env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'install') + env['CM_DATASET_EVAL_PATH'] = os.path.join( + os.getcwd(), 'install', 'cnn_eval.json') + env['CM_DATASET_CNNDM_EVAL_PATH'] = os.path.join( + os.getcwd(), 'install', 'cnn_eval.json') + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DATASET_PATH'] + else: + env['CM_CALIBRATION_DATASET_PATH'] = os.path.join( + os.getcwd(), 'install', 'cnn_dailymail_calibration.json') + env['CM_CALIBRATION_DATASET_CNNDM_PATH'] = os.path.join( + os.getcwd(), 'install', 'cnn_dailymail_calibration.json') + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_CALIBRATION_DATASET_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/run-intel.sh b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/run-intel.sh new file mode 100644 index 000000000..067f158a5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/run-intel.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR=${PWD} +rm 
-rf install +mkdir -p install + +export DATASET_CNNDM_PATH=${CUR}/install + +wget -nc https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/gptj-99/pytorch-cpu/download-dataset.py +test $? -eq 0 || exit 1 + +cmd="${CM_PYTHON_BIN_WITH_PATH} download-dataset.py --split validation --output-dir ${DATASET_CNNDM_PATH}" +echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/run.sh new file mode 100644 index 000000000..f9aa3864b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-cnndm/run.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +CUR=${PWD} +mkdir -p install +export DATASET_CNNDM_PATH=${CUR}/install + +cd ${CM_MLPERF_INFERENCE_SOURCE} +cd language/gpt-j + +if [[ ${CM_DATASET_CALIBRATION} == "no" ]]; then + cmd="${CM_PYTHON_BIN_WITH_PATH} download_cnndm.py" + echo $cmd + eval $cmd + test $? -eq 0 || exit 1 +else + cmd="${CM_PYTHON_BIN_WITH_PATH} prepare-calibration.py --calibration-list-file calibration-list.txt --output-dir ${DATASET_CNNDM_PATH}" + echo $cmd + eval $cmd + test $? -eq 0 || exit 1 +fi +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/README-extra.md new file mode 100644 index 000000000..9f19d2e8d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/README-extra.md @@ -0,0 +1,95 @@ +# CM interface to download or detect COCO data sets + +This CM automation recipe helps to download or detect [COCO datasets](https://cocodataset.org) +and register them in the CM cache with various environment variables +to be reused in CM workflows and other projects. 
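+ +Downstream scripts in a CM workflow typically pick these up from their environment; a minimal sketch (the variable names match the `new_env` output shown further below): + +```python +import os + +# Paths registered by this recipe in the CM cache +dataset_path = os.environ.get('CM_DATASET_PATH', '') +annotations_path = os.environ.get('CM_DATASET_COCO_ANNOTATIONS_PATH', '') + +print('Images:', dataset_path) +print('Annotations:', annotations_path) +``` 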
+ +Supported versions: +* 2017 val/train +* 2014 val/train + +## Use-cases + +* https://github.com/mlcommons/abtf-ssd-pytorch + +## Download COCO dataset and register in CM cache + +```bash +cmr "get coco dataset" +cmr "get coco dataset _val _2017" +cmr "get coco dataset _train _2017" +``` + +You can find this dataset in the CM cache using the following command: + +```bash +cm show cache "get coco dataset" +``` + +#### Output environment variables + +You can check the environment variables produced by this CM script by adding the `-j` flag: + +```bash +cmr "get coco dataset _val _2017" -j +``` + +```json + "new_env": { + "CM_DATASET_COCO_URL_ANNOTATIONS": "http://images.cocodataset.org/annotations", + "CM_DATASET_COCO_URL_DATA": "http://images.cocodataset.org/zips", + "CM_DATASET_COCO_VERSION": "2017", + "CM_DATASET_COCO_TYPE": "val", + "CM_DATASET_COCO_SIZE": "complete", + "CM_DATASET_COCO_ANNOTATIONS_DOWNLOAD_PATH": "d:\\Work2\\COCO-2017-val\\annotations_trainval2017.zip", + "CM_DATASET_COCO_ANNOTATIONS_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07\\annotations", + "CM_DATASET_COCO_DATA_DOWNLOAD_PATH": "d:\\Work2\\COCO-2017-val\\val2017.zip", + "CM_DATASET_COCO_DATA_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07\\val2017", + "CM_DATASET_COCO_MD5SUM_ANN": "f4bbac642086de4f52a3fdda2de5fa2c", + "CM_DATASET_COCO_MD5SUM_DATA": "442b8da7639aecaf257c1dceb8ba8c80", + "CM_DATASET_COCO_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07", + "CM_DATASET_COCO_TYPE_AND_VERSION": "val2017", + "CM_DATASET_COCO_URL_ANNOTATIONS_FULL": "http://images.cocodataset.org/annotations/annotations_trainval2017.zip", + "CM_DATASET_COCO_URL_DATA_FULL": "http://images.cocodataset.org/zips/val2017.zip", + "CM_DATASET_PATH": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07", + "CM_DATASET_PATH_ROOT": "D:\\Work1\\CM\\repos\\local\\cache\\62ad05746b5d4f07" + }, +``` + +#### Input flags and equivalent environment variables + +* `--from` (`CM_FROM`) - where to find dataset archive files instead of downloading them +* `--to` (`CM_TO`) - where to extract dataset files +* `--path` (`CM_TMP_PATH`) - where to pick up extracted dataset files +* `--store` (`CM_STORE`) - where to keep downloaded files + +#### Variations + +* Dataset type: `_val` | `_train` +* Dataset year: `_2017` | `_2014` + + +## Detect already installed COCO dataset + +```bash +cmr "get coco dataset" --path={PATH to the installed dataset} +``` + +The CM script will attempt to automatically detect the type (val/train) and version (2014/2017) +of the dataset files. + +## Install dataset from already downloaded archives + +```bash +cmr "get coco dataset _val _2017" --from=d:\Work2\COCO-2017-val -j +``` + +where `--from` points to the COCO dataset zip files already downloaded from the server. +This is useful when all files have already been downloaded once and saved for shared use. + + +## Download and store dataset files locally + +```bash +cmr "get coco dataset _val _2017" --to=d:\Downloads\COCO-2017-val --store=d:\Downloads +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/README.md new file mode 100644 index 000000000..fba4913f0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-coco](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-coco) for the documentation of this CM script. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/_cm.yaml new file mode 100644 index 000000000..301d76951 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/_cm.yaml @@ -0,0 +1,97 @@ +alias: get-dataset-coco +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +deps: +- tags: detect,os +docker: + input_paths: + - store + - from + - to + mount_current_dir: 'yes' + skip_input_for_fake_run: + - store + - from + - to + skip_run_cmd: 'no' +env: + CM_DATASET: COCO + CM_DATASET_COCO_URL_ANNOTATIONS: http://images.cocodataset.org/annotations + CM_DATASET_COCO_URL_DATA: http://images.cocodataset.org/zips +input_mapping: + from: CM_FROM + home: CM_HOME_DIR + store: CM_STORE + to: CM_TO +new_env_keys: +- CM_DATASET_COCO* +- CM_DATASET_PATH +- CM_DATASET_PATH_ROOT +prehook_deps: +- env: + CM_DOWNLOAD_CHECKSUM: <<<CM_DATASET_COCO_MD5SUM_DATA>>> + CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_COCO_DATA_DOWNLOAD_PATH + CM_EXTRACT_FINAL_ENV_NAME: CM_DATASET_COCO_DATA_PATH + force_cache: true + names: + - get-dataset-coco-data + - 746e5dad5e784ad6 + skip_if_env: + CM_DATASET_COCO_DETECTED: + - 'yes' + skip_if_fake_run: true + tags: download-and-extract,file,_wget,_extract + update_tags_from_env_with_prefix: + _url.: + - CM_DATASET_COCO_URL_DATA_FULL + verify: false +- env: + CM_DOWNLOAD_CHECKSUM: <<<CM_DATASET_COCO_MD5SUM_ANN>>> + CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_COCO_ANNOTATIONS_DOWNLOAD_PATH + CM_DOWNLOAD_PATH: <<>> + CM_EXTRACT_FINAL_ENV_NAME: CM_DATASET_COCO_ANNOTATIONS_PATH + force_cache: true + names: + - get-dataset-coco-annotations + - edb6cd092ff64171 + skip_if_env: + CM_DATASET_COCO_DETECTED: + - 'yes' + skip_if_fake_run: true + tags: download-and-extract,file,_wget,_extract + update_tags_from_env_with_prefix: + _url.: + - CM_DATASET_COCO_URL_ANNOTATIONS_FULL + verify: false +tags: +- get +- dataset +- object-detection +- coco +uid: c198e1f60ac6445c +variations: + '2017': + default: true + env: + CM_DATASET_COCO_VERSION: '2017' + group: version + complete: + default: true + env: + CM_DATASET_COCO_SIZE: complete + group: size + small: + env: + CM_DATASET_COCO_SIZE: small + group: size + train: + env: + CM_DATASET_COCO_TYPE: train + group: type + val: + default: true + env: + CM_DATASET_COCO_TYPE: val + group: type diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/customize.py new file mode 100644 index 000000000..20b4fc148 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco/customize.py @@ -0,0 +1,224 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + # CM script internal variables + variation_tags = i.get('variation_tags', []) + automation = i['automation'] + env = i['env'] + meta = i['meta'] + quiet = (env.get('CM_QUIET', False) == 'yes') + + # Check if path is there to detect existing data set + detected = False + path = env.get('CM_TMP_PATH', '') + if path != '': + if not os.path.isdir(path): + return {'return': 1, + 'error': 'path to dataset 
"{}" doesn\'t exist'.format(path)} + + # Check which dataset + p = os.path.join(path, 'annotations') + if os.path.isdir(p): + for d in [('val2017', 'val', '2017'), + ('train2017', 'train', '2017')]: + p = os.path.join(path, d[0]) + + if os.path.isdir(p): + tp = d[1] + ver = d[2] + detected = True + break + + if not detected: + return { + 'return': 1, 'error': 'COCO dataset is not detected in "{}"'.format(path)} + + print('') + print('Detected COCO dataset {} {}'.format(tp, ver)) + + env['CM_DATASET_COCO_DETECTED'] = 'yes' + env['CM_DATASET_COCO_PATH'] = path + else: + ver = env['CM_DATASET_COCO_VERSION'] + tp = env['CM_DATASET_COCO_TYPE'] + + # Prepare URL + size = env.get('CM_DATASET_COCO_SIZE', '') + if size == 'small' and tp == 'val' and ver == '2017': + # We prepared a small version with 50 images for val 2017 + + filename_data = 'val2017_small.zip' + filename_annotation = 'annotations_val2017_small.zip' + + url_data_full = 'https://www.dropbox.com/scl/fi/whokyb7b7hyjqqotruyqb/{}?rlkey=hhgt4xtir91ej0nro6h69l22s&dl=0'.format( + filename_data) + url_ann_full = 'https://www.dropbox.com/scl/fi/bu41y62v9zqhee8w7q6z3/{}?rlkey=seqtgozldkc0ztu76kbd47p5w&dl=0'.format( + filename_annotation) + + else: + url_data = env['CM_DATASET_COCO_URL_DATA'] + url_ann = env['CM_DATASET_COCO_URL_ANNOTATIONS'] + + filename_data = tp + ver + '.zip' + filename_annotation = 'annotations_trainval' + ver + '.zip' + + url_data_full = url_data + '/' + filename_data + url_ann_full = url_ann + '/' + filename_annotation + + # Add extra tags with type and version to "download-and-extract" deps to be able to reuse them + # Add "from" and "to" to "download-and-extract" deps + download_extra_cache_tags = 'dataset,coco,data,' + tp + ',' + ver + + dae_input_data = { + 'extra_cache_tags': download_extra_cache_tags + } + dae_input_annotation = { + 'extra_cache_tags': download_extra_cache_tags + } + + path_from = env.get('CM_FROM', '') + if path_from != '': + path_from_data = os.path.join(path_from, filename_data) + if not os.path.isfile(path_from_data): + return {'return': 1, + 'error': 'File {} not found'.format(path_from_data)} + dae_input_data['local_path'] = path_from_data + + path_from_annotation = os.path.join(path_from, filename_annotation) + if not os.path.isfile(path_from_annotation): + return {'return': 1, 'error': 'File {} not found'.format( + path_from_annotation)} + dae_input_annotation['local_path'] = path_from_annotation + + path_to = env.get('CM_TO', '') + if path_to != '': + dae_input_data['extract_path'] = path_to + dae_input_annotation['extract_path'] = path_to + + path_store = env.get('CM_STORE', '') + if path_store != '': + dae_input_data['download_path'] = path_store + dae_input_data['tags'] = '_keep' + dae_input_annotation['download_path'] = path_store + dae_input_annotation['tags'] = '_keep' + + r = automation.update_deps({'deps': meta['prehook_deps'], + 'update_deps': { + '746e5dad5e784ad6': dae_input_data, + 'edb6cd092ff64171': dae_input_annotation + } + }) + if r['return'] > 0: + return r + + # Prepare environment variables + env['CM_DATASET_COCO_VERSION'] = ver + env['CM_DATASET_COCO_TYPE'] = tp + env['CM_DATASET_COCO_TYPE_AND_VERSION'] = tp + ver + env['CM_DATASET_COCO_URL_DATA_FULL'] = url_data_full + env['CM_DATASET_COCO_URL_ANNOTATIONS_FULL'] = url_ann_full + + # Check MD5SUM + md5sum_data = '' + md5sum_ann = '' + + if ver == '2017': + if tp == 'val': + if size == 'small': + md5sum_data = '16fab985a33afa66beeb987f68c2023c' + md5sum_ann = '78c0cfd9fc32c825d4ae693fd0d91407' + else: + md5sum_data = 
'442b8da7639aecaf257c1dceb8ba8c80' + md5sum_ann = 'f4bbac642086de4f52a3fdda2de5fa2c' + + if md5sum_data != '': + env['CM_DATASET_COCO_MD5SUM_DATA'] = md5sum_data + if md5sum_ann != '': + env['CM_DATASET_COCO_MD5SUM_ANN'] = md5sum_ann + + if not detected: + print('') + print('URL for data: {}'.format(url_data_full)) + print('URL for annotations: {}'.format(url_ann_full)) + + # Add version and type to tags + extra_cache_tags = [] + for tag in [ver, tp]: + if tag not in variation_tags: + extra_cache_tags.append(tag) + + return {'return': 0, 'add_extra_cache_tags': extra_cache_tags} + + +def postprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + tp_ver = env['CM_DATASET_COCO_TYPE_AND_VERSION'] + + path_to = env.get('CM_TO', '') + + # Check if detected or downloaded + if env.get('CM_DATASET_COCO_DETECTED', + '').lower() == 'yes' or path_to != '': + path_all = env['CM_DATASET_COCO_PATH'] if path_to == '' else path_to + + env['CM_DATASET_COCO_DATA_PATH'] = os.path.join(path_all, tp_ver) + env['CM_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join( + path_all, 'annotations') + else: + path_all = os.getcwd() + + # Moving 2 directories to 1 place + + path_data = env['CM_DATASET_COCO_DATA_PATH'] + path_ann = env['CM_DATASET_COCO_ANNOTATIONS_PATH'] + + print('') + print(path_all) + print('') + + path_data_full = os.path.join(path_data, tp_ver) + path_ann_full = os.path.join(path_ann, 'annotations') + + if os_info['platform'] == 'windows': + # Moving to this directory since can't make symbolic links + command1 = ' move /y ' + path_data_full + ' ' + tp_ver + command2 = ' move /y ' + path_ann_full + ' annotations' + + env['CM_DATASET_COCO_DATA_PATH'] = os.path.join(path_all, tp_ver) + env['CM_DATASET_COCO_ANNOTATIONS_PATH'] = os.path.join( + path_all, 'annotations') + else: + # Make soft links from data and annotations into 1 directory + # (standard way for COCO) + + command1 = ' ln -s ' + path_data_full + ' ' + tp_ver + command2 = ' ln -s ' + path_ann_full + ' annotations' + + for command in [command1, command2]: + print(command) + os.system(command) + + env['CM_DATASET_COCO_PATH'] = path_all + env['CM_DATASET_PATH'] = path_all + env['CM_DATASET_PATH_ROOT'] = path_all + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/README.md new file mode 100644 index 000000000..d7ee4e36e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-coco2014](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-coco2014) for the documentation of this CM script. 
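+ +A minimal sketch of inspecting the generated captions file with pandas (a declared dependency of this script); the `captions/captions.tsv` location is taken from the `run.sh` and `customize.py` shown below: + +```python +import os + +import pandas as pd + +# Install directory registered as CM_DATASET_PATH_ROOT by customize.py +root = os.environ['CM_DATASET_PATH_ROOT'] + +df = pd.read_csv(os.path.join(root, 'captions', 'captions.tsv'), sep='\t') +print(df.shape) # number of captions x number of columns +``` 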
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/_cm.yaml new file mode 100644 index 000000000..39c603642 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/_cm.yaml @@ -0,0 +1,94 @@ +alias: get-dataset-coco2014 +uid: 3f7ad9d42f4040f8 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: AI/ML datasets +category_sort: 8500 + +tags: +- get +- dataset +- coco2014 +- object-detection +- original + +default_env: + CM_DATASET_CALIBRATION: 'no' + +deps: + +- names: + - python + - python3 + tags: get,python3 + +- tags: get,generic-python-lib,_package.tqdm +- tags: get,generic-python-lib,_package.pandas + +- force_env_keys: + - CM_GIT_* + names: + - inference-src + tags: mlperf,inference,source + version: master + +env: + CM_DATASET: COCO2014 + +new_env_keys: +- CM_DATASET_PATH +- CM_DATASET_PATH_ROOT +- CM_DATASET_ANNOTATIONS_DIR_PATH +- CM_DATASET_ANNOTATIONS_FILE_PATH +- CM_CALIBRATION_DATASET_PATH +- CM_COCO2014_SAMPLE_ID_PATH + +posthook_deps: +- enable_if_env: + CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: + - 'yes' + tags: get,coco2014,annotations + +variations: + '50': + default: true + env: + CM_DATASET_SIZE: '50' + group: size + '500': + env: + CM_DATASET_SIZE: '500' + group: size + calibration: + env: + CM_DATASET_CALIBRATION: 'yes' + group: dataset-type + custom-annotations: + env: + CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: 'yes' + group: annotations + default-annotations: + default: true + env: + CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: 'no' + group: annotations + full: + env: + CM_DATASET_SIZE: '' + group: size + size.#: + env: + CM_DATASET_SIZE: '#' + group: size + with-sample-ids: + env: + CM_GENERATE_SAMPLE_ID: 'yes' + validation: + default: true + env: + CM_DATASET_CALIBRATION: 'no' + group: dataset-type diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/customize.py new file mode 100644 index 000000000..0349003fd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/customize.py @@ -0,0 +1,51 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + env = i['env'] + + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + + run_dir = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "tools") + + env['CM_RUN_DIR'] = run_dir + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + if env.get('CM_GENERATE_SAMPLE_ID', '') == "yes": + env['CM_COCO2014_SAMPLE_ID_PATH'] = os.path.join( + os.getcwd(), 'install', 'sample_ids.txt') + print(env['CM_COCO2014_SAMPLE_ID_PATH']) + if env.get('CM_DATASET_CALIBRATION', '') == "no": + env['CM_DATASET_PATH_ROOT'] = os.path.join(os.getcwd(), 'install') + # env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'validation', 'data') + env['CM_DATASET_CAPTIONS_DIR_PATH'] = os.path.join( + os.getcwd(), 'install', 'captions') + env['CM_DATASET_LATENTS_DIR_PATH'] = 
os.path.join( + os.getcwd(), 'install', 'latents') + else: + env['CM_CALIBRATION_DATASET_PATH'] = os.path.join( + os.getcwd(), 'install', 'calibration', 'data') + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/run.bat new file mode 100644 index 000000000..9ac62e6ad --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/run.bat @@ -0,0 +1,21 @@ +@echo off + +set CUR_DIR=%cd% +set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH% + +if not exist install mkdir install + +set INSTALL_DIR=%CUR_DIR%\install + +cd %CM_RUN_DIR% + +if not "%CM_DATASET_SIZE%" == "" ( + set MAX_IMAGES=--max-images %CM_DATASET_SIZE% --seed 42 +) else ( + set MAX_IMAGES= +) + +rem TBD - next file doesn't exist in the latest inference - need to check/fix ... + +%CM_PYTHON_BIN% download-coco-2014.py %MAX_IMAGES% --dataset-dir=%INSTALL_DIR% --output-labels=openimages-mlperf.json +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/run.sh new file mode 100644 index 000000000..61b9ffe52 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-coco2014/run.sh @@ -0,0 +1,44 @@ +#!/bin/bash +python3() { + ${CM_PYTHON_BIN_WITH_PATH} "$@" +} +export -f python3 + +CUR=${PWD} +mkdir -p install +INSTALL_DIR=${CUR}/install + +cd ${CM_RUN_DIR} + +if [[ ${CM_DATASET_CALIBRATION} == "no" ]]; then + if [ ! -z ${CM_DATASET_SIZE} ]; then + max_images=" -m ${CM_DATASET_SIZE}" + else + max_images="" + fi + + # deleting existing incomplete downloads if any + if [ -f "${INSTALL_DIR}/download_aux/annotations_trainval2014.zip" ]; then + echo "File annotations_trainval2014.zip already exists. Deleting it." + rm ${INSTALL_DIR}/download_aux/annotations_trainval2014.zip + fi + + cmd="./download-coco-2014.sh -d ${INSTALL_DIR} ${max_images}" + echo $cmd + eval $cmd + test $? -eq 0 || exit $? +else + cmd="./download-coco-2014-calibration.sh -d ${INSTALL_DIR}" + echo $cmd + eval $cmd + test $? -eq 0 || exit $? +fi +if [[ ${CM_GENERATE_SAMPLE_ID} == "yes" ]]; then + cmd="python3 sample_ids.py --tsv-path ${INSTALL_DIR}/captions/captions.tsv --output-path ${INSTALL_DIR}/sample_ids.txt" + echo $cmd + eval $cmd + test $? -eq 0 || exit $? +fi +cd ${INSTALL_DIR} + +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/README-extra.md new file mode 100644 index 000000000..345a59cfe --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/README-extra.md @@ -0,0 +1,9 @@ +# Get Criteo Dataset +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the 24 days of Criteo dataset for MLPerf inference using DLRM. + +## Exported Variables +* `CM_DATASET_PATH` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. 
RHEL 9 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/README.md new file mode 100644 index 000000000..7d5e3404e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-criteo](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-criteo) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/_cm.yaml new file mode 100644 index 000000000..06bdd335c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/_cm.yaml @@ -0,0 +1,26 @@ +alias: get-dataset-criteo +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +default_env: + CM_BACKUP_ZIPS: 'no' +env: + CM_DATASET: terabyte +input_mapping: + criteo_path: CM_CRITEO_PATH +new_env_keys: +- CM_DATASET* +tags: +- get +- dataset +- criteo +- original +uid: 194a47d908714897 +variations: + backup: + env: + CM_BACKUP_ZIPS: 'yes' + fake: + env: + CM_CRITEO_FAKE: 'yes' diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/run.sh new file mode 100644 index 000000000..32a1c777f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-criteo/run.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +if [ ! -z ${CM_CRITEO_PATH+x} ]; then + echo "CM_DATASET_PATH=${CM_CRITEO_PATH}" > tmp-run-env.out + test $? -eq 0 || exit 1 + exit 0 +fi + +CUR=$PWD +if [[ ${CM_CRITEO_FAKE} == "yes" ]]; then + cd ${CM_MLPERF_INFERENCE_DLRM_PATH}/pytorch/tools + bash ./make_fake_criteo.sh terabyte + mv ./fake_criteo/* $CUR/ + cd $CUR +else + curl -O -C - https://storage.googleapis.com/criteo-cail-datasets/day_{`seq -s "," 0 23`}.gz + test $? -eq 0 || exit 1 + + if [ ${CM_BACKUP_ZIPS:-no} == "yes" ]; then + mkdir backup + cp -r *.gz backup/ + fi + yes n | gunzip -k day_{0..23}.gz +fi + +echo "CM_DATASET_PATH=$PWD" > tmp-run-env.out diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-aux/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-aux/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-aux/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-aux/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-aux/README.md new file mode 100644 index 000000000..945516067 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-aux/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-imagenet-aux](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-imagenet-aux) for the documentation of this CM script. 
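+ +A minimal sketch of reading the class labels from the registered aux path (the `synset_words.txt` file name is assumed from the standard `caffe_ilsvrc12` archive that this script downloads): + +```python +import os + +# Path registered by this script (see new_env_keys in _cm.yaml below) +aux_path = os.environ['CM_DATASET_AUX_PATH'] + +with open(os.path.join(aux_path, 'synset_words.txt')) as f: + labels = [line.strip() for line in f] + +print(len(labels)) # expected: 1000 ImageNet classes +``` 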
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-aux/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-aux/_cm.yaml new file mode 100644 index 000000000..242b53abc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-aux/_cm.yaml @@ -0,0 +1,52 @@ +alias: get-dataset-imagenet-aux +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +new_env_keys: +- CM_DATASET_AUX_* +prehook_deps: +- env: + CM_DOWNLOAD_URL: <<<CM_PACKAGE_URL>>> + CM_DOWNLOAD_URL1: <<<CM_PACKAGE_URL1>>> + CM_EXTRACT_EXTRACTED_FILENAME: <<>> + CM_EXTRACT_FINAL_ENV_NAME: CM_DATASET_AUX_PATH + extra_cache_tags: imagenet-aux,dataset-aux + force_cache: true + tags: download-and-extract,_extract,_wget + update_tags_from_env_with_prefix: + _url.: + - CM_PACKAGE_URL +tags: +- get +- aux +- dataset-aux +- image-classification +- imagenet-aux +uid: bb2c6dd8c8c64217 +variations: + '2012': + env: + CM_DATASET_AUX_VER: '2012' + from.berkeleyvision: + base: + - '2012' + default: true + env: + CM_DOWNLOAD_CHECKSUM: f963098ea0e785a968ca1eb634003a90 + CM_DOWNLOAD_CHECKSUM1: ee346d67141e476df9c1a3f813552503 + CM_PACKAGE_URL: http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz + CM_PACKAGE_URL1: https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz + group: download-source + from.dropbox: + base: + - '2012' + env: + CM_DOWNLOAD_CHECKSUM: ee346d67141e476df9c1a3f813552503 + CM_DOWNLOAD_CHECKSUM1: f963098ea0e785a968ca1eb634003a90 + CM_PACKAGE_URL: https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz + CM_PACKAGE_URL1: http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz + group: download-source + skip_ssl_verification: + env: + CM_VERIFY_SSL: 'False' diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-calibration/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-calibration/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-calibration/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-calibration/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-calibration/README.md new file mode 100644 index 000000000..4dcc4e887 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-calibration/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-imagenet-calibration](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-imagenet-calibration) for the documentation of this CM script. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-calibration/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-calibration/_cm.yaml new file mode 100644 index 000000000..741d7e205 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-calibration/_cm.yaml @@ -0,0 +1,46 @@ +uid: 30361fad3dff49ff +alias: get-dataset-imagenet-calibration + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: "AI/ML datasets" + +deps: + - tags: download,file + force_cache: true + extra_cache_tags: imagenet-calibration,imagenet,calibration + names: + - calibration-file-downloader + env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH + +new_env_keys: +- CM_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH + +tags: +- get +- dataset +- imagenet +- calibration + +variations: + mlperf.option1: + group: calibration-option + default: true + env: + CM_MLPERF_IMAGENET_CALIBRATION_OPTION: one + CM_DOWNLOAD_CHECKSUM: f09719174af3553119e2c621157773a6 + adr: + calibration-file-downloader: + tags: _url.https://github.com/mlcommons/inference/raw/master/calibration/ImageNet/cal_image_list_option_1.txt + mlperf.option2: + group: calibration-option + env: + CM_MLPERF_IMAGENET_CALIBRATION_OPTION: two + CM_DOWNLOAD_CHECKSUM: e44582af00e3b4fc3fac30efd6bdd05f + adr: + calibration-file-downloader: + tags: _url.https://github.com/mlcommons/inference/raw/master/calibration/ImageNet/cal_image_list_option_2.txt diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/README.md new file mode 100644 index 000000000..b815431b0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-imagenet-helper](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-imagenet-helper) for the documentation of this CM script. 
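+ +A minimal usage sketch (the function and variable names come from `imagenet_helper/__init__.py` below; it assumes a CM workflow has already set the preprocessed-dataset environment variables and put this script's path on `PYTHONPATH`): + +```python +from imagenet_helper import (load_preprocessed_batch, image_list, class_labels, BATCH_SIZE) + +# Load one batch of preprocessed images starting at image index 0 +batch, next_index = load_preprocessed_batch(image_list, 0) + +print(batch.shape, next_index, len(class_labels), BATCH_SIZE) +``` 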
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/_cm.yaml new file mode 100644 index 000000000..a6ab0e7c8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/_cm.yaml @@ -0,0 +1,14 @@ +alias: get-dataset-imagenet-helper +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +new_env_keys: +- +PYTHONPATH +- CM_DATASET_IMAGENET_HELPER_PATH +tags: +- get +- imagenet +- helper +- imagenet-helper +uid: a6c3c321d07742f9 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/customize.py new file mode 100644 index 000000000..9464f8ffb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/customize.py @@ -0,0 +1,24 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def postprocess(i): + env = i['env'] + + script_path = env['CM_TMP_CURRENT_SCRIPT_PATH'] + + env['CM_DATASET_IMAGENET_HELPER_PATH'] = script_path + env['+PYTHONPATH'] = [script_path] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py new file mode 100644 index 000000000..aa90deefd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-helper/imagenet_helper/__init__.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python3 + +import os +import numpy as np + + +# Processing in batches: +# +BATCH_SIZE = int(os.getenv('CM_BATCH_SIZE', 1)) + + +# Model properties: +# +MODEL_IMAGE_HEIGHT = int(os.getenv('CM_ML_MODEL_IMAGE_HEIGHT', + os.getenv('CM_ONNX_MODEL_IMAGE_HEIGHT', + os.getenv('CM_TENSORFLOW_MODEL_IMAGE_HEIGHT', + '')))) +MODEL_IMAGE_WIDTH = int(os.getenv('CM_ML_MODEL_IMAGE_WIDTH', + os.getenv('CM_ONNX_MODEL_IMAGE_WIDTH', + os.getenv('CM_TENSORFLOW_MODEL_IMAGE_WIDTH', + '')))) +MODEL_IMAGE_CHANNELS = int(os.getenv('CM_ML_MODEL_IMAGE_CHANNELS', 3)) +MODEL_DATA_LAYOUT = os.getenv('CM_ML_MODEL_DATA_LAYOUT', 'NCHW') +MODEL_COLOURS_BGR = os.getenv( + 'CM_ML_MODEL_COLOUR_CHANNELS_BGR', 'NO') in ( + 'YES', 'yes', 'ON', 'on', '1') +MODEL_INPUT_DATA_TYPE = os.getenv('CM_ML_MODEL_INPUT_DATA_TYPE', 'float32') +MODEL_DATA_TYPE = os.getenv('CM_ML_MODEL_DATA_TYPE', '(unknown)') +MODEL_USE_DLA = os.getenv( + 'CM_ML_MODEL_USE_DLA', + 'NO') in ( + 'YES', + 'yes', + 'ON', + 'on', + '1') +MODEL_MAX_BATCH_SIZE = int(os.getenv('CM_ML_MODEL_MAX_BATCH_SIZE', BATCH_SIZE)) + + +# Internal processing: +# +INTERMEDIATE_DATA_TYPE = np.float32 # default for internal conversion +# INTERMEDIATE_DATA_TYPE = np.int8 # affects the accuracy a bit + + +# Image normalization: +# +MODEL_NORMALIZE_DATA = os.getenv('CM_ML_MODEL_NORMALIZE_DATA') in ( + 'YES', 'yes', 'ON', 'on', '1') +MODEL_NORMALIZE_LOWER = float(os.getenv('CM_ML_MODEL_NORMALIZE_LOWER', -1.0)) +MODEL_NORMALIZE_UPPER = float(os.getenv('CM_ML_MODEL_NORMALIZE_UPPER', 1.0)) +SUBTRACT_MEAN = os.getenv( + 
'CM_ML_MODEL_SUBTRACT_MEANS', 'YES') in ( + 'YES', 'yes', 'ON', 'on', '1') +GIVEN_CHANNEL_MEANS = os.getenv('CM_ML_MODEL_GIVEN_CHANNEL_MEANS', '') +if GIVEN_CHANNEL_MEANS: + GIVEN_CHANNEL_MEANS = np.fromstring( + GIVEN_CHANNEL_MEANS, + dtype=np.float32, + sep=' ').astype(INTERMEDIATE_DATA_TYPE) + if MODEL_COLOURS_BGR: + # swapping Red and Blue colour channels + GIVEN_CHANNEL_MEANS = GIVEN_CHANNEL_MEANS[::-1] + +GIVEN_CHANNEL_STDS = os.getenv('CM_ML_MODEL_GIVEN_CHANNEL_STDS', '') +if GIVEN_CHANNEL_STDS: + GIVEN_CHANNEL_STDS = np.fromstring( + GIVEN_CHANNEL_STDS, + dtype=np.float32, + sep=' ').astype(INTERMEDIATE_DATA_TYPE) + if MODEL_COLOURS_BGR: + # swapping Red and Blue colour channels + GIVEN_CHANNEL_STDS = GIVEN_CHANNEL_STDS[::-1] + + +# ImageNet dataset properties: +# +LABELS_PATH = os.environ['CM_CAFFE_IMAGENET_SYNSET_WORDS_TXT'] + + +# Preprocessed input images' properties: +# +IMAGE_DIR = os.getenv('CM_DATASET_PREPROCESSED_PATH') +IMAGE_DATA_TYPE = os.getenv('CM_DATASET_PREPROCESSED_DATA_TYPE', 'float32') + + +def load_labels(labels_filepath): + my_labels = [] + input_file = open(labels_filepath, 'r') + for l in input_file: + my_labels.append(l.strip()) + return my_labels + + +class_labels = load_labels(LABELS_PATH) + + +# Load preprocessed image filenames: +image_list = [] +all_images = os.listdir(IMAGE_DIR) +for image_file in all_images: + if image_file.endswith('.npy'): + image_list.append(image_file) + + +def load_image_by_index_and_normalize(image_index): + + img_file = os.path.join(IMAGE_DIR, image_list[image_index]) + + img = np.fromfile(img_file, np.dtype(IMAGE_DATA_TYPE)) + # img = img.reshape((1,MODEL_IMAGE_HEIGHT, MODEL_IMAGE_WIDTH, 3)) + img.resize(224 * 224 * 3) + img = img.reshape( + (MODEL_IMAGE_HEIGHT, + MODEL_IMAGE_WIDTH, + MODEL_IMAGE_CHANNELS)) + if MODEL_COLOURS_BGR: + img = img[..., ::-1] # swapping Red and Blue colour channels + + if IMAGE_DATA_TYPE != 'float32': + img = img.astype(np.float32) + + # Normalize + if MODEL_NORMALIZE_DATA: + img /= (255.0 / (MODEL_NORMALIZE_UPPER - MODEL_NORMALIZE_LOWER)) + img += MODEL_NORMALIZE_LOWER + + # Subtract mean value + if len(GIVEN_CHANNEL_MEANS): + img -= GIVEN_CHANNEL_MEANS + elif SUBTRACT_MEAN: + img -= np.mean(img, axis=(0, 1), keepdims=True) + + if len(GIVEN_CHANNEL_STDS): + img /= GIVEN_CHANNEL_STDS + + if MODEL_INPUT_DATA_TYPE == 'int8' or INTERMEDIATE_DATA_TYPE == np.int8: + img = np.clip(img, -128, 127).astype(INTERMEDIATE_DATA_TYPE) + + if MODEL_DATA_LAYOUT == 'NCHW': + img = img.transpose(2, 0, 1) + elif MODEL_DATA_LAYOUT == 'CHW4': + img = np.pad(img, ((0, 0), (0, 0), (0, 1)), 'constant') + + # Add img to batch + return img.astype(MODEL_INPUT_DATA_TYPE) + + +def load_preprocessed_batch(image_list, image_index): + batch_data = None + for in_batch_idx in range(BATCH_SIZE): + img = load_image_by_index_and_normalize(image_index) + if batch_data is None: + batch_data = np.empty( + (BATCH_SIZE, *img.shape), dtype=MODEL_INPUT_DATA_TYPE) + batch_data[in_batch_idx] = img + image_index += 1 + + # print('Data shape: {}'.format(batch_data.shape)) + + if MODEL_USE_DLA and MODEL_MAX_BATCH_SIZE > len(batch_data): + return np.pad(batch_data, ((0, MODEL_MAX_BATCH_SIZE - len(batch_data)), + (0, 0), (0, 0), (0, 0)), 'constant'), image_index + else: + return batch_data, image_index diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ 
b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/README.md new file mode 100644 index 000000000..4bd0bd5cb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-imagenet-train](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-imagenet-train) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/_cm.yaml new file mode 100644 index 000000000..3d1a1bd22 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/_cm.yaml @@ -0,0 +1,48 @@ +alias: get-dataset-imagenet-train +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +deps: [] +input_description: {} +input_mapping: + input: IMAGENET_TRAIN_PATH + torrent: CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH +new_env_keys: +- CM_DATASET_PATH +- CM_DATASET_IMAGENET_* +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: +- enable_if_env: + CM_DATASET_IMAGENET_VAL_REQUIRE_DAE: + - 'yes' + - 'True' + env: + CM_EXTRACT_TO_FOLDER: imagenet-2012-train + tags: download-and-extract,file,_extract + update_tags_from_env: + - CM_DAE_EXTRA_TAGS + update_tags_from_env_with_prefix: + _url.: + - CM_DAE_URL +- enable_if_env: + CM_DAE_ONLY_EXTRACT: + - 'yes' + - 'True' + env: + CM_EXTRACT_TO_FOLDER: imagenet-2012-train + tags: file,extract + update_tags_from_env_with_prefix: + _path.: + - CM_EXTRACT_PATH +tags: +- get +- imagenet +- train +- dataset +- original +uid: 2bec165da5cc4ebf +variations: {} +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/customize.py new file mode 100644 index 000000000..835b30238 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/customize.py @@ -0,0 +1,80 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + automation = i['automation'] + meta = i['meta'] + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return': 0} + + env['CM_DATASET_IMAGENET_TRAIN_REQUIRE_DAE'] = 'no' + + path = env.get('CM_INPUT', env.get('IMAGENET_TRAIN_PATH', '')).strip() + + if path == '': + if env.get('CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH'): + path = env['CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH'] + env['CM_DAE_EXTRA_TAGS'] = "_torrent" + env['CM_DAE_TORRENT_PATH'] = path + env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes' + + return {'return': 0} + + else: + return {'return': 1, 'error': 'Please 
rerun the last CM command with --env.IMAGENET_TRAIN_PATH={path to the folder containing full ImageNet training images} or invoke cm run script "get train dataset imagenet" --input={path to the folder containing ImageNet training images}'} + + elif not os.path.isdir(path): + if path.endswith(".tar"): + # env['CM_DAE_FILEPATH'] = path + env['CM_EXTRACT_FILEPATH'] = path + env['CM_DAE_ONLY_EXTRACT'] = 'yes' + return {'return': 0} + else: + return {'return': 1, + 'error': 'Path {} doesn\'t exist'.format(path)} + else: + env['CM_EXTRACT_EXTRACTED_PATH'] = path + + return {'return': 0} + + +def postprocess(i): + + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return': 0} + + env = i['env'] + + path = env['CM_EXTRACT_EXTRACTED_PATH'] + + path_tar = os.path.join(path, 'n01440764.tar') + + if not os.path.isfile(path_tar): + return {'return': 1, + 'error': 'ImageNet file {} not found'.format(path_tar)} + + env['CM_DATASET_PATH'] = path + env['CM_DATASET_IMAGENET_PATH'] = path + env['CM_DATASET_IMAGENET_TRAIN_PATH'] = path + + env['CM_GET_DEPENDENT_CACHED_PATH'] = path + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/run.sh new file mode 100644 index 000000000..be86fb43c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-train/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/README-extra.md new file mode 100644 index 000000000..75b310b29 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/README-extra.md @@ -0,0 +1,28 @@ +## Notes + +The ImageNet 2012 validation dataset is no longer publicly available at [image-net.org](https://image-net.org/download.php). + +However, it seems that you can still download it via [Academic Torrents](https://academictorrents.com/details/5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5). +You can then register it in the MLCommons CM cache using this portable CM script as follows: + +```bash +cm pull repo mlcommons@cm4mlops --checkout=dev +``` + +```bash +cm run script "get validation dataset imagenet _2012 _full" --input={directory with ILSVRC2012_val_00000001.JPEG} +``` + +Alternatively, you can download the ImageNet validation dataset via torrent by passing the torrent URL as follows. + +```bash +cm run script "get validation dataset imagenet _2012 _full" --torrent={Torrent URL} +``` + +It can now be automatically plugged into other portable CM scripts for image classification, including MLPerf inference vision benchmarks. 
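+ +The same registration can also be driven programmatically through the `cmind` Python API; a minimal sketch (the tags mirror the CLI command above, and the `new_env` field is the same dictionary printed by the `-j` flag): + +```python +import cmind + +# Equivalent of: cm run script "get validation dataset imagenet _2012 _full" +r = cmind.access({'action': 'run', + 'automation': 'script', + 'tags': 'get,validation,dataset,imagenet,_2012,_full'}) +if r['return'] > 0: + raise RuntimeError(r.get('error', '')) + +print(r['new_env'].get('CM_DATASET_IMAGENET_VAL_PATH')) +``` 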
+ +You can also find the images and use them directly as follows: + +```bash +cm find cache --tags=dataset,validation,imagenet,_full +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/README.md new file mode 100644 index 000000000..0ed5c98e1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-imagenet-val](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-imagenet-val) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/_cm.yaml new file mode 100644 index 000000000..0b9923927 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/_cm.yaml @@ -0,0 +1,101 @@ +alias: get-dataset-imagenet-val +uid: 7afd58d287fe4f11 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true +category: AI/ML datasets + +deps: +- tags: detect,os + +docker: + run: false + +env: + CM_DATASET: IMAGENET + +input_mapping: + imagenet_path: IMAGENET_PATH + torrent: CM_DATASET_IMAGENET_VAL_TORRENT_PATH + +new_env_keys: +- CM_DATASET_PATH +- CM_DATASET_IMAGENET_PATH +- CM_DATASET_IMAGENET_VAL_PATH +- CM_DATASET_SIZE +- CM_DATASET_VER + +prehook_deps: +- enable_if_env: + CM_DATASET_IMAGENET_VAL_REQUIRE_DAE: + - 'yes' + - 'True' + env: + CM_EXTRACT_TO_FOLDER: imagenet-2012-val + tags: download-and-extract,file,_extract + update_tags_from_env: + - CM_DAE_EXTRA_TAGS + update_tags_from_env_with_prefix: + _url.: + - CM_DAE_URL +- enable_if_env: + CM_DAE_ONLY_EXTRACT: + - 'yes' + - 'True' + env: + CM_EXTRACT_TO_FOLDER: imagenet-2012-val + tags: file,extract,_no-remove-extracted + update_tags_from_env_with_prefix: + _path.: + - CM_EXTRACT_PATH + +tags: +- get +- val +- validation +- dataset +- imagenet +- ILSVRC +- image-classification +- original + +variations: + '2012': + default: true + env: + CM_DATASET_VER: '2012' + group: dataset-version + 2012-500: + base: + - size.500 + - '2012' + 2012-full: + base: + - full + - '2012' + full: + env: + CM_DAE_FILENAME: ILSVRC2012_img_val.tar + CM_DATASET_SIZE: '50000' + CM_DOWNLOAD_CHECKSUM: 29b22e2961454d5413ddabcf34fc5622 + CM_IMAGENET_FULL: 'yes' + group: count + run-during-docker-build: + docker: + run: true + size.#: + env: + CM_DATASET_SIZE: '#' + group: count + size.500: + default: true + env: + CM_DAE_FILENAME: ILSVRC2012_img_val_500.tar + CM_DAE_URL: http://cKnowledge.org/ai/data/ILSVRC2012_img_val_500.tar + CM_DOWNLOAD_URL1: https://www.dropbox.com/scl/fi/a7fhjnzxi6x3ceapxh5bm/ILSVRC2012_img_val_500.tar?rlkey=hz4rabo9ve43co3c303y9r6l7&st=ulcgb3av&dl=1 + CM_DATASET_SIZE: '500' + CM_DOWNLOAD_CHECKSUM: 8627befdd8c2bcf305729020e9db354e + CM_DOWNLOAD_FILENAME: ILSVRC2012_img_val_500.tar + group: count diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/customize.py new file mode 100644 index 000000000..9cd596a73 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/customize.py @@ -0,0 +1,103 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: 
https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+    automation = i['automation']
+    meta = i['meta']
+    os_info = i['os_info']
+
+    env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'no'
+
+    full = env.get('CM_IMAGENET_FULL', '').strip() == 'yes'
+
+    path = env.get(
+        'CM_INPUT',
+        env.get(
+            'IMAGENET_PATH',
+            env.get(
+                'CM_DATASET_IMAGENET_PATH',
+                ''))).strip()
+
+    if path == '':
+        if full:
+
+            if env.get('CM_DATASET_IMAGENET_VAL_TORRENT_PATH'):
+                path = env['CM_DATASET_IMAGENET_VAL_TORRENT_PATH']
+                env['CM_DAE_EXTRA_TAGS'] = "_torrent"
+                env['CM_DAE_TORRENT_PATH'] = path
+                env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
+                return {'return': 0}
+
+            else:
+                env['CM_DAE_URL'] = 'https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar'
+                env['CM_DAE_FILENAME'] = 'ILSVRC2012_img_val.tar'
+                env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
+
+                return {'return': 0}
+                # return {'return':1, 'error':'Please rerun the last CM command
+                # with --env.IMAGENET_PATH={path to the folder containing full
+                # ImageNet images} or invoke cm run script "get val dataset
+                # imagenet" --input={path to the folder containing ImageNet
+                # images}'}
+
+        else:
+            env['CM_DATASET_IMAGENET_VAL_REQUIRE_DAE'] = 'yes'
+
+    elif not os.path.isdir(path):
+        if path.endswith(".tar"):
+            env['CM_EXTRACT_FILEPATH'] = path
+            env['CM_DAE_ONLY_EXTRACT'] = 'yes'
+            return {'return': 0}
+        else:
+            return {'return': 1,
+                    'error': 'Path {} doesn\'t exist'.format(path)}
+    else:
+        env['CM_EXTRACT_EXTRACTED_PATH'] = path
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+    path = env['CM_EXTRACT_EXTRACTED_PATH']
+    path1 = os.path.join(path, 'imagenet-2012-val')
+    if os.path.isdir(path1):
+        path = path1
+
+    path_image = os.path.join(path, 'ILSVRC2012_val_00000001.JPEG')
+
+    if not os.path.isfile(path_image):
+        return {'return': 1,
+                'error': 'ImageNet file {} not found'.format(path_image)}
+
+    files = os.listdir(path)
+    if len(files) < int(env.get('CM_DATASET_SIZE', 0)):
+        return {'return': 1, 'error': 'Only {} files found in {}. {} expected'.format(
+            len(files), path, env.get('CM_DATASET_SIZE'))}
+
+    env['CM_DATASET_PATH'] = path
+    env['CM_DATASET_IMAGENET_PATH'] = path
+    env['CM_DATASET_IMAGENET_VAL_PATH'] = path
+
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = path
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/run.bat
new file mode 100644
index 000000000..94625b7e5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-imagenet-val/run.bat
@@ -0,0 +1,17 @@
+if "%CM_EXTRACT_EXTRACTED_PATH%" == "" (
+  echo.
+ + wget -nc https://www.dropbox.com/s/57s11df6pts3z69/ILSVRC2012_img_val_500.tar --no-check-certificate + IF %ERRORLEVEL% NEQ 0 EXIT 1 + + mkdir images + + tar -C images -xvf ILSVRC2012_img_val_500.tar + IF %ERRORLEVEL% NEQ 0 EXIT 1 + + del /Q /S ILSVRC2012_img_val_500.tar + + echo CM_DATASET_PATH=%CD%\images > tmp-run-env.out + echo CM_DATASET_IMAGENET_PATH=%CD%\images >> tmp-run-env.out + echo CM_DATASET_IMAGENET_VAL_PATH=%CD%\images >> tmp-run-env.out +) diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/README.md new file mode 100644 index 000000000..34bac1b08 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-kits19](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-kits19) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/_cm.yaml new file mode 100644 index 000000000..eddb6a9a8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/_cm.yaml @@ -0,0 +1,60 @@ +alias: get-dataset-kits19 +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +default_env: + CM_GIT_CHECKOUT: master + CM_GIT_DEPTH: --depth 2 + CM_GIT_PATCH: 'no' + CM_GIT_RECURSE_SUBMODULES: '' + CM_GIT_URL: https://github.com/neheller/kits19 +default_version: master +deps: +- tags: detect,os +- names: + - python3 + - python + tags: get,python3 +new_env_keys: +- CM_DATASET_* +tags: +- get +- dataset +- medical-imaging +- kits +- original +- kits19 +uid: 79992bb221024ac5 +variations: + calibration: + env: + CM_DATASET_CALIBRATION: 'yes' + default: + base: + - short-history + env: + CM_GIT_PATCH: 'no' + full-history: + env: + CM_GIT_DEPTH: '' + no-recurse-submodules: + env: + CM_GIT_RECURSE_SUBMODULES: '' + patch: + env: + CM_GIT_PATCH: 'yes' + short-history: + env: + CM_GIT_DEPTH: --depth 5 + validation: + env: + CM_DATASET_VALIDATION: 'yes' +versions: + custom: + env: + CM_GIT_CHECKOUT: '' + CM_GIT_SHA: 'yes' + master: + env: + CM_GIT_CHECKOUT: master diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/customize.py new file mode 100644 index 000000000..31e974233 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/customize.py @@ -0,0 +1,52 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = 
i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + meta = i['meta'] + + if not env.get('CM_GIT_CHECKOUT', ''): + return { + 'return': 1, 'error': 'Please provide a valid CM_GIT_SHA inside the custom variation of _cm.json'} + + if 'CM_GIT_DEPTH' not in env: + env['CM_GIT_DEPTH'] = '' + + if 'CM_GIT_RECURSE_SUBMODULES' not in env: + env['CM_GIT_RECURSE_SUBMODULES'] = '' + + need_version = env.get('CM_VERSION', '') + versions = meta['versions'] + + if need_version != '' and not need_version in versions: + env['CM_GIT_CHECKOUT'] = need_version + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'kits19', 'data') + state = i['state'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/run.sh new file mode 100644 index 000000000..f5bf0617a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-kits19/run.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +CUR_DIR=$PWD +SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} + +echo "******************************************************" +echo "Cloning kits19 from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..." + +if [ ! -d "kits19" ]; then + if [ -z ${CM_GIT_SHA} ]; then + cmd="git clone ${CM_GIT_RECURSE_SUBMODULES} -b ${CM_GIT_CHECKOUT} ${CM_GIT_URL} ${CM_GIT_DEPTH} kits19" + echo $cmd + eval $cmd + cd kits19 + else + git clone ${CM_GIT_RECURSE_SUBMODULES} ${CM_GIT_URL} ${CM_GIT_DEPTH} kits19 + cd kits19 + git checkout -b "${CM_GIT_CHECKOUT}" + fi + if [ "${?}" != "0" ]; then exit 1; fi +else + cd kits19 +fi + +if [ ${CM_GIT_PATCH} == "yes" ]; then + patch_filename=${CM_GIT_PATCH_FILENAME} + if [ ! -n ${CM_GIT_PATCH_FILENAMES} ]; then + patchfile=${CM_GIT_PATCH_FILENAME:-"git.patch"} + CM_GIT_PATCH_FILENAMES=$patchfile + fi + IFS=', ' read -r -a patch_files <<< ${CM_GIT_PATCH_FILENAMES} + for patch_filename in "${patch_files[@]}" + do + echo "Applying patch ${SCRIPT_DIR}/patch/$patch_filename" + git apply ${SCRIPT_DIR}/patch/"$patch_filename" + if [ "${?}" != "0" ]; then exit 1; fi + done +fi +cd ${CUR_DIR}/kits19 +${CM_PYTHON_BIN_WITH_PATH} -m starter_code.get_imaging +cd data +cp -rf case_00185 case_00400 +cd "$CUR_DIR" diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/README-extra.md new file mode 100644 index 000000000..265902c92 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/README-extra.md @@ -0,0 +1,26 @@ +# Downloads LibriSpeech Dataset +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the LibriSpeech dataset. 
+
+## Usage
+
+```
+cm run script --tags=get,dataset,librispeech --version=[VERSION]
+```
+where `[VERSION]` is one of:
+* `dev-clean`
+* `dev-other`
+* `test-clean`
+* `test-other`
+* `train-clean-100`
+* `train-clean-360`
+* `train-other-500`
+
+## Exported Variables
+* `CM_DATASET_ARCHIVE`
+* `CM_DATASET_LIBRISPEECH_PATH`
+* `CM_DATASET_MD5`
+* `CM_DATASET_NAME`
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. RHEL 9
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/README.md
new file mode 100644
index 000000000..ce7cd109c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-librispeech](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-librispeech) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/_cm.yaml
new file mode 100644
index 000000000..ead114f4a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/_cm.yaml
@@ -0,0 +1,62 @@
+alias: get-dataset-librispeech
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: AI/ML datasets
+default_version: dev-clean
+deps:
+- names:
+  - sys-utils
+  tags: get,sys-utils-cm
+env:
+  CM_DATASET: LIBRISPEECH
+  CM_WGET_URL: http://www.openslr.org/resources/12/<<<CM_DATASET_ARCHIVE>>>
+new_env_keys:
+- CM_DATASET_*
+tags:
+- get
+- dataset
+- speech
+- speech-recognition
+- librispeech
+- validation
+- audio
+- training
+- original
+uid: 09f29df607e0415d
+versions:
+  dev-clean:
+    env:
+      CM_DATASET_ARCHIVE: dev-clean.tar.gz
+      CM_DATASET_MD5: 42e2234ba48799c1f50f24a7926300a1
+      CM_DATASET_NAME: LibriSpeech Dev Clean dataset
+  dev-other:
+    env:
+      CM_DATASET_ARCHIVE: dev-other.tar.gz
+      CM_DATASET_MD5: c8d0bcc9cca99d4f8b62fcc847357931
+      CM_DATASET_NAME: LibriSpeech Dev Other dataset
+  test-clean:
+    env:
+      CM_DATASET_ARCHIVE: test-clean.tar.gz
+      CM_DATASET_MD5: 32fa31d27d2e1cad72775fee3f4849a9
+      CM_DATASET_NAME: LibriSpeech Test Clean dataset
+  test-other:
+    env:
+      CM_DATASET_ARCHIVE: test-other.tar.gz
+      CM_DATASET_MD5: fb5a50374b501bb3bac4815ee91d3135
+      CM_DATASET_NAME: LibriSpeech Test Other dataset
+  train-clean-100:
+    env:
+      CM_DATASET_ARCHIVE: train-clean-100.tar.gz
+      CM_DATASET_MD5: 2a93770f6d5c6c964bc36631d331a522
+      CM_DATASET_NAME: LibriSpeech Train Clean 100 dataset
+  train-clean-360:
+    env:
+      CM_DATASET_ARCHIVE: train-clean-360.tar.gz
+      CM_DATASET_MD5: c0e676e450a7ff2f54aeade5171606fa
+      CM_DATASET_NAME: LibriSpeech Train Clean 360 dataset
+  train-other-500:
+    env:
+      CM_DATASET_ARCHIVE: train-other-500.tar.gz
+      CM_DATASET_MD5: d1a0fd59409feb2c614ce4d30c387708
+      CM_DATASET_NAME: LibriSpeech Train Other 500 dataset
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/customize.py
new file mode 100644
index 000000000..ae19d286f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/customize.py
@@ -0,0 +1,33 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    return {'return': 0}
+
+
+def postprocess(i):
+    env = i['env']
+    folder_name = env['CM_DATASET_ARCHIVE'].split(".")[0]
+    env['CM_DATASET_LIBRISPEECH_PATH'] = os.path.join(
+        os.getcwd(), "LibriSpeech", folder_name)
+    env['CM_DATASET_PATH'] = os.path.join(
+        os.getcwd(), "LibriSpeech", folder_name)
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/run.sh
new file mode 100644
index 000000000..9c2fc2660
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-librispeech/run.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+wget -nc ${CM_WGET_URL} --no-check-certificate
+test $? -eq 0 || exit 1
+
+tar -x --skip-old-files -vf ${CM_DATASET_ARCHIVE}
+test $? -eq 0 || exit 1
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/_cm.yaml
new file mode 100644
index 000000000..c3e78b464
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/_cm.yaml
@@ -0,0 +1,380 @@
+alias: get-dataset-mlperf-inference-igbh
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+tags:
+- get
+- dataset
+- mlperf
+- rgat
+- igbh
+- inference
+uid: 824e61316c074253
+new_env_keys:
+  - CM_IGBH_DATASET_PATH
+input_mapping:
+  out_path: CM_IGBH_DATASET_OUT_PATH
+deps:
+  - tags: mlperf,inference,source
+    names:
+    - inference-src
+  - tags: get,python
+    names:
+    - get-python
+prehook_deps:
+  #paper
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_feat.npy
+      CM_DOWNLOAD_CHECKSUM: 71058b9ac8011bafa1c5467504452d13
+      CM_DOWNLOAD_FILENAME: node_feat.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper/
+    extra_cache_tags: dataset,igbh,paper,node_feat
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_label_19.npy
+      CM_DOWNLOAD_CHECKSUM: be6fda45566e679bdb05ebea98ad16d4
+      CM_DOWNLOAD_FILENAME: node_label_19.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper/
+    extra_cache_tags: dataset,igbh,paper,node_label_19
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/node_label_2K.npy
+      CM_DOWNLOAD_CHECKSUM: 6eccab9a14f92f42be5b367c39002031
+      CM_DOWNLOAD_FILENAME: node_label_2K.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper/
+    extra_cache_tags: dataset,igbh,paper,node_label_2K
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper/paper_id_index_mapping.npy
+      CM_DOWNLOAD_CHECKSUM: f70dd642a4f7e41d926c91c8c054fc4c
+      CM_DOWNLOAD_FILENAME: paper_id_index_mapping.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper/
+    extra_cache_tags: dataset,igbh,paper,paper_id_index_mapping
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  #paper_cites_paper
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__cites__paper/edge_index.npy
+      CM_DOWNLOAD_CHECKSUM: f4897f53636c04a9c66f6063ec635c16
+      CM_DOWNLOAD_FILENAME: edge_index.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper__cites__paper/
+    extra_cache_tags: dataset,igbh,paper_cites_paper,edge_index
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  # author
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author/author_id_index_mapping.npy
+      CM_DOWNLOAD_CHECKSUM: 58c15aab7dae03bbd57e6a4ac5e61bd9
+      CM_DOWNLOAD_FILENAME: author_id_index_mapping.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/author/
+    extra_cache_tags: dataset,igbh,author,author_id_index_mapping
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author/node_feat.npy
+      CM_DOWNLOAD_CHECKSUM: 2ec2512b554088381c04ec013e893c8d
+      CM_DOWNLOAD_FILENAME: node_feat.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/author/
+    extra_cache_tags: dataset,igbh,author,node_feat
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  # conference
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/conference/conference_id_index_mapping.npy
+      CM_DOWNLOAD_CHECKSUM: 0bf7c555d8c697b31b6af6c4cb6b6612
+      CM_DOWNLOAD_FILENAME: conference_id_index_mapping.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/conference/
+    extra_cache_tags: dataset,igbh,conference,conference_id_index_mapping
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/conference/node_feat.npy
+      CM_DOWNLOAD_CHECKSUM: 898ff529b8cf972261fedd50df6377f8
+      CM_DOWNLOAD_FILENAME: node_feat.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/conference/
+    extra_cache_tags: dataset,igbh,conference,node_feat
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  # institute
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/institute/institute_id_index_mapping.npy
+      CM_DOWNLOAD_CHECKSUM: 03fb45eafb7bd35875ef4c7cd2a299a9
+      CM_DOWNLOAD_FILENAME: institute_id_index_mapping.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/institute/
+    extra_cache_tags: dataset,igbh,institute,institute_id_index_mapping
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/institute/node_feat.npy
+      CM_DOWNLOAD_CHECKSUM: 12eaeced22d17b4e97d4b4742331c819
+      CM_DOWNLOAD_FILENAME: node_feat.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/institute/
+    extra_cache_tags: dataset,igbh,institute,node_feat
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  # journal
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/journal/journal_id_index_mapping.npy
+      CM_DOWNLOAD_CHECKSUM: b630c20852b76d17a5c9c37b39176f69
+      CM_DOWNLOAD_FILENAME: journal_id_index_mapping.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/journal/
+    extra_cache_tags: dataset,igbh,journal,journal_id_index_mapping
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/journal/node_feat.npy
+      CM_DOWNLOAD_CHECKSUM: 49d51b554b3004f10bee19d1c7f9b416
+      CM_DOWNLOAD_FILENAME: node_feat.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/journal/
+    extra_cache_tags: dataset,igbh,journal,node_feat
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  # fos
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/fos/fos_id_index_mapping.npy
+      CM_DOWNLOAD_CHECKSUM: 0f0cfde619361cde35d3be9f201d081a
+      CM_DOWNLOAD_FILENAME: fos_id_index_mapping.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/fos/
+    extra_cache_tags: dataset,igbh,fos,fos_id_index_mapping
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/fos/node_feat.npy
+      CM_DOWNLOAD_CHECKSUM: 3ef3df19e2475c387fec10bac82773df
+      CM_DOWNLOAD_FILENAME: node_feat.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/fos/
+    extra_cache_tags: dataset,igbh,fos,node_feat
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  # author__affiliated_to__institute
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/author__affiliated_to__institute/edge_index.npy
+      CM_DOWNLOAD_CHECKSUM: e35dba208f81e0987207f78787c75711
+      CM_DOWNLOAD_FILENAME: edge_index.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/author__affiliated_to__institute/
+    extra_cache_tags: dataset,igbh,author_affiliated_to_institute,edge_index
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  # paper__published__journal
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__published__journal/edge_index.npy
+      CM_DOWNLOAD_CHECKSUM: 38505e83bde8e5cf94ae0a85afa60e13
+      CM_DOWNLOAD_FILENAME: edge_index.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper__published__journal/
+    extra_cache_tags: dataset,igbh,paper_published_journal,edge_index
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  # paper__topic__fos
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__topic__fos/edge_index.npy
+      CM_DOWNLOAD_CHECKSUM: 427fb350a248ee6eaa8c21cde942fda4
+      CM_DOWNLOAD_FILENAME: edge_index.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper__topic__fos/
+    extra_cache_tags: dataset,igbh,paper_topic_fos,edge_index
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  # paper__venue__conference
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__venue__conference/edge_index.npy
+      CM_DOWNLOAD_CHECKSUM: 541b8d43cd93579305cfb71961e10a7d
+      CM_DOWNLOAD_FILENAME: edge_index.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper__venue__conference/
+    extra_cache_tags: dataset,igbh,paper_venue_conference,edge_index
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+  # paper__written_by__author
+  - env:
+      CM_PACKAGE_URL: https://igb-public.s3.us-east-2.amazonaws.com/IGBH/processed/paper__written_by__author/edge_index.npy
+      CM_DOWNLOAD_CHECKSUM: df39fe44bbcec93a640400e6d81ffcb5
+      CM_DOWNLOAD_FILENAME: edge_index.npy
+      CM_DOWNLOAD_PATH: <<<CM_IGBH_DATASET_DOWNLOAD_LOCATION>>>/full/processed/paper__written_by__author/
+    extra_cache_tags: dataset,igbh,paper_written_by_author,edge_index
+    force_cache: true
+    enable_if_env:
+      CM_IGBH_DATASET_TYPE:
+      - 'full'
+    names:
+    - dae
+    tags: download-and-extract,_wget
+    update_tags_from_env_with_prefix:
+      _url.:
+      - CM_PACKAGE_URL
+variations:
+  debug:
+    default: true
+    group: dataset-type
+    env:
+      CM_IGBH_DATASET_TYPE: debug
+      CM_IGBH_DATASET_SIZE: tiny
+  full:
+    group: dataset-type
+    env:
+      CM_IGBH_DATASET_TYPE: full
+      CM_IGBH_DATASET_SIZE: full
+  glt:
+    env:
+      CM_IGBH_GRAPH_COMPRESS: yes
+  csc:
+    group: compressed-layout
+    default: true
+    env:
+      CM_IGBH_GRAPH_COMPRESS_LAYOUT: csc
+  csr:
+    group: compressed-layout
+    env:
+      CM_IGBH_GRAPH_COMPRESS_LAYOUT: csr
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/customize.py
new file mode 100644
index 000000000..f68df4bd5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/customize.py
@@ -0,0 +1,69 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project
contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if os_info['platform'] == "windows": + return {'return': 1, 'error': 'Script not supported in windows yet!'} + + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + + # run cmd + run_cmd = "" + graph_folder = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], 'graph', 'R-GAT') + + download_loc = env.get('CM_IGBH_DATASET_OUT_PATH', os.getcwd()) + + env['CM_IGBH_DATASET_DOWNLOAD_LOCATION'] = download_loc + + run_cmd += f"cd {graph_folder} " + x_sep = " && " + + # download the model + if env['CM_IGBH_DATASET_TYPE'] == "debug": + run_cmd += x_sep + env['CM_PYTHON_BIN_WITH_PATH'] + \ + f" tools/download_igbh_test.py --target-path {download_loc} " + + # split seeds + run_cmd += x_sep + \ + f"{env['CM_PYTHON_BIN_WITH_PATH']} tools/split_seeds.py --path {download_loc} --dataset_size {env['CM_IGBH_DATASET_SIZE']}" + + # compress graph(for glt implementation) + if env.get('CM_IGBH_GRAPH_COMPRESS', '') == "yes": + run_cmd += x_sep + \ + f"{env['CM_PYTHON_BIN_WITH_PATH']} tools/compress_graph.py --path {download_loc} --dataset_size {env['CM_IGBH_DATASET_SIZE']} --layout {env['CM_IGBH_GRAPH_COMPRESS_LAYOUT']}" + + env['CM_RUN_CMD'] = run_cmd + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['CM_IGBH_DATASET_PATH'] = env.get( + 'CM_IGBH_DATASET_OUT_PATH', os.getcwd()) + + print( + f"Path to the IGBH dataset: {os.path.join(env['CM_IGBH_DATASET_PATH'], env['CM_IGBH_DATASET_SIZE'])}") + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/run.sh new file mode 100644 index 000000000..238652160 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-igbh/run.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + eval "$1" + exit_if_error +} + +run "$CM_RUN_CMD" diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/_cm.yaml new file mode 100644 index 000000000..f8684eef5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/_cm.yaml @@ -0,0 +1,38 @@ +alias: get-dataset-mlperf-inference-mixtral +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +new_env_keys: +- CM_DATASET_* +prehook_deps: +- env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_PREPROCESSED_PATH + extra_cache_tags: mixtral,get-mixtral-dataset + force_cache: true + tags: download-and-extract + update_tags_from_env_with_prefix: + _url.: + - CM_PACKAGE_URL +tags: +- get +- dataset-mixtral +- openorca-mbxp-gsm8k-combined +uid: 89e7c91444804775 +variations: + mlcommons-storage: + default: true + env: + CM_DOWNLOAD_CHECKSUM: 78823c13e0e73e518872105c4b09628b + CM_DOWNLOAD_FILENAME: 2024.06.06_mixtral_15k_v4.pkl + CM_PACKAGE_URL: https://inference.mlcommons-storage.org/mixtral_8x7b%2F2024.06.06_mixtral_15k_v4.pkl + group: download-source + size.#: + base: + - mlcommons-storage + deps: + - tags: get,generic-python-lib,_package.pandas + - tags: get,python3 + env: + CM_DATASET_MIXTRAL_GENERATE_TEST_DATA: 'yes' + CM_DATASET_MIXTRAL_TEST_DATA_SIZE: '#' diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/customize.py new file mode 100644 index 000000000..f0543acb9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/customize.py @@ -0,0 +1,37 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if env.get('CM_DATASET_MIXTRAL_GENERATE_TEST_DATA', '') == "yes": + env['CM_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH'] = os.path.join( + os.getcwd(), "mixtral-test-dataset.pkl") + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + + env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] = env['CM_DATASET_PREPROCESSED_PATH'] + + if env.get('CM_DATASET_MIXTRAL_GENERATE_TEST_DATA', '') == "yes": + env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] = env['CM_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/generate-test-dataset.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/generate-test-dataset.py new file mode 100644 index 000000000..5e13f5b7a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/generate-test-dataset.py @@ -0,0 +1,58 @@ +import pandas as pd +import argparse +import os + + +def main(): + # Set up argument parser + parser = argparse.ArgumentParser( + description="Sample test dataset from the original dataset.") + parser.add_argument( + '--dataset-path', + required=True, + help="Path to the input dataset (pickle file).") + parser.add_argument( + '--output-path', + default=os.path.join( + 
os.getcwd(),
+            "mixtral-test-dataset.pkl"),
+        help="Path to save the output dataset (pickle file).")
+    parser.add_argument(
+        '--samples',
+        default=2,
+        help="Number of entries to be extracted from each group.")
+
+    args = parser.parse_args()
+    dataset_path = args.dataset_path
+    output_path = args.output_path
+    no_of_samples = int(args.samples)
+
+    try:
+        # Load the dataset from the specified pickle file
+        print(f"Loading dataset from {dataset_path}...")
+        df = pd.read_pickle(dataset_path)
+
+        # Check that the 'dataset' column exists
+        if 'dataset' not in df.columns:
+            raise ValueError(
+                "The input dataset must contain a 'dataset' column to identify dataset groups.")
+
+        # Sample the requested number of entries from each group
+        print(f"Sampling {no_of_samples} entries from each group...")
+        sampled_df = df.groupby('dataset').apply(
+            lambda x: x.sample(
+                n=no_of_samples)).reset_index(
+            drop=True)
+
+        # Save the sampled dataset to the specified output path
+        print(f"Saving the sampled dataset to {output_path}...")
+        sampled_df.to_pickle(output_path)
+
+        print("Dataset processing and saving completed successfully!")
+    except Exception as e:
+        print(f"Error: {e}")
+        exit(1)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/run.sh
new file mode 100644
index 000000000..91ad97a53
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-mlperf-inference-mixtral/run.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+if [[ "$CM_DATASET_MIXTRAL_GENERATE_TEST_DATA" == "yes" ]]; then
+  ${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/generate-test-dataset.py --dataset-path ${CM_DATASET_PREPROCESSED_PATH} --output-path ${CM_DATASET_MIXTRAL_TEST_DATA_GENERATED_PATH} --samples ${CM_DATASET_MIXTRAL_TEST_DATA_SIZE}
+fi
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/README.md
new file mode 100644
index 000000000..6212da337
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-openimages-annotations](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-openimages-annotations) for the documentation of this CM script.
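The Mixtral script above wires `generate-test-dataset.py` into the CM flow through its `_size.#` variation. A minimal sketch of triggering it from Python, assuming the standard `cmind` API (the sample count `2` is arbitrary):

```python
import cmind

# Download the combined OpenOrca/MBXP/GSM8K dataset and generate a
# 2-samples-per-group test subset via the _size.# variation.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,dataset-mixtral,openorca-mbxp-gsm8k-combined,_size.2',
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```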
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/_cm.yaml new file mode 100644 index 000000000..a96e7f58e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/_cm.yaml @@ -0,0 +1,35 @@ +alias: get-dataset-openimages-annotations +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +new_env_keys: +- CM_DATASET_OPENIMAGES_ANNOTATIONS_* +- CM_DATASET_ANNOTATIONS_* +prehook_deps: +- env: + CM_DAE_FINAL_ENV_NAME: CM_DATASET_ANNOTATIONS_FILE_PATH + extra_cache_tags: retinanet,get,dataset-openimages-annotations + force_cache: true + tags: download-and-extract,_wget,_extract + update_tags_from_env_with_prefix: + _url.: + - CM_PACKAGE_URL +print_env_at_the_end: + CM_DATASET_ANNOTATIONS_FILE_PATH: Path to OpenImages annotation file +tags: +- get +- aux +- dataset-aux +- object-detection +- openimages +- annotations +uid: 47e2158ed24c44e9 +variations: + from.github: + default: true + env: + CM_DOWNLOAD_CHECKSUM: 817fd8da3aeeb0575f1e2d2926b15e68 + CM_DOWNLOAD_FILENAME: openimages-mlperf_annotations_2.1.json.zip + CM_PACKAGE_URL: https://github.com/mlcommons/inference/releases/download/v2.1/openimages-mlperf_annotations_2.1.json.zip + group: download-source diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/customize.py new file mode 100644 index 000000000..604423fb5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-annotations/customize.py @@ -0,0 +1,35 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + + env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = os.path.join( + env['CM_DATASET_ANNOTATIONS_FILE_PATH'], 'openimages-mlperf.json') + env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.dirname( + env['CM_DATASET_ANNOTATIONS_FILE_PATH']) + env['CM_DATASET_OPENIMAGES_ANNOTATIONS_FILE_PATH'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + env['CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH'] = env['CM_DATASET_ANNOTATIONS_DIR_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
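In the annotations script above, `update_tags_from_env_with_prefix` composes a `_url.<...>` variation tag for the `download-and-extract` dependency from `CM_PACKAGE_URL`. A rough, illustrative equivalent of what that prehook dependency resolves to, assuming the standard `cmind` API:

```python
import cmind

# The _url. prefix tag is composed from CM_PACKAGE_URL at run time.
url = ('https://github.com/mlcommons/inference/releases/download/'
       'v2.1/openimages-mlperf_annotations_2.1.json.zip')
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'download-and-extract,_wget,_extract,_url.' + url,
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```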
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/README.md new file mode 100644 index 000000000..0a5ce43cd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-openimages-calibration](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-openimages-calibration) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/_cm.yaml new file mode 100644 index 000000000..b8bd73e12 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/_cm.yaml @@ -0,0 +1,62 @@ +uid: 27228976bb084dd0 +alias: get-dataset-openimages-calibration + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: "AI/ML datasets" + +deps: + - tags: download,file + force_cache: true + extra_cache_tags: openimages-calibration,openimages,calibration + names: + - calibration-file-downloader + env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH + +new_env_keys: +- CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH + +tags: +- get +- dataset +- openimages +- calibration + +variations: + mlperf.option1: + group: calibration-option + default: true + env: + CM_MLPERF_OPENIMAGES_CALIBRATION_OPTION: one + CM_DOWNLOAD_CHECKSUM: 5c3196ddcec4605c6a9fcf004d9615e6 + adr: + calibration-file-downloader: + tags: _url.https://github.com/mlcommons/inference/raw/master/calibration/openimages/openimages_cal_images_list.txt + env: + CM_DOWNLOAD_CHECKSUM: 5c3196ddcec4605c6a9fcf004d9615e6 + filter: + default_variations: + filter-size: filter_size.400 + deps: + - names: + - python + - python3 + tags: get,python3 + - tags: get,openimages,dataset,original,_calibration + env: + CM_CALIBRATE_FILTER: '' + env: + CM_CALIBRATE_FILTER: 'yes' + + filter-size.#: + group: filter-size + env: + CM_CALIBRATION_FILTER_SIZE: "#" + filter-size.400: + group: filter-size + env: + CM_CALIBRATION_FILTER_SIZE: 400 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/customize.py new file mode 100644 index 000000000..7eacb1256 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/customize.py @@ -0,0 +1,41 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get("CM_CALIBRATE_FILTER", "") == "yes": + i['run_script_input']['script_name'] = "run-filter" + env['CM_MLPERF_OPENIMAGES_CALIBRATION_FILTERED_LIST'] = os.path.join( + os.getcwd(), "filtered.txt") + env['CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH'] = 
env['CM_MLPERF_OPENIMAGES_CALIBRATION_FILTERED_LIST']
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/filter.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/filter.py
new file mode 100644
index 000000000..d8d2638b5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/filter.py
@@ -0,0 +1,31 @@
+import json
+import sys
+import os
+
+# Load the annotations file passed as the first argument
+with open(sys.argv[1], "r") as f:
+    data = json.load(f)
+
+# Count the number of annotation boxes per image
+images = {}
+for image in data['images']:
+    images[image['id']] = image
+    images[image['id']]['num_boxes'] = 0
+
+annots = data['annotations']
+for box in annots:
+    imageid = box['image_id']
+    images[imageid]['num_boxes'] += 1
+
+# Sort images by box count: most boxes first by default, ascending if
+# CM_CALIBRATION_FILTER_ORDER_BY_NUM_BOXES_ASC is set to "yes"
+sorted_image_data = sorted(
+    data['images'],
+    key=lambda x: x['num_boxes'],
+    reverse=os.environ.get(
+        'CM_CALIBRATION_FILTER_ORDER_BY_NUM_BOXES_ASC',
+        '') != "yes")
+
+# Print file names in sorted order (run-filter.sh keeps the first N)
+for image in sorted_image_data:
+    print(image['file_name'])
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/run-filter.sh b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/run-filter.sh
new file mode 100644
index 000000000..9b1a90c68
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages-calibration/run-filter.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/filter.py ${CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH} > ordered.txt
+test $? -eq 0 || exit $?
+head -n ${CM_CALIBRATION_FILTER_SIZE} ordered.txt >filtered.txt
+test $? -eq 0 || exit $?
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/README-extra.md
new file mode 100644
index 000000000..b6f5d0812
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/README-extra.md
@@ -0,0 +1,2 @@
+# Ubuntu 22.04
+`sudo apt-get install -y libgl1-mesa-dev`
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/README.md
new file mode 100644
index 000000000..d5e20dd74
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-openimages](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-openimages) for the documentation of this CM script.
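The calibration filter above can be exercised end to end through the `_filter` and `_filter-size.#` variations: `filter.py` orders the calibration images by box count and `run-filter.sh` keeps the first N. A minimal sketch, assuming the standard `cmind` API (the size `100` is arbitrary):

```python
import cmind

# Fetch the MLPerf OpenImages calibration list, keeping the 100 images
# with the most annotation boxes (see filter.py / run-filter.sh above).
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,dataset,openimages,calibration,_filter,_filter-size.100',
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```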
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/_cm.yaml new file mode 100644 index 000000000..2e0189183 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/_cm.yaml @@ -0,0 +1,126 @@ +alias: get-dataset-openimages +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +category_sort: 8500 +default_env: + CM_DATASET_CALIBRATION: 'no' +deps: +- names: + - python + - python3 + tags: get,python3 +- tags: get,generic-python-lib,_requests +- force_env_keys: + - CM_GIT_* + names: + - inference-src + tags: mlperf,inference,source +- tags: get,generic-python-lib,_boto3 +- tags: get,generic-python-lib,_tqdm +- tags: get,generic-python-lib,_numpy +- tags: get,generic-python-lib,_opencv-python +- tags: get,generic-python-lib,_pandas +- names: + - pycocotools + tags: get,generic-python-lib,_pycocotools +env: + CM_DATASET: OPENIMAGES +new_env_keys: +- CM_DATASET_PATH +- CM_DATASET_PATH_ROOT +- CM_DATASET_OPENIMAGES_PATH +- CM_DATASET_OPENIMAGES_DATASET_PATH +- CM_DATASET_OPENIMAGES_DATASET_PATH_ROOT +- CM_DATASET_ANNOTATIONS_DIR_PATH +- CM_DATASET_ANNOTATIONS_FILE_PATH +- CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH +- CM_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH +- CM_CALIBRATION_DATASET_PATH +- CM_CALIBRATION_DATASET_PATH_ROOT +- CM_OPENIMAGES_CALIBRATION_DATASET_PATH +- CM_DATASET_OPENIMAGES_ANNOTATIONS_DIR_PATH +- CM_DATASET_OPENIMAGES_VALIDATION_ANNOTATIONS_FILE_PATH +posthook_deps: +- enable_if_env: + CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: + - 'yes' + tags: get,openimages,annotations +tags: +- get +- dataset +- openimages +- open-images +- object-detection +- original +uid: 0a9d49b644cf4142 +variations: + '50': + default: true + env: + CM_DATASET_SIZE: '50' + group: size + '500': + env: + CM_DATASET_SIZE: '500' + group: size + calibration: + deps: + - names: + - openimages-calibration + tags: get,openimages,calibration + env: + CM_DATASET_CALIBRATION: 'yes' + group: dataset-type + new_env_keys: + - CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH + - CM_CALIBRATION_DATASET_PATH + - CM_CALIBRATION_DATASET_PATH_ROOT + custom-annotations: + env: + CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: 'yes' + group: annotations + default-annotations: + default: true + env: + CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: 'no' + group: annotations + filter: {} + filter,calibration: + ad: + openimages-calibration: + tags: _filter + filter-size.#: + ad: + openimages-calibration: + tags: _filter-size.# + full: + env: + CM_DATASET_SIZE: '' + group: size + size.#: + env: + CM_DATASET_SIZE: '#' + group: size + using-fiftyone: + add_deps_recursive: + inference-src: + version: r2.1 + deps: + - tags: get,generic-python-lib,_fiftyone + - tags: get,openssl,lib + version: 1.1.1 + validation: + default: true + env: + CM_DATASET_CALIBRATION: 'no' + group: dataset-type + new_env_keys: + - CM_DATASET_PATH + - CM_DATASET_PATH_ROOT + - CM_DATASET_OPENIMAGES_DATASET_PATH + - CM_DATASET_OPENIMAGES_DATASET_PATH_ROOT + - CM_DATASET_ANNOTATIONS_DIR_PATH + - CM_DATASET_ANNOTATIONS_FILE_PATH + - CM_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/customize.py new file mode 100644 index 000000000..b9d9998b8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/customize.py @@ -0,0 +1,112 @@ +# +# Copyright: 
https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + print("") + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + print("") + + if os_info['platform'] == 'windows': + MLPERF_CLASSES = ['Airplane', 'Antelope', 'Apple', 'Backpack', 'Balloon', 'Banana', + 'Barrel', 'Baseball bat', 'Baseball glove', 'Bee', 'Beer', 'Bench', 'Bicycle', + 'Bicycle helmet', 'Bicycle wheel', 'Billboard', 'Book', 'Bookcase', 'Boot', + 'Bottle', 'Bowl', 'Bowling equipment', 'Box', 'Boy', 'Brassiere', 'Bread', + 'Broccoli', 'Bronze sculpture', 'Bull', 'Bus', 'Bust', 'Butterfly', 'Cabinetry', + 'Cake', 'Camel', 'Camera', 'Candle', 'Candy', 'Cannon', 'Canoe', 'Carrot', 'Cart', + 'Castle', 'Cat', 'Cattle', 'Cello', 'Chair', 'Cheese', 'Chest of drawers', 'Chicken', + 'Christmas tree', 'Coat', 'Cocktail', 'Coffee', 'Coffee cup', 'Coffee table', 'Coin', + 'Common sunflower', 'Computer keyboard', 'Computer monitor', 'Convenience store', + 'Cookie', 'Countertop', 'Cowboy hat', 'Crab', 'Crocodile', 'Cucumber', 'Cupboard', + 'Curtain', 'Deer', 'Desk', 'Dinosaur', 'Dog', 'Doll', 'Dolphin', 'Door', 'Dragonfly', + 'Drawer', 'Dress', 'Drum', 'Duck', 'Eagle', 'Earrings', 'Egg (Food)', 'Elephant', + 'Falcon', 'Fedora', 'Flag', 'Flowerpot', 'Football', 'Football helmet', 'Fork', + 'Fountain', 'French fries', 'French horn', 'Frog', 'Giraffe', 'Girl', 'Glasses', + 'Goat', 'Goggles', 'Goldfish', 'Gondola', 'Goose', 'Grape', 'Grapefruit', 'Guitar', + 'Hamburger', 'Handbag', 'Harbor seal', 'Headphones', 'Helicopter', 'High heels', + 'Hiking equipment', 'Horse', 'House', 'Houseplant', 'Human arm', 'Human beard', + 'Human body', 'Human ear', 'Human eye', 'Human face', 'Human foot', 'Human hair', + 'Human hand', 'Human head', 'Human leg', 'Human mouth', 'Human nose', 'Ice cream', + 'Jacket', 'Jeans', 'Jellyfish', 'Juice', 'Kitchen & dining room table', 'Kite', + 'Lamp', 'Lantern', 'Laptop', 'Lavender (Plant)', 'Lemon', 'Light bulb', 'Lighthouse', + 'Lily', 'Lion', 'Lipstick', 'Lizard', 'Man', 'Maple', 'Microphone', 'Mirror', + 'Mixing bowl', 'Mobile phone', 'Monkey', 'Motorcycle', 'Muffin', 'Mug', 'Mule', + 'Mushroom', 'Musical keyboard', 'Necklace', 'Nightstand', 'Office building', + 'Orange', 'Owl', 'Oyster', 'Paddle', 'Palm tree', 'Parachute', 'Parrot', 'Pen', + 'Penguin', 'Personal flotation device', 'Piano', 'Picture frame', 'Pig', 'Pillow', + 'Pizza', 'Plate', 'Platter', 'Porch', 'Poster', 'Pumpkin', 'Rabbit', 'Rifle', + 'Roller skates', 'Rose', 'Salad', 'Sandal', 'Saucer', 'Saxophone', 'Scarf', 'Sea lion', + 'Sea turtle', 'Sheep', 'Shelf', 'Shirt', 'Shorts', 'Shrimp', 'Sink', 'Skateboard', + 'Ski', 'Skull', 'Skyscraper', 'Snake', 'Sock', 'Sofa bed', 'Sparrow', 'Spider', 'Spoon', + 'Sports uniform', 'Squirrel', 'Stairs', 'Stool', 'Strawberry', 'Street light', + 'Studio couch', 'Suit', 'Sun hat', 'Sunglasses', 'Surfboard', 'Sushi', 'Swan', + 'Swimming pool', 'Swimwear', 'Tank', 'Tap', 'Taxi', 'Tea', 'Teddy bear', 'Television', + 'Tent', 'Tie', 'Tiger', 'Tin can', 'Tire', 'Toilet', 
'Tomato', 'Tortoise', 'Tower', + 'Traffic light', 'Train', 'Tripod', 'Truck', 'Trumpet', 'Umbrella', 'Van', 'Vase', + 'Vehicle registration plate', 'Violin', 'Wall clock', 'Waste container', 'Watch', + 'Whale', 'Wheel', 'Wheelchair', 'Whiteboard', 'Window', 'Wine', 'Wine glass', 'Woman', + 'Zebra', 'Zucchini'] + + x = '' + for v in MLPERF_CLASSES: + if x != '': + x += ' ' + x += '"' + v + '"' + env['CM_DATASET_OPENIMAGES_CLASSES'] = x + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + + env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.join( + os.getcwd(), 'install', 'annotations') + + if env.get('CM_DATASET_CALIBRATION', '') == "no": + env['CM_DATASET_PATH_ROOT'] = os.path.join(os.getcwd(), 'install') + env['CM_DATASET_PATH'] = os.path.join( + os.getcwd(), 'install', 'validation', 'data') + annotations_file_path = os.path.join( + env['CM_DATASET_ANNOTATIONS_DIR_PATH'], + "openimages-mlperf.json") + env['CM_DATASET_VALIDATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path + env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = annotations_file_path + env['CM_DATASET_OPENIMAGES_VALIDATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path + if env.get("CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS", '') == "yes": + annotations_file_src = env['CM_DATASET_OPENIMAGES_ANNOTATIONS_FILE_PATH'] + shutil.copy( + annotations_file_src, + env['CM_DATASET_ANNOTATIONS_DIR_PATH']) + env['CM_DATASET_OPENIMAGES_PATH'] = env['CM_DATASET_PATH'] + env['CM_DATASET_OPENIMAGES_PATH_ROOT'] = env['CM_DATASET_PATH_ROOT'] + else: + env['CM_CALIBRATION_DATASET_PATH'] = os.path.join( + os.getcwd(), 'install', 'calibration', 'data') + env['CM_OPENIMAGES_CALIBRATION_DATASET_PATH'] = os.path.join( + os.getcwd(), 'install', 'calibration', 'data') + env['CM_CALIBRATION_DATASET_PATH_ROOT'] = os.path.join( + os.getcwd(), 'install') + annotations_file_path = os.path.join( + env['CM_DATASET_ANNOTATIONS_DIR_PATH'], + "openimages-calibration-mlperf.json") + env['CM_DATASET_CALIBRATION_ANNOTATIONS_FILE_PATH'] = annotations_file_path + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/run.bat new file mode 100644 index 000000000..742542d25 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/run.bat @@ -0,0 +1,24 @@ +@echo off + +set CUR_DIR=%cd% +set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH% + +if not exist install mkdir install + +set INSTALL_DIR=%CUR_DIR%\install + +cd %CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH% + +if not "%CM_DATASET_SIZE%" == "" ( + set MAX_IMAGES=--max-images %CM_DATASET_SIZE% --seed 42 +) else ( + set MAX_IMAGES= +) + +%CM_PYTHON_BIN% tools\openimages.py %MAX_IMAGES% --dataset-dir=%INSTALL_DIR% --output-labels=openimages-mlperf.json --classes %CM_DATASET_OPENIMAGES_CLASSES% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +rem Next is a hack to support MLPerf inference on Windows +cd %INSTALL_DIR% +if not exist validation\data\annotations mkdir validation\data\annotations +copy annotations\* validation\data\annotations diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/run.sh new file mode 100644 index 000000000..2fc6eaddf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openimages/run.sh @@ -0,0 +1,40 @@ +#!/bin/bash +python3() { + ${CM_PYTHON_BIN_WITH_PATH} "$@" +} +export -f python3 + +CUR=${PWD} +mkdir -p install +INSTALL_DIR=${CUR}/install + +cd 
${CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH} +cd tools +if [[ ${CM_DATASET_CALIBRATION} == "no" ]]; then + if [ -n "${CM_DATASET_SIZE}" ]; then + max_images=" -m ${CM_DATASET_SIZE}" + else + max_images="" + fi + cmd="./openimages_mlperf.sh -d \"${INSTALL_DIR}\"${max_images}" + echo $cmd + eval $cmd + test $? -eq 0 || exit 1 +else + if [ -n "${CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH}" ]; then + calibration_file_string=" --calibration-file ${CM_MLPERF_OPENIMAGES_CALIBRATION_LIST_FILE_WITH_PATH}" + else + calibration_file_string="" + fi + cmd="./openimages_calibration_mlperf.sh -d \"${INSTALL_DIR}\"${calibration_file_string}" + echo $cmd + eval $cmd + test $? -eq 0 || exit 1 +fi +cd ${INSTALL_DIR} + +if [[ ! -d "open-images-v6-mlperf" ]]; then + ln -sf ../ open-images-v6-mlperf +fi + +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/README.md new file mode 100644 index 000000000..2e3c02154 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-openorca](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-openorca) for the documentation of this CM script.
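The pattern in the dataset scripts above, where `customize.py` prepares the environment and `run.sh`/`run.bat` do the native work, repeats across this patch. A minimal sketch (not part of the diff) of driving such a script from Python through the `cmind` API instead of the `cm run script` CLI; the tag list mirrors the script metadata, and `pip install cmind` is assumed:

```python
# Hypothetical usage sketch: resolve and run the OpenImages dataset script
# through the CM Python API. Available output keys depend on new_env_keys.
import cmind

r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,dataset,openimages',
                  'out': 'con'})
if r['return'] > 0:
    # CM reports failures via a return code and an 'error' string
    print('CM error:', r.get('error', ''))
else:
    print(r.get('new_env', {}).get('CM_DATASET_PATH', ''))
```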
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/_cm.yaml new file mode 100644 index 000000000..861c49575 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/_cm.yaml @@ -0,0 +1,55 @@ +alias: get-dataset-openorca +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +category_sort: 8500 +default_env: + CM_DATASET_CALIBRATION: 'no' +deps: +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_DATASET_OPENORCA_PATH + extra_cache_tags: openorca,repo,src + force_env_keys: + - CM_GIT_* + names: + - openorca-src + tags: get,git,repo,_lfs,_repo.https://huggingface.co/datasets/Open-Orca/OpenOrca +env: + CM_DATASET: OPENORCA +new_env_keys: +- CM_DATASET_* +tags: +- get +- dataset +- openorca +- language-processing +- original +uid: 9252c4d90d5940b7 +variations: + '500': + env: + CM_DATASET_SIZE: '500' + group: size + '60': + env: + CM_DATASET_SIZE: '60' + group: size + calibration: + env: + CM_DATASET_CALIBRATION: 'yes' + group: dataset-type + full: + default: true + env: + CM_DATASET_SIZE: '24576' + group: size + size.#: + env: + CM_DATASET_SIZE: '#' + group: size + validation: + default: true + env: + CM_DATASET_CALIBRATION: 'no' + group: dataset-type diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/customize.py new file mode 100644 index 000000000..b0a7add20 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-openorca/customize.py @@ -0,0 +1,35 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + env = i['env'] + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + if env.get('CM_DATASET_CALIBRATION', '') == "no": + env['CM_DATASET_PATH_ROOT'] = env['CM_DATASET_OPENORCA_PATH'] + env['CM_DATASET_PATH'] = env['CM_DATASET_OPENORCA_PATH'] + env['CM_DATASET_OPENORCA_PARQUET'] = os.path.join( + env['CM_DATASET_OPENORCA_PATH'], '1M-GPT4-Augmented.parquet') + else: + env['CM_CALIBRATION_DATASET_PATH'] = os.path.join( + os.getcwd(), 'install', 'calibration', 'data') + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
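The `postprocess()` of get-dataset-openorca above exports `CM_DATASET_OPENORCA_PARQUET`, pointing at the `1M-GPT4-Augmented.parquet` file inside the cloned Hugging Face repository. A minimal sketch (not part of the diff) of how a downstream harness could consume that key, assuming the script ran as a dependency so the key is present in the environment; pandas with a parquet engine is assumed:

```python
# Hypothetical consumer of the env exported by get-dataset-openorca.
import os
import pandas as pd  # requires pyarrow or fastparquet for read_parquet

parquet_path = os.environ['CM_DATASET_OPENORCA_PARQUET']
df = pd.read_parquet(parquet_path)

# CM_DATASET_SIZE comes from the size variations in _cm.yaml
# (24576 for the default 'full' variation).
size = int(os.environ.get('CM_DATASET_SIZE', '24576'))
print(df.head(size).shape)
```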
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/README.md new file mode 100644 index 000000000..acfdcf6a8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-squad-vocab](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-squad-vocab) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/_cm.yaml new file mode 100644 index 000000000..a6ec2e902 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/_cm.yaml @@ -0,0 +1,37 @@ +alias: get-dataset-squad-vocab +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +new_env_keys: +- CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH +- CM_DATASET_SQUAD_VOCAB_PATH +prehook_deps: +- env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_SQUAD_VOCAB_PATH + extra_cache_tags: bert,get,dataset-squad-vocab + force_cache: true + tags: download-and-extract,_wget + update_tags_from_env_with_prefix: + _url.: + - CM_PACKAGE_URL +print_env_at_the_end: + CM_DATASET_SQUAD_VOCAB_PATH: Path to SQUAD vocab file +tags: +- get +- aux +- squad +- dataset-aux +- language-processing +- squad-aux +- vocab +- squad-vocab +uid: e38874fff5094577 +variations: + from.zenodo: + default: true + env: + CM_DOWNLOAD_CHECKSUM: 64800d5d8528ce344256daf115d4965e + CM_DOWNLOAD_FILENAME: vocab.txt + CM_PACKAGE_URL: https://zenodo.org/record/3733868/files/vocab.txt + group: download-source diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/customize.py new file mode 100644 index 000000000..905db202d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad-vocab/customize.py @@ -0,0 +1,30 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + + env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] = env['CM_DATASET_SQUAD_VOCAB_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
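In the `_cm.yaml` above, `CM_DOWNLOAD_CHECKSUM` is the MD5 that the `download-and-extract,_wget` dependency verifies before the vocab file is cached. A minimal sketch (not part of the diff) of the equivalent check in Python:

```python
# Hypothetical re-implementation of the integrity check implied by
# CM_DOWNLOAD_CHECKSUM; the expected value is the one recorded above.
import hashlib

def md5_of(path, chunk_size=1 << 20):
    # stream the file so large downloads need not fit in memory
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

expected = '64800d5d8528ce344256daf115d4965e'  # vocab.txt from Zenodo
assert md5_of('vocab.txt') == expected, 'checksum mismatch: corrupted download'
```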
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/README-extra.md new file mode 100644 index 000000000..4497abe6b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/README-extra.md @@ -0,0 +1,20 @@ +# Downloads SQUAD Dataset +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the SQUAD dataset. + +## Usage + +``` +cm run script --tags=get,dataset,squad --version=[VERSION] +``` +where [VERSION] is one of +* `1.1` +* `2.0` + +## Exported Variables +* `CM_DATASET_SQUAD_PATH:` Directory path to SQUAD dataset +* `CM_DATASET_SQUAD_TRAIN_PATH:` JSON file path to SQUAD training dataset +* `CM_DATASET_SQUAD_VAL_PATH:` JSON file path to SQUAD validation dataset + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/README.md new file mode 100644 index 000000000..455e75867 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-squad](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-dataset-squad) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/_cm.yaml new file mode 100644 index 000000000..a9dd6ed94 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/_cm.yaml @@ -0,0 +1,46 @@ +alias: get-dataset-squad +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +default_version: '1.1' +deps: +- tags: get,sys-utils-cm +env: + CM_DATASET: SQUAD +new_env_keys: +- CM_DATASET_* +prehook_deps: +- env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_SQUAD_VAL_PATH + extra_cache_tags: bert,get,dataset-squad + force_cache: true + tags: download-and-extract,_wget + update_tags_from_env_with_prefix: + _url.: + - CM_PACKAGE_URL +print_env_at_the_end: + CM_DATASET_SQUAD_VAL_PATH: Path to SQUAD dataset +tags: +- get +- dataset +- squad +- language-processing +- validation +- original +uid: 6651c119c3ae49b3 +versions: + '1.1': + env: + CM_DOWNLOAD_CHECKSUM: 3e85deb501d4e538b6bc56f786231552 + CM_DOWNLOAD_FILENAME: dev-v1.1.json + CM_PACKAGE_URL: https://raw.githubusercontent.com/rajpurkar/SQuAD-explorer/master/dataset/dev-v1.1.json + CM_TRAIN_FILENAME: train-v1.1.json + CM_VAL_FILENAME: dev-v1.1.json + '2.0': + env: + CM_DOWNLOAD_CHECKSUM: 246adae8b7002f8679c027697b0b7cf8 + CM_DOWNLOAD_FILENAME: dev-v2.0.json + CM_PACKAGE_URL: https://raw.githubusercontent.com/rajpurkar/SQuAD-explorer/master/dataset/dev-v2.0.json + CM_TRAIN_FILENAME: train-v2.0.json + CM_VAL_FILENAME: dev-v2.0.json diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/customize.py new file mode 100644 index 000000000..73a9b3ba8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dataset-squad/customize.py @@ -0,0 +1,33 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM 
project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + + env['CM_DATASET_SQUAD_PATH'] = os.path.dirname( + env['CM_DATASET_SQUAD_VAL_PATH']) + env['CM_DATASET_PATH'] = os.path.dirname(env['CM_DATASET_SQUAD_VAL_PATH']) + # env['CM_DATASET_SQUAD_VAL_PATH'] = os.path.join(os.getcwd(), env['CM_VAL_FILENAME']) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/README.md new file mode 100644 index 000000000..76db0d5a1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts//get-dlrm-data-mlperf-inference](https://docs.mlcommons.org/cm4mlops/scripts//get-dlrm-data-mlperf-inference) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/_cm.yaml new file mode 100644 index 000000000..f287e37db --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/_cm.yaml @@ -0,0 +1,41 @@ +alias: get-dlrm-data-mlperf-inference +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +tags: +- get +- dlrm +- data +- mlperf +- inference +uid: 34bdfcd9c8364935 +docker: + real_run: false +new_env_keys: + - CM_DLRM_DATA_PATH + - DLRM_DATA_PATH +input_mapping: + dlrm_data_path: CM_DLRM_DATA_PATH + criteo_day23_raw_data_path: CM_CRITEO_DAY23_RAW_DATA_PATH +prehook_deps: + - tags: get,ml-model,dlrm,_pytorch + enable_if_env: + CM_DLRM_MODEL_DOWNLOAD: + - "on" + - tags: get,dataset,preprocessed,criteo,_mlc + enable_if_env: + CM_DLRM_DATASET_DOWNLOAD: + - "on" +variations: + nvidia: + group: implementation + default: true + new_env_keys: + - CM_DLRM_V2_DAY23_FILE_PATH + - CM_DLRM_V2_AGGREGATION_TRACE_FILE_PATH + env: + CM_DLRM_DATA_VARIATION: nvidia + intel: + group: implementation + env: + CM_DLRM_DATA_VARIATION: intel diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/checksums.txt b/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/checksums.txt new file mode 100644 index 000000000..86d89381d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/checksums.txt @@ -0,0 +1,26 @@ +a435220293e8e2b4c2b70267b759cf36 ./day_23_sparse_multi_hot_unpacked/0.npy +9b383e9ca2ad6d0841346b255f390a01 ./day_23_sparse_multi_hot_unpacked/10.npy +b1ce2de05b791c1ddb36e0e573a75d93 ./day_23_sparse_multi_hot_unpacked/11.npy +e9b71259c97546df1e9c82841f9c3d03 ./day_23_sparse_multi_hot_unpacked/12.npy +4b8e79310e06168422e6aa7f1b66f6ae ./day_23_sparse_multi_hot_unpacked/13.npy +f9acdc32bd6b766358be846d34b7dd19 ./day_23_sparse_multi_hot_unpacked/14.npy +29b6319884c87eac2dc10e4670576bc5 
./day_23_sparse_multi_hot_unpacked/15.npy +c3ae2edfb9c2279ec5e10e452226f661 ./day_23_sparse_multi_hot_unpacked/16.npy +ced0a164f926f97a7501b047d3d05fad ./day_23_sparse_multi_hot_unpacked/17.npy +a0f8ef9b4a4f14fe4211c07819f2fcf1 ./day_23_sparse_multi_hot_unpacked/18.npy +0ab3a06e2b648cf574d1235d71ebb006 ./day_23_sparse_multi_hot_unpacked/19.npy +a684ab288fe2bcc76374be0b0744fa2f ./day_23_sparse_multi_hot_unpacked/1.npy +7c753b13d54ad9e3e6c5e73719622201 ./day_23_sparse_multi_hot_unpacked/20.npy +3f8626a163420fc26c35c82b5b42e7ee ./day_23_sparse_multi_hot_unpacked/21.npy +45026929433aa879157e9b4f033c75b2 ./day_23_sparse_multi_hot_unpacked/22.npy +3434833ecd8a225f3f405b812bb47944 ./day_23_sparse_multi_hot_unpacked/23.npy +cc7daf94cf81e89360f1273750b0a78a ./day_23_sparse_multi_hot_unpacked/24.npy +7bd8c842b7504c97e1078654d2e3a5c0 ./day_23_sparse_multi_hot_unpacked/25.npy +2e841abb6f8f7cd30e3e5ef5df855bf1 ./day_23_sparse_multi_hot_unpacked/2.npy +e020aa1563b8b1a411405e420b322f49 ./day_23_sparse_multi_hot_unpacked/3.npy +cabb6b0d784a9d74192ef029f53309d4 ./day_23_sparse_multi_hot_unpacked/4.npy +ef346cd1ce26c7c85b6c4c108bdafaf0 ./day_23_sparse_multi_hot_unpacked/5.npy +da0245108f14131171ac3d43418a100c ./day_23_sparse_multi_hot_unpacked/6.npy +41610d7810e5552ab5ecf0332e47a55e ./day_23_sparse_multi_hot_unpacked/7.npy +a7650873137dc3518fa06d296d47df2b ./day_23_sparse_multi_hot_unpacked/8.npy +49acd882a1b742af1743922f9409fc1e ./day_23_sparse_multi_hot_unpacked/9.npy diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/customize.py new file mode 100644 index 000000000..7be990dbe --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/customize.py @@ -0,0 +1,166 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + dlrm_data_path = env.get( + 'CM_DLRM_DATA_PATH', env.get( + 'DLRM_DATA_PATH', '')) + if dlrm_data_path == '': + print( + f'Data path is not given as input through --dlrm_data_path. 
Using the cache directory: {os.getcwd()} as the data path') + dlrm_data_path = os.getcwd() + elif not os.path.exists(dlrm_data_path): + return {'return': 1, 'error': "given dlrm data path does not exist"} + + # create the required folders inside the dlrm data path if they do not exist + # criteo dataset + criteo_fp32_path = os.path.join(dlrm_data_path, "criteo", "day23", "fp32") + if not os.path.exists(criteo_fp32_path): + os.makedirs(criteo_fp32_path) + + # dlrm model + model_path = os.path.join(dlrm_data_path, "model") + if not os.path.exists(model_path): + os.makedirs(model_path) + + meta = i['meta'] + + script_path = i['run_script_input']['path'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + variation = env['CM_DLRM_DATA_VARIATION'] + + if variation == "nvidia": + if not os.path.exists(os.path.join(dlrm_data_path, "model")): + print(f'model directory is missing inside {dlrm_data_path}') + env['CM_DLRM_MODEL_DOWNLOAD'] = True + if not os.path.exists(os.path.join(dlrm_data_path, "criteo")): + print(f'criteo directory is missing inside {dlrm_data_path}') + env['CM_DLRM_DATASET_DOWNLOAD'] = True + if not os.path.exists(os.path.join( + dlrm_data_path, "model", "model_weights")): + print( + f'model_weights directory is missing inside {dlrm_data_path}/model') + env['CM_DLRM_MODEL_DOWNLOAD'] = True + if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23")): + print(f'day23 directory is missing inside {dlrm_data_path}/criteo') + env['CM_DLRM_DATASET_DOWNLOAD'] = True + if not os.path.exists(os.path.join( + dlrm_data_path, "criteo", "day23", "fp32")): + print( + f'fp32 directory is missing inside {dlrm_data_path}/criteo/day23') + env['CM_DLRM_DATASET_DOWNLOAD'] = True + if not os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot.npz")) and not os.path.exists( + os.path.join(dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot_unpacked")): + print( + f'day_23_sparse_multi_hot.npz or day_23_sparse_multi_hot_unpacked is missing inside {dlrm_data_path}/criteo/day23/fp32') + env['CM_DLRM_DATASET_DOWNLOAD'] = True + if not os.path.exists(os.path.join( + dlrm_data_path, "criteo", "day23", "fp32", "day_23_dense.npy")): + print( + f'day_23_dense.npy is missing inside {dlrm_data_path}/criteo/day23/fp32') + env['CM_DLRM_DATASET_DOWNLOAD'] = True + if not os.path.exists(os.path.join( + dlrm_data_path, "criteo", "day23", "fp32", "day_23_labels.npy")): + print( + f'day_23_labels.npy is missing inside {dlrm_data_path}/criteo/day23/fp32') + env['CM_DLRM_DATASET_DOWNLOAD'] = True + if not os.path.exists(os.path.join( + dlrm_data_path, "criteo", "day23", "raw_data")): + if env.get('CM_CRITEO_DAY23_RAW_DATA_PATH', '') == '': + return { + 'return': 1, 'error': f'Raw data missing inside {dlrm_data_path}/criteo/day23. Specify the target folder through input mapping (--criteo_day23_raw_data_path="path to raw criteo dataset")'} + + run_cmd = '' + xsep = ' && ' + + # add run commands to download the datasets and model + if env.get('CM_DLRM_DATASET_DOWNLOAD', False) == True: + run_cmd += 'cp -r "$CM_CRITEO_PREPROCESSED_PATH"/. ' + \ + os.path.join(dlrm_data_path, "criteo", "day23", "fp32") + xsep + if env.get('CM_DLRM_MODEL_DOWNLOAD', False) == True: + run_cmd += 'cp -r "$CM_ML_MODEL_FILE_WITH_PATH"/. 
' + \ + os.path.join(dlrm_data_path, "model") + xsep + + if env.get('CM_DLRM_DATASET_DOWNLOAD', '') != True: + if not os.path.exists(os.path.join( + dlrm_data_path, "criteo", "day23", "fp32", "day_23_sparse_multi_hot_unpacked")): + os.system(f"unzip {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot.npz')} -d {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot_unpacked')}") + else: + run_cmd += f"unzip {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot.npz')} -d {os.path.join(dlrm_data_path, 'criteo', 'day23', 'fp32', 'day_23_sparse_multi_hot_unpacked')}" + xsep + + if os.path.exists(os.path.join(dlrm_data_path, "criteo", "day23", "fp32", + "day_23_sparse_multi_hot.npz")) or env.get('CM_DLRM_DATASET_DOWNLOAD', False) == True: + file_path = os.path.join( + dlrm_data_path, + "criteo", + "day23", + "fp32", + "day_23_sparse_multi_hot.npz") + run_cmd += ("echo '{}  {}' | md5sum -c").format( + 'c46b7e31ec6f2f8768fa60bdfc0f6e40', file_path) + xsep + + file_path = os.path.join( + dlrm_data_path, + "criteo", + "day23", + "fp32", + "day_23_dense.npy") + run_cmd += ("echo '{}  {}' | md5sum -c").format( + 'cdf7af87cbc7e9b468c0be46b1767601', file_path) + xsep + + file_path = os.path.join( + dlrm_data_path, + "criteo", + "day23", + "fp32", + "day_23_labels.npy") + run_cmd += ("echo '{}  {}' | md5sum -c").format( + 'dd68f93301812026ed6f58dfb0757fa7', file_path) + xsep + + dir_path = os.path.join(dlrm_data_path, "criteo", "day23", "fp32") + run_cmd += ("cd {}; md5sum -c {}").format(dir_path, + os.path.join(script_path, "checksums.txt")) + + env['CM_DLRM_V2_DAY23_FILE_PATH'] = os.path.join( + dlrm_data_path, "criteo", "day23", "raw_data") + env['CM_DLRM_V2_AGGREGATION_TRACE_FILE_PATH'] = os.path.join( + dlrm_data_path, "criteo", "day23", "sample_partition.txt") + + env['CM_RUN_CMD'] = run_cmd + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + if env.get('CM_DLRM_DATA_PATH', '') == '' and env.get( + 'DLRM_DATA_PATH', '') == '': + env['CM_DLRM_DATA_PATH'] = os.getcwd() + else: + env['CM_GET_DEPENDENT_CACHED_PATH'] = env.get( + 'CM_DLRM_DATA_PATH', '') or env.get('DLRM_DATA_PATH', '') + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/run.sh new file mode 100644 index 000000000..d1cb7df69 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dlrm-data-mlperf-inference/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +run "$CM_RUN_CMD" diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dlrm/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-dlrm/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dlrm/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
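The `customize.py`/`run.sh` pair above illustrates the hand-off used throughout these scripts: `preprocess()` assembles one `&&`-joined shell command in `env['CM_RUN_CMD']`, and `run()` in `run.sh` echoes and `eval`s it unless `CM_FAKE_RUN` is set (CM's dry-run mode, used for example when composing Docker containers). A minimal Python sketch (not part of the diff) of that contract; the function name is illustrative:

```python
# Hypothetical equivalent of run.sh's run() helper.
import os
import subprocess

def run_cm_cmd(env):
    cmd = env.get('CM_RUN_CMD', '')
    print('Running:')
    print(cmd)
    if os.environ.get('CM_FAKE_RUN', '') == 'yes':
        return  # dry run: show the command without executing it
    # shell=True mirrors `eval "$1"`; since the sub-commands are chained
    # with '&&', the first failing copy or checksum aborts the sequence
    subprocess.run(cmd, shell=True, check=True)
```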
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dlrm/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-dlrm/README-extra.md new file mode 100644 index 000000000..8c70c36cd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dlrm/README-extra.md @@ -0,0 +1,15 @@ +# Get DLRM +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) clones the [DLRM repository](https://github.com/facebookresearch/dlrm) via git. + +## Commands +To install +``` +cm run script --tags=get,mlperf,dlrm,src +``` + +## Exported Variables +* `DLRM_DIR`: Directory path of the cloned dlrm repository + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dlrm/README.md b/cmx4mlops/cmx4mlops/repo/script/get-dlrm/README.md new file mode 100644 index 000000000..969faeb0a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dlrm/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-dlrm](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-dlrm) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dlrm/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-dlrm/_cm.yaml new file mode 100644 index 000000000..cc598990f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dlrm/_cm.yaml @@ -0,0 +1,27 @@ +alias: get-dlrm +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +default_env: + CM_GIT_DEPTH: --depth 10 + CM_GIT_PATCH: 'no' + CM_GIT_URL: https://github.com/facebookresearch/dlrm.git +default_version: main +deps: +- tags: detect,os +new_env_keys: +- DLRM_DIR +tags: +- get +- src +- dlrm +uid: 63680ac2449a4241 +variations: + full-history: + env: + CM_GIT_DEPTH: '' +versions: + main: + env: + CM_GIT_CHECKOUT: main diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dlrm/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-dlrm/customize.py new file mode 100644 index 000000000..c0d96bb3d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dlrm/customize.py @@ -0,0 +1,48 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + meta = i['meta'] + + if 'CM_GIT_DEPTH' not in env: + env['CM_GIT_DEPTH'] = '' + + if 'CM_GIT_RECURSE_SUBMODULES' not in env: + env['CM_GIT_RECURSE_SUBMODULES'] = '' + + need_version = env.get('CM_VERSION', '') + versions = meta['versions'] + + if need_version != '' and need_version not in versions: + env['CM_GIT_CHECKOUT'] = need_version + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['DLRM_DIR'] = os.path.join(os.getcwd(), "dlrm") + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-dlrm/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-dlrm/run.sh new file mode 100644 index 000000000..37e9e59a7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-dlrm/run.sh @@ -0,0 +1,12 @@ +#!/bin/bash + 
+CUR_DIR=$PWD +SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} + +echo "******************************************************" +echo "Cloning DLRM from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..." + +if [ ! -d "dlrm" ]; then + git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} dlrm + if [ "${?}" != "0" ]; then exit 1; fi +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-docker/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-docker/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-docker/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-docker/README.md b/cmx4mlops/cmx4mlops/repo/script/get-docker/README.md new file mode 100644 index 000000000..ba88a667a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-docker/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-docker](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-docker) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-docker/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-docker/_cm.yaml new file mode 100644 index 000000000..e2f33e875 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-docker/_cm.yaml @@ -0,0 +1,25 @@ +alias: get-docker +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +deps: +- tags: detect,os +docker_input_mapping: {} +input_description: {} +input_mapping: {} +new_env_keys: [ + "CM_DOCKER_VERSION" +] +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- get +- install +- docker +- engine +uid: 6192accce4234084 +variations: {} +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-docker/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-docker/customize.py new file mode 100644 index 000000000..d84a8eaed --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-docker/customize.py @@ -0,0 +1,84 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'docker.exe' if os_info['platform'] == 'windows' else 'docker' + env['FILE_NAME'] = file_name + + if 'CM_DOCKER_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_DOCKER_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + if r['return'] == 16: + run_file_name = "install" + r = automation.run_native_script( + 
{'run_script_input': i['run_script_input'], 'env': env, 'script_name': run_file_name}) + if r['return'] > 0: + return r + else: + return r + + return {'return': 0} + + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'(?:Docker|podman) version\s*([\d.]+)', + 'group_number': 1, + 'env_key': 'CM_DOCKER_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} + + +def postprocess(i): + env = i['env'] + + r = detect_version(i) + + if r['return'] > 0: + return r + + version = r['version'] + found_file_path = env['CM_DOCKER_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + env['CM_DOCKER_INSTALLED_PATH'] = found_path + env['+PATH'] = [found_path] + + env['CM_DOCKER_CACHE_TAGS'] = 'version-' + version + + env['CM_DOCKER_VERSION'] = version + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-docker/install-centos.sh b/cmx4mlops/cmx4mlops/repo/script/get-docker/install-centos.sh new file mode 100644 index 000000000..46cbbc166 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-docker/install-centos.sh @@ -0,0 +1,13 @@ +sudo yum install -y yum-utils +sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo +sudo yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin + +cmd="sudo usermod -aG docker $USER" +echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit $? + +echo "Please relogin to the shell so that the new group is effective" +exit 1 +#exec newgrp docker +#sudo su - $USER diff --git a/cmx4mlops/cmx4mlops/repo/script/get-docker/install-ubuntu.sh b/cmx4mlops/cmx4mlops/repo/script/get-docker/install-ubuntu.sh new file mode 100644 index 000000000..b0b6eb3a6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-docker/install-ubuntu.sh @@ -0,0 +1,43 @@ +#!/bin/bash +export DEBIAN_FRONTEND=noninteractive +sudo apt-get update +cmd="sudo apt-get install -y ca-certificates curl gnupg" +echo "$cmd" +eval "$cmd" + +test $? -eq 0 || exit $? + +if [[ ! -d /etc/apt/keyrings ]]; then + sudo install -m 0755 -d /etc/apt/keyrings +fi +test $? -eq 0 || exit $? + +cmd="curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg" +echo "$cmd" +eval "$cmd" + +sudo chmod a+r /etc/apt/keyrings/docker.gpg +echo \ + "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ + "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null +sudo apt-get update +cmd="sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin" +echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit $? + +if [[ -z $USER ]]; then + USER=`whoami` +fi + +cmd="sudo usermod -aG docker $USER" +echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit $? 
+ +echo "Please relogin to the shell so that the new group is effective" +exit 1 +#exec newgrp docker +#sudo su - $USER + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-docker/install.bat b/cmx4mlops/cmx4mlops/repo/script/get-docker/install.bat new file mode 100644 index 000000000..d6bdb8295 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-docker/install.bat @@ -0,0 +1,2 @@ +echo "Please install docker to continue" +exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-docker/install.sh b/cmx4mlops/cmx4mlops/repo/script/get-docker/install.sh new file mode 100644 index 000000000..d6bdb8295 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-docker/install.sh @@ -0,0 +1,2 @@ +echo "Please install docker to continue" +exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-docker/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-docker/run.bat new file mode 100644 index 000000000..16618d48a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-docker/run.bat @@ -0,0 +1,3 @@ +@echo off +docker --version > tmp-ver.out +if %errorlevel% neq 0 exit /b 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-docker/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-docker/run.sh new file mode 100644 index 000000000..f7f946a7f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-docker/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash +docker --version > tmp-ver.out +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-gcc/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-gcc/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-gcc/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-gcc/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-gcc/README-extra.md new file mode 100644 index 000000000..bb9d97694 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-gcc/README-extra.md @@ -0,0 +1,15 @@ +# Get GCC +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed gcc on the system. + +## Exported Variables +* `CM_GCC_BIN` +* `CM_GCC_BIN_WITH_PATH` +* `CM_C_COMPILER_BIN` +* `CM_C_COMPILER_WITH_PATH` +* `CM_CXX_COMPILER_BIN` +* `CM_CXX_COMPILER_WITH_PATH` +* `CM_COMPILER_*` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-gcc/README.md b/cmx4mlops/cmx4mlops/repo/script/get-gcc/README.md new file mode 100644 index 000000000..d220033ed --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-gcc/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/get-gcc](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/get-gcc) for the documentation of this CM script. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-gcc/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-gcc/_cm.yaml new file mode 100644 index 000000000..f67a59d2e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-gcc/_cm.yaml @@ -0,0 +1,32 @@ +alias: get-gcc +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +clean_files: [] +deps: +- tags: detect,os +name: Detect or install GCC compiler +new_env_keys: +- CM_GCC_* +- CM_C_COMPILER_* +- CM_CXX_COMPILER_* +- CM_COMPILER_* +- CM_LINKER_* +- + CFLAGS +- + CXXFLAGS +- + FFLAGS +- + LDFLAGS +- +CM_HOST_OS_DEFAULT_INCLUDE_PATH +- +PATH +post_deps: +- tags: get,compiler-flags +sort: 500 +tags: +- get +- gcc +- compiler +- c-compiler +- cpp-compiler +- get-gcc +uid: dbf4ab5cbed74372 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-gcc/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-gcc/customize.py new file mode 100644 index 000000000..e8a225a4c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-gcc/customize.py @@ -0,0 +1,117 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + file_name_c = 'gcc.exe' if os_info['platform'] == 'windows' else 'gcc' + + if env.get('CM_HOST_OS_FLAVOR', '') == 'rhel': + if "12" in env.get('CM_VERSION', '') or "12" in env.get( + 'CM_VERSION_MIN', ''): + if env.get('CM_TMP_PATH', '') == '': + env['CM_TMP_PATH'] = '' + env['CM_TMP_PATH'] += "/opt/rh/gcc-toolset-12/root/usr/bin" + env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + if 'CM_GCC_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name_c, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_GCC_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + # if r['return'] == 16: + # if env.get('CM_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes': + # return r + # + # print (recursion_spaces+' # {}'.format(r['error'])) + # + # # Attempt to run installer + # r = {'return':0, 'skip':True, 'script':{'tags':'install,gcc,src'}} + + return r + + return {'return': 0} + + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r' \(.*\)\s*([\d.]+)', + 'group_number': 1, + 'env_key': 'CM_GCC_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + if 'clang' in r['error']: + return {'return': 0, 'version': -1} + return r + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} + + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] > 0: + return r + + env['CM_COMPILER_FAMILY'] = 'GCC' + version = r['version'] + env['CM_COMPILER_VERSION'] = env['CM_GCC_VERSION'] + env['CM_GCC_CACHE_TAGS'] = 'version-' + version + env['CM_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-gcc' + + found_file_path = env['CM_GCC_BIN_WITH_PATH'] + + found_path = 
os.path.dirname(found_file_path) + + env['CM_GCC_INSTALLED_PATH'] = found_path + + file_name_c = os.path.basename(found_file_path) + # G: changed next line to handle cases like gcc-8 + file_name_cpp = file_name_c.replace('gcc', 'g++') + env['FILE_NAME_CPP'] = file_name_cpp + + env['CM_GCC_BIN'] = file_name_c + + # General compiler for general program compilation + env['CM_C_COMPILER_BIN'] = file_name_c + env['CM_C_COMPILER_FLAG_OUTPUT'] = '-o ' + env['CM_C_COMPILER_WITH_PATH'] = found_file_path + env['CM_C_COMPILER_FLAG_VERSION'] = '--version' + + env['CM_CXX_COMPILER_BIN'] = file_name_cpp + env['CM_CXX_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name_cpp) + env['CM_CXX_COMPILER_FLAG_OUTPUT'] = '-o ' + env['CM_CXX_COMPILER_FLAG_VERSION'] = '--version' + + env['CM_COMPILER_FLAGS_FAST'] = "-O3" + env['CM_LINKER_FLAGS_FAST'] = "-O3" + env['CM_COMPILER_FLAGS_DEBUG'] = "-O0" + env['CM_LINKER_FLAGS_DEBUG'] = "-O0" + env['CM_COMPILER_FLAGS_DEFAULT'] = "-O2" + env['CM_LINKER_FLAGS_DEFAULT'] = "-O2" + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-gcc/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-gcc/run.bat new file mode 100644 index 000000000..fac96d834 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-gcc/run.bat @@ -0,0 +1,3 @@ +%CM_GCC_BIN_WITH_PATH% --version > tmp-ver.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-gcc/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-gcc/run.sh new file mode 100644 index 000000000..08be81f21 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-gcc/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +gcc_bin=${CM_GCC_BIN_WITH_PATH} +echo "${gcc_bin} --version" + +${gcc_bin} --version > tmp-ver.out +test $? -eq 0 || exit 1 + +cat tmp-ver.out diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/README-extra.md new file mode 100644 index 000000000..5d320ba2b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/README-extra.md @@ -0,0 +1,6 @@ +## Variation onnxruntime_gpu + +### Windows + +* General installation notes: https://onnxruntime.ai/docs/install +* Notes about dependencies: [link](https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html). diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/README.md b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/README.md new file mode 100644 index 000000000..54edb3334 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Python-automation/get-generic-python-lib](https://docs.mlcommons.org/cm4mlops/scripts/Python-automation/get-generic-python-lib) for the documentation of this CM script. 
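For reference, `detect_version()` in `get-gcc/customize.py` above delegates to `parse_version()`, which applies `match_text` to the `tmp-ver.out` that `run.sh` produces from `gcc --version`. A minimal sketch (not part of the diff) of what that regex extracts; the sample line is illustrative:

```python
# Hypothetical demonstration of the get-gcc version-detection regex.
import re

first_line = 'gcc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0'  # sample `gcc --version` output
m = re.search(r' \(.*\)\s*([\d.]+)', first_line)
if m:
    print('Detected version:', m.group(1))  # -> 11.4.0
```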
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/_cm.yaml new file mode 100644 index 000000000..ee0a4cdd1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/_cm.yaml @@ -0,0 +1,727 @@ +alias: get-generic-python-lib +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Python automation +clean_files: [] +deps: +- tags: detect,os +- tags: detect,cpu +- names: + - python + - python3 + skip_if_env: + CM_TMP_USE_CUSTOM_PYTHON: + - 'on' + tags: get,python3 + dynamic: true +- names: + - python-pip + - pip + skip_if_env: + CM_GENERIC_PYTHON_PACKAGE_NAME: + - pip + tags: get,generic-python-lib,_pip +extra_cache_tags_from_env: +- env: CM_PYTHON_CACHE_TAGS + prefix: python- +input_mapping: + extra_index_url: CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL + force_install: CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL + index_url: CM_GENERIC_PYTHON_PIP_INDEX_URL +local_env_keys: +- CM_GENERIC_PYTHON_PACKAGE_VARIANT +new_env_keys: +- CM_PYTHONLIB_* +prehook_deps: +- enable_if_env: + CM_INSTALL_ONNXRUNTIME_GPU_FROM_SRC: + - 'yes' + tags: install,onnxruntime,from.src,_cuda +tags: +- get +- install +- generic +- pip-package +- generic-python-lib +tags_help: get generic-python-lib +uid: 94b62a682bc44791 +variations: + Pillow: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: Pillow + new_env_keys: + - CM_PILLOW_VERSION + anthropic: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: anthropic + new_env_keys: + - CM_ANTHROPIC_VERSION + apache-tvm: + deps: + - tags: get,generic-python-lib,_typing_extensions + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: apache-tvm + CM_GENERIC_PYTHON_PIP_EXTRA: ' --pre' + new_env_keys: + - CM_APACHE_TVM_VERSION + apex: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: apex + new_env_keys: + - CM_APEX_VERSION + async_timeout: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: async_timeout + new_env_keys: + - CM_ASYNC_TIMEOUT_VERSION + attr: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: attr + new_env_keys: + - CM_ATTR_VERSION + attrs: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: attrs + new_env_keys: + - CM_ATTRS_VERSION + boto3: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: boto3 + new_env_keys: + - CM_BOTO3_VERSION + cloudpickle: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: cloudpickle + new_env_keys: + - CM_CLOUDPICKLE_VERSION + cmind: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: cmind + new_env_keys: + - CM_CMIND_VERSION + colored: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: colored + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://pypi.ngc.nvidia.com + new_env_keys: + - CM_COLORED_VERSION + conda.#: + ad: + python-pip: + tags: _conda.# + python3: + tags: _conda.# + cupy: + deps: + - names: + - cuda + tags: get,cuda + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: cupy + new_env_keys: + - CM_CUPY_VERSION + custom-python: + ad: + python-pip: + tags: _custom-python + env: + CM_TMP_USE_CUSTOM_PYTHON: 'on' + cxx11-abi: + env: {} + datasets: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: datasets + new_env_keys: + - CM_DATASETS_VERSION + decorator: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: decorator + new_env_keys: + - CM_DECORATOR_VERSION + deepsparse: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: deepsparse + new_env_keys: + - CM_DEEPSPARSE_VERSION + dllogger: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: dllogger + CM_GENERIC_PYTHON_PIP_URL: git+https://github.com/NVIDIA/dllogger#egg=dllogger + extra-index-url.#: + env: + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: '#' + fiftyone: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: fiftyone + new_env_keys: + - 
CM_FIFTYONE_VERSION + google-api-python-client: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: google_api_python_client + new_env_keys: + - CM_GOOGLE_API_PYTHON_CLIENT_VERSION + google-auth-oauthlib: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: google_auth_oauthlib + new_env_keys: + - CM_GOOGLE_AUTH_OAUTHLIB_VERSION + huggingface_hub: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: huggingface_hub + new_env_keys: + - CM_HUGGINGFACE_HUB_VERSION + index-url.#: + env: + CM_GENERIC_PYTHON_PIP_INDEX_URL: '#' + inflect: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: inflect + new_env_keys: + - CM_INFLECT_VERSION + jax: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: jax + new_env_keys: + - CM_JAX_VERSION* + jax_cuda: + deps: + - names: + - cuda + tags: get,cuda + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: jax[cuda] + CM_GENERIC_PYTHON_PIP_EXTRA: -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html + CM_JAX_VERSION_EXTRA: CUDA + new_env_keys: + - CM_JAX_VERSION* + librosa: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: librosa + new_env_keys: + - CM_LIBROSA_VERSION + matplotlib: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: matplotlib + new_env_keys: + - CM_MATPLOTLIB_VERSION + mlperf_loadgen: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: mlperf_loadgen + CM_GENERIC_PYTHON_PIP_URL: git+https://github.com/mlcommons/inference.git#subdirectory=loadgen + new_env_keys: + - CM_MLPERF_LOADGEN_VERSION + mlperf_logging: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: mlperf_logging + CM_GENERIC_PYTHON_PIP_URL: git+https://github.com/mlperf/logging.git + new_env_keys: + - CM_MLPERF_LOGGING_VERSION + mpld3: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: mpld3 + new_env_keys: + - CM_MPLD3_VERSION + mxeval: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: mxeval + CM_GENERIC_PYTHON_PIP_URL: git+https://github.com/amazon-science/mxeval.git + CM_PIP_ERROR_SKIP: 'true' + nibabel: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: nibabel + new_env_keys: + - CM_NIBABEL_VERSION + no-deps: + env: + CM_GENERIC_PYTHON_PACKAGE_INSTALL_DEPS: 'no' + numpy: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: numpy + new_env_keys: + - CM_NUMPY_VERSION + nvidia-apex: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: apex + CM_GENERIC_PYTHON_PACKAGE_VARIANT: nvidia-apex + CM_GENERIC_PYTHON_PIP_URL: git+https://github.com/nvidia/apex@0da3ffb92ee6fbe5336602f0e3989db1cd16f880 + new_env_keys: + - CM_NVIDIA_APEX_VERSION + nvidia-apex-from-src: + deps: + - names: + - cuda + tags: get,cuda + - names: + - torch + tags: get,generic-python-lib,_torch_cuda + - env: + CM_GIT_CHECKOUT_FOLDER: apex + extra_cache_tags: nvidia-apex + tags: get,git,repo,_repo.https://github.com/NVIDIA/apex,_tag.23.05 + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: apex + CM_GENERIC_PYTHON_PACKAGE_VARIANT: nvidia-apex + new_env_keys: + - CM_NVIDIA_APEX_VERSION + nvidia-dali: + deps: + - names: + - cuda + tags: get,cuda + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: nvidia-dali-cuda120 + CM_GENERIC_PYTHON_PIP_EXTRA: ' --upgrade --default-timeout=900' + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://developer.download.nvidia.com/compute/redist + new_env_keys: + - CM_NVIDIA_DALI_VERSION + nvidia-pycocotools: + base: + - pycocotools + deps: + - names: + - cython + tags: get,generic-python-lib,_package.cython + - names: + - numpy + tags: get,generic-python-lib,_package.numpy + env: + CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: pycocotools + CM_GENERIC_PYTHON_PIP_URL: pycocotools@git+https://github.com/NVIDIA/cocoapi#subdirectory=PythonAPI + nvidia-pyindex: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: nvidia-pyindex + new_env_keys: + - CM_NVIDIA_PYINDEX_VERSION + 
nvidia-tensorrt: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: nvidia-tensorrt + new_env_keys: + - CM_NVIDIA_TENSORRT_VERSION + onnx: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: onnx + new_env_keys: + - CM_ONNX_VERSION + onnx-graphsurgeon: + deps: + - tags: get,generic-python-lib,_package.nvidia-pyindex + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: onnx_graphsurgeon + new_env_keys: + - CM_ONNX_GRAPHSURGEON_VERSION + onnxruntime: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: onnxruntime + new_env_keys: + - CM_ONNXRUNTIME_VERSION + onnxruntime,rocm: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: onnxruntime-training + CM_GENERIC_PYTHON_PIP_URL: https://download.onnxruntime.ai/onnxruntime_training-1.16.0%2Brocm56-cp3<<>>-cp3<<>>-manylinux_2_17_x86_64.manylinux2014_x86_64.whl + new_env_keys: + - CM_ONNXRUNTIME_TRAINING_VERSION* + onnxruntime_gpu: + default_env: + CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: onnxruntime + deps: + - names: + - cuda + tags: get,cuda + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: onnxruntime_gpu + CM_ONNXRUNTIME_VERSION_EXTRA: GPU + new_env_keys: + - CM_ONNXRUNTIME_GPU_VERSION* + openai: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: openai + new_env_keys: + - CM_OPENAI_VERSION + opencv-python: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: opencv-python + new_env_keys: + - CM_OPENCV_PYTHON_VERSION + package.#: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: '#' + CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: '' + CM_GENERIC_PYTHON_PIP_URL: '' + find_links_url.#: + env: + CM_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL: '#' + package.torch,cxx11-abi: + env: + CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/cpu-cxx11-abi + pandas: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: pandas + new_env_keys: + - CM_PANDAS_VERSION + path.#: + env: + CM_GENERIC_PYTHON_PIP_URL: '#' + pillow: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: Pillow + new_env_keys: + - CM_PILLOW_VERSION + pip: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: pip + new_env_keys: + - CM_PIP_VERSION + - CM_PYTHON_PIP_COMMON_EXTRA + polygraphy: + deps: + - tags: get,generic-python-lib,_colored + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: polygraphy + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://pypi.ngc.nvidia.com + new_env_keys: + - CM_POLYGRAPHY_VERSION + pre: + env: + CM_GENERIC_PYTHON_DEV_VERSION: 'yes' + protobuf: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: protobuf + new_env_keys: + - CM_PROTOBUF_VERSION + psutil: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: psutil + new_env_keys: + - CM_PSUTIL_VERSION + pycocotools: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: pycocotools + new_env_keys: + - CM_PYCOCOTOOLS_VERSION + pycuda: + deps: + - names: + - cuda + tags: get,cuda + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: pycuda + new_env_keys: + - CM_PYCUDA_VERSION + quark-amd: + deps: + - env: + CM_DOWNLOAD_FILENAME: quark-0.1.0+a9827f5-py39-none-any.whl + CM_DOWNLOAD_FINAL_ENV_NAME: CM_QUARK_AMD_WHL_PATH + extra_cache_tags: quark-amd + force_cache: true + tags: download,file,_wget,_url.https://www.xilinx.com/bin/public/openDownload?filename=quark-0.1.0+a9827f5-py39-none-any.whl + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: quark + CM_GENERIC_PYTHON_PIP_URL: <<>> + ray: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: ray[default] + new_env_keys: + - CM_RAY_VERSION + requests: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: requests + new_env_keys: + - CM_REQUESTS_VERSION + rocm: + deps: + - names: + - rocm + tags: get,rocm + env: {} + safetensors: + deps: + - skip_if_env: + CM_HOST_PLATFORM_FLAVOR: + - x86_64 + tags: get,rust-compiler + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: safetensors + 
new_env_keys: + - CM_SAFETENSORS_VERSION + scikit-learn: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: scikit-learn + new_env_keys: + - CM_SCIKIT_LEARN_VERSION + scipy: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: scipy + new_env_keys: + - CM_SCIPY_VERSION + scons: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: scons + new_env_keys: + - CM_SCONS_VERSION + setfit: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: setfit + new_env_keys: + - CM_SETFIT_VERSION + setuptools: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: setuptools + new_env_keys: + - CM_SETUPTOOL_VERSION + six: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: six + new_env_keys: + - CM_SIX_VERSION + sklearn: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: sklearn + new_env_keys: + - CM_SKLEARN_VERSION + sox: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: sox + new_env_keys: + - CM_SOX_VERSION + sparsezoo: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: sparsezoo + new_env_keys: + - CM_SPARSEZOO_VERSION + streamlit: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: streamlit + new_env_keys: + - CM_STREAMLIT_VERSION + streamlit_option_menu: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: streamlit_option_menu + new_env_keys: + - CM_STREAMLIT_OPTION_MENU_VERSION + tensorboard: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: tensorboard + new_env_keys: + - CM_TENSORBOARD_VERSION + tensorflow: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: tensorflow + new_env_keys: + - CM_TENSORFLOW_VERSION + tensorflow,rocm: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: tensorflow-rocm + new_env_keys: + - CM_TENSORFLOW_ROCM_VERSION + tensorrt: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: tensorrt + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/<<>> + CM_TORCH_VERSION_EXTRA: CUDA + new_env_keys: + - CM_TENSORRT_VERSION + tflite: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: tflite + new_env_keys: + - CM_TFLITE_VERSION + tflite-runtime: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: tflite-runtime + new_env_keys: + - CM_TFLITE_RUNTIME_VERSION + tokenization: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: tokenization + new_env_keys: + - CM_TOKENIZATION_VERSION + toml: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: toml + new_env_keys: + - CM_TOML_VERSION + torch: + deps: + - enable_if_env: + CM_PYTHON_MINOR_VERSION: + - '7' + - '8' + tags: get,generic-python-lib,_package.networkx + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: torch + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu + new_env_keys: + - CM_TORCH_VERSION* + torch,cxx11-abi: + env: + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/nightly/cpu-cxx11-abi + torch,pre: + default_env: + CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torch + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: torch + CM_GENERIC_PYTHON_PIP_EXTRA: ' --pre' + CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/cpu + new_env_keys: + - CM_TORCH_VERSION* + torch,rocm: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: torch + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: '' + CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/rocm6.2 + CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torch + new_env_keys: + - CM_TORCH_VERSION* + post_deps: + - tags: get,generic-python-lib,_torchvision,_rocm + - tags: get,generic-python-lib,_torchaudio,_rocm + torch_cuda: + default_env: {} + deps: + - names: + - cuda + tags: get,cuda + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: torch + CM_TORCH_VERSION_EXTRA: CUDA + new_env_keys: + - CM_TORCH_VERSION* + torch_cuda,pre: + default_env: + CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torch_cuda + deps: + - names: + - cuda + tags: get,cuda + - tags: 
get,generic-python-lib,_numpy + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: torch + CM_GENERIC_PYTHON_PIP_EXTRA: ' --pre' + CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/<<>> + CM_TORCH_VERSION_EXTRA: CUDA + new_env_keys: + - CM_TORCH_VERSION* + torch_tensorrt: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: torch-tensorrt + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/<<>> + CM_TORCH_VERSION_EXTRA: CUDA + new_env_keys: + - CM_TORCH_TENSORRT_VERSION + torchaudio: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: torchaudio + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu + new_env_keys: + - CM_TORCHAUDIO_VERSION* + torchaudio,rocm: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: torchaudio + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: '' + CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/rocm6.2 + CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torchaudio + new_env_keys: + - CM_TORCHAUDIO_VERSION* + torchaudio_cuda: + default_env: + CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torchaudio + deps: + - names: + - cuda + tags: get,cuda + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: torchaudio + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL1: https://download.pytorch.org/whl/<<>> + CM_TORCHAUDIO_VERSION_EXTRA: CUDA + new_env_keys: + - CM_TORCHAUDIO_VERSION* + torchvision: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: torchvision + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu + new_env_keys: + - CM_TORCHVISION_VERSION* + torchvision,rocm: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: torchvision + CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: '' + CM_GENERIC_PYTHON_PIP_INDEX_URL: https://download.pytorch.org/whl/nightly/rocm6.2 + CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: torchvision + new_env_keys: + - CM_TORCHVISION_VERSION* + torchvision_cuda: + default_env: + CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS1: torchvision + deps: + - names: + - cuda + tags: get,cuda + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: torchvision + CM_TORCHVISION_VERSION_EXTRA: CUDA + new_env_keys: + - CM_TORCHVISION_VERSION* + tornado: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: tornado + new_env_keys: + - CM_TORNADO_VERSION + tqdm: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: tqdm + new_env_keys: + - CM_TQDM_VERSION + transformers: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: transformers + new_env_keys: + - CM_TRANSFORMERS_VERSION + typing_extensions: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: typing_extensions + new_env_keys: + - CM_TYPING_EXTENSIONS_VERSION + ujson: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: ujson + new_env_keys: + - CM_UJSON_VERSION + unidecode: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: unidecode + new_env_keys: + - CM_UNIDECODE_VERSION + url.#: + env: + CM_GENERIC_PYTHON_PIP_URL: '#' + CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL: 'yes' + wandb: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: wandb + new_env_keys: + - CM_WANDB_VERSION + west: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: west + new_env_keys: + - CM_WEST_VERSION + whl-url.#: + deps: + - env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_GENERIC_PYTHON_PIP_URL + force_cache: 'yes' + tags: download,file,_url.# + env: + CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL: 'yes' + xgboost: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: xgboost + new_env_keys: + - CM_XGBOOST_VERSION + xlsxwriter: + env: + CM_GENERIC_PYTHON_PACKAGE_NAME: xlsxwriter + new_env_keys: + - CM_XLSXWRITER_VERSION diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/customize.py new file mode 100644 index 
000000000..edbb2d552 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/customize.py @@ -0,0 +1,203 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import cmind as cm + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + meta = i['meta'] + automation = i['automation'] + run_script_input = i['run_script_input'] + pip_version = env.get('CM_PIP_VERSION', '').strip().split('.') + + package_name = env.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '').strip() + if package_name == '': + return automation._available_variations({'meta': meta}) + + if package_name == "onnxruntime_gpu": + # https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#requirements + # 20240214: ONNXRuntime 1.17.0 now support CUDA 12 so we remove next check + # TBD: if we have explicit version for ONNX < 17.0.0 and CUDA is >= 12, + # we should add a check to fail ... + cuda_version = env.get('CM_CUDA_VERSION', '').strip() +# if cuda_version!='': +# cuda_version_split = cuda_version.split('.') +# if int(cuda_version_split[0]) >= 12: +# # env['CM_INSTALL_ONNXRUNTIME_GPU_FROM_SRC'] = "yes" +# return {'return': 1, 'error':'at this moment, PIP package +# "onnxruntime_gpu" needs CUDA < 12'} + + extra = env.get('CM_GENERIC_PYTHON_PIP_EXTRA', '') + if (pip_version and len(pip_version) > 1 and int(pip_version[0]) >= 23) and ( + '--break-system-packages' not in extra): + extra += ' --break-system-packages ' + env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --break-system-packages" + + if env.get('CM_GENERIC_PYTHON_PACKAGE_INSTALL_DEPS', '') == "no": + env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --no-deps" + + if env.get('CM_PIP_INSTALL_NEEDS_USER', '') == "yes": + env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --user" + + if env.get('CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS', '') != '': + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'uninstall_deps'}) + if r['return'] > 0: + return r + + prepare_env_key = env.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '') + for x in ["-", "[", "]"]: + prepare_env_key = prepare_env_key.replace(x, "_") + + env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'] = prepare_env_key.upper() + + recursion_spaces = i['recursion_spaces'] + + r = automation.detect_version_using_script({ + 'env': env, + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + + force_install = ( + env.get( + 'CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL', + '') in [ + 'yes', + 'true', + 'True', + True]) + + if r['return'] > 0 or force_install: + if r['return'] == 16 or force_install: + # Clean detected version env if exists otherwise takes detected version + # for example, when we reinstall generic python lib package + env_version_key = 'CM_' + \ + env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION' + if env.get(env_version_key, '') != '': + del (env[env_version_key]) + + # Check if upgrade + if force_install: + extra += ' --upgrade --no-deps --force-reinstall' + + # Check index URL + index_url = env.get('CM_GENERIC_PYTHON_PIP_INDEX_URL', '').strip() + if index_url != '': + # Check special cases + if 
'${CM_TORCH_CUDA}' in index_url: + index_url = index_url.replace( + '${CM_TORCH_CUDA}', env.get('CM_TORCH_CUDA')) + + extra += ' --index-url ' + index_url + + # Check extra index URL + extra_index_url = env.get( + 'CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL', '').strip() + + if extra_index_url != '': + # Check special cases + if '${CM_TORCH_CUDA}' in extra_index_url: + extra_index_url = extra_index_url.replace( + '${CM_TORCH_CUDA}', env.get('CM_TORCH_CUDA')) + + extra += ' --extra-index-url ' + extra_index_url + + # check find-links + find_links_url = env.get( + 'CM_GENERIC_PYTHON_PIP_EXTRA_FIND_LINKS_URL', '').strip() + + if find_links_url != '': + extra += ' -f ' + find_links_url + + # Check update + if env.get('CM_GENERIC_PYTHON_PIP_UPDATE', '') in [ + True, 'true', 'yes', 'on']: + extra += ' -U' + + print('') + print(recursion_spaces + ' Extra PIP CMD: ' + extra) + print('') + + env['CM_GENERIC_PYTHON_PIP_EXTRA'] = extra + + r = automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'install'}) + + if r['return'] > 0: + return r + + return {'return': 0} + + +def detect_version(i): + + env = i['env'] + + if env.get('CM_TMP_PYTHON_PACKAGE_NAME_ENV', '') != '': + env_version_key = 'CM_' + \ + env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION' + else: + env_version_key = 'CM_CACHE_TMP_VERSION' + + r = i['automation'].parse_version({'match_text': r'\s*([\d.a-z\-]+)', + 'group_number': 1, + 'env_key': env_version_key, + 'which_env': i['env']}) + if r['return'] > 0: + return r + + version = r['version'] + current_detected_version = version + + if env.get('CM_TMP_SILENT', '') != 'yes': + print( + i['recursion_spaces'] + + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} + + +def postprocess(i): + + env = i['env'] + + env_version_key = 'CM_' + \ + env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'].upper() + '_VERSION' + + if env.get(env_version_key, '') != '': + version = env[env_version_key] + else: + r = detect_version(i) + if r['return'] > 0: + return r + + version = r['version'] + + env['CM_PYTHONLIB_' + env['CM_TMP_PYTHON_PACKAGE_NAME_ENV'] + + '_CACHE_TAGS'] = 'version-' + version + + import pkgutil + package_name = env.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '').strip() + package = pkgutil.get_loader(package_name) + if package: + installed_file_path = package.get_filename() + env['CM_GET_DEPENDENT_CACHED_PATH'] = installed_file_path + + pip_version = env.get('CM_PIP_VERSION', '').strip().split('.') + if pip_version and len(pip_version) > 1 and int(pip_version[0]) >= 23: + env['CM_PYTHON_PIP_COMMON_EXTRA'] = " --break-system-packages" + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/detect-version.py b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/detect-version.py new file mode 100644 index 000000000..001c39b37 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/detect-version.py @@ -0,0 +1,36 @@ +import os +import sys + +package_name = os.environ.get('CM_GENERIC_PYTHON_PACKAGE_NAME', '') + +filename = 'tmp-ver.out' + +if os.path.isfile(filename): + os.remove(filename) + +if package_name != '': + + version = '' + error = '' + + try: + import importlib.metadata + version = importlib.metadata.version(package_name) + except Exception as e: + error = format(e) + + if error != '' and sys.version_info < (3, 9): + try: + import pkg_resources + version = pkg_resources.get_distribution(package_name).version + error = '' + except 
Exception as e: + if error != '': + error += '\n' + error += format(e) + + # We generally skip error since it usually means that + # package is not installed + + with open(filename, 'w') as file: + file.write(str(version) + '\n') diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/install.bat b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/install.bat new file mode 100644 index 000000000..0a5967462 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/install.bat @@ -0,0 +1,15 @@ +echo. + +if NOT "%CM_GENERIC_PYTHON_PIP_URL%" == "" ( + + %CM_PYTHON_BIN_WITH_PATH% -m pip install %CM_GENERIC_PYTHON_PIP_URL% %CM_GENERIC_PYTHON_PIP_EXTRA% + IF %ERRORLEVEL% NEQ 0 EXIT 1 + +) else ( + + %CM_PYTHON_BIN_WITH_PATH% -m pip install %CM_GENERIC_PYTHON_PACKAGE_NAME%%CM_TMP_PIP_VERSION_STRING% %CM_GENERIC_PYTHON_PIP_EXTRA% + IF %ERRORLEVEL% NEQ 0 EXIT 1 + +) + + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/install.sh b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/install.sh new file mode 100644 index 000000000..b79aa8146 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/install.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +echo "" + +if [[ ${CM_GENERIC_PYTHON_PACKAGE_VARIANT} == "nvidia-apex-depreciated" ]]; then + cd ${CM_GIT_REPO_CHECKOUT_PATH} + cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip install -v --disable-pip-version-check --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./" + echo $cmd + if [[ -n ${CM_PIP_ERROR_SKIP} ]]; then + eval $cmd + else + eval $cmd + test $? -eq 0 || exit $? + fi + exit 0 +fi + +if [[ ${CM_GENERIC_PYTHON_PACKAGE_NAME} == "tensorflow_old" ]]; then + if [[ ${CM_HOST_OS_FLAVOR} == "macos" ]]; then + if [[ -n ${CM_PIP_ERROR_SKIP} ]]; then + . ${CM_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-macos.sh + else + . ${CM_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-macos.sh + test $? -eq 0 || exit $? + fi + exit 0 + fi + if [[ ${CM_HOST_PLATFORM_FLAVOR} == "aarch64" ]]; then + if [[ -n ${CM_PIP_ERROR_SKIP} ]]; then + . ${CM_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-aarch64.sh + else + . ${CM_TMP_CURRENT_SCRIPT_PATH}/tensorflow/run-aarch64.sh + test $? -eq 0 || exit $? + fi + exit 0 + fi +fi + +if [[ -n ${CM_GENERIC_PYTHON_PIP_URL} ]]; then + cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip install \"${CM_GENERIC_PYTHON_PIP_URL}\" ${CM_GENERIC_PYTHON_PIP_EXTRA}" + echo $cmd + if [[ -n ${CM_PIP_ERROR_SKIP} ]]; then + eval $cmd + else + eval $cmd + test $? -eq 0 || exit $? + fi + exit 0 +fi + +cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip install \"${CM_GENERIC_PYTHON_PACKAGE_NAME}${CM_TMP_PIP_VERSION_STRING}\" ${CM_GENERIC_PYTHON_PIP_EXTRA}" +echo $cmd + +if [[ -n ${CM_PIP_ERROR_SKIP} ]]; then + eval $cmd +else + eval $cmd + test $? -eq 0 || exit $? 
+fi +exit 0 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/run.bat new file mode 100644 index 000000000..2612377c8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/run.bat @@ -0,0 +1,4 @@ +IF NOT DEFINED CM_TMP_CURRENT_SCRIPT_PATH SET CM_TMP_CURRENT_SCRIPT_PATH=%CD% + +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\detect-version.py +IF %ERRORLEVEL% NEQ 0 EXIT 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/run.sh new file mode 100644 index 000000000..b60ac0814 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/run.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/detect-version.py +test $? -eq 0 || exit $? +exit 0 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/tensorflow/run-aarch64.sh b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/tensorflow/run-aarch64.sh new file mode 100644 index 000000000..6c11efb71 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/tensorflow/run-aarch64.sh @@ -0,0 +1,13 @@ +CM_PYTHON_BIN=${CM_PYTHON_BIN_WITH_PATH:-python3} + +${CM_PYTHON_BIN} -m pip install --upgrade pip ${CM_PYTHON_PIP_COMMON_EXTRA} +${CM_PYTHON_BIN} -m pip install setuptools testresources wheel h5py --user --upgrade --ignore-installed ${CM_PYTHON_PIP_COMMON_EXTRA} + +curl https://sh.rustup.rs -sSf -o tmp.sh +sh tmp.sh -y + +export PATH=$PATH:$HOME/.cargo/bin + +${CM_PYTHON_BIN} -m pip install tensorflow-aarch64${CM_TMP_PIP_VERSION_STRING} --user ${CM_PYTHON_PIP_COMMON_EXTRA} +test $? -eq 0 || exit 1 +echo "CM_GENERIC_PYTHON_PACKAGE_NAME=tensorflow-aarch64" >> $PWD/tmp-run-env.out diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/tensorflow/run-macos.sh b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/tensorflow/run-macos.sh new file mode 100644 index 000000000..525b532eb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/tensorflow/run-macos.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +CM_PYTHON_BIN=${CM_PYTHON_BIN:-python3} + +${CM_PYTHON_BIN} -m pip install tensorflow-macos${CM_TMP_PIP_VERSION_STRING} +test $? -eq 0 || exit 1 +echo "CM_GENERIC_PYTHON_PACKAGE_NAME=tensorflow-macos" >> $PWD/tmp-run-env.out diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/uninstall_deps.sh b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/uninstall_deps.sh new file mode 100644 index 000000000..eeddf36d7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/uninstall_deps.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +if [[ -n ${CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS} ]]; then + cmd="${CM_PYTHON_BIN_WITH_PATH} -m pip uninstall ${CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS} -y ${CM_PYTHON_PIP_COMMON_EXTRA}" + echo "$cmd" + eval "$cmd" + test $? -eq 0 || exit $? 
+fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/validate_cache.bat b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/validate_cache.bat new file mode 100644 index 000000000..2612377c8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/validate_cache.bat @@ -0,0 +1,4 @@ +IF NOT DEFINED CM_TMP_CURRENT_SCRIPT_PATH SET CM_TMP_CURRENT_SCRIPT_PATH=%CD% + +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\detect-version.py +IF %ERRORLEVEL% NEQ 0 EXIT 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/validate_cache.sh b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/validate_cache.sh new file mode 100644 index 000000000..b60ac0814 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-python-lib/validate_cache.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/detect-version.py +test $? -eq 0 || exit $? +exit 0 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/README-extra.md new file mode 100644 index 000000000..d8f0015ae --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/README-extra.md @@ -0,0 +1,425 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-generic-sys-util](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-generic-sys-util) for the documentation of this CM script. + +# get-generic-sys-util +Below are the regexes used to detect the version of each system utility, the command whose output each regex is applied to, and a sample of the output that the regex expects. A short Python sketch showing how such a regex/command pair is evaluated is given after the first entry below. + +All commands have been tested on Ubuntu. + +Format: + +## Utility name +`regex` + +`command to obtain version` + +command output + +---- + +## g++-9 +`^.*([0-9]+(\\.[0-9]+)+).*` + +`g++-9 --version` + +g++-9 (Ubuntu 9.5.0-1ubuntu1~22.04) 9.5.0
    +Copyright (C) 2019 Free Software Foundation, Inc.
    +This is free software; see the source for copying conditions. There is NO
    +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
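+
+For illustration, here is a minimal sketch of how such a regex/command pair can be evaluated (an assumption about the mechanism, not part of the script itself; the hypothetical `detect_version` helper is ours, the regex is the one `_cm.yaml` uses for the gcc/g++ entries, and the exact group handling inside CM's `parse_version` may differ):
+
+```python
+import re
+import subprocess
+
+def detect_version(cmd: str, pattern: str, group: int = 1) -> str:
+    # Run the version command and search its combined output with the documented regex
+    proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
+    m = re.search(pattern, proc.stdout + proc.stderr)
+    return m.group(group) if m else "undetected"
+
+print(detect_version("g++-9 --version", r"\b(\d+\.\d+(?:\.\d+)?)\b"))  # e.g. 9.5.0
+```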
    + +## g++-11 +`^.*([0-9]+(\\.[0-9]+)+).*` + +`g++-11 --version` + +g++-11 (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0
    +Copyright (C) 2021 Free Software Foundation, Inc.
    +This is free software; see the source for copying conditions. There is NO
    +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
    + +## g++-12 +`^.*([0-9]+(\\.[0-9]+)+).*` + +`g++-12 --version` + +g++-12 (Ubuntu 12.3.0-1ubuntu1~22.04) 12.3.0
    +Copyright (C) 2022 Free Software Foundation, Inc.
    +This is free software; see the source for copying conditions. There is NO
    +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
    + + +## gcc-9 +`^.*([0-9]+(\\.[0-9]+)+).*` + +`gcc-9 --version` + +gcc-9 (Ubuntu 9.5.0-1ubuntu1~22.04) 9.5.0
    +Copyright (C) 2019 Free Software Foundation, Inc.
    +This is free software; see the source for copying conditions. There is NO
    +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
    + +## gcc-11 +`^.*([0-9]+(\\.[0-9]+)+).*` + +`gcc-11 --version` + +gcc-11 (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0
    +Copyright (C) 2021 Free Software Foundation, Inc.
    +This is free software; see the source for copying conditions. There is NO
    +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ + +## libgflags-dev +`([\d.]+)` + +`pkg-config --modversion gflags` + +2.2.2 + +## libglog-dev +`([\d.]+)` + +`pkg-config --modversion libglog` + +0.4.0 + +## libboost-all-dev +`([0-9]+(\.[0-9]+)+)` + +`dpkg -s libboost-dev | grep 'Version'` + +Version: 1.74.0.3ubuntu7 + + +## libpng-dev +`([\d.]+)` + +`pkg-config --modversion libpng` + +1.6.37 + +## libre2-dev +`([\d.]+)` + +`pkg-config --modversion libre2` + +0.0.0 + +## libpci-dev +`([\d.]+)` + +`pkg-config --modversion libpci` + +3.7.0 + + +## libreadline_dev +`([\d.]+)` + +`pkg-config --modversion readline` + +8.1 + +## zlib +`([\d.]+)` + +`pkg-config --modversion zlib` + +1.2.11 + + +## libsqlite3_dev +`([\d.]+)` + +`pkg-config --modversion sqlite3` + +3.37.2 + +## libssl_dev +`OpenSSL\s+([\d.]+)` + +`openssl version` + +OpenSSL 3.0.2 15 Mar 2022 (Library: OpenSSL 3.0.2 15 Mar 2022) + +## libudev-dev +`([\d.]+)` + +`pkg-config --modversion libudev` + +249 + + +## libbz2_dev +`Version ([A-Za-z0-9]+(\.[A-Za-z0-9]+)+)` + +`bzcat --version` + +bzip2, a block-sorting file compressor. Version 1.0.8, 13-Jul-2019. + +## libev_dev +Using `dpkg` here is fine since only apt-based installation is supported. +`Version ([A-Za-z0-9]+(\.[A-Za-z0-9]+)+)` + +`dpkg -s libev-dev | grep 'Version'` + +Version: 1:4.33-1 + +## libffi-dev +`([\d.]+)` + +`pkg-config --modversion libffi` + +3.4.2 + +## libffi_dev +`([\d.]+)` + +`pkg-config --modversion libffi` + +3.4.2 + +## libffi7 +`\d\.\d-[0-9]+` + +`dpkg -l libffi7 2>/dev/null | grep '^ii' | awk '{print $3}' || rpm -q libffi7 2>/dev/null || pacman -Q libffi7 2>/dev/null` + +3.3-5ubuntu1 + +## libffi8 +`\d\.\d\.\d-\d` + +`pkg-config --modversion libffi8` + +3.4.2-4 + +## libgdbm_dev +Using `dpkg` here is fine since only apt-based installation is supported. +`([\d]+\.[\d\.-]+)` + +`dpkg -s libgdbm-dev | grep 'Version'` + +Version: 1.23-1 + + +## libgmock +`([\d.]+)` + +`pkg-config --modversion libgmock` + +1.11.0 + +## liblzma_dev +`[A-Za-z]+\s\d\.\d\.\d` + +`xz --version` + +xz (XZ Utils) 5.2.5 +liblzma 5.2.5 + + +## libmpfr_dev +`([\d.]+)` + +`pkg-config --modversion mpfr` + +4.1.0 + +## libncurses_dev +`([0-9]+(\.[0-9]+)+)` + +`ncurses5-config --version` + +6.3.20211021 + + + +## ninja-build +`([\d.]+)` + +`ninja --version` + +1.11.1 + +## md5sha1sum +`md5sum \(GNU coreutils\) ([\d.]+)` + +`md5sum --version` or `sha1sum --version` + +md5sum (GNU coreutils) 9.5 + +sha1sum (GNU coreutils) 9.5 + + +## nlohmann-json3-dev +`([\d.]+)` + +`pkg-config --modversion nlohmann_json` + +3.10.5 + +## ntpdate +`([A-Za-z0-9]+(\.[A-Za-z0-9]+)+)` + +`dpkg -l ntpdate 2>/dev/null | grep ^ii | awk '{print $3}'` + +1:4.2.8p15+dfsg-1ubuntu2 + +## nvidia-cuda-toolkit +`release ([\d.]+)` + +`nvcc --version` + +nvcc: NVIDIA (R) Cuda compiler driver
    +Copyright (c) 2005-2021 NVIDIA Corporation
    +Built on Thu_Nov_18_09:45:25_PST_2021
    +Cuda compilation tools, release 11.5, V11.5.119
    +Build cuda_11.5.r11.5/compiler.30672275_0
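+
+A few utilities print their version to the error stream rather than stdout (for example, `transmission-daemon --version` later in this list); `_cm.yaml` marks such entries with `CM_SYS_UTIL_VERSION_CMD_USE_ERROR_STREAM: 'yes'`, and the detect step then folds stderr into the captured output. A rough Python equivalent of that capture, as a sketch only (the actual implementation is the shell redirection in `detect.sh`):
+
+```python
+import subprocess
+
+# Capture stdout and stderr together, as detect.sh does with '> tmp-ver.out 2>&1'
+proc = subprocess.run("transmission-daemon --version", shell=True,
+                      capture_output=True, text=True)
+with open("tmp-ver.out", "w") as f:
+    f.write(proc.stdout + proc.stderr)
+```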
+ + +## psmisc +`\(PSmisc\) ([\d.]+)` + +`pstree --version` + +pstree (PSmisc) 23.4 + +## rapidjson-dev +`([\d.]+)` + +`pkg-config --modversion RapidJSON` + +1.1.0 + +## cmake +`cmake version ([\d.]+)` + +`cmake --version` + +cmake version 3.30.4 + +## libnuma-dev +`([\d.]+)` + +`pkg-config --modversion numa` + +2.0.14 + + +## numactl +`([\d.]+)` + +`pkg-config --modversion numa` + +2.0.14 + +## wget +`Wget\s*([\d.]+)` + +`wget --version` + +GNU Wget 1.21.2 built on linux-gnu. + +## screen +`Screen version ([\d.]+)` + +`screen --version` + +Screen version 4.00.020 (FAU) 23-Oct-06 + +## xz +`xz \(XZ Utils\) ([\d.]+)` + +`xz --version` + +xz (XZ Utils) 5.2.5 +liblzma 5.2.5 + +## VIM +`VIM - Vi IMproved ([\d.]+)` + +`vim --version` + +VIM - Vi IMproved 9.0 (2022 Jun 28, compiled Aug 3 2024 14:50:46) + +## rsync +`rsync\s+version\s+([\d.]+)` + +`rsync --version` + +rsync version 3.2.7 protocol version 31 + +## sox +`sox:\s+SoX\s+v([\d.]+)` + +`sox --version` + +sox: SoX v14.4.2 + + +## systemd +`systemd ([\d]+)` + +`systemctl --version` (the same version number is also reported by `systemd --version`) + +systemd 249 (249.11-0ubuntu3.12) + +## tk-dev +Using `dpkg` here is fine since installation is only supported on Ubuntu. + +`([0-9]+(\.[0-9]+)+)` + +`dpkg -s tk-dev | grep Version` + +Version: 8.6.11+1build2 + + +## transmission +`transmission-daemon ([\d.]+)` + +`transmission-daemon --version` (note that this prints to stderr) + +transmission-daemon 3.00 (bb6b5a062e) + + +## wkhtmltopdf +`wkhtmltopdf ([\d.]+)` + +`wkhtmltopdf --version` + +wkhtmltopdf 0.12.6 + + +## dmidecode +`([\d.]+)` + +`dmidecode --version` + +3.3 + +## git-lfs +`git-lfs/([\d.]+)` + +`git-lfs --version` + +git-lfs/3.4.1 (GitHub; linux arm64; go 1.22.2) + +## zlib1g +`([\d.]+)` + +`pkg-config --modversion zlib` + +1.2.11 + +## zlib1g_dev +`([\d.]+)` + +`pkg-config --modversion zlib` + +1.2.11 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/_cm.yaml new file mode 100644 index 000000000..1d45c2c28 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/_cm.yaml @@ -0,0 +1,717 @@ +alias: get-generic-sys-util +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_env: + CM_CLEAN_DIRS: bin + CM_SUDO: sudo +deps: +- tags: detect,os +env: + CM_GENERIC_SYS_UTIL_INSTALL_NEEDED: 'no' + CM_SYS_UTIL_VERSION_CMD: '' +input_mapping: + fail_safe: CM_TMP_FAIL_SAFE + ignore_missing: CM_GENERIC_SYS_UTIL_IGNORE_MISSING_PACKAGE +new_env_keys: +- +PATH +tags: +- get +- sys-util +- generic +- generic-sys-util +tests: + run_inputs: + - docker: 'yes' + docker_os: rhel + docker_os_version: '9' + env: + CM_TMP_FAIL_SAFE: 'yes' + ignore_missing: 'yes' + test-all-variations: 'yes' + - docker: 'yes' + docker_os: ubuntu + docker_os_version: '20.04' + fail_safe: 'yes' + ignore_missing: 'yes' + test-all-variations: 'yes' + - docker: 'yes' + docker_os: ubuntu + docker_os_version: '22.04' + fail_safe: 'yes' + ignore_missing: 'yes' + test-all-variations: 'yes' + - docker: 'yes' + docker_os: ubuntu + docker_os_version: '24.04' + fail_safe: 'yes' + ignore_missing: 'yes' + test-all-variations: 'yes' +uid: bb0393afa8404a11 +variations: + cmake: + env: + CM_SYS_UTIL_NAME: cmake + CM_SYS_UTIL_VERSION_CMD: cmake --version + CM_SYS_UTIL_VERSION_RE: cmake version ([\d.]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1 + new_env_keys: + - CM_CMAKE_VERSION + state: + cmake: + apt: cmake + brew:
cmake + dnf: cmake + yum: cmake + detect: + default: true + env: + CM_GENERIC_SYS_UTIL_RUN_MODE: detect + group: mode + prehook_deps: + - enable_if_env: + CM_GENERIC_SYS_UTIL_INSTALL_NEEDED: + - 'yes' + force_env_keys: + - CM_TMP_FAIL_SAFE + inherit_variation_tags: true + names: + - install-sys-util + skip_inherit_variation_groups: + - mode + tags: get,generic-sys-util,_install + dmidecode: + env: + CM_SYS_UTIL_NAME: dmidecode + CM_SYS_UTIL_VERSION_CMD: dmidecode --version + CM_SYS_UTIL_VERSION_RE: ([\d.]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_DMIDECODE_VERSION + state: + dmidecode: + apt: dmidecode + brew: '' + dnf: dmidecode + yum: dmidecode + g++-11: + env: + CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes' + CM_SYS_UTIL_NAME: g++11 + CM_SYS_UTIL_VERSION_CMD: g++-11 --version + CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_GPP11_VERSION + state: + g++11: + apt: g++-11 + dnf: gcc-toolset-11-gcc-c++ + g++-12: + env: + CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes' + CM_SYS_UTIL_NAME: g++12 + CM_SYS_UTIL_VERSION_CMD: g++-12 --version + CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_GPP12_VERSION + state: + g++12: + apt: g++-12 + dnf: gcc-toolset-12-gcc-c++ + g++-9: + env: + CM_SYS_UTIL_NAME: g++9 + CM_SYS_UTIL_VERSION_CMD: g++-9 --version + CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_GPP9_VERSION + state: + g++9: + apt: g++-9 + dnf: gcc-toolset-9-gcc-c++ + gcc-11: + env: + CM_SYS_UTIL_NAME: gcc11 + CM_SYS_UTIL_VERSION_CMD: gcc-11 --version + CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_GCC11_VERSION + state: + gcc11: + apt: gcc-11 + gcc-9: + env: + CM_SYS_UTIL_NAME: gcc9 + CM_SYS_UTIL_VERSION_CMD: gcc-9 --version + CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_GCC9_VERSION + state: + gcc9: + apt: gcc-9 + gflags-dev: + env: + CM_SYS_UTIL_NAME: gflags-dev + new_env_keys: + - CM_GFLAGS_DEV_VERSION + state: + gflags-dev: + apt: libgflags-dev + brew: gflags + dnf: gflags-devel + yum: gflags-devel + git-lfs: + env: + CM_SYS_UTIL_NAME: git-lfs + CM_SYS_UTIL_VERSION_CMD: git-lfs --version + CM_SYS_UTIL_VERSION_RE: git-lfs\/([\d.]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_GIT_LFS_VERSION + state: + git-lfs: + apt: git-lfs + brew: git-lfs + dnf: git-lfs + yum: git-lfs + glog-dev: + env: + CM_SYS_UTIL_NAME: glog-dev + new_env_keys: + - CM_GLOG_DEV_VERSION + state: + glog-dev: + apt: libgoogle-glog-dev + brew: glog + dnf: glog-devel + yum: glog-devel + install: + env: + CM_GENERIC_SYS_UTIL_RUN_MODE: install + group: mode + new_env_keys: + - CM_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED + - CM_GET_GENERIC_SYS_UTIL_INSTALL_FAILED + libboost-all-dev: + env: + CM_SYS_UTIL_NAME: libboost-all-dev + CM_SYS_UTIL_VERSION_CMD: dpkg -s libboost-dev | grep 'Version' + CM_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1 + new_env_keys: + - CM_LIBBOOST_ALL_DEV_VERSION + state: + libboost-all-dev: + apt: libboost-all-dev + brew: '' + dnf: boost-devel + yum: boost-devel + libbz2-dev: + env: + CM_SYS_UTIL_NAME: libbz2_dev + CM_SYS_UTIL_VERSION_CMD_OVERRIDE: bzcat --version 2>&1 | grep bzip > tmp-ver.out + CM_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1 
+ new_env_keys: + - CM_LIBBZ2_DEV_VERSION + state: + libbz2_dev: + apt: libbz2-dev + brew: bzip2 + dnf: libbzip2-devel + yum: libbzip2-devel + zlib-devel: libbz2-devel + libev-dev: + env: + CM_SYS_UTIL_NAME: libev_dev + CM_SYS_UTIL_VERSION_CMD: dpkg -s libev-dev | grep 'Version' + CM_SYS_UTIL_VERSION_RE: ([\d:]+\.[\d\.-]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_LIBEV_DEV_VERSION + state: + libev_dev: + apt: libev-dev + libffi: + env: + CM_SYS_UTIL_NAME: libffi + new_env_keys: + - CM_LIBFFI_VERSION + state: + libffi: + apt: libffi + libffi-dev: + env: + CM_SYS_UTIL_NAME: libffi_dev + new_env_keys: + - CM_LIBFFI_DEV_VERSION + state: + libffi_dev: + apt: libffi-dev + brew: libffi + dnf: libffi-devel + yum: libffi-devel + libffi7: + env: + CM_SYS_UTIL_NAME: libffi7 + CM_SYS_UTIL_VERSION_CMD: dpkg -l libffi7 2>/dev/null | grep '^ii' | awk '{print + $3}' || rpm -q libffi7 2>/dev/null || pacman -Q libffi7 2>/dev/null + CM_SYS_UTIL_VERSION_RE: \d\.\d-[0-9]+ + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_LIBFFI7_VERSION + state: + libffi7: + apt: libffi7 + libffi8: + env: + CM_SYS_UTIL_NAME: libffi8 + new_env_keys: + - CM_LIBFFI8_VERSION + state: + libffi8: + apt: libffi8 + libgdbm-dev: + env: + CM_SYS_UTIL_NAME: libgdbm_dev + CM_SYS_UTIL_VERSION_CMD: dpkg -s libgdbm-dev | grep 'Version' + CM_SYS_UTIL_VERSION_RE: ([\d]+\.[\d\.-]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_LIBGDBM_DEV_VERSION + state: + libgdbm_dev: + apt: libgdbm-dev + libgmock-dev: + env: + CM_SYS_UTIL_NAME: libgmock-dev + new_env_keys: + - CM_LIBGMOCK_DEV_VERSION + state: + libgmock-dev: + apt: libgmock-dev + brew: '' + dnf: gmock-devel + yum: gmock-devel + liblzma-dev: + env: + CM_SYS_UTIL_NAME: liblzma_dev + CM_SYS_UTIL_VERSION_CMD: xz --version + CM_SYS_UTIL_VERSION_RE: (\d(\.\d)+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_LIBLZMA_DEV_VERSION + state: + liblzma_dev: + apt: liblzma-dev + libmkl-dev: + env: + CM_SYS_UTIL_NAME: libmkl-dev + new_env_keys: + - CM_LIBMKL_DEV_VERSION + state: + libmkl-dev: + apt: libmkl-dev + brew: '' + dnf: '' + yum: '' + libmpfr-dev: + env: + CM_SYS_UTIL_NAME: libmpfr-dev + new_env_keys: + - CM_LIBMPFR_DEV_VERSION + state: + libmpfr-dev: + apt: libmpfr-dev + brew: '' + dnf: mpfr-devel.x86_64 + yum: mpfr-devel.x86_64 + zypper: mpfr-devel + libncurses-dev: + env: + CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE: 'yes' + CM_SYS_UTIL_NAME: libncurses_dev + CM_SYS_UTIL_VERSION_CMD: ncurses5-config --version + CM_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1 + new_env_keys: + - CM_LIBNCURSES_DEV_VERSION + state: + libncurses_dev: + apt: libncurses-dev + dnf: libncurses-devel + yum: libncurses-devel + libnuma-dev: + env: + CM_SYS_UTIL_NAME: libnuma-dev + new_env_keys: + - CM_LIBNUMA_DEV_VERSION + state: + libnuma-dev: + apt: libnuma-dev + brew: '' + dnf: numactl-libs + yum: numactl-libs + libpci-dev: + env: + CM_SYS_UTIL_NAME: libpci-dev + new_env_keys: + - CM_LIBPCI_DEV_VERSION + state: + libpci-dev: + apt: libpci-dev + brew: '' + dnf: pciutils-devel + yum: pciutils-devel + libpng-dev: + env: + CM_SYS_UTIL_NAME: libpng-dev + new_env_keys: + - CM_LIBPNG_DEV_VERSION + state: + libpng-dev: + apt: libpng-dev + brew: '' + dnf: libpng-devel + yum: libpng-devel + libre2-dev: + env: + CM_SYS_UTIL_NAME: libre2-dev + new_env_keys: + - CM_LIBRE2_DEV_VERSION + state: + libre2-dev: + apt: libre2-dev + brew: '' + dnf: libre-devel + yum: libre-devel + libreadline-dev: + env: + CM_SYS_UTIL_NAME: 
libreadline_dev + new_env_keys: + - CM_LIBREADLINE_DEV_VERSION + state: + libreadline_dev: + apt: libreadline-dev + dnf: libreadline-devel + yum: readline-devel + libsqlite3-dev: + env: + CM_SYS_UTIL_NAME: libsqlite3_dev + new_env_keys: + - CM_LIBSQLITE3_DEV_VERSION + state: + libsqlite3_dev: + apt: libsqlite3-dev + libssl-dev: + env: + CM_SYS_UTIL_NAME: libssl_dev + CM_SYS_UTIL_VERSION_CMD: openssl version + CM_SYS_UTIL_VERSION_RE: OpenSSL\s+([\d.]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1 + new_env_keys: + - CM_LIBSSL_DEV_VERSION + state: + libssl_dev: + apt: libssl-dev + brew: openssl + dnf: libssl-devel + yum: libssl-devel + libudev-dev: + env: + CM_SYS_UTIL_NAME: libudev-dev + new_env_keys: + - CM_LIBUDEV_DEV_VERSION + state: + libudev-dev: + apt: libudev-dev + brew: '' + dnf: libudev-devel + yum: libudev-devel + linux-tools: + deps: + - tags: detect,os + env: + CM_SYS_UTIL_NAME: linux-tools + new_env_keys: + - CM_LINUX_TOOLS_VERSION + state: + linux-tools: + apt: linux-tools-<<>> + md5sha1sum: + env: + CM_SYS_UTIL_NAME: md5sha1sum + CM_SYS_UTIL_VERSION_CMD: md5sum --version | grep sha1sum + CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_MD5SHA1SUM_VERSION + state: + md5sha1sum: + apt: '' + brew: md5sha1sum + ninja-build: + env: + CM_SYS_UTIL_NAME: ninja-build + CM_SYS_UTIL_VERSION_CMD: ninja --version + CM_SYS_UTIL_VERSION_RE: ([\d.]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_NINJA_BUILD_VERSION + state: + ninja-build: + apt: ninja-build + brew: ninja-build + dnf: ninja-build + yum: ninja-build + zypper: ninja-build + nlohmann-json3-dev: + env: + CM_SYS_UTIL_NAME: nlohmann_json3_dev + new_env_keys: + - CM_NLOHMANN_JSON3_DEV_VERSION + state: + nlohmann_json3_dev: + apt: nlohmann-json3-dev + dnf: nlohmann-json-devel + ntpdate: + env: + CM_SYS_UTIL_NAME: ntpdate + new_env_keys: + - CM_NTPDATE_VERSION + state: + ntpdate: + apt: ntpdate + brew: '' + dnf: ntpdate + yum: ntpdate + numactl: + deps: + - enable_if_env: + CM_HOST_OS_FLAVOR: + - rhel + CM_HOST_OS_VERSION: + - '9.1' + - '9.2' + - '9.3' + tags: install,numactl,from.src + env: + CM_SYS_UTIL_NAME: numactl + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_NUMACTL_VERSION + state: + numactl: + apt: numactl + dnf: numactl-devel + yum: numactl-devel + nvidia-cuda-toolkit: + env: + CM_SYS_UTIL_NAME: nvidia-cuda-toolkit + CM_SYS_UTIL_VERSION_CMD: nvcc --version + CM_SYS_UTIL_VERSION_RE: release ([\d.]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1 + new_env_keys: + - CM_NVIDIA_CUDA_TOOLKIT_VERSION + state: + nvidia-cuda-toolkit: + apt: nvidia-cuda-toolkit + brew: '' + dnf: nvidia-cuda-toolkit + yum: nvidia-cuda-toolkit + pkg-config: + env: + CM_SYS_UTIL_NAME: pkg_config + CM_SYS_UTIL_VERSION_RE: \b(\d+\.\d+(?:\.\d+)?)\b + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + state: + pkg_config: + apt: pkg-config + brew: pkg-config + dnf: pkg-config + yum: pkg-config + psmisc: + env: + CM_SYS_UTIL_NAME: psmisc + new_env_keys: + - CM_PSMISC_VERSION + state: + psmisc: + apt: psmisc + brew: pstree + dnf: psmisc + yum: psmisc + rapidjson-dev: + env: + CM_SYS_UTIL_NAME: rapidjson-dev + new_env_keys: + - CM_RAPIDJSON_DEV_VERSION + state: + rapidjson-dev: + apt: rapidjson-dev + brew: '' + dnf: rapidjson-devel + yum: rapidjson-devel + rsync: + env: + CM_SYS_UTIL_NAME: rsync + CM_SYS_UTIL_VERSION_CMD: rsync --version + CM_SYS_UTIL_VERSION_RE: rsync\s+version\s+([\d.]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_RSYNC_VERSION + state: + rsync: + apt:
rsync + brew: rsync + dnf: rsync + yum: rsync + zypper: rsync + screen: + env: + CM_SYS_UTIL_NAME: screen + CM_SYS_UTIL_VERSION_CMD: screen --version + CM_SYS_UTIL_VERSION_RE: Screen version ([\d.]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_SCREEN_VERSION + state: + screen: + apt: screen + brew: screen + dnf: screen + yum: screen + zypper: screen + sox: + env: + CM_SYS_UTIL_NAME: sox + CM_SYS_UTIL_VERSION_CMD: sox --version + CM_SYS_UTIL_VERSION_RE: sox:\s+SoX\s+v([\d.]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_SOX_VERSION + state: + sox: + apt: sox + brew: sox + dnf: sox + systemd: + env: + CM_SYS_UTIL_NAME: systemd + CM_SYS_UTIL_VERSION_CMD: systemctl --version + CM_SYS_UTIL_VERSION_RE: systemd ([\d]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_SYSTEMD_VERSION + state: + systemd: + apt: systemd + brew: '' + dnf: systemd + yum: systemd + tk-dev: + env: + CM_SYS_UTIL_NAME: tk_dev + CM_SYS_UTIL_VERSION_CMD: dpkg -s tk-dev | grep Version + CM_SYS_UTIL_VERSION_RE: ([0-9]+(\.[0-9]+)+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1 + new_env_keys: + - CM_TK_DEV_VERSION + state: + tk_dev: + apt: tk-dev + transmission: + env: + CM_SYS_UTIL_NAME: transmission + CM_SYS_UTIL_VERSION_CMD: transmission-daemon --version + CM_SYS_UTIL_VERSION_CMD_USE_ERROR_STREAM: 'yes' + CM_SYS_UTIL_VERSION_RE: transmission-daemon ([\d.]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_TRANSMISSION_VERSION + state: + transmission: + apt: transmission-daemon + brew: transmission + dnf: transmission-daemon + yum: transmission-daemon + vim-common: + env: + CM_SYS_UTIL_NAME: vim_common + CM_SYS_UTIL_VERSION_CMD: vim --version + CM_SYS_UTIL_VERSION_RE: VIM - Vi IMproved ([\d.]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_VIM_COMMON_VERSION + state: + vim_common: + apt: vim-common + brew: vim + choco: vim + dnf: vim-common + yum: vim-common + wget: + env: + CM_SYS_UTIL_NAME: wget + CM_SYS_UTIL_VERSION_CMD: wget --version + CM_SYS_UTIL_VERSION_RE: Wget\s*([\d.]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_WGET_VERSION + state: + wget: + apt: wget + brew: wget + wkhtmltopdf: + env: + CM_SYS_UTIL_NAME: wkhtmltopdf + CM_SYS_UTIL_VERSION_CMD: wkhtmltopdf --version + CM_SYS_UTIL_VERSION_RE: wkhtmltopdf ([\d.]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 0 + new_env_keys: + - CM_WKHTMLTOPDF_VERSION + state: + wkhtmltopdf: + apt: wkhtmltopdf + brew: wkhtmltopdf + xz: + env: + CM_SYS_UTIL_NAME: xz + CM_SYS_UTIL_VERSION_CMD: xz --version + CM_SYS_UTIL_VERSION_RE: xz \(XZ Utils\) ([\d.]+) + CM_TMP_VERSION_DETECT_GROUP_NUMBER: 1 + new_env_keys: + - CM_XZ_VERSION + state: + xz: + apt: xz-utils + brew: xz + choco: xz + dnf: xz + yum: xz + zlib: + env: + CM_SYS_UTIL_NAME: zlib + new_env_keys: + - CM_ZLIB_VERSION + state: + zlib: + apt: zlib1g + choco: zlib + zlib1g-dev: + env: + CM_SYS_UTIL_NAME: zlib1g_dev + new_env_keys: + - CM_ZLIB1G_DEV_VERSION + state: + zlib1g_dev: + apt: zlib1g-dev + dnf: zlib-devel + yum: zlib-devel + zypper: zlib-devel diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/customize.py new file mode 100644 index 000000000..e6061a899 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/customize.py @@ -0,0 +1,197 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper:
https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import re + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + state = i['state'] + automation = i['automation'] + + # Use VERSION_CMD as CHECK_CMD if no CHECK_CMD is set + if env.get('CM_SYS_UTIL_VERSION_CMD', '') != '' and env.get( + 'CM_SYS_UTIL_CHECK_CMD', '') == '': + env['CM_SYS_UTIL_CHECK_CMD'] = env['CM_SYS_UTIL_VERSION_CMD'] + + if env.get('CM_GENERIC_SYS_UTIL_RUN_MODE', '') == "install": + if env.get('CM_SYS_UTIL_INSTALL_WITH_RETRY', '') == "yes": + i['run_script_input']['script_name'] = "install-with-retry" + else: + i['run_script_input']['script_name'] = "install" + + if env.get('CM_GENERIC_SYS_UTIL_RUN_MODE', '') == "detect": + if env.get('CM_SYS_UTIL_VERSION_CMD', '') != '' or env.get( + 'CM_SYS_UTIL_VERSION_CMD_OVERRIDE', '') != '': + r = automation.run_native_script( + {'run_script_input': i['run_script_input'], 'env': env, 'script_name': 'detect'}) + if r['return'] != 0: # detection failed, do install via prehook_deps + print("detection failed, going for installation") + env['CM_GENERIC_SYS_UTIL_INSTALL_NEEDED'] = "yes" + return {'return': 0} + else: # detection is successful, no need to install + # print("detection success") + env['CM_SYS_UTIL_INSTALL_CMD'] = "" + return {'return': 0} + else: # No detection command available, just install + # print("No detection possible, going for installation") + env['CM_GENERIC_SYS_UTIL_INSTALL_NEEDED'] = "yes" + return {'return': 0} + + # Only "install" mode reaches here + pm = env.get('CM_HOST_OS_PACKAGE_MANAGER') + util = env.get('CM_SYS_UTIL_NAME', '') + if util == '': + return { + 'return': 1, 'error': 'Please select a variation specifying the sys util name'} + + package = state.get(util) + package_name = None + if package and pm: + package_name = package.get(pm) + + if os_info['platform'] == 'windows' and not package_name: + print('') + print('WARNING: for now skipping get-generic-sys-util on Windows ...') + print('') + + return {'return': 0} + + if not pm: + return {'return': 1, 'error': 'Package manager not detected for the given OS'} + + if not package: + return {'return': 1, + 'error': f'No package name specified for {util} in the meta'} + + if not package_name: + if str(env.get('CM_GENERIC_SYS_UTIL_IGNORE_MISSING_PACKAGE', '') + ).lower() in ["1", "true", "yes"]: + print( + f"WARNING: No package name specified for {pm} and util name {util}. 
Ignoring it...") + env['CM_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED'] = 'yes' + return {'return': 0} + else: + return { + 'return': 1, 'error': f'No package name specified for {pm} and util name {util}'} + + if util == "libffi": + if env.get("CM_HOST_OS_FLAVOR", "") == "ubuntu": + if env.get("CM_HOST_OS_VERSION", "") in [ + "20.04", "20.10", "21.04", "21.10"]: + package_name = "libffi7" + else: + package_name = "libffi8" + + # Temporary handling of dynamic state variables + tmp_values = re.findall(r'<<<(.*?)>>>', str(package_name)) + for tmp_value in tmp_values: + if tmp_value not in env: + return {'return': 1, + 'error': 'variable {} is not in env'.format(tmp_value)} + if tmp_value in env: + if isinstance(package_name, str): + package_name = package_name.replace( + "<<<" + tmp_value + ">>>", str(env[tmp_value])) + + install_cmd = env.get('CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD') + if not install_cmd: + return { + 'return': 1, 'error': 'Package manager installation command not detected for the given OS'} + + if pm == "brew": + sudo = '' + else: + sudo = env.get('CM_SUDO', '') + env['CM_SYS_UTIL_INSTALL_CMD'] = sudo + \ + ' ' + install_cmd + ' ' + package_name + + env['+PATH'] = [] + + if env.get('CM_HOST_OS_FLAVOR', '') == 'rhel': + if env['CM_SYS_UTIL_NAME'] == "g++12": + env['+PATH'] = ["/opt/rh/gcc-toolset-12/root/usr/bin"] + + if env['CM_SYS_UTIL_NAME'] == "numactl" and env['CM_HOST_OS_VERSION'] in [ + "9.1", "9.2", "9.3"]: + env['CM_SYS_UTIL_INSTALL_CMD'] = '' + + if env.get('CM_SYS_UTIL_CHECK_CMD', + '') != '' and env['CM_SYS_UTIL_INSTALL_CMD'] != '': + env['CM_SYS_UTIL_INSTALL_CMD'] = f"""{env['CM_SYS_UTIL_CHECK_CMD']} || {env['CM_SYS_UTIL_INSTALL_CMD']}""" + + return {'return': 0} + + +def detect_version(i): + env = i['env'] + version_env_key = f"CM_{env['CM_SYS_UTIL_NAME'].upper()}_VERSION" + version_check_re = env.get('CM_SYS_UTIL_VERSION_RE', '') + group_number = env.get('CM_TMP_VERSION_DETECT_GROUP_NUMBER', 1) + + # Confirm that the regex pattern and file are present + if version_check_re == '' or not os.path.exists("tmp-ver.out"): + version = "undetected" + else: + r = i['automation'].parse_version({'match_text': version_check_re, + 'group_number': group_number, + 'env_key': version_env_key, + 'which_env': env}) + + if r['return'] > 0: + return r + + version = r['version'] + print( + i['recursion_spaces'] + + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} + + +def postprocess(i): + env = i['env'] + + version_env_key = f"CM_{env['CM_SYS_UTIL_NAME'].upper()}_VERSION" + + if (env.get('CM_SYS_UTIL_VERSION_CMD', '') != '' or env.get('CM_SYS_UTIL_VERSION_CMD_OVERRIDE', '') != '') and env.get(version_env_key, '') == '' and str(env.get( + 'CM_TMP_GENERIC_SYS_UTIL_PACKAGE_INSTALL_IGNORED', '')).lower() not in ["yes", "1", "true"] and env.get('CM_GET_GENERIC_SYS_UTIL_INSTALL_FAILED', '') != 'yes': + automation = i['automation'] + + r = automation.run_native_script( + {'run_script_input': i['run_script_input'], 'env': env, 'script_name': 'detect'}) + if r['return'] > 0 and str(env.get( + 'CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE', '')).lower() not in ["1", "yes", "true"]: + return {'return': 1, 'error': 'Version detection failed after installation. 
Please check the provided version command or use env.CM_GENERIC_SYS_UTIL_IGNORE_VERSION_DETECTION_FAILURE=yes to ignore the error.'} + + elif r['return'] == 0: + r = detect_version(i) + + if r['return'] > 0: + return r + + version = r['version'] + + env[version_env_key] = version + + # Not used now + env['CM_GENERIC_SYS_UTIL_' + env['CM_SYS_UTIL_NAME'].upper() + + '_CACHE_TAGS'] = 'version-' + version + + if env.get(version_env_key, '') == '': + env[version_env_key] = "undetected" + + return {'return': 0, 'version': env[version_env_key]} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/detect.sh b/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/detect.sh new file mode 100644 index 000000000..2c3583799 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/detect.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +if [[ -n "${CM_SYS_UTIL_VERSION_CMD_OVERRIDE}" ]]; then + cmd="${CM_SYS_UTIL_VERSION_CMD_OVERRIDE}" + echo $cmd + eval $cmd + test $? -eq 0 || exit $? +else + if [[ -n "${CM_SYS_UTIL_VERSION_CMD}" ]]; then + if [[ "${CM_SYS_UTIL_VERSION_CMD_USE_ERROR_STREAM}" == "yes" ]]; then + # Redirect both stdout and stderr to tmp-ver.out + cmd="${CM_SYS_UTIL_VERSION_CMD} > tmp-ver.out 2>&1" + else + cmd="${CM_SYS_UTIL_VERSION_CMD} > tmp-ver.out" + fi + echo $cmd + eval $cmd + test $? -eq 0 || exit $? + fi +fi + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/install-with-retry.sh b/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/install-with-retry.sh new file mode 100644 index 000000000..9abc55d08 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/install-with-retry.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# Safe execution of a command stored in a variable +cmd="${CM_SYS_UTIL_INSTALL_CMD}" +echo "$cmd" + +# set the max number of retries as well as the delay between the retries +max_retries=3 +delay_in_retry=3 + + +for ((i=1; i<=max_retries; i++)); do + echo "Attempting to install ${CM_SYS_UTIL_NAME} - $i of $max_retries..." + output=$(eval "$cmd" 2>&1) + exit_status=$?  # capture the command's status before echo overwrites $? + echo "$output" + + if [[ $exit_status -ne 0 || "$output" == *"Temporary failure resolving"* || "$output" == *"Unable to fetch some archives"* ]]; then + # Check for network-related errors in the output + if echo "$output" | grep -q -E "Could not resolve|Temporary failure resolving"; then + echo "Network issue detected, retrying in $delay_in_retry seconds..." + sleep $delay_in_retry + else + # If it's a non-network error, handle based on fail-safe setting + if [[ "${CM_TMP_FAIL_SAFE}" == 'yes' ]]; then + echo "CM_GET_GENERIC_SYS_UTIL_INSTALL_FAILED=yes" > tmp-run-env.out + echo "Fail-safe is enabled, exiting with status 0." + exit 0 + else + echo "Fail-safe is not enabled, exiting with error status $exit_status." + exit $exit_status + fi + fi + else + # If the command succeeded + echo "Successfully installed ${CM_SYS_UTIL_NAME}." + exit 0 + fi + + # If this was the last retry, print a final failure message + if [[ $i -eq $max_retries ]]; then + echo "Installation failed after $max_retries attempts due to persistent network issues." 
+ if [[ "${CM_TMP_FAIL_SAFE}" == 'yes' ]]; then + exit 0 + else + exit 1 + fi + fi +done diff --git a/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/install.sh b/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/install.sh new file mode 100644 index 000000000..c8f532c49 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-generic-sys-util/install.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Safe execution of a command stored in a variable +cmd="${CM_SYS_UTIL_INSTALL_CMD}" +echo "$cmd" + +# Execute the command and capture the exit status directly +if ! eval "$cmd"; then + echo "Command failed with status $?" + if [[ "${CM_TMP_FAIL_SAFE}" == 'yes' ]]; then + # Exit safely if fail-safe is enabled + echo "CM_GET_GENERIC_SYS_UTIL_INSTALL_FAILED=yes" > tmp-run-env.out + echo "Fail-safe is enabled, exiting with status 0" + exit 0 + else + # Otherwise exit with the actual error status + exit $? + fi +else + #echo "Command succeeded" + exit 0 +fi \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/_cm.yaml new file mode 100644 index 000000000..3008f6365 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/_cm.yaml @@ -0,0 +1,51 @@ +alias: get-gh-actions-runner +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +can_force_cache: true +tags: +- get +- gh +- actions-runner +- runner-code +- runner +- code +- gh-actions-runner +uid: 5b005c5a76f242a7 +input_mapping: + token: CM_GH_ACTIONS_RUNNER_TOKEN + url: CM_GH_ACTIONS_RUNNER_URL + +new_env_keys: + - CM_GH_ACTIONS_RUNNER_CODE_PATH + +deps: + - tags: detect-os + - tags: download-and-extract,_extract,_url.https://github.com/actions/runner/releases/download/v2.320.0/actions-runner-linux-x64-2.320.0.tar.gz + force_cache: yes + extra_cache_tags: gh-actions-runner-code,gh-actions,code + env: + CM_DAE_FINAL_ENV_NAME: CM_GH_ACTIONS_RUNNER_CODE_PATH + +variations: + config: + env: + CM_GH_ACTIONS_RUNNER_COMMAND: config + remove: + env: + CM_GH_ACTIONS_RUNNER_COMMAND: remove + install: + deps: + - tags: get,gh,actions-runner,_config + force_cache: yes + env: + CM_GH_ACTIONS_RUNNER_COMMAND: install + uninstall: + env: + CM_GH_ACTIONS_RUNNER_COMMAND: uninstall + start: + deps: + - tags: get,gh,actions-runner,_install + force_cache: yes + env: + CM_GH_ACTIONS_RUNNER_COMMAND: start diff --git a/cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/customize.py new file mode 100644 index 000000000..83006dc30 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/customize.py @@ -0,0 +1,56 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project 
contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import cmind as cm + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + cmd = env.get('CM_GH_ACTIONS_RUNNER_COMMAND', '') + if cmd == "config": + run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && ./config.sh --url {env['CM_GH_ACTIONS_RUNNER_URL']} --token {env['CM_GH_ACTIONS_RUNNER_TOKEN']}" + elif cmd == "remove": + run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && ./config.sh remove --token {env['CM_GH_ACTIONS_RUNNER_TOKEN']}" + elif cmd == "install": + run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh install" + elif cmd == "uninstall": + run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh uninstall" + cache_rm_tags = "gh,runner,_install" + r = cm.access({'action': 'rm', 'automation': 'cache', + 'tags': cache_rm_tags, 'f': True}) + print(r) + if r['return'] != 0 and r['return'] != 16: # ignore missing ones + return r + elif cmd == "start": + run_cmd = f"cd {env['CM_GH_ACTIONS_RUNNER_CODE_PATH']} && sudo ./svc.sh start" + + env['CM_RUN_CMD'] = run_cmd + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/run.sh new file mode 100644 index 000000000..547395120 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-gh-actions-runner/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +echo ${CM_RUN_CMD} +eval ${CM_RUN_CMD} +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-git-repo/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-git-repo/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-git-repo/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-git-repo/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-git-repo/README-extra.md new file mode 100644 index 000000000..83a368e5f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-git-repo/README-extra.md @@ -0,0 +1,20 @@ +# Get GIT Repository +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones any specified GIT repository. + +## Commands +To install +``` +cm run script --tags=get,git,repo,_repo.,[VARIATION] +``` +where [VARIATION] is one of +* `patch:` Applies the `git.patch` to the cloned git repository +* `short-history:` Uses a git depth of last 10 commits (significantly reduces the download size) +* `full-history:` Uses the full git history +* `no-recurse-submodules:` Only download the main repository + +## Exported Variables +* `CM_GIT_CHECKOUT_PATH`: Directory path of the cloned git repository + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. 
RHEL 9 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-git-repo/README.md b/cmx4mlops/cmx4mlops/repo/script/get-git-repo/README.md new file mode 100644 index 000000000..fb9e891c6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-git-repo/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/get-git-repo](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/get-git-repo) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-git-repo/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-git-repo/_cm.yaml new file mode 100644 index 000000000..eae2ac3e7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-git-repo/_cm.yaml @@ -0,0 +1,94 @@ +alias: get-git-repo +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: DevOps automation +default_env: + CM_GIT_CHECKOUT_FOLDER: repo + CM_GIT_DEPTH: --depth 4 + CM_GIT_PATCH: 'no' + CM_GIT_RECURSE_SUBMODULES: ' --recurse-submodules' + CM_GIT_URL: https://github.com/mlcommons/ck.git +deps: +- tags: detect,os +input_mapping: + branch: CM_GIT_CHECKOUT + depth: CM_GIT_DEPTH + env_key: CM_GIT_ENV_KEY + folder: CM_GIT_CHECKOUT_FOLDER + patch: CM_GIT_PATCH + pull: CM_GIT_REPO_PULL + submodules: CM_GIT_RECURSE_SUBMODULES + update: CM_GIT_REPO_PULL +new_env_keys: +- CM_GIT_CHECKOUT_PATH +- CM_GIT_REPO_* +- <<>> +post_deps: +- dynamic: true + enable_if_env: + CM_GIT_REPO_PULL: + - 'yes' + - 'True' + force_env_keys: + - CM_GIT_CHECKOUT_PATH + names: + - pull-git-repo + tags: pull,git,repo +print_env_at_the_end: + CM_GIT_CHECKOUT_PATH: CM cache path to the Git repo +tags: +- get +- git +- repo +- repository +- clone +uid: ed603e7292974f10 +variations: + branch.#: + env: + CM_GIT_BRANCH: '#' + group: checkout + cherrypicks.#: + env: + CM_GIT_CHERRYPICKS: '#' + full-history: + env: + CM_GIT_DEPTH: '' + group: git-history + lfs: + deps: + - tags: get,generic,sys-util,_git-lfs + env: + CM_GIT_REPO_NEEDS_LFS: 'yes' + no-recurse-submodules: + env: + CM_GIT_RECURSE_SUBMODULES: '' + patch: + env: + CM_GIT_PATCH: 'yes' + pr-to-apply.#: + env: + CM_GIT_PR_TO_APPLY: '#' + repo.#: + env: + CM_GIT_URL: '#' + group: repo + sha.#: + default_variations: + git-history: full-history + env: + CM_GIT_SHA: '#' + group: checkout + short-history: + default: true + env: + CM_GIT_DEPTH: --depth 5 + group: git-history + submodules.#: + env: + CM_GIT_SUBMODULES: '#' + tag.#: + env: + CM_GIT_CHECKOUT_TAG: '#' + group: checkout diff --git a/cmx4mlops/cmx4mlops/repo/script/get-git-repo/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-git-repo/customize.py new file mode 100644 index 000000000..f292f4e19 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-git-repo/customize.py @@ -0,0 +1,118 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + +# if os_info['platform'] == 'windows': +# return {'return':1, 'error': 'Windows is not supported in this script +# yet'} + + env = i['env'] + meta = i['meta'] + + env_key = get_env_key(env) + + cm_git_url = env['CM_GIT_URL'] 
+ + if 'CM_GIT_REPO_NAME' not in env: + update_env( + env, + 'CM_GIT_REPO{}_NAME', + env_key, + os.path.basename( + env['CM_GIT_URL'])) + + if 'CM_GIT_DEPTH' not in env: + env['CM_GIT_DEPTH'] = '' + + if 'CM_GIT_RECURSE_SUBMODULES' not in env: + env['CM_GIT_RECURSE_SUBMODULES'] = '' + + if env.get('CM_GIT_CHECKOUT', '') == '': + env['CM_GIT_CHECKOUT'] = env.get( + 'CM_GIT_SHA', env.get( + 'CM_GIT_BRANCH', '')) + + git_checkout_string = " -b " + env['CM_GIT_BRANCH'] if ( + "CM_GIT_BRANCH" in env and env.get('CM_GIT_SHA', '') == '') else "" + + git_clone_cmd = "git clone " + env['CM_GIT_RECURSE_SUBMODULES'] + git_checkout_string + " " + \ + env['CM_GIT_URL'] + " " + \ + env.get('CM_GIT_DEPTH', '') + ' ' + env['CM_GIT_CHECKOUT_FOLDER'] + + env['CM_GIT_CLONE_CMD'] = git_clone_cmd + env['CM_TMP_GIT_PATH'] = os.path.join( + os.getcwd(), env['CM_GIT_CHECKOUT_FOLDER'], ".gitdone") + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + env['CM_GIT_CHECKOUT_PATH'] = os.path.join( + os.getcwd(), env['CM_GIT_CHECKOUT_FOLDER']) + git_checkout_path = env['CM_GIT_CHECKOUT_PATH'] + + env_key = get_env_key(env) + + # We remap CM_GIT variables with CM_GIT_REPO prefix so that they don't + # contaminate the env of the parent script + update_env(env, 'CM_GIT_REPO{}_CHECKOUT_PATH', + env_key, env['CM_GIT_CHECKOUT_PATH']) + update_env(env, 'CM_GIT_REPO{}_URL', env_key, env['CM_GIT_URL']) + update_env(env, 'CM_GIT_REPO{}_CHECKOUT', env_key, env['CM_GIT_CHECKOUT']) + update_env(env, 'CM_GIT_REPO{}_DEPTH', env_key, env['CM_GIT_DEPTH']) + update_env(env, 'CM_GIT_REPO{}_CHECKOUT_FOLDER', + env_key, env['CM_GIT_CHECKOUT_FOLDER']) + update_env(env, 'CM_GIT_REPO{}_PATCH', env_key, env['CM_GIT_PATCH']) + update_env(env, 'CM_GIT_REPO{}_RECURSE_SUBMODULES', + env_key, env['CM_GIT_RECURSE_SUBMODULES']) + + if (env.get('CM_GIT_CHECKOUT_PATH_ENV_NAME', '') != ''): + env[env['CM_GIT_CHECKOUT_PATH_ENV_NAME']] = git_checkout_path + + env['CM_GET_DEPENDENT_CACHED_PATH'] = git_checkout_path + + if os.path.exists("tmp-cm-git-hash.out"): + with open("tmp-cm-git-hash.out", "r") as f: + git_hash = f.readline().strip() + env['CM_GIT_REPO_CURRENT_HASH'] = git_hash + + return {'return': 0} + + +def get_env_key(env): + + env_key = env.get('CM_GIT_ENV_KEY', '') + + if env_key != '' and not env_key.startswith('_'): + env_key = '_' + env_key + + return env_key + + +def update_env(env, key, env_key, var): + + env[key.format('')] = var + + if env_key != '': + env[key.format(env_key)] = var + + return diff --git a/cmx4mlops/cmx4mlops/repo/script/get-git-repo/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-git-repo/run.bat new file mode 100644 index 000000000..d00f32b15 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-git-repo/run.bat @@ -0,0 +1,70 @@ +@echo off + +rem echo ****************************************************** +rem echo Cloning MLCommons from %CM_GIT_URL% with branch %CM_GIT_CHECKOUT% %CM_GIT_DEPTH% %CM_GIT_RECURSE_SUBMODULES% ... + +rem git clone %CM_GIT_RECURSE_SUBMODULES% %CM_GIT_URL% %CM_GIT_DEPTH% inference +rem cd inference +rem git checkout -b "%CM_GIT_CHECKOUT%" +rem + +rem Next line allows ERRORLEVEL inside if statements! 
+setlocal enabledelayedexpansion + +set CUR_DIR=%cd% +set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH% + +set folder=%CM_GIT_CHECKOUT_FOLDER% + +if not exist "%CM_TMP_GIT_PATH%" ( + + if exist "%folder%" ( + rmdir /S /Q "%folder%" rem Use rmdir instead of deltree + ) + + echo ****************************************************** + echo Current directory: %CUR_DIR% + echo. + echo Cloning %CM_GIT_REPO_NAME% from %CM_GIT_URL% + echo. + echo "%CM_GIT_CLONE_CMD%" + echo. + + %CM_GIT_CLONE_CMD% + IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL! + + cd "%folder%" + + if not "%CM_GIT_SHA%" == "" ( + echo. + echo. + git checkout "%CM_GIT_CHECKOUT%" + IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL! + ) + +) else ( + cd "%folder%" +) + +if not "%CM_GIT_SUBMODULES%" == "" ( + for /F %%s in ("%CM_GIT_SUBMODULES%") do ( + echo. + echo Initializing submodule %%s + git submodule update --init %%s + IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL! + ) +) + +if "%CM_GIT_PATCH%" == "yes" ( + for %%x in (%CM_GIT_PATCH_FILEPATHS%) do ( + echo. + echo Applying patch %%x ... + git apply %%x + IF !ERRORLEVEL! NEQ 0 EXIT !ERRORLEVEL! + ) +) + +cd "%CUR_DIR%" + +exit /b 0 + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-git-repo/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-git-repo/run.sh new file mode 100644 index 000000000..2a7b0b51c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-git-repo/run.sh @@ -0,0 +1,97 @@ +#!/bin/bash + +CUR_DIR=$PWD +echo "$CUR_DIR" +SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} + +folder=${CM_GIT_CHECKOUT_FOLDER} +if [ ! -e "${CM_TMP_GIT_PATH}" ]; then + cmd="rm -rf ${folder}" + echo $cmd + eval $cmd + echo "******************************************************" + echo "Current directory: ${CUR_DIR}" + echo "" + echo "Cloning ${CM_GIT_REPO_NAME} from ${CM_GIT_URL}" + echo "" + echo "${CM_GIT_CLONE_CMD}"; + echo "" + + ${CM_GIT_CLONE_CMD} + rcode=$? + + if [ ! $rcode -eq 0 ]; then #try once more + rm -rf $folder + ${CM_GIT_CLONE_CMD} + test $? -eq 0 || exit $? + fi + + cd ${folder} + + if [ ! -z ${CM_GIT_SHA} ]; then + + echo "" + cmd="git checkout -b ${CM_GIT_SHA} ${CM_GIT_SHA}" + echo "$cmd" + eval "$cmd" + test $? -eq 0 || exit $? + + elif [ ! -z ${CM_GIT_CHECKOUT_TAG} ]; then + + echo "" + cmd="git fetch --all --tags" + echo "$cmd" + eval "$cmd" + cmd="git checkout tags/${CM_GIT_CHECKOUT_TAG} -b ${CM_GIT_CHECKOUT_TAG}" + echo "$cmd" + eval "$cmd" + test $? -eq 0 || exit $? + + else + cmd="git rev-parse HEAD >> ../tmp-cm-git-hash.out" + echo "$cmd" + eval "$cmd" + test $? -eq 0 || exit $? + fi + +else + cd ${folder} +fi + +if [ ! -z ${CM_GIT_PR_TO_APPLY} ]; then + echo "" + echo "Fetching from ${CM_GIT_PR_TO_APPLY}" + git fetch origin ${CM_GIT_PR_TO_APPLY}:tmp-apply +fi + +IFS=',' read -r -a cherrypicks <<< "${CM_GIT_CHERRYPICKS}" +for cherrypick in "${cherrypicks[@]}" +do + echo "" + echo "Applying cherrypick $cherrypick" + git cherry-pick -n $cherrypick + test $? -eq 0 || exit $? +done + +IFS=',' read -r -a submodules <<< "${CM_GIT_SUBMODULES}" + +for submodule in "${submodules[@]}" +do + echo "" + echo "Initializing submodule ${submodule}" + git submodule update --init "${submodule}" + test $? -eq 0 || exit $? +done + +if [ ${CM_GIT_PATCH} == "yes" ]; then + IFS=', ' read -r -a patch_files <<< ${CM_GIT_PATCH_FILEPATHS} + for patch_file in "${patch_files[@]}" + do + echo "" + echo "Applying patch $patch_file" + git apply "$patch_file" + test $? -eq 0 || exit $? 
+ done +fi + +cd "$CUR_DIR" diff --git a/cmx4mlops/cmx4mlops/repo/script/get-github-cli/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-github-cli/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-github-cli/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-github-cli/README.md b/cmx4mlops/cmx4mlops/repo/script/get-github-cli/README.md new file mode 100644 index 000000000..a43458ae7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-github-cli/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/get-github-cli](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/get-github-cli) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-github-cli/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-github-cli/_cm.yaml new file mode 100644 index 000000000..9cff523c2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-github-cli/_cm.yaml @@ -0,0 +1,16 @@ +alias: get-github-cli +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: DevOps automation +clean_files: [] +deps: +- tags: detect,os +tags: +- get +- gh +- gh-cli +- github +- cli +- github-cli +uid: 1417029c6ff44f21 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-github-cli/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-github-cli/customize.py new file mode 100644 index 000000000..a4bc06820 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-github-cli/customize.py @@ -0,0 +1,70 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'gh.exe' if os_info['platform'] == 'windows' else 'gh' + + # Will check env['CM_TMP_PATH'] if comes from installation script + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_GITHUBCLI_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + if r['return'] == 16: + if env.get('CM_TMP_FAIL_IF_NOT_FOUND', '').lower() == 'yes': + return r + + print(recursion_spaces + ' # {}'.format(r['error'])) + + # Attempt to run installer + r = { + 'return': 0, + 'skip': True, + 'script': { + 'tags': 'install,github-cli'}} + + return r + + found_path = r['found_path'] + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + + r = i['automation'].parse_version({'match_text': r'gh\s*version\s*([\d.]+)', + 'group_number': 1, + 'env_key': 'CM_GITHUBCLI_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} diff --git 
a/cmx4mlops/cmx4mlops/repo/script/get-github-cli/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-github-cli/run.bat
new file mode 100644
index 000000000..5d06678cf
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-github-cli/run.bat
@@ -0,0 +1 @@
+gh.exe --version > tmp-ver.out
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-github-cli/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-github-cli/run.sh
new file mode 100644
index 000000000..6ac03d3ca
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-github-cli/run.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+gh --version > tmp-ver.out
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-go/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-go/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-go/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-go/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-go/README-extra.md
new file mode 100644
index 000000000..d1c4f9caa
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-go/README-extra.md
@@ -0,0 +1,10 @@
+# Get Go Tool
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the Go tool installed on the system.
+
+## Exported Variables
+* `CM_GO_BIN_WITH_PATH`
+* `+PATH`
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. RHEL 9
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-go/README.md b/cmx4mlops/cmx4mlops/repo/script/get-go/README.md
new file mode 100644
index 000000000..9dceef8df
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-go/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/get-go](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/get-go) for the documentation of this CM script.
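To make the detection flow concrete: `get-go` (whose files follow) locates the `go` binary, runs `go version` into `tmp-ver.out`, and extracts the version with a regex. A minimal, self-contained sketch of that parsing step, using a hypothetical sample of `go version` output:

```python
import re

# Hypothetical output that run.sh would capture into tmp-ver.out
sample = "go version go1.21.4 linux/amd64"

# Same pattern that detect_version() in get-go/customize.py passes to parse_version()
match = re.search(r'\s+go([\d.]+)', sample)
if match:
    print("Detected version:", match.group(1))  # -> 1.21.4
```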
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-go/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-go/_cm.yaml new file mode 100644 index 000000000..f7c5c89d2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-go/_cm.yaml @@ -0,0 +1,23 @@ +alias: get-go +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +clean_files: [] +env: + CM_REQUIRE_INSTALL: 'no' +new_env_keys: +- CM_GO_* +- +PATH +prehook_deps: +- enable_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + reuse_version: true + tags: install,go +tags: +- get +- tool +- go +- get-go +uid: ab42647a96724a25 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-go/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-go/customize.py new file mode 100644 index 000000000..136fccf14 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-go/customize.py @@ -0,0 +1,75 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'go.exe' if os_info['platform'] == 'windows' else 'go' + env['FILE_NAME'] = file_name + if 'CM_GO_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_GO_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + if r['return'] == 16: + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return': 0} + + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'\s+go([\d.]+)', + 'group_number': 1, + 'env_key': 'CM_GO_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} + + +def postprocess(i): + env = i['env'] + + r = detect_version(i) + + if r['return'] > 0: + return r + + version = r['version'] + found_file_path = env['CM_GO_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + env['CM_GO_INSTALLED_PATH'] = found_path + + env['CM_GO_CACHE_TAGS'] = 'version-' + version + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-go/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-go/run.sh new file mode 100644 index 000000000..51faa937d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-go/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash +go version > tmp-ver.out +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
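The `postprocess()` step of `get-go` above derives the install directory and a cache tag from the detected binary path; a standalone sketch of that derivation (the binary path below is a hypothetical example):

```python
import os

env = {'CM_GO_BIN_WITH_PATH': '/usr/local/go/bin/go'}  # hypothetical detected path
version = '1.21.4'                                     # as returned by detect_version()

# Mirrors postprocess() in get-go/customize.py
found_path = os.path.dirname(env['CM_GO_BIN_WITH_PATH'])   # -> /usr/local/go/bin
env['CM_GO_INSTALLED_PATH'] = found_path
env['CM_GO_CACHE_TAGS'] = 'version-' + version             # -> version-1.21.4
```

The cache tag lets later runs reuse this detection from the CM cache instead of probing the system again.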
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/README.md b/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/README.md
new file mode 100644
index 000000000..c2a5fd226
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/get-google-saxml](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/get-google-saxml) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/_cm.yaml
new file mode 100644
index 000000000..2e2db0f88
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/_cm.yaml
@@ -0,0 +1,45 @@
+alias: get-google-saxml
+uid: 5d7b17d84b5a48fb
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+cache: true
+
+category: AI/ML frameworks
+
+default_version: master
+
+deps:
+
+  - tags: detect,os
+
+  - names:
+    - python3
+    - python
+    tags: get,python3
+
+  - env:
+      CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_GOOGLE_SAXML_SRC
+    extra_cache_tags: google,saxml,src
+    force_env_keys:
+    - CM_GIT_CHECKOUT
+    names:
+    - google-saxml-git-src
+    tags: get,git,_repo.https://github.com/google/saxml
+
+  - tags: get,bazel
+    names:
+    - bazel
+
+extra_cache_tags_from_env:
+  - env: CM_PYTHON_CACHE_TAGS
+    prefix: python-
+
+new_env_keys:
+- CM_GOOGLE_SAXML*
+
+tags:
+- get
+- google
+- saxml
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/customize.py
new file mode 100644
index 000000000..29da237a5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/customize.py
@@ -0,0 +1,33 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    # TBD
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    os_info = i['os_info']
+    env = i['env']
+
+    # TBD
+    cur_dir = os.getcwd()
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/run.bat
new file mode 100644
index 000000000..ceaa88fea
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/run.bat
@@ -0,0 +1,3 @@
+@echo off
+
+echo TBD
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/run.sh
new file mode 100644
index 000000000..bbb9d5222
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-google-saxml/run.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo "TBD"
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-google-test/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-google-test/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-google-test/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
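The saxml `customize.py` above is still a stub, but it illustrates the minimal contract that every `customize.py` in this repository follows: `preprocess()`/`postprocess()` receive a dictionary carrying `env`, `os_info` and related fields, and signal success or failure through the `return` key. A bare-bones sketch of that interface (the key names checked inside are hypothetical):

```python
# Minimal shape of a CM customize.py (sketch; real scripts receive more fields)
def preprocess(i):
    env = i['env']           # environment dict shared between scripts
    os_info = i['os_info']   # e.g. os_info['platform'] == 'windows'

    if env.get('SOME_REQUIRED_KEY', '') == '':   # hypothetical validation
        return {'return': 1, 'error': 'SOME_REQUIRED_KEY is not set'}
    return {'return': 0}


def postprocess(i):
    i['env']['SOME_NEW_KEY'] = 'value'           # hypothetical exported variable
    return {'return': 0}
```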
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-google-test/README.md b/cmx4mlops/cmx4mlops/repo/script/get-google-test/README.md new file mode 100644 index 000000000..57cc055c6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-google-test/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-google-test](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-google-test) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-google-test/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-google-test/_cm.yaml new file mode 100644 index 000000000..68de9dbfd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-google-test/_cm.yaml @@ -0,0 +1,37 @@ +alias: get-google-test +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_version: 1.14.0 +deps: +- names: + - cmake + tags: get,cmake +- names: + - compiler + tags: get,compiler +input_description: {} +input_mapping: {} +new_env_keys: +- CM_GOOGLE_TEST_SRC_PATH +- CM_GOOGLE_TEST_INSTALL_PATH +- +C_INCLUDE_PATH +- +LD_LIBRARY_PATH +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: +- extra_cache_tags: google-test,gtest + force_env_keys: + - CM_GIT_* + tags: get,git,repo,_repo.https://github.com/google/googletest.git +tags: +- get +- google-test +- googletest +- gtest +- test +- google +uid: 02945138a5614253 +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-google-test/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-google-test/customize.py new file mode 100644 index 000000000..b604a9a7b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-google-test/customize.py @@ -0,0 +1,46 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + env['CM_GIT_CHECKOUT'] = "v" + env['CM_VERSION'] + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + if '+C_INCLUDE_PATH' not in env: + env['+C_INCLUDE_PATH'] = [] + if '+LD_LIBRARY_PATH' not in env: + env['+LD_LIBRARY_PATH'] = [] + + gtest_install_path = os.path.join(os.getcwd(), "install") + env['CM_GOOGLE_TEST_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] + env['CM_GOOGLE_TEST_INSTALL_PATH'] = gtest_install_path + env['+C_INCLUDE_PATH'].append(os.path.join(gtest_install_path, "include")) + env['+LD_LIBRARY_PATH'].append(os.path.join(gtest_install_path, "lib")) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-google-test/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-google-test/run.sh new file mode 100644 index 000000000..c8a9a4425 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-google-test/run.sh @@ -0,0 +1,23 @@ +#!/bin/bash +function cmake() { +${CM_CMAKE_BIN_WITH_PATH} $@ +} + +export CC=${CM_C_COMPILER_WITH_PATH} +export CXX=${CM_CXX_COMPILER_WITH_PATH} + +CUR=$PWD 
+mkdir -p install +INSTALL_DIR=$CUR/install +cd ${CM_GIT_REPO_CHECKOUT_PATH} + +mkdir build +cd build +export MAKEFLAGS=-j${CM_MAKE_CORES} +cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} .. +test $? -eq 0 || exit $? + +CMD="make install" +echo ${CMD} +eval $CMD +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/README-extra.md new file mode 100644 index 000000000..1618d0ed0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/README-extra.md @@ -0,0 +1 @@ +20240127: Grigori added patch to support latest PIL diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/README.md new file mode 100644 index 000000000..ee0538b99 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Reproducibility-and-artifact-evaluation/get-ipol-src](https://docs.mlcommons.org/cm4mlops/scripts/Reproducibility-and-artifact-evaluation/get-ipol-src) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/_cm.yaml new file mode 100644 index 000000000..dd6b6ca0d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/_cm.yaml @@ -0,0 +1,29 @@ +alias: get-ipol-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Reproducibility and artifact evaluation +env: + CM_IPOL_NUMBER: '439' + CM_IPOL_SRC_URL: http://www.ipol.im/pub/art/{{CM_IPOL_YEAR}}/{{CM_IPOL_NUMBER}}/{{CM_IPOL_NUMBER}}-master.zip + CM_IPOL_YEAR: '2022' +extra_cache_tags_from_env: +- env: CM_IPOL_NUMBER + prefix: number- +- env: CM_IPOL_YEAR + prefix: year- +input_description: + number: IPOL publication number + year: IPOL publication year +input_mapping: + number: CM_IPOL_NUMBER + year: CM_IPOL_YEAR +new_env_keys: +- CM_IPOL_* +tags: +- get +- ipol +- journal +- src +- ipol-src +uid: b6fd8213d03d4aa4 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/customize.py new file mode 100644 index 000000000..61e06952f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/customize.py @@ -0,0 +1,77 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + script_path = i['artifact'].path + + automation = i['automation'] + + cm = automation.cmind + + path = os.getcwd() + + url = env['CM_IPOL_SRC_URL'] + + year = env.get('CM_IPOL_YEAR', '') + number = 
env.get('CM_IPOL_NUMBER', '') + + url = url.replace( + '{{CM_IPOL_YEAR}}', + year).replace( + '{{CM_IPOL_NUMBER}}', + number) + + print('Downloading from {}'.format(url)) + + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': url}) + if r['return'] > 0: + return r + + filename = r['filename'] + + print('Unzipping file {}'.format(filename)) + + r = cm.access({'action': 'unzip_file', + 'automation': 'utils,dc2743f8450541e3', + 'filename': filename}) + if r['return'] > 0: + return r + + if os.path.isfile(filename): + print('Removing file {}'.format(filename)) + os.remove(filename) + + # Get sub-directory from filename + ff = os.path.splitext(filename) + + subdir = ff[0] + + env['CM_IPOL_PATH'] = os.path.join(path, subdir) + + # Applying patch + cmd = 'patch -p0 < {}'.format(os.path.join(script_path, + 'patch', '20240127.patch')) + + print('Patching code: {}'.format(cmd)) + os.system(cmd) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/patch/20240127.patch b/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/patch/20240127.patch new file mode 100644 index 000000000..6610d0cee --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ipol-src/patch/20240127.patch @@ -0,0 +1,10 @@ +diff -Naur 439-master/main.py 439-master.new/main.py +--- 439-master/main.py Sat Jan 27 22:11:55 2024 ++++ 439-master.new/main.py Sat Jan 27 22:06:51 2024 +@@ -135,5 +135,5 @@ + args = parser.parse_args() + #print('before plume detection', os.path.dirname(os.path.realpath('__file__')), file=sys.stderr) + p = compute_map(args.input_0,args.input_1) +- imageio.imsave("cm.png", ((255*p[0,:,:])).numpy()) ++ imageio.imsave("cm.png", np.array((255*p[0,:,:]).numpy(), np.uint8)) + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-java/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-java/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-java/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-java/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-java/README-extra.md new file mode 100644 index 000000000..232fbe6e0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-java/README-extra.md @@ -0,0 +1,6 @@ +# Windows + +## Misc + +* https://jdk.java.net/java-se-ri/11 +* https://learn.microsoft.com/fr-fr/java/openjdk/download diff --git a/cmx4mlops/cmx4mlops/repo/script/get-java/README.md b/cmx4mlops/cmx4mlops/repo/script/get-java/README.md new file mode 100644 index 000000000..51de05019 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-java/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-java](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-java) for the documentation of this CM script. 
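For reference, the `{{...}}` placeholder expansion performed by `get-ipol-src/customize.py` above is just two string replacements; a standalone sketch using the defaults from its `_cm.yaml`:

```python
url = ('http://www.ipol.im/pub/art/{{CM_IPOL_YEAR}}/{{CM_IPOL_NUMBER}}/'
       '{{CM_IPOL_NUMBER}}-master.zip')
year, number = '2022', '439'  # defaults from get-ipol-src/_cm.yaml

url = url.replace('{{CM_IPOL_YEAR}}', year).replace('{{CM_IPOL_NUMBER}}', number)
print(url)  # -> http://www.ipol.im/pub/art/2022/439/439-master.zip
```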
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-java/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-java/_cm.yaml new file mode 100644 index 000000000..07facec86 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-java/_cm.yaml @@ -0,0 +1,26 @@ +alias: get-java +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_env: + CM_JAVA_PREBUILT_BUILD: '36' + CM_JAVA_PREBUILT_FILENAME: openjdk-${CM_JAVA_PREBUILT_VERSION}+${CM_JAVA_PREBUILT_BUILD}_${CM_JAVA_PREBUILT_HOST_OS}-x64_bin + CM_JAVA_PREBUILT_URL: https://download.java.net/openjdk/jdk${CM_JAVA_PREBUILT_VERSION}/ri/ + CM_JAVA_PREBUILT_VERSION: '19' +deps: +- tags: detect,os +input_mapping: + install: CM_JAVA_PREBUILT_INSTALL +new_env_keys: +- CM_JAVA_* +- JAVA_HOME +- +PATH +tags: +- get +- java +uid: 9399d0e785704f8c +variations: + install: + env: + CM_JAVA_PREBUILT_INSTALL: 'on' diff --git a/cmx4mlops/cmx4mlops/repo/script/get-java/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-java/customize.py new file mode 100644 index 000000000..7d14a3827 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-java/customize.py @@ -0,0 +1,165 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + run_script_input = i['run_script_input'] + + file_name = 'java.exe' if os_info['platform'] == 'windows' else 'java' + + cur_dir = os.getcwd() + + meta = i['meta'] + + found = False + install = env.get('CM_JAVA_PREBUILT_INSTALL', '') in ['on', 'True', True] + + env_path_key = 'CM_JAVA_BIN_WITH_PATH' + + # If not force install, search for artifact + if not install: + rr = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': env_path_key, + 'run_script_input': i['run_script_input'], + 'hook': skip_path, + 'recursion_spaces': recursion_spaces}) + if rr['return'] == 0: + found = True + elif rr['return'] != 16: + return rr + + # If not found or force install + if not found or install: + + if os_info['platform'] == 'windows': + env['CM_JAVA_PREBUILT_HOST_OS'] = 'windows' + env['CM_JAVA_PREBUILT_EXT'] = '.zip' + else: + env['CM_JAVA_PREBUILT_HOST_OS'] = 'linux' + env['CM_JAVA_PREBUILT_EXT'] = '.tar.gz' + + url = env['CM_JAVA_PREBUILT_URL'] + filename = env['CM_JAVA_PREBUILT_FILENAME'] + + java_prebuilt_version = env['CM_JAVA_PREBUILT_VERSION'] + java_prebuilt_build = env['CM_JAVA_PREBUILT_BUILD'] + + for key in ['CM_JAVA_PREBUILT_VERSION', + 'CM_JAVA_PREBUILT_BUILD', + 'CM_JAVA_PREBUILT_HOST_OS', + 'CM_JAVA_PREBUILT_EXT']: + url = url.replace('${' + key + '}', env[key]) + filename = filename.replace('${' + key + '}', env[key]) + + env['CM_JAVA_PREBUILT_URL'] = url + env['CM_JAVA_PREBUILT_FILENAME'] = filename + + print('') + print( + recursion_spaces + + ' Downloading and installing prebuilt Java from {} ...'.format( + url + + filename)) + + rr = 
automation.run_native_script( + {'run_script_input': run_script_input, 'env': env, 'script_name': 'install-prebuilt'}) + if rr['return'] > 0: + return rr + + target_path = os.path.join( + cur_dir, 'jdk-' + java_prebuilt_version, 'bin') + target_file = os.path.join(target_path, file_name) + + if not os.path.isfile(target_file): + return {'return': 1, + 'error': 'can\'t find target file {}'.format(target_file)} + + print('') + print( + recursion_spaces + + ' Registering file {} ...'.format(target_file)) + + env[env_path_key] = target_file + + if '+PATH' not in env: + env['+PATH'] = [] + env['+PATH'].append(target_path) + + return {'return': 0} + + +def skip_path(i): + + # Avoid not complete path on Windows + skip = False + + path = i['file'] + + if 'javapath' in path: + skip = True + + return {'return': 0, 'skip': skip} + + +def detect_version(i): + + r = i['automation'].parse_version({'match_text': r'\s*"(.*?)"', + 'group_number': 1, + 'env_key': 'CM_JAVA_VERSION', + 'which_env': i['env'], + 'debug': True}) + if r['return'] > 0: + return r + + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} + + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] > 0: + return r + + version = env['CM_JAVA_VERSION'] + env['CM_JAVA_CACHE_TAGS'] = 'version-' + version + + found_file_path = env['CM_JAVA_BIN_WITH_PATH'] + file_name = os.path.basename(found_file_path) + + env['CM_JAVA_BIN'] = file_name + + found_path = os.path.dirname(found_file_path) + java_home_path = os.path.dirname(found_path) + + env['JAVA_HOME'] = java_home_path + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-java/install-prebuilt.bat b/cmx4mlops/cmx4mlops/repo/script/get-java/install-prebuilt.bat new file mode 100644 index 000000000..17b00e5ab --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-java/install-prebuilt.bat @@ -0,0 +1,9 @@ +del /Q %CM_JAVA_PREBUILT_FILENAME%.zip + +wget --no-check-certificate %CM_JAVA_PREBUILT_URL%%CM_JAVA_PREBUILT_FILENAME%.zip +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +unzip %CM_JAVA_PREBUILT_FILENAME%.zip +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +del /Q %CM_JAVA_PREBUILT_FILENAME%.zip diff --git a/cmx4mlops/cmx4mlops/repo/script/get-java/install-prebuilt.sh b/cmx4mlops/cmx4mlops/repo/script/get-java/install-prebuilt.sh new file mode 100644 index 000000000..575d0467e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-java/install-prebuilt.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +rm -f ${CM_JAVA_PREBUILT_FILENAME}.tar.gz +rm -f ${CM_JAVA_PREBUILT_FILENAME}.tar + +wget --no-check-certificate ${CM_JAVA_PREBUILT_URL}${CM_JAVA_PREBUILT_FILENAME}.tar.gz +test $? -eq 0 || exit 1 + +gzip -d ${CM_JAVA_PREBUILT_FILENAME}.tar.gz +test $? -eq 0 || exit 1 + +tar xvf ${CM_JAVA_PREBUILT_FILENAME}.tar +test $? 
-eq 0 || exit 1 + +rm -f ${CM_JAVA_PREBUILT_FILENAME}.tar diff --git a/cmx4mlops/cmx4mlops/repo/script/get-java/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-java/run.bat new file mode 100644 index 000000000..0a80aa34c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-java/run.bat @@ -0,0 +1,3 @@ +"%CM_JAVA_BIN_WITH_PATH%" -version > tmp-ver.out 2>&1 +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-java/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-java/run.sh new file mode 100644 index 000000000..566a2b569 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-java/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash +${CM_JAVA_BIN_WITH_PATH} -version &> tmp-ver.out +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-javac/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-javac/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-javac/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-javac/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-javac/README-extra.md new file mode 100644 index 000000000..232fbe6e0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-javac/README-extra.md @@ -0,0 +1,6 @@ +# Windows + +## Misc + +* https://jdk.java.net/java-se-ri/11 +* https://learn.microsoft.com/fr-fr/java/openjdk/download diff --git a/cmx4mlops/cmx4mlops/repo/script/get-javac/README.md b/cmx4mlops/cmx4mlops/repo/script/get-javac/README.md new file mode 100644 index 000000000..b8587765f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-javac/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-javac](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-javac) for the documentation of this CM script. 
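Both `get-java` above and `get-javac` below expand the `${...}` placeholders in the prebuilt URL and filename with a plain replace loop in Python rather than relying on shell expansion. A minimal sketch using the defaults from `get-java/_cm.yaml`:

```python
env = {
    'CM_JAVA_PREBUILT_VERSION': '19',
    'CM_JAVA_PREBUILT_BUILD': '36',
    'CM_JAVA_PREBUILT_HOST_OS': 'linux',
    'CM_JAVA_PREBUILT_EXT': '.tar.gz',
}
url = 'https://download.java.net/openjdk/jdk${CM_JAVA_PREBUILT_VERSION}/ri/'
filename = ('openjdk-${CM_JAVA_PREBUILT_VERSION}+${CM_JAVA_PREBUILT_BUILD}'
            '_${CM_JAVA_PREBUILT_HOST_OS}-x64_bin')

# Mirrors the replace loop in get-java/customize.py
for key in env:
    url = url.replace('${' + key + '}', env[key])
    filename = filename.replace('${' + key + '}', env[key])

print(url)       # -> https://download.java.net/openjdk/jdk19/ri/
print(filename)  # -> openjdk-19+36_linux-x64_bin
```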
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-javac/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-javac/_cm.yaml new file mode 100644 index 000000000..89ffaf779 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-javac/_cm.yaml @@ -0,0 +1,27 @@ +alias: get-javac +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_env: + CM_JAVAC_PREBUILT_BUILD: '36' + CM_JAVAC_PREBUILT_FILENAME: openjdk-${CM_JAVAC_PREBUILT_VERSION}+${CM_JAVAC_PREBUILT_BUILD}_${CM_JAVAC_PREBUILT_HOST_OS}-x64_bin + CM_JAVAC_PREBUILT_URL: https://download.java.net/openjdk/jdk${CM_JAVAC_PREBUILT_VERSION}/ri/ + CM_JAVAC_PREBUILT_VERSION: '19' +deps: +- tags: detect,os +input_mapping: + install: CM_JAVAC_PREBUILT_INSTALL +new_env_keys: +- CM_JAVAC_* +- CM_JAVA_* +- JAVA_HOME +- +PATH +tags: +- get +- javac +uid: 509280c497b24226 +variations: + install: + env: + CM_JAVAC_PREBUILT_INSTALL: 'on' diff --git a/cmx4mlops/cmx4mlops/repo/script/get-javac/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-javac/customize.py new file mode 100644 index 000000000..ea3689ecb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-javac/customize.py @@ -0,0 +1,175 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + run_script_input = i['run_script_input'] + + file_name = 'javac.exe' if os_info['platform'] == 'windows' else 'javac' + + cur_dir = os.getcwd() + + meta = i['meta'] + + found = False + install = env.get('CM_JAVAC_PREBUILT_INSTALL', '') in ['on', 'True', True] + + env_path_key = 'CM_JAVAC_BIN_WITH_PATH' + + # If not force install, search for artifact + if not install: + rr = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': env_path_key, + 'run_script_input': i['run_script_input'], + 'hook': skip_path, + 'recursion_spaces': recursion_spaces}) + if rr['return'] == 0: + found = True + elif rr['return'] != 16: + return rr + + # If not found or force install + if not found or install: + + if os_info['platform'] == 'windows': + env['CM_JAVAC_PREBUILT_HOST_OS'] = 'windows' + env['CM_JAVAC_PREBUILT_EXT'] = '.zip' + else: + env['CM_JAVAC_PREBUILT_HOST_OS'] = 'linux' + env['CM_JAVAC_PREBUILT_EXT'] = '.tar.gz' + + url = env['CM_JAVAC_PREBUILT_URL'] + filename = env['CM_JAVAC_PREBUILT_FILENAME'] + + javac_prebuilt_version = env['CM_JAVAC_PREBUILT_VERSION'] + javac_prebuilt_build = env['CM_JAVAC_PREBUILT_BUILD'] + + for key in ['CM_JAVAC_PREBUILT_VERSION', + 'CM_JAVAC_PREBUILT_BUILD', + 'CM_JAVAC_PREBUILT_HOST_OS', + 'CM_JAVAC_PREBUILT_EXT']: + url = url.replace('${' + key + '}', env[key]) + filename = filename.replace('${' + key + '}', env[key]) + + env['CM_JAVAC_PREBUILT_URL'] = url + env['CM_JAVAC_PREBUILT_FILENAME'] = filename + + print('') + print( + recursion_spaces + + ' Downloading and installing prebuilt Java from {} ...'.format( + 
url +
+                filename))
+
+        rr = automation.run_native_script(
+            {'run_script_input': run_script_input, 'env': env, 'script_name': 'install-prebuilt'})
+        if rr['return'] > 0:
+            return rr
+
+        target_path = os.path.join(
+            cur_dir, 'jdk-' + javac_prebuilt_version, 'bin')
+        target_file = os.path.join(target_path, file_name)
+
+        if not os.path.isfile(target_file):
+            return {'return': 1,
+                    'error': 'can\'t find target file {}'.format(target_file)}
+
+        print('')
+        print(
+            recursion_spaces +
+            '    Registering file {} ...'.format(target_file))
+
+        env[env_path_key] = target_file
+
+        if '+PATH' not in env:
+            env['+PATH'] = []
+        env['+PATH'].append(target_path)
+
+    return {'return': 0}
+
+
+def skip_path(i):
+
+    # Avoid not complete path on Windows
+    skip = False
+
+    path = i['file']
+
+    if 'javapath' in path:
+        skip = True
+
+    return {'return': 0, 'skip': skip}
+
+
+def detect_version(i):
+
+    r = i['automation'].parse_version({'match_text': r'javac\s*([\d.]+)',
+                                       'group_number': 1,
+                                       'env_key': 'CM_JAVAC_VERSION',
+                                       'which_env': i['env'],
+                                       'debug': True})
+    if r['return'] > 0:
+        return r
+
+    version = r['version']
+
+    print(i['recursion_spaces'] + '    Detected version: {}'.format(version))
+
+    return {'return': 0, 'version': version}
+
+
+def postprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+    r = detect_version(i)
+    if r['return'] > 0:
+        return r
+
+    version = env['CM_JAVAC_VERSION']
+    env['CM_JAVAC_CACHE_TAGS'] = 'version-' + version
+
+    found_file_path = env['CM_JAVAC_BIN_WITH_PATH']
+    file_name = os.path.basename(found_file_path)
+    file_path = os.path.dirname(found_file_path)
+
+    env['CM_JAVAC_BIN'] = file_name
+
+    if os_info['platform'] == 'windows':
+        env['CM_JAVA_BIN'] = 'java.exe'
+    else:
+        env['CM_JAVA_BIN'] = 'java'
+
+    env['CM_JAVA_BIN_WITH_PATH'] = os.path.join(file_path, env['CM_JAVA_BIN'])
+
+    found_path = os.path.dirname(found_file_path)
+    javac_home_path = os.path.dirname(found_path)
+
+    env['JAVA_HOME'] = javac_home_path
+
+    return {'return': 0, 'version': version}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-javac/install-prebuilt.bat b/cmx4mlops/cmx4mlops/repo/script/get-javac/install-prebuilt.bat
new file mode 100644
index 000000000..74b1c4812
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-javac/install-prebuilt.bat
@@ -0,0 +1,9 @@
+del /Q %CM_JAVAC_PREBUILT_FILENAME%.zip
+
+wget --no-check-certificate %CM_JAVAC_PREBUILT_URL%%CM_JAVAC_PREBUILT_FILENAME%.zip
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+unzip %CM_JAVAC_PREBUILT_FILENAME%.zip
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+del /Q %CM_JAVAC_PREBUILT_FILENAME%.zip
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-javac/install-prebuilt.sh b/cmx4mlops/cmx4mlops/repo/script/get-javac/install-prebuilt.sh
new file mode 100644
index 000000000..eed1b8b01
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-javac/install-prebuilt.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+rm -f ${CM_JAVAC_PREBUILT_FILENAME}.tar.gz
+rm -f ${CM_JAVAC_PREBUILT_FILENAME}.tar
+
+wget --no-check-certificate ${CM_JAVAC_PREBUILT_URL}${CM_JAVAC_PREBUILT_FILENAME}.tar.gz
+test $? -eq 0 || exit 1
+
+gzip -d ${CM_JAVAC_PREBUILT_FILENAME}.tar.gz
+test $? -eq 0 || exit 1
+
+tar xvf ${CM_JAVAC_PREBUILT_FILENAME}.tar
+test $?
-eq 0 || exit 1 + +rm -f ${CM_JAVAC_PREBUILT_FILENAME}.tar diff --git a/cmx4mlops/cmx4mlops/repo/script/get-javac/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-javac/run.bat new file mode 100644 index 000000000..1919f559c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-javac/run.bat @@ -0,0 +1,3 @@ +"%CM_JAVAC_BIN_WITH_PATH%" -version > tmp-ver.out 2>&1 +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-javac/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-javac/run.sh new file mode 100644 index 000000000..40f97218d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-javac/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash +${CM_JAVAC_BIN_WITH_PATH} -version &> tmp-ver.out +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/README.md b/cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/README.md new file mode 100644 index 000000000..1ee3c731e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-armnn](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-armnn) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/_cm.yaml new file mode 100644 index 000000000..df9a42a4d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/_cm.yaml @@ -0,0 +1,39 @@ +alias: get-lib-armnn +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_version: '23.11' +deps: +- tags: detect,os +env: + CM_GIT_URL: https://github.com/ARM-software/armnn +new_env_keys: +- CM_LIB_ARMNN_VERSION +- CM_LIB_DNNL_* +- +C_INCLUDE_PATH +- +CPLUS_INCLUDE_PATH +- +LD_LIBRARY_PATH +prehook_deps: +- force_env_keys: + - CM_GIT_* + tags: get,git,repo,_repo.https://github.com/ARM-software/armnn +tags: +- get +- lib-armnn +- lib +- armnn +uid: 9603a2e90fd44587 +versions: + '22.11': + env: + CM_LIB_ARMNN_VERSION: v22.11 + CM_TMP_GIT_BRANCH_NAME: branches/armnn_22_11 + '23.05': + env: + CM_LIB_ARMNN_VERSION: v23.05 + CM_TMP_GIT_BRANCH_NAME: branches/armnn_23_05 + '23.11': + env: + CM_LIB_ARMNN_VERSION: v23.11 + CM_TMP_GIT_BRANCH_NAME: branches/armnn_23_11 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/customize.py new file mode 100644 index 000000000..7505dd0ed --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/customize.py @@ -0,0 +1,64 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: 
https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+    os_info = i['os_info']
+    if os_info['platform'] == 'windows':
+        return {'return': 1, 'error': 'Windows is not supported in this script yet'}
+
+    env = i['env']
+    version = env['CM_LIB_ARMNN_VERSION']
+    if env.get('CM_HOST_PLATFORM_FLAVOR', '') == 'aarch64':
+        url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-aarch64.tar.gz"
+    else:  # default to the x86_64 prebuilt binary
+        url = f"https://github.com/ARM-software/armnn/releases/download/{version}/ArmNN-linux-x86_64.tar.gz"
+
+    env['CM_LIB_ARMNN_PREBUILT_BINARY_URL'] = url
+    env['CM_LIB_ARMNN_EXTRACT_FILENAME'] = os.path.basename(url)
+
+    env['CM_GIT_CHECKOUT'] = env['CM_TMP_GIT_BRANCH_NAME']
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    paths = [
+        "+C_INCLUDE_PATH",
+        "+CPLUS_INCLUDE_PATH",
+        "+LD_LIBRARY_PATH",
+        "+DYLD_FALLBACK_LIBRARY_PATH"
+    ]
+
+    for key in paths:
+        env[key] = []
+
+    include_paths = []
+    armnn_src_path = env['CM_GIT_CHECKOUT_PATH']
+    include_paths.append(os.path.join(os.getcwd(), 'include'))
+    include_paths.append(os.path.join(armnn_src_path, 'include'))
+    include_paths.append(os.path.join(armnn_src_path, 'profiling'))
+
+    for inc_path in include_paths:
+        env['+C_INCLUDE_PATH'].append(inc_path)
+        env['+CPLUS_INCLUDE_PATH'].append(inc_path)
+
+    lib_path = os.path.join(os.getcwd())
+    env['+LD_LIBRARY_PATH'].append(lib_path)
+    env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path)
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/run.sh
new file mode 100644
index 000000000..4bb5d182a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-armnn/run.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+CUR_DIR=${PWD:-tmp}
+
+wget -nc ${CM_LIB_ARMNN_PREBUILT_BINARY_URL}
+tar -xvzf ${CM_LIB_ARMNN_EXTRACT_FILENAME}
+
+echo "******************************************************"
+echo "ArmNN prebuilt binary downloaded to ${CUR_DIR} ..."
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/README.md b/cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/README.md
new file mode 100644
index 000000000..4d3e55768
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-dnnl](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-dnnl) for the documentation of this CM script.
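Note how `get-lib-armnn/customize.py` above exports compiler and linker search paths: environment keys prefixed with `+` hold lists that CM later aggregates into the real `C_INCLUDE_PATH`, `LD_LIBRARY_PATH`, etc. A minimal sketch of that pattern (the checkout path is a hypothetical example):

```python
import os

env = {'CM_GIT_CHECKOUT_PATH': '/tmp/armnn-src'}  # hypothetical checkout path

# '+'-prefixed keys are list-valued; CM joins them into real environment variables
for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH']:
    env.setdefault(key, [])

for inc in (os.path.join(env['CM_GIT_CHECKOUT_PATH'], 'include'),
            os.path.join(env['CM_GIT_CHECKOUT_PATH'], 'profiling')):
    env['+C_INCLUDE_PATH'].append(inc)
    env['+CPLUS_INCLUDE_PATH'].append(inc)

env['+LD_LIBRARY_PATH'].append(os.getcwd())  # prebuilt .so files are extracted to CWD
```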
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/_cm.yaml new file mode 100644 index 000000000..2fdc8cb7c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/_cm.yaml @@ -0,0 +1,33 @@ +alias: get-lib-dnnl +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_version: dev +deps: +- tags: detect,cpu +- tags: cmake,get-cmake +env: + CM_DNNL_CLEAN_BUILD: 'yes' + CM_GIT_URL: https://github.com/oneapi-src/oneDNN + DNNL_BUILD_EXAMPLES: 'OFF' + DNNL_BUILD_TESTS: 'OFF' + DNNL_CPU_RUNTIME: OMP +new_env_keys: +- CM_LIB_DNNL_* +- +C_INCLUDE_PATH +- +CPLUS_INCLUDE_PATH +- +LD_LIBRARY_PATH +tags: +- get +- lib-dnnl +- lib +- dnnl +uid: 1cd35a6a3b0b4530 +versions: + 2.2.4: + env: + CM_GIT_CHECKOUT: v2.2.4 + dev: + env: + CM_GIT_CHECKOUT: master diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/customize.py new file mode 100644 index 000000000..4a921e0ec --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/customize.py @@ -0,0 +1,44 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + env['CM_LIB_DNNL_INSTALL_DIR'] = os.getcwd() + + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', + '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + # 20221024: we save and restore env in the main script and can clean env here for determinism + # if key not in env: + env[key] = [] + + env['+C_INCLUDE_PATH'].append(os.path.join(os.getcwd(), + 'install', 'include')) + env['+CPLUS_INCLUDE_PATH'].append(os.path.join(os.getcwd(), + 'install', 'include')) + + lib_path = os.path.join(os.getcwd(), 'install', 'lib') + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/run.sh new file mode 100644 index 000000000..ca47ee3b9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-dnnl/run.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +CUR_DIR=${PWD:-tmp} + +git clone --recursive -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} src + +test $? -eq 0 || exit 1 + +INSTALL_DIR="${CUR_DIR}" +rm -rf ${INSTALL_DIR}/install + +cd ${INSTALL_DIR} +mkdir build +mkdir install + +echo "******************************************************" +cd build +cmake .. \ + -DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}/install" \ + -DCMAKE_BUILD_TYPE=Release \ + -DDNNL_BUILD_TESTS=${DNNL_BUILD_TESTS} \ + -DDNNL_BUILD_EXAMPLES=${DNNL_BUILD_EXAMPLES} \ + -DDNNL_CPU_RUNTIME=${DNNL_CPU_RUNTIME} \ + ../src/ +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" +cmake --build . 
-j${CM_CPUINFO_CPUs} +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" +cmake --install . +if [ "${?}" != "0" ]; then exit 1; fi + + +# Clean build directory (too large) +cd ${INSTALL_DIR} +if [ "${CM_DNNL_CLEAN_BUILD}" != "no" ]; then + rm -rf build +fi + +echo "******************************************************" +echo "DNNL was built and installed to ${INSTALL_DIR}/install ..." diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/README.md b/cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/README.md new file mode 100644 index 000000000..04002ac6f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-protobuf](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-protobuf) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/_cm.yaml new file mode 100644 index 000000000..6f06409fd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/_cm.yaml @@ -0,0 +1,48 @@ +alias: get-lib-protobuf +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_version: 1.13.0 +deps: +- tags: get,cmake +- tags: get,gcc +input_description: {} +input_mapping: {} +new_env_keys: +- CM_GOOGLE_PROTOBUF_SRC_PATH +- CM_GOOGLE_PROTOBUF_INSTALL_PATH +- +C_INCLUDE_PATH +- +CPLUS_INCLUDE_PATH +- +LD_LIBRARY_PATH +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: +- extra_cache_tags: lib,protobuf,src + force_env_keys: + - CM_GIT_* + tags: get,git,repo,_repo.https://github.com/google/protobuf.git + update_tags_from_env_with_prefix: + _branch.: + - CM_TMP_GIT_CHECKOUT + _repo.: + - CM_TMP_GIT_URL + _tag.: + - CM_GIT_CHECKOUT_TAG +tags: +- get +- google-protobuf +- protobuf +- lib +- lib-protobuf +- google +uid: db45f1eb73934f91 +variations: + branch.#: + env: + CM_TMP_GIT_CHECKOUT: '#' + tag.#: + env: + CM_GIT_CHECKOUT_TAG: '#' +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/customize.py new file mode 100644 index 000000000..882393320 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/customize.py @@ -0,0 +1,59 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + 
+ automation = i['automation'] + + env['CM_GIT_CHECKOUT'] = "v" + env['CM_VERSION'] + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + env['+C_INCLUDE_PATH'] = [] + env['+CPLUS_INCLUDE_PATH'] = [] + env['+LD_LIBRARY_PATH'] = [] + + protobuf_install_path = os.path.join(os.getcwd(), "install") + env['CM_GOOGLE_PROTOBUF_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] + env['CM_GOOGLE_PROTOBUF_INSTALL_PATH'] = protobuf_install_path + env['+C_INCLUDE_PATH'].append( + os.path.join( + protobuf_install_path, + "include")) + env['+CPLUS_INCLUDE_PATH'].append( + os.path.join(protobuf_install_path, "include")) + + if os.path.exists(os.path.join(protobuf_install_path, "lib")): + env['+LD_LIBRARY_PATH'].append( + os.path.join(protobuf_install_path, "lib")) + elif os.path.exists(os.path.join(protobuf_install_path, "lib64")): + env['+LD_LIBRARY_PATH'].append( + os.path.join(protobuf_install_path, "lib64")) + else: + return { + 'return': 1, 'error': f'Protobuf library path not found in {protobuf_install_path}'} + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/run.sh new file mode 100644 index 000000000..29c0267d1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-protobuf/run.sh @@ -0,0 +1,15 @@ +#!/bin/bash +CUR=$PWD +mkdir -p install +INSTALL_DIR=$CUR/install +cd ${CM_GIT_REPO_CHECKOUT_PATH} +rm -rf build +mkdir build +cd build +export MAKEFLAGS=-j${CM_MAKE_CORES} +cmake -Dprotobuf_BUILD_TESTS=OFF -DBUILD_SHARED_LIBS=ON -DCMAKE_CXX_STANDARD=14 -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} ../cmake +test $? -eq 0 || exit $? +CMD="make install" +echo ${CMD} +eval $CMD +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/README.md b/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/README.md new file mode 100644 index 000000000..7b2ea6a7e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-qaic-api](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-qaic-api) for the documentation of this CM script. 
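The point of exporting `CM_GOOGLE_PROTOBUF_INSTALL_PATH` plus the include/library lists (see `new_env_keys` above) is that dependent scripts never hard-code cache locations. A hedged sketch of how a downstream build step might turn these keys into compiler flags; the `-lprotobuf` flag and the cache path shown are illustrative assumptions:

```python
import os


def compiler_flags_from_env(env):
    # Translate the include/lib lists exported by get-lib-protobuf
    # into gcc/clang command-line flags for a dependent build.
    flags = []
    for inc in env.get('+CPLUS_INCLUDE_PATH', []):
        flags.append('-I' + inc)
    for lib in env.get('+LD_LIBRARY_PATH', []):
        flags.append('-L' + lib)
    flags.append('-lprotobuf')
    return flags


install = '/tmp/cm-cache/protobuf/install'  # hypothetical cache entry
env = {
    '+CPLUS_INCLUDE_PATH': [os.path.join(install, 'include')],
    '+LD_LIBRARY_PATH': [os.path.join(install, 'lib')],
}
print(' '.join(compiler_flags_from_env(env)))
```

This also explains why customize.py probes both `lib` and `lib64` before exporting: the consumer only ever sees whichever directory actually exists.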
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/_cm.yaml new file mode 100644 index 000000000..aaf6688dd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/_cm.yaml @@ -0,0 +1,27 @@ +alias: get-lib-qaic-api +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_version: master +deps: +- tags: detect,os +env: {} +new_env_keys: +- CM_LIB_QAIC_* +- CM_QAIC_API_* +- +C_INCLUDE_PATH +- +CPLUS_INCLUDE_PATH +- +LD_LIBRARY_PATH +prehook_deps: [] +tags: +- get +- api +- lib-qaic-api +- lib +- qaic +uid: 1e253ae184e44f23 +versions: + master: + env: + CM_LIB_QAIC_VERSION: master diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/customize.py new file mode 100644 index 000000000..596a40c1a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/customize.py @@ -0,0 +1,54 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + # env['CM_GIT_CHECKOUT'] = env['CM_TMP_GIT_BRANCH_NAME'] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + paths = [ + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ] + + for key in paths: + env[key] = [] + + include_paths = [env['CM_TMP_CURRENT_SCRIPT_PATH']] + + for inc_path in include_paths: + env['+C_INCLUDE_PATH'].append(inc_path) + env['+CPLUS_INCLUDE_PATH'].append(inc_path) + + version = "master" + env['CM_QAIC_API_SRC_FILE'] = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.cpp") + env['CM_QAIC_API_INC_FILE'] = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], version, "QAicInfApi.h") + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/master/QAicInfApi.cpp b/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/master/QAicInfApi.cpp new file mode 100644 index 000000000..c2b41a683 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/master/QAicInfApi.cpp @@ -0,0 +1,750 @@ +// Copyright (c) 2021 Qualcomm Innovation Center, Inc. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted (subject to the limitations in the +// disclaimer below) provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. 
+// +// * Neither the name Qualcomm Innovation Center nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +// GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +// HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +// IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +// IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +#include "QAicInfApi.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +namespace qaic_api { + +const uint32_t setSizeDefault = 10; +const uint32_t numActivationsDefault = 1; +const uint32_t numInferencesDefault = 40; +const uint32_t numThreadsPerQueueDefault = 4; +const uint32_t qidDefault = 0; + + +class ActivationSet { + + public: + ActivationSet( + QData ioDescQData, + QAicContext* context, + QAicProgram* program, QAicQueue* queue, QID dev, uint32_t numBuffers, + QAicExecObjProperties_t& execObjProperties_t, + uint32_t activationId, + QAicEventCallback callback = nullptr); + virtual ~ActivationSet(); + + // protected: + // Program is expected to be activated before calling init + QStatus init(uint32_t setSize = setSizeDefault); + QBuffer* getDmaBuffers(uint32_t execOjbIndex); + QStatus reset(); + QStatus setData(std::vector>& buffers); + QStatus setDataSingle(int set_idx, std::vector& buffers); + QStatus run(uint32_t numInferences, void* payload); + QStatus deinit(); + void setOutBufIndex(uint32_t outBufIndex) { outBufIndex_ = outBufIndex;} + std::string filename; + uint32_t getNumBuffers() { return numBuffers_; } +private: + std::vector eventExecSet_; + std::vector execObjSet_; + std::vector qbuffersSet_; + uint32_t setSize_; + QAicEvent *activationEvent_; + QAicContext *context_; + QAicProgram *program_; + QAicQueue *queue_; + QID dev_; + uint32_t numBuffers_; + QBuffer *userBuffers_; + QAicExecObjProperties_t execObjProperties_; + uint32_t activationId_; + QAicEventCallback callback_; + QData ioDescQData_; + uint32_t outBufIndex_; +}; + +//-------------------------------------------------------------------- +// ActivationSet class Implementation +//-------------------------------------------------------------------- + +ActivationSet::ActivationSet( + QData ioDescQData, + QAicContext *context, + QAicProgram *program, QAicQueue *queue, QID dev, uint32_t numBuffers, + QAicExecObjProperties_t &execObjProperties, + uint32_t activationId, + QAicEventCallback callback) + : context_(context), program_(program), queue_(queue), + dev_(dev), numBuffers_(numBuffers), userBuffers_(nullptr), + execObjProperties_(execObjProperties), + activationId_(activationId), callback_(callback), ioDescQData_(ioDescQData) {} + +ActivationSet::~ActivationSet() {} + +QStatus ActivationSet::deinit() { + QStatus status = QS_SUCCESS; + + for (auto &e : execObjSet_) { 
+ + status = qaicReleaseExecObj(e); + if (status != QS_SUCCESS) { + std::cerr << "Failed to release Exec obj" << std::endl; + return status; + } + } + for (auto &ev : eventExecSet_) { + status = qaicReleaseEvent(ev); + if (status != QS_SUCCESS) { + std::cerr << "Failed to release Event obj" << std::endl; + return status; + } + } + return status; +} + +QBuffer* ActivationSet::getDmaBuffers(uint32_t execObjIndex) { + return qbuffersSet_[execObjIndex]; +} + +QStatus ActivationSet::init(uint32_t setSize) { + QStatus status = QS_SUCCESS; + + setSize_ = setSize; + + qbuffersSet_.resize(setSize_); + + if (!strcmp(std::getenv("QAIC_BYPASS_PPP"),"enable")) { + // std::cout << "Zero Copy enabled\n"; + execObjProperties_ |= QAIC_EXECOBJ_PROPERTIES_ZERO_COPY_BUFFERS; + } + + for (uint32_t i = 0; i < setSize_; i++) { + QAicExecObj *execObj = nullptr; + qbuffersSet_[i] = nullptr; + // nullptr is passed as the ioDesc indicating we will use the default + // ioDescriptor. + status = qaicCreateExecObj( + context_, &execObj, &execObjProperties_, program_, + (ioDescQData_.data)?(&ioDescQData_):nullptr, + nullptr, nullptr); + if ((status != QS_SUCCESS) || (execObj == nullptr)) { + std::cerr << "Failed to create Exec obj" << std::endl; + return status; + } + execObjSet_.push_back(execObj); + if (!strcmp(std::getenv("QAIC_BYPASS_PPP"),"enable")) { + const QAicApiFunctionTable *aicApi_ = qaicGetFunctionTable(); + status = aicApi_ -> qaicExecObjGetIoBuffers( execObj, &numBuffers_, &qbuffersSet_[i]); + if ((status != QS_SUCCESS)) { + std::cerr << "Failed to get IO buffers" << std::endl; + return status; + } + } + + QAicEvent *event = nullptr; + status = qaicCreateEvent(context_, &event, + QAIC_EVENT_DEVICE_COMPLETE); + if ((status != QS_SUCCESS) || (event == nullptr)) { + std::cerr << "Failed to create Event" << std::endl; + return status; + } + eventExecSet_.push_back(event); + } + return QS_SUCCESS; +} + +QStatus ActivationSet::setData(std::vector> &buffers) { + QStatus status = QS_SUCCESS; + int i = 0; + if (!strcmp(std::getenv("QAIC_BYPASS_PPP"),"enable")) { + // no setdata is required when using dma buf path + + std::cerr << "no setdata is required when using dma buf path" << std::endl; + return status; + } + for (auto &e : execObjSet_) { + status = qaicExecObjSetData(e, buffers[i].size(), buffers[i].data()); + if (status != QS_SUCCESS) { + return status; + } + ++i; + } + //userBuffers_ = userBuffers; + return status; +} + +QStatus ActivationSet::setDataSingle(int set_idx, std::vector &buffers) { + QStatus status = QS_SUCCESS; + + status = qaicExecObjSetData(execObjSet_[set_idx], buffers.size(), buffers.data()); + if (status != QS_SUCCESS) { + std::cout << "tried to set " << set_idx << " " << buffers.data() << " " << buffers.size() << std::endl; + return status; + } + + return status; +} + + +QStatus ActivationSet::run(uint32_t index, void* payload) { + QStatus status; + + //std::cout << "clearing event for " << index << " " << payload << std::endl; + status = qaicEventClear(eventExecSet_.at(index)); + if (status != QS_SUCCESS) { + return status; + } + + qaicEventRemoveCallback(eventExecSet_.at(index), callback_); + + status = qaicEventAddCallback(eventExecSet_.at(index), callback_, payload); + if (status != QS_SUCCESS) { + return status; + } + + //std::cout << "Enqueuing work " << index << " " << payload << std::endl; + status = qaicEnqueueExecObj(queue_, execObjSet_.at(index), + eventExecSet_.at(index)); + if (status != QS_SUCCESS) { + return status; + } + + //std::cout << "Creating callback " << index << " 
" << payload << std::endl; + return QS_SUCCESS; +} + + + +//------------------------------------------------------------------ +// QAIC Runner Example Class Implementation +//------------------------------------------------------------------ +QAicInfApi::QAicInfApi() + : + context_(nullptr), + constants_(nullptr), contextProperties_(QAIC_CONTEXT_DEFAULT), + execObjProperties_(QAIC_EXECOBJ_PROPERTIES_DEFAULT), + queueProperties_{QAIC_QUEUE_PROPERTIES_ENABLE_MULTI_THREADED_QUEUES, + numThreadsPerQueueDefault}, + dev_(0), + numActivations_(numActivationsDefault), + numInferences_(numInferencesDefault), + numThreadsPerQueue_(numThreadsPerQueueDefault), setSize_(setSizeDefault), + activated_(false), entryPoint_("default") { +} + +QAicInfApi::~QAicInfApi() { + QStatus status; + + for (uint32_t i = 0; i < programs_.size(); i++) { + status = qaicReleaseProgram(programs_[i]); + if (status != QS_SUCCESS) { + std::cerr << "Failed to release program" << std::endl; + } + } + + for (uint32_t i = 0; i < queues_.size(); i++) { + status = qaicReleaseQueue(queues_[i]); + if (status != QS_SUCCESS) { + std::cerr << "Failed to release queue" << std::endl; + } + queues_[i] = nullptr; + } + + if (constants_ != nullptr) { + status = qaicReleaseConstants(constants_); + if (status != QS_SUCCESS) { + std::cerr << "Failed to release constants" << std::endl; + } + } + + shActivationSets_.clear(); + + if (context_ != nullptr) { + status = qaicReleaseContext(context_); + if (status != QS_SUCCESS) { + std::cerr << "Failed to release context" << std::endl; + } + context_ = nullptr; + } + + inferenceBufferVector_.clear(); +} + +void QAicInfApi::setSkipStage(std::string qaic_skip_stage) { + if (!qaic_skip_stage.empty()) { + entryPoint_ = qaic_skip_stage; + } +} + +QStatus QAicInfApi::loadFileType( + const std::string &filePath, size_t &sizeLoaded, uint8_t *&dataPtr, + std::vector> &vector) { + uint64_t fileSize; + std::ifstream infile; + infile.open(filePath, std::ios::binary | std::ios::in); + if (!infile.is_open()) { + std::cerr << "Failed to open file: " << filePath << std::endl; + return QS_ERROR; + } + + infile.seekg(0, infile.end); + fileSize = infile.tellg(); + infile.seekg(0, infile.beg); + std::unique_ptr uniqueBuffer = + std::unique_ptr(new (std::nothrow) uint8_t[fileSize]); + if (uniqueBuffer == nullptr) { + std::cerr << "Failed to allocate buffer for file " << filePath + << " of size " << fileSize << std::endl; + return QS_ERROR; + } + infile.read((char *)uniqueBuffer.get(), fileSize); + if (!infile) { + std::cerr << "Failed to read all data from file " << filePath << std::endl; + return QS_ERROR; + } + dataPtr = uniqueBuffer.get(); + vector.emplace_back(std::move(uniqueBuffer)); + sizeLoaded = fileSize; + return QS_SUCCESS; +} + + +QStatus QAicInfApi::init(QID qid, QAicEventCallback callback) { + QStatus status = QS_SUCCESS; + + callback_ = callback; + //std::cout << "callback - " << (void*)callback_ << std::endl; + + dev_ = qid; + + // validate if device is available + QDevInfo devInfo; + status = qaicGetDeviceInfo(dev_, &devInfo); + if (status == QS_SUCCESS) { + if (devInfo.devStatus != QDS_READY) { + std::cerr << "Device:" << dev_ << " not in ready state" << std::endl; + exit(1); + } + } else { + std::cerr << "Invalid device:" << std::to_string(dev_) << std::endl; + exit(1); + } + + // Check Library Compatibility + { + uint16_t major; + uint16_t minor; + const char *patch; + const char *variant; + status = qaicGetAicVersion(&major, &minor, &patch, &variant); + + if (status != QS_SUCCESS) { + std::cerr << 
"Unable to retrieve AicVersion" << std::endl; + exit(1); + } + if ((major != LRT_LIB_MAJOR_VERSION) || (minor < LRT_LIB_MINOR_VERSION)) { + std::cerr << "AicApi Header is not compatible with Library, lib:" << major + << "." << minor << " header:" << LRT_LIB_MAJOR_VERSION << "." + << LRT_LIB_MINOR_VERSION << std::endl; + exit(1); + } + } + + status = qaicCreateContext(&context_, &contextProperties_, 1, &dev_, + logCallback, errorHandler, nullptr); + if ((context_ == nullptr) || (status != QS_SUCCESS)) { + std::cerr << "Failed to Create Context" << std::endl; + return status; + } + + for (uint32_t i = 0; i < modelBasePaths_.size() ; i++) { + + QBuffer programQpcBuf_; + QAicProgramProperties_t programProperties_; + std::vector> programBufferVector_; + QAicQpcObj *qpcObj_; + + std::string filePath = modelBasePaths_[i] + "/programqpc.bin"; + + + // Load file + status = loadFileType(filePath, programQpcBuf_.size, programQpcBuf_.buf, + programBufferVector_); + + + //------------------------------------------------------------------------- + // Create Programs + // It is valid to pass a null for constants, if null program will + // disregard constants + //------------------------------------------------------------------------- + // Initialize the program properties with default. + status = qaicProgramPropertiesInitDefault(&programProperties_); + if (status != QS_SUCCESS) { + std::cerr << "Failed to initialize program properties." << std::endl; + return status; + } + + status = qaicOpenQpc(&qpcObj_, programQpcBuf_.buf, programQpcBuf_.size, false); + if (status != QS_SUCCESS) { + std::cerr << "Failed to open Qpc." << std::endl; + return status; + } + + const char *name = "progName"; + QAicProgram *program = nullptr; + + status = qaicCreateProgram( + context_, &program, &programProperties_, dev_, name, qpcObj_); + + if ((program == nullptr) || (status != QS_SUCCESS)) { + std::cerr << "Failed to create program" << std::endl; + return status; + } + programs_.push_back(program); + } + + + //------------------------------------------------------------------------- + // Load Programs QAicInfApi(uint32_t dummy); + + // User may choose to explicitly load program, or let the driver load + // the program when it is needed. + // For this reason the following code is commented out, to demonstrate + // automatic loading and activation + //------------------------------------------------------------------------- + for (uint32_t i = 0; i < modelBasePaths_.size(); i++) { + QStatus status; + status = qaicLoadProgram(programs_[i]); + if (status != QS_SUCCESS) { + std::cerr << "Failed to load program" << std::endl; + return status; + } + } + //------------------------------------------------------------------------- + // Activate Programs + // User may choose to explicitly activate program, or let the driver + // activate the program when it is needed. 
+ // For this reason the following code is commented out, to demonstrate + // automatic loading and activation + //------------------------------------------------------------------------- + for (uint32_t i = 0; i < modelBasePaths_.size(); i++) { + QStatus status; + status = qaicRunActivationCmd(programs_[i], + QAIC_PROGRAM_CMD_ACTIVATE_FULL); + if (status != QS_SUCCESS) { + std::cerr << "Failed to enqueue Activation command" << std::endl; + return status; + } + } + + //------------------------------------------------------------------------- + // Create Queues for Execution + //------------------------------------------------------------------------- + for (uint32_t i = 0; i < modelBasePaths_.size(); i++) { + + QAicQueue *queue = nullptr; + status = + qaicCreateQueue(context_, &queue, &queueProperties_, dev_); + if ((queue == nullptr) || (status != QS_SUCCESS)) { + std::cerr << "Failed to create queue" << std::endl; + return status; + } + queues_.push_back(queue); + } + + for (uint32_t i = 0; i < modelBasePaths_.size(); i++) { + + QData ioDescQData; + ioDescQData.data = nullptr; + ioDescQData.size = 0; + aicapi::IoDesc ioDescProto; + status = qaicProgramGetIoDescriptor(programs_[i], &ioDescQData); + if (ioDescQData.data == nullptr) { + std::cerr << "Failed to get iodesc" << std::endl; + return QS_ERROR; + } + ioDescProto.ParseFromArray(ioDescQData.data, ioDescQData.size); + if (!entryPoint_.empty() && entryPoint_.compare("default") != 0) { + for (auto &io_set : ioDescProto.io_sets()) { + if (io_set.name().compare(entryPoint_) == 0) { + ioDescProto.clear_selected_set(); + ioDescProto.mutable_selected_set()->CopyFrom(io_set); + break; + } + } + if(ioDescProto.selected_set().name().compare(entryPoint_) != 0) { + std::cerr << "Failed to match name in iodesc" << std::endl; + return QS_ERROR; + } + + try { + customizedIoDescProtoBuffer_.resize(ioDescProto.ByteSizeLong()); + } catch (const std::bad_alloc &e) { + std::cerr << "vector resize failed for protocol Buffer -"<< e.what()< shActivation = + std::make_shared(ioDescQData, + context_, programs_[i], queues_[i], dev_, + numBuffers, + execObjProperties_, i, callback_); + if (shActivation != nullptr) { + shActivation->init(setSize_); + shActivationSets_.emplace_back(shActivation); + } + + // Create IO buffers + status = createBuffers(i, ioDescProto, shActivation); + if (status != QS_SUCCESS) { + std::cerr << "Failed to create IO buffers." 
<< std::endl; + return status; + } + } + + if (!(!strcmp(std::getenv("QAIC_BYPASS_PPP"),"enable"))) { + setData(); + } + + return QS_SUCCESS; +} + +QStatus QAicInfApi::createBuffers(int idx, aicapi::IoDesc& ioDescProto, std::shared_ptr shActivation) { + + inferenceBuffersList_.resize(inferenceBuffersList_.size() + 1); + + inferenceBuffersList_[idx].resize(setSize_); + if (!strcmp(std::getenv("QAIC_BYPASS_PPP"),"enable")) { + for (uint32_t y = 0; y < setSize_; y++) { + + QBuffer* dmaBuffVect = shActivation->getDmaBuffers(y); + + for (uint32_t i = 0; i < shActivation->getNumBuffers(); i++) { + inferenceBuffersList_[idx][y].push_back(dmaBuffVect[i]); + } + } + return QS_SUCCESS; + } + + for (uint32_t y = 0; y < setSize_; y++) { + + for (uint32_t i = 0; i < ioDescProto.selected_set().bindings().size(); i++) { + if (ioDescProto.selected_set().bindings(i).dir() == aicapi::BUFFER_IO_TYPE_OUTPUT) { + QBuffer buf; + uint32_t outputBufferSize = ioDescProto.selected_set().bindings(i).size(); + std::unique_ptr uniqueBuffer = std::unique_ptr( + // over allocate to allow for buffer alignment + new(std::nothrow) uint8_t[outputBufferSize + 32]); + if (uniqueBuffer == nullptr) { + std::cerr << "Failed to allocate buffer for output, size " + << outputBufferSize << std::endl; + return QS_ERROR; + } + buf.buf = uniqueBuffer.get(); + + //align the buffer to 32 byte boundary + uint64_t mask = 31; + mask = ~mask; + buf.buf = (uint8_t*)((uint64_t)(buf.buf + 32) & mask); + + buf.size = outputBufferSize; + inferenceBufferVector_.push_back(std::move(uniqueBuffer)); + inferenceBuffersList_[idx][y].push_back(std::move(buf)); + } else if (ioDescProto.selected_set().bindings(i).dir() == aicapi::BUFFER_IO_TYPE_INPUT) { + QBuffer buf = QBuffer(); + uint32_t inputBufferSize = ioDescProto.selected_set().bindings(i).size(); + + std::unique_ptr uniqueBuffer = std::unique_ptr( + // over allocate to allow for buffer alignment + new(std::nothrow) uint8_t[inputBufferSize + 32]); + if (uniqueBuffer == nullptr) { + std::cerr << "Failed to allocate input buffer" << std::endl; + return QS_ERROR; + } + buf.buf = uniqueBuffer.get(); + + //align the buffer to 32 byte boundary + uint64_t mask = 31; + mask = ~mask; + buf.buf = (uint8_t*)((uint64_t)(buf.buf + 32) & mask); + + buf.size = inputBufferSize; + inferenceBufferVector_.push_back(std::move(uniqueBuffer)); + inferenceBuffersList_[idx][y].push_back(std::move(buf)); + } + } + } + + return QS_SUCCESS; +} + +QStatus QAicInfApi::setData() { + + //-------------------------------------- + // Set data in buffers + //-------------------------------------- + int x = 0; + for (auto &a : shActivationSets_) { + if (a != nullptr) { + a->setData(inferenceBuffersList_[x]); + } + ++x; + } + + return QS_SUCCESS; +} + + +//---------------------------------------------------------------- +// Run Inferences +//---------------------------------------------------------------- +QStatus QAicInfApi::run(uint32_t activation, + uint32_t execobj, + void* payload) { + QStatus status = QS_SUCCESS; + //setData(); + + shActivationSets_[activation]->run(execobj, payload); + + return status; +} +/*QStatus qaicExecObjGetIoBuffers(const QAicExecObj *execObj, + uint32_t *numBuffers, QBuffer **buffers) { + if ((execObj == nullptr) || (execObj->shExecObj == nullptr) || + (numBuffers == nullptr) || (buffers == nullptr)) { + // LogErrorG("Invalid null pointer"); + return QS_INVAL; + } + return execObj->shExecObj->getIoBuffers(*numBuffers, *buffers); +}*/ + +QStatus QAicInfApi::deinit() { + QStatus status; + + for (auto &a : 
shActivationSets_) { + if (a != nullptr) { + status = a->deinit(); + if (status != QS_SUCCESS) { + return status; + } + } + } + + if (activated_ == false) { + return QS_SUCCESS; + } + + for (uint32_t i = 0; i < modelBasePaths_.size(); i++) { + qaicRunActivationCmd(programs_.at(i), + QAIC_PROGRAM_CMD_DEACTIVATE_FULL); + } + for (uint32_t i = 0; i < modelBasePaths_.size(); i++) { + status = qaicUnloadProgram(programs_[i]); + if (status != QS_SUCCESS) { + std::cerr << "Failed to unload program" << std::endl; + return status; + } + } + + return QS_SUCCESS; +} + +// Kept to keep backwards compatibility for resnets 50 and 34. +void QAicInfApi::setNumActivations(uint32_t num) { + + for(int i=0 ; i(ptr); + + QStatus status = QS_SUCCESS; + + status = shActivationSets_[act_idx]->setDataSingle(set_idx, inferenceBuffersList_[act_idx][set_idx]); + if (status != QS_SUCCESS) { + std::cerr << "Failed to set data." << std::endl; + return status; + } + + return status; +} + + + +} + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/master/QAicInfApi.h b/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/master/QAicInfApi.h new file mode 100644 index 000000000..3af6f1f33 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/master/QAicInfApi.h @@ -0,0 +1,146 @@ +// Copyright (c) 2021 Qualcomm Innovation Center, Inc. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted (subject to the limitations in the +// disclaimer below) provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// * Neither the name Qualcomm Innovation Center nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +// GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +// HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +// IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +// IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +// IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef QAIC_DEVICE_H_ +#define QAIC_DEVICE_H_ + +#include "QAicApi.h" +#include "QAicApi.pb.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace qaic_api { + +extern const uint32_t setSizeDefault; +extern const uint32_t numActivationsDefault; +extern const uint32_t numInferencesDefault; +extern const uint32_t numThreadsPerQueueDefault; +extern const uint32_t qidDefault; + +class ActivationSet; + +class QAicInfApi { +public: + QAicInfApi(); + + virtual ~QAicInfApi(); + static void logCallback(QLogLevel logLevel, const char *str) { + std::cout << str; + } + + static void errorHandler(QAicContextID id, const char *errInfo, + QAicErrorType errType, const void *errData, + size_t errDataSize, void *userData) { + std::cout << "Received Error Handler CB: id " << id << "msg: " << errInfo + << std::endl; + } + + void setModelBasePath(std::string modelBasePath); + void setNumActivations(uint32_t num); + void setNumThreadsPerQueue(uint32_t num); + void setSetSize(uint32_t num); + void setLibPath(std::string &aicLibPath); + void setSkipStage(std::string qaic_skip_stage); + + // Initialize Driver, Run, De-Init, get Results + + QStatus init(QID qid, QAicEventCallback callback); + QStatus loadDataset(); + QStatus setData(); + QStatus createBuffers(int idx, aicapi::IoDesc& ioDescProto, std::shared_ptr); + + QStatus run(uint32_t activation, uint32_t execobj, void* payload); + + QStatus deinit(); + uint64_t getInfCompletedCount(); + bool isBatchMode(); + + void* getBufferPtr(uint32_t act_idx,uint32_t exec_idx, uint32_t buf_idx) { + return inferenceBuffersList_[act_idx][exec_idx][buf_idx].buf; + } + + QStatus setBufferPtr(uint32_t act_idx, uint32_t set_idx, uint32_t buf_idx, void* ptr); + +private: + QStatus loadFileType(const std::string &filePath, size_t &sizeLoaded, + uint8_t *&dataPtr, + std::vector> &vector); + QAicContext *context_; + QAicConstants *constants_; + std::vector programs_; + // Properties + QAicContextProperties_t contextProperties_; + QAicConstantsProperties_t constantsProperties_; + QAicExecObjProperties_t execObjProperties_; + QAicQueueProperties queueProperties_; + + std::vector>> inferenceBuffersList_; + + // Per Activation Resources + std::vector queues_; + std::vector perQueueFinishEvents_; + std::vector> shActivationSets_; + QBuffer constDescBuf_; + QBuffer constBuf_; + QBuffer networkDescBuf_; + QBuffer progBuf_; + QID dev_; + std::vector modelBasePaths_; + std::vector> inferenceBufferVector_; + uint32_t numActivations_; + uint32_t numInferences_; + uint32_t numThreadsPerQueue_; + uint32_t setSize_; + bool activated_; + std::vector infDataSet; + + // Callback + QAicEventCallback callback_; + std::string entryPoint_; + std::vector customizedIoDescProtoBuffer_; +}; // QAicInfApi + +} // namespace qaic_device + +#endif diff --git a/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/run.sh new file mode 100644 index 000000000..c880c1f3f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-lib-qaic-api/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +CUR_DIR=${PWD:-tmp} + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-llvm/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-llvm/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-llvm/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. 
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-llvm/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-llvm/README-extra.md
new file mode 100644
index 000000000..8020e09ba
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-llvm/README-extra.md
@@ -0,0 +1,96 @@
+# Get LLVM
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects an installed LLVM on the system and, if none is found, calls the [install script for LLVM](../script/install-llvm-prebuilt).
+
+## Exported Variables
+* `CM_LLVM_CLANG_BIN`
+* `CM_LLVM_CLANG_BIN_WITH_PATH`
+* `CM_C_COMPILER_BIN`
+* `CM_C_COMPILER_WITH_PATH`
+* `CM_CXX_COMPILER_BIN`
+* `CM_CXX_COMPILER_WITH_PATH`
+* `CM_COMPILER_*`
+* `CM_LINKER_*`
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. RHEL 9
+
+# CLI
+
+## Default
+```bash
+cm run script "get llvm"
+```
+or
+```bash
+cm run script --tags=get,llvm
+```
+
+## Version
+
+```bash
+cm run script "get llvm" --version=14.0.0
+```
+
+## Version min
+```bash
+cm run script "get llvm" --version_min=12.0.0
+```
+
+## Version max
+```bash
+cm run script "get llvm" --version_max=13.999.999 --version_max_usable=13.0.0
+```
+
+## Detect LLVM in a non-standard path
+```bash
+cm run script "get llvm" --path={directory with llvm}
+```
+
+### Detect LLVM with a non-standard binary name
+```bash
+cm run script "get llvm" --input={full path to clang}
+```
+
+## Force new detection even if LLVM is already found and cached
+```bash
+cm run script "get llvm" --new
+```
+
+## Test
+
+```bash
+cm run script "app image corner-detection"
+```
+
+## Reproducibility matrix
+
+*Test detection and installation on different platforms*
+
+* Windows, Linux, macOS
+
+### RHEL 9
+
+#### v14.0.0: ✓
+
+```bash
+cm rm cache -f
+cm run script "get llvm" --version=14.0.0
+cm run script "app image corner-detection"
+```
+
+#### v13.0.0: needs a special command
+
+```bash
+cm rm cache -f
+cm run script "get llvm" --version=13.0.0 --env.CM_LLVM_PACKAGE=clang+llvm-13.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz
+cm run script "app image corner-detection"
+```
+
+#### v12.0.0: needs a special command
+
+```bash
+cm rm cache -f
+cm run script "get llvm" --version=12.0.0 --env.CM_LLVM_PACKAGE=clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz
+cm run script "app image corner-detection"
+```
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-llvm/README.md b/cmx4mlops/cmx4mlops/repo/script/get-llvm/README.md
new file mode 100644
index 000000000..b20834a51
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-llvm/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/get-llvm](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/get-llvm) for the documentation of this CM script.
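The `--version_min`/`--version_max` flags shown in the README above constrain which detected or installed LLVM is acceptable. A minimal sketch of how such range checks can be evaluated, assuming plain dotted-integer version strings; this illustrates the idea rather than CM's internal implementation:

```python
def parse_version(text):
    # '13.999.999' -> (13, 999, 999); tuples compare element-wise.
    return tuple(int(x) for x in text.split('.'))


def version_in_range(detected, version_min=None, version_max=None):
    v = parse_version(detected)
    if version_min and v < parse_version(version_min):
        return False
    if version_max and v > parse_version(version_max):
        return False
    return True


# Mirrors the README example: accept any 12.x/13.x LLVM but not 14.
print(version_in_range('13.0.0', version_min='12.0.0', version_max='13.999.999'))  # True
print(version_in_range('14.0.0', version_max='13.999.999'))                        # False
```

A sentinel like `13.999.999` is simply a convenient way of saying "anything below 14" without enumerating patch releases.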
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-llvm/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-llvm/_cm.yaml new file mode 100644 index 000000000..49795a0ba --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-llvm/_cm.yaml @@ -0,0 +1,48 @@ +alias: get-llvm +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +clean_files: [] +env: + CM_REQUIRE_INSTALL: 'no' +name: Detect or install LLVM compiler +new_env_keys: +- CM_LLVM_* +- CM_C_COMPILER_* +- CM_CXX_COMPILER_* +- CM_COMPILER_* +- CM_LINKER_* +- + CFLAGS +- + CXXFLAGS +- + FFLAGS +- + LDFLAGS +- +CM_HOST_OS_DEFAULT_INCLUDE_PATH +- +PATH +post_deps: +- tags: get,compiler-flags +prehook_deps: +- enable_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + names: llvm-install + reuse_version: true + tags: install,llvm +sort: 100 +tags: +- get +- llvm +- compiler +- c-compiler +- cpp-compiler +- get-llvm +uid: 99832a103ed04eb8 +variations: + from-prebuilt: + ad: + llvm-install: + tags: prebuilt + from-src: + ad: + llvm-install: + tags: src,_clang diff --git a/cmx4mlops/cmx4mlops/repo/script/get-llvm/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-llvm/customize.py new file mode 100644 index 000000000..6049d3159 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-llvm/customize.py @@ -0,0 +1,108 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name_c = 'clang.exe' if os_info['platform'] == 'windows' else 'clang' + + env['FILE_NAME_C'] = file_name_c + + if 'CM_LLVM_CLANG_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name_c, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_LLVM_CLANG_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + if r['return'] == 16: + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return': 0} + + +def detect_version(i): + + r = i['automation'].parse_version({'match_text': r'clang version\s*([\d.]+)', + 'group_number': 1, + 'env_key': 'CM_LLVM_CLANG_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} + + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] > 0: + return r + + version = env['CM_LLVM_CLANG_VERSION'] + env['CM_LLVM_CLANG_CACHE_TAGS'] = 'version-' + version + env['CM_COMPILER_CACHE_TAGS'] = 'version-' + version + ',family-llvm' + env['CM_COMPILER_FAMILY'] = 'LLVM' + env['CM_COMPILER_VERSION'] = env['CM_LLVM_CLANG_VERSION'] + + found_file_path = env['CM_LLVM_CLANG_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + + file_name_c = os.path.basename(found_file_path) + file_name_cpp = file_name_c.replace("clang", "clang++") + + env['CM_LLVM_CLANG_BIN'] = file_name_c + + # 
General compiler settings for program compilation
+    env['CM_C_COMPILER_BIN'] = file_name_c
+    env['CM_C_COMPILER_WITH_PATH'] = found_file_path
+    env['CM_C_COMPILER_FLAG_OUTPUT'] = '-o '
+    env['CM_C_COMPILER_FLAG_VERSION'] = '--version'
+    env['CM_C_COMPILER_FLAG_INCLUDE'] = '-I'
+
+    env['CM_CXX_COMPILER_BIN'] = file_name_cpp
+    env['CM_CXX_COMPILER_WITH_PATH'] = os.path.join(found_path, file_name_cpp)
+    env['CM_CXX_COMPILER_FLAG_OUTPUT'] = '-o '
+    env['CM_CXX_COMPILER_FLAG_VERSION'] = '--version'
+    env['CM_CXX_COMPILER_FLAG_INCLUDE'] = '-I'
+
+    env['CM_COMPILER_FLAGS_FAST'] = "-O4"
+    # "-flto" - this flag is not always available (requires LLVMgold.so)
+    env['CM_LINKER_FLAGS_FAST'] = "-O4"
+    env['CM_COMPILER_FLAGS_DEBUG'] = "-O0"
+    env['CM_LINKER_FLAGS_DEBUG'] = "-O0"
+    env['CM_COMPILER_FLAGS_DEFAULT'] = "-O2"
+    env['CM_LINKER_FLAGS_DEFAULT'] = "-O2"
+
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_LLVM_CLANG_BIN_WITH_PATH']
+
+    return {'return': 0, 'version': version}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-llvm/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-llvm/run.bat
new file mode 100644
index 000000000..632b201da
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-llvm/run.bat
@@ -0,0 +1,3 @@
+%CM_LLVM_CLANG_BIN_WITH_PATH% --version > tmp-ver.out
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-llvm/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-llvm/run.sh
new file mode 100644
index 000000000..c24cbb1ad
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-llvm/run.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+clang_bin=${CM_LLVM_CLANG_BIN_WITH_PATH}
+${clang_bin} --version > tmp-ver.out
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-microtvm/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-microtvm/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-microtvm/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-microtvm/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-microtvm/README-extra.md
new file mode 100644
index 000000000..5e8876519
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-microtvm/README-extra.md
@@ -0,0 +1,5 @@
+# GET-MICROTVM
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) clones the git repository of [Microtvm](https://github.com/octoml/microtvm) and caches it in CM for reuse across other CM scripts.
+
+## Exported Variables
+1. [CM_MICROTVM_SOURCE](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-microtvm/customize.py#L24): Location in the CM cache where the microtvm git repository is cloned.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-microtvm/README.md b/cmx4mlops/cmx4mlops/repo/script/get-microtvm/README.md
new file mode 100644
index 000000000..253e9c6cb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-microtvm/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/TinyML-automation/get-microtvm](https://docs.mlcommons.org/cm4mlops/scripts/TinyML-automation/get-microtvm) for the documentation of this CM script.
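Returning to the detection half of get-llvm shown above: customize.py locates a `clang` binary via find_artifact() and then parses its version banner with the regex `clang version\s*([\d.]+)`. The same step as a standalone sketch, assuming `clang` is on `PATH`:

```python
import re
import subprocess


def detect_clang_version(clang_bin='clang'):
    # Same pattern as get-llvm's detect_version():
    # match 'clang version 14.0.0 ...' in the --version banner.
    out = subprocess.run([clang_bin, '--version'],
                         capture_output=True, text=True).stdout
    m = re.search(r'clang version\s*([\d.]+)', out)
    return m.group(1) if m else None


if __name__ == '__main__':
    print(detect_clang_version())  # e.g. '14.0.0' when clang is on PATH
```

Parsing the banner instead of trusting a file name is what lets the script detect compilers installed outside any package manager.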
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-microtvm/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-microtvm/_cm.yaml
new file mode 100644
index 000000000..c47a88f31
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-microtvm/_cm.yaml
@@ -0,0 +1,38 @@
+alias: get-microtvm
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: TinyML automation
+default_version: main
+deps:
+- tags: detect,os
+env:
+  CM_GIT_AUTH: 'yes'
+  CM_GIT_DEPTH: ''
+  CM_GIT_PATCH: 'no'
+  CM_GIT_URL: https://github.com/mlcommons/tiny_results_v1.0
+input_mapping:
+  ssh: CM_GIT_SSH
+local_env_keys:
+- CM_GIT_*
+new_env_keys:
+- CM_MICROTVM_*
+tags:
+- get
+- src
+- source
+- microtvm
+- tiny
+uid: a9cad70972a140b9
+variations:
+  full-history:
+    env:
+      CM_GIT_DEPTH: ''
+  short-history:
+    env:
+      CM_GIT_DEPTH: --depth 10
+versions:
+  custom: {}
+  main:
+    env:
+      CM_GIT_CHECKOUT: main
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-microtvm/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-microtvm/customize.py
new file mode 100644
index 000000000..85440ac38
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-microtvm/customize.py
@@ -0,0 +1,39 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+import shutil
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    if os_info['platform'] == 'windows':
+        return {'return': 1, 'error': 'Windows is not supported in this script yet'}
+    env = i['env']
+    if 'CM_GIT_DEPTH' not in env:
+        env['CM_GIT_DEPTH'] = ''
+    if 'CM_GIT_RECURSE_SUBMODULES' not in env:
+        env['CM_GIT_RECURSE_SUBMODULES'] = ''
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+    state = i['state']
+
+    env['CM_MICROTVM_SOURCE'] = os.path.join(os.getcwd(), 'microtvm')
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-microtvm/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-microtvm/run.sh
new file mode 100644
index 000000000..2bffb48d8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-microtvm/run.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+CUR_DIR=$PWD
+SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}
+
+echo "******************************************************"
+echo "Cloning microtvm from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES}..."
+
+if [ ! -d "microtvm" ]; then
+  git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} microtvm
+  if [ "${?}" != "0" ]; then exit 1; fi
+fi
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
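get-microtvm's run.sh clones only when the target directory is absent, which is what makes the cached CM entry reusable across runs. The same guarded-clone pattern as a standalone Python sketch; the function name and defaults are illustrative:

```python
import os
import subprocess


def clone_once(git_url, checkout, depth='', recurse='', target='microtvm'):
    # Mirrors get-microtvm/run.sh: skip the clone when the target
    # directory already exists, so the cached entry is reused.
    if os.path.isdir(target):
        return target
    cmd = ['git', 'clone']
    if recurse:
        cmd.append(recurse)   # e.g. '--recurse-submodules'
    cmd += ['-b', checkout]
    if depth:
        cmd += depth.split()  # e.g. '--depth 10' for the short-history variation
    cmd += [git_url, target]
    subprocess.run(cmd, check=True)
    return target
```

Because the clone runs inside the script's cache directory, postprocess() can then derive `CM_MICROTVM_SOURCE` simply from `os.getcwd()`.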
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/README.md new file mode 100644 index 000000000..3c1ebea2f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-3d-unet-kits19](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-3d-unet-kits19) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/_cm.yaml new file mode 100644 index 000000000..7dc7f5b06 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/_cm.yaml @@ -0,0 +1,100 @@ +alias: get-ml-model-3d-unet-kits19 +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +env: + CM_ML_MODEL: 3d-unet-kits19 + CM_ML_MODEL_DATASET: kits19 + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' +new_env_keys: +- CM_ML_MODEL_* +print_env_at_the_end: + CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model +tags: +- get +- ml-model +- raw +- 3d-unet +- kits19 +- medical-imaging +uid: fb7e31419c0f4226 +variations: + fp32: + default: true + env: + CM_ML_MODEL_INPUT_DATA_TYPES: fp32 + CM_ML_MODEL_PRECISION: fp32 + CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + group: precision + onnx: + default: true + env: + CM_ML_MODEL_FRAMEWORK: onnx + group: framework + onnx,fp32: + deps: + - env: + CM_DOWNLOAD_CHECKSUM: 82f0618fde78f9839e7c712274019b4a + CM_DOWNLOAD_FILENAME: 3dunet_kits19_128x128x128_dynbatch.onnx + CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH + CM_DOWNLOAD_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128_dynbatch.onnx?download=1 + extra_cache_tags: 3d-unet,medical-imaging + force-cache: true + tags: download,file,download-file,_wget + env: + CM_ML_MODEL_ACCURACY: '0.86170' + CM_ML_MODEL_FILE: 3dunet_kits19_128x128x128_dynbatch.onnx + pytorch: + env: + CM_ML_MODEL_FRAMEWORK: pytorch + group: framework + pytorch,fp32: + deps: + - env: + CM_DOWNLOAD_CHECKSUM: 2251109371f408c9f10a4320ffdcaef8 + CM_DOWNLOAD_FILENAME: 3dunet_kits19_pytorch.ptc + CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH + CM_DOWNLOAD_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch.ptc?download=1 + extra_cache_tags: 3d-unet,medical-imaging + force-cache: true + tags: download,file,download-file,_wget + env: + CM_ML_MODEL_ACCURACY: '0.86170' + CM_ML_MODEL_FILE: 3dunet_kits19_pytorch.ptc + pytorch,fp32,weights: + deps: + - env: + CM_DAE_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH + CM_DAE_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch_checkpoint.pth?download=1 + CM_DOWNLOAD_CHECKSUM: 09c696e3ec13d83c628498bcd831eb5b + CM_DOWNLOAD_FILENAME: 3dunet_kits19_pytorch_checkpoint.pth + extra_cache_tags: 3d-unet,medical-imaging + force-cache: true + tags: download-and-extract,_wget,_extract + env: + CM_ML_MODEL_ACCURACY: '0.86170' + CM_ML_MODEL_FILE: 3dunet_kits19_pytorch_checkpoint.pth + tensorflow: + alias: tf + tf: + env: + CM_ML_MODEL_FRAMEWORK: tensorflow + group: framework + tf,fp32: + deps: + - env: + CM_DAE_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH + CM_DAE_URL: https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.tf.zip?download=1 + CM_DOWNLOAD_CHECKSUM: 9497108bd0504ae8f85a764a807b76a9 + CM_DOWNLOAD_FILENAME: 3dunet_kits19_128x128x128.tf.zip + 
extra_cache_tags: 3d-unet,medical-imaging + force-cache: true + tags: download-and-extract,_wget,_extract + env: + CM_ML_MODEL_ACCURACY: '0.86170' + CM_ML_MODEL_FILE: 3dunet_kits19_128x128x128.tf + weights: + env: + CM_MODEL_WEIGHTS_FILE: 'yes' diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/customize.py new file mode 100644 index 000000000..897e758c3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-3d-unet-kits19/customize.py @@ -0,0 +1,36 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + cm = automation.cmind + + path = os.path.dirname(env['CM_ML_MODEL_FILE_WITH_PATH']) + + if env.get("CM_DAE_EXTRACT_DOWNLOADED", " ") != " ": + env['CM_ML_MODEL_PATH'] = os.path.join(path, env['CM_ML_MODEL_FILE']) + env['CM_ML_MODEL_FILE_WITH_PATH'] = env['CM_ML_MODEL_PATH'] + else: + env['CM_ML_MODEL_PATH'] = path + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-base-squad/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-base-squad/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-base-squad/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-base-squad/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-base-squad/README.md new file mode 100644 index 000000000..50aaa58fe --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-base-squad/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-bert-base-squad](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-bert-base-squad) for the documentation of this CM script. 
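The `CM_DOWNLOAD_CHECKSUM` values in the model metadata above are MD5 digests used to validate a download before it enters the cache. A minimal verification sketch; the download and cache plumbing itself is omitted:

```python
import hashlib


def md5_matches(path, expected):
    # Stream the file in 1 MiB chunks so large model files
    # do not need to fit in memory.
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            h.update(chunk)
    return h.hexdigest() == expected


# e.g. for the ONNX 3d-unet model listed above:
# md5_matches('3dunet_kits19_128x128x128_dynbatch.onnx',
#             '82f0618fde78f9839e7c712274019b4a')
```

Checksums per variation are what allow several framework/precision combinations of the same model to coexist safely in one cache.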
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-base-squad/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-base-squad/_cm.yaml new file mode 100644 index 000000000..477f5570a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-base-squad/_cm.yaml @@ -0,0 +1,68 @@ +alias: get-ml-model-bert-base-squad +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +env: + CM_ML_MODEL: BERT + CM_ML_MODEL_DATASET: squad-1.1 + CM_ML_MODEL_MAX_SEQ_LENGTH: '384' + CM_ML_MODEL_NAME: MLPERF BERT Base on SQuAD v1.1 + CM_TMP_ML_MODEL_REQUIRE_DOWNLOAD: 'no' +new_env_keys: +- CM_ML_MODEL* +post_deps: +- tags: get,bert,squad,vocab +prehook_deps: +- enable_if_env: + CM_TMP_ML_MODEL_REQUIRE_DOWNLOAD: 'yes' + env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH + CM_EXTRACT_EXTRACTED_FILENAME: <<>> + CM_EXTRACT_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH + tags: download-and-extract + update_tags_from_env_with_prefix: + _url.: + - CM_PACKAGE_URL +print_env_at_the_end: + CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH: Path to the BERT vocab file + CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model +tags: +- get +- ml-model +- raw +- bert +- bert-base +- bert-squad +- language +- language-processing +uid: b3b10b452ce24c5f +variations: + deepsparse: + env: + CM_ML_MODEL_FRAMEWORK: deepsparse + CM_ML_MODEL_INPUT_IDS_NAME: input_ids + CM_ML_MODEL_INPUT_MASK_NAME: input_mask + CM_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids + CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits + CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits + group: framework + deepsparse,int8: + deps: + - names: + - neural-magic-zoo-downloader + tags: get,ml-model,zoo,deepsparse,_bert-base-pruned95_obs_quant-none + env: + CM_ML_MODEL_F1: '87.89' + CM_ML_MODEL_FILE: model.onnx + CM_PRUNING_PERCENTAGE: '95' + fp32: + default: true + env: + CM_ML_MODEL_PRECISION: fp32 + group: precision + int8: + env: + CM_ML_MODEL_PRECISION: int8 + CM_ML_MODEL_QUANTIZED: 'yes' + group: precision diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/README.md new file mode 100644 index 000000000..048269a11 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-bert-large-squad](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-bert-large-squad) for the documentation of this CM script. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/_cm.yaml new file mode 100644 index 000000000..e5b4d11bb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/_cm.yaml @@ -0,0 +1,265 @@ +alias: get-ml-model-bert-large-squad +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +env: + CM_ML_MODEL: BERT + CM_ML_MODEL_DATASET: squad-1.1 + CM_ML_MODEL_MAX_SEQ_LENGTH: '384' + CM_ML_MODEL_NAME: MLPERF BERT Large on SQuAD v1.1 + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>> +new_env_keys: +- CM_ML_MODEL* +post_deps: +- tags: get,dataset-aux,squad-vocab +prehook_deps: +- env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH + CM_DOWNLOAD_URL1: <<>> + CM_EXTRACT_EXTRACTED_FILENAME: <<>> + CM_EXTRACT_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH + extra_cache_tags: bert-large,ml-model + force_cache: true + skip_if_env: + CM_ML_MODEL_BERT_PACKED: + - 'yes' + tags: download-and-extract + update_tags_from_env_with_prefix: + _url.: + - CM_PACKAGE_URL +print_env_at_the_end: + CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model +tags: +- get +- ml-model +- raw +- bert +- bert-large +- bert-squad +- language +- language-processing +uid: 5e865dbdc65949d2 +variations: + amazon-s3: + group: download-source + armi: + group: download-source + custom-url.#: + env: + CM_PACKAGE_URL: '#' + group: download-source + deepsparse: + default_variations: + download-source: github + env: + CM_ML_MODEL_FRAMEWORK: deepsparse + CM_ML_MODEL_INPUT_IDS_NAME: input_ids + CM_ML_MODEL_INPUT_MASK_NAME: input_mask + CM_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids + CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits + CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits + group: framework + deepsparse,int8: + env: + CM_DAE_EXTRACT_DOWNLOADED: 'yes' + CM_ML_MODEL_F1: '90.21282641816266' + CM_ML_MODEL_FILE: oBERT-Large_95sparse_block4_qat.onnx + deepsparse,int8,github: + env: + CM_PACKAGE_URL: https://github.com/mlcommons/inference_results_v2.1/raw/master/open/NeuralMagic/code/bert/deepsparse/models/oBERT-Large_95sparse_block4_qat.onnx.tar.xz + fp32: + default: true + env: + CM_ML_MODEL_PRECISION: fp32 + group: precision + github: + group: download-source + int8: + env: + CM_ML_MODEL_PRECISION: int8 + CM_ML_MODEL_QUANTIZED: 'yes' + group: precision + onnx: + default: true + default_variations: + download-source: armi + env: + CM_ML_MODEL_FRAMEWORK: onnx + CM_ML_MODEL_INPUT_IDS_NAME: input_ids + CM_ML_MODEL_INPUT_MASK_NAME: input_mask + CM_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids + CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits + CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits + group: framework + onnx,fp32: + env: + CM_DOWNLOAD_CHECKSUM: 819b25b19cd8e59080c10892689750ca + CM_ML_MODEL_F1: '90.874' + onnx,fp32,armi: + env: + CM_PACKAGE_URL: https://armi.in/files/model.onnx + CM_PACKAGE_URL1: https://zenodo.org/record/3733910/files/model.onnx + onnx,fp32,zenodo: + env: + CM_PACKAGE_URL: https://zenodo.org/record/3733910/files/model.onnx + onnx,int8: + env: + CM_DOWNLOAD_CHECKSUM: 45f88ffb2915362242703c85c38ec2d4 + CM_ML_MODEL_F1: '90.067' + CM_PACKAGE_URL: https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx + onnx,int8,amazon-s3: + env: + CM_PACKAGE_URL: https://mlperf-public.s3.us-west-2.amazonaws.com/bert_large_v1_1_fake_quant.onnx + onnx,int8,zenodo: + env: + CM_PACKAGE_URL: 
https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx + onnxruntime: + base: + - onnx + packed: + deps: + - names: + - python + - python3 + tags: get,python3 + version_max: 3.8.999 + version_max_usable: 3.8.12 + - names: + - torch + - pytorch + tags: get,generic-python-lib,_torch + version: 1.8.1 + - names: + - tensorflow + tags: get,generic-python-lib,_package.tensorflow + version: 2.11.0 + - names: + - transformers + tags: get,generic-python-lib,_package.transformers + version: 2.4.0 + - names: + - protobuf + tags: get,generic-python-lib,_package.protobuf + version: 3.20.1 + - names: + - onnx + tags: get,generic-python-lib,_package.onnx + version: 1.12.0 + - names: + - onnx-graphsurgeon + tags: get,generic-python-lib,_onnx-graphsurgeon + version: 0.3.26 + - names: + - numpy + tags: get,generic-python-lib,_numpy + version: 1.23.0 + - names: + - inference-src + tags: get,mlperf,inference,src + env: + CM_ML_MODEL_BERT_PACKED: 'yes' + group: packing + new_env_keys: + - CM_BERT_ + prehook_deps: + - env: + CM_DOWNLOAD_CHECKSUM: 3089b27c559906a868878741d992ade7 + CM_DOWNLOAD_FILENAME: model.ckpt-5474.data-00000-of-00001 + CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CHECKPOINT_DATA_PATH + CM_DOWNLOAD_PATH: <<>> + extra_cache_tags: bert,checkpoint,weights,bert-large + force_cache: true + tags: download,file,_wget,_url.https://zenodo.org/record/3733868/files/model.ckpt-5474.data-00000-of-00001 + - env: + CM_DOWNLOAD_CHECKSUM: d23d61572d9404da4dac3363b5bc735b + CM_DOWNLOAD_FILENAME: model.ckpt-5474.index + CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CHECKPOINT_INDEX_PATH + CM_DOWNLOAD_PATH: <<>> + extra_cache_tags: bert,checkpoint-index,bert-large + force_cache: true + tags: download,file,_wget,_url.https://zenodo.org/record/3733868/files/model.ckpt-5474.index + - env: + CM_DOWNLOAD_CHECKSUM: 83e11e57eea14c9e9a246af74af40d66 + CM_DOWNLOAD_FILENAME: model.ckpt-5474.meta + CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CHECKPOINT_META_PATH + CM_DOWNLOAD_PATH: <<>> + extra_cache_tags: bert,checkpoint-meta,bert-large + force_cache: true + tags: download,file,_wget,_url.https://zenodo.org/record/3733868/files/model.ckpt-5474.meta + - env: + CM_DOWNLOAD_CHECKSUM: 64800d5d8528ce344256daf115d4965e + CM_DOWNLOAD_FILENAME: vocab.txt + CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_VOCAB_PATH + CM_DOWNLOAD_PATH: <<>> + extra_cache_tags: bert,vocab,bert-large + force_cache: true + tags: download,file,_wget,_url.https://zenodo.org/record/3733868/files/vocab.txt + - env: + CM_DOWNLOAD_CHECKSUM: 94c91ce422e8f36f9d98b4926e2ad688 + CM_DOWNLOAD_FILENAME: convert_model.py + CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CONVERTER_CODE_PATH + extra_cache_tags: bert,checkpoint,converter,code,bert-large + force_cache: true + tags: download,file,_wget,_url.https://raw.githubusercontent.com/krai/axs2kilt/main/model_onnx_bert_large_packed_recipe/convert_model.py + pytorch: + default_variations: + download-source: armi + env: + CM_ML_MODEL_FRAMEWORK: pytorch + CM_ML_MODEL_INPUT_IDS_NAME: input_ids + CM_ML_MODEL_INPUT_MASK_NAME: input_mask + CM_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids + CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits + CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits + group: framework + pytorch,fp32: + env: + CM_DOWNLOAD_CHECKSUM: 00fbcbfaebfa20d87ac9885120a6e9b4 + CM_ML_MODEL_F1: '90.874' + pytorch,fp32,armi: + env: + CM_PACKAGE_URL: https://armi.in/files/fp32/model.pytorch + CM_PACKAGE_URL1: https://zenodo.org/record/3733896/files/model.pytorch + pytorch,fp32,zenodo: + env: + CM_PACKAGE_URL: 
https://zenodo.org/record/3733896/files/model.pytorch + pytorch,int8: + env: + CM_DOWNLOAD_CHECKSUM: 0734c580cb53b4b56a3f400771ffcb7c + CM_ML_MODEL_F1: '90.633' + pytorch,int8,armi: + env: + CM_PACKAGE_URL: https://armi.in/files/int8/pytorch_model.bin + CM_PACKAGE_URL1: https://zenodo.org/record/4792496/files/pytorch_model.bin + pytorch,int8,zenodo: + env: + CM_PACKAGE_URL: https://zenodo.org/record/4792496/files/pytorch_model.bin + tensorflow: + base: + - tf + tf: + default_variations: + download-source: zenodo + env: + CM_ML_MODEL_FRAMEWORK: tf + CM_ML_MODEL_INPUT_IDS_NAME: input_ids + CM_ML_MODEL_INPUT_MASK_NAME: input_mask + CM_ML_MODEL_INPUT_SEGMENTS_NAME: segment_ids + CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: output_end_logits + CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: output_start_logits + group: framework + tf,fp32: + env: + CM_DOWNLOAD_CHECKSUM: dd72de12e8226f25f0128a1a864b97ad + CM_ML_MODEL_F1: '90.874' + tf,fp32,zenodo: + env: + CM_PACKAGE_URL: https://zenodo.org/record/3939747/files/model.pb + unpacked: + default: true + env: + CM_ML_MODEL_BERT_PACKED: 'no' + group: packing + zenodo: + group: download-source diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/customize.py new file mode 100644 index 000000000..5d4afe15a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/customize.py @@ -0,0 +1,48 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + if env.get('CM_ML_MODEL_BERT_PACKED', '') == 'yes': + i['run_script_input']['script_name'] = "run-packed" + env['CM_BERT_CONFIG_PATH'] = os.path.join( + env['CM_MLPERF_INFERENCE_BERT_PATH'], "bert_config.json") + env['CM_BERT_CHECKPOINT_DOWNLOAD_DIR'] = os.getcwd() + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + os.getcwd(), "model.onnx") + env['CM_ML_MODEL_BERT_PACKED_PATH'] = os.path.join( + os.getcwd(), "model.onnx") + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['CM_ML_MODEL_FILE'] = os.path.basename( + env['CM_ML_MODEL_FILE_WITH_PATH']) + + if env.get('CM_ML_MODEL_PRECISION', '') == "fp32": + env['CM_ML_MODEL_BERT_LARGE_FP32_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + elif env.get('CM_ML_MODEL_PRECISION', '') == "int8": + env['CM_ML_MODEL_BERT_LARGE_INT8_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/run-packed.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/run-packed.sh new file mode 100644 index 000000000..4c7b016c9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-bert-large-squad/run-packed.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_BERT_CONVERTER_CODE_PATH} --src '${CM_BERT_CHECKPOINT_INDEX_PATH}/../model.ckpt-5474' --dest '$PWD/' --config_path '${CM_BERT_CONFIG_PATH}'" +echo $cmd +eval $cmd +test $? -eq 0 || exit $? 
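`run-packed.sh` above completes the `packed` variation declared earlier: CM first pins Python <= 3.8 and fixed torch/tensorflow/transformers/onnx versions, downloads the `model.ckpt-5474` shards, the vocabulary and `convert_model.py`, and the script then re-packs the checkpoint into `model.onnx` in the cache directory. A hedged invocation sketch, assuming default tag resolution:

```bash
# Illustrative only: _packed selects the packing group from the _cm.yaml above;
# the framework group defaults to onnx.
cmr "get ml-model raw bert bert-large bert-squad _packed"
```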
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/README.md new file mode 100644 index 000000000..c6f183d8e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-dlrm-terabyte](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-dlrm-terabyte) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/_cm.yaml new file mode 100644 index 000000000..6227a9a17 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/_cm.yaml @@ -0,0 +1,125 @@ +alias: get-ml-model-dlrm-terabyte +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH + CM_EXTRACT_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH + CM_ML_MODEL: dlrm + CM_ML_MODEL_DATASET: criteo-terabyte + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' +input_mapping: + dir: CM_DOWNLOAD_PATH + download_path: CM_DOWNLOAD_PATH + to: CM_DOWNLOAD_PATH +new_env_keys: +- CM_ML_MODEL_* +prehook_deps: +- env: + CM_DOWNLOAD_DOWNLOADED_FILENAME: <<>> + CM_EXTRACT_EXTRACTED_FILENAME: <<>> + extra_cache_tags: ml-model,dlrm,terabyte,raw,ml-model-dlrm + force_cache: true + names: + - dae + tags: download-and-extract + update_tags_from_env_with_prefix: + _url.: + - CM_PACKAGE_URL +print_env_at_the_end: + CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model +tags: +- get +- ml-model +- dlrm +- raw +- terabyte +- criteo-terabyte +- criteo +- recommendation +uid: 8fa7582c603a4db3 +variations: + debug: + env: + CM_ML_MODEL_DEBUG: 'yes' + fp32: + default: true + env: + CM_ML_MODEL_INPUT_DATA_TYPES: fp32 + CM_ML_MODEL_PRECISION: fp32 + CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + group: precision + onnx: + env: + CM_ML_MODEL_FRAMEWORK: onnx + group: framework + onnx,fp32: + env: + CM_DOWNLOAD_CHECKSUM: 763b964eaffe5f86e92cdcb60c5dc0de + CM_ML_MODEL_ACCURACY: '0.8025' + CM_ML_MODEL_DLRM_MAX_INDEX_RANGE: '40000000' + CM_ML_MODEL_FILE: tb00_40M.onnx + CM_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb00_40M.onnx.tar + CM_UNTAR: 'yes' + onnx,fp32,debug: + env: + CM_DOWNLOAD_CHECKSUM: d11255cd9926cda9181a347861e4d263 + CM_ML_MODEL_ACCURACY: '0.8107' + CM_ML_MODEL_DLRM_MAX_INDEX_RANGE: '10000000' + CM_ML_MODEL_FILE: tb0875_10M.onnx + CM_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb0875_10M.onnx.tar + CM_UNTAR: 'yes' + pytorch: + default: true + env: + CM_ML_MODEL_FRAMEWORK: pytorch + CM_TMP_MODEL_ADDITIONAL_NAME: dlrm_terabyte.pytorch + group: framework + pytorch,fp32: + env: + CM_DOWNLOAD_CHECKSUM: 2d49a5288cddb37c3c64860a06d79bb9 + CM_ML_MODEL_ACCURACY: '0.8025' + CM_ML_MODEL_DLRM_MAX_INDEX_RANGE: '40000000' + CM_PACKAGE_URL: 
https://dlrm.s3-us-west-1.amazonaws.com/models/tb00_40M.pt + pytorch,fp32,debug: + env: + CM_DOWNLOAD_CHECKSUM: b7cacffcf75f767faa9cb2af397723aa + CM_ML_MODEL_ACCURACY: '0.8107' + CM_ML_MODEL_DLRM_MAX_INDEX_RANGE: '10000000' + CM_PACKAGE_URL: https://dlrm.s3-us-west-1.amazonaws.com/models/tb0875_10M.pt + pytorch,fp32,weight_sharded: + default_variations: + download-tool: rclone + env: + CM_DOWNLOAD_CHECKSUM: '' + CM_ML_MODEL_ACCURACY: '0.8025' + CM_ML_MODEL_DLRM_MAX_INDEX_RANGE: '40000000' + CM_ML_MODEL_FILE: model_weights + CM_TMP_MODEL_ADDITIONAL_NAME: '' + pytorch,fp32,weight_sharded,rclone: + env: + CM_PACKAGE_URL: mlc-inference:mlcommons-inference-wg-public/model_weights + CM_RCLONE_CONFIG_NAME: mlc-inference + pytorch,fp32,weight_sharded,wget: + env: + CM_DAE_EXTRACT_DOWNLOADED: 'yes' + CM_DOWNLOAD_FILENAME: download + CM_EXTRACT_UNZIP: 'yes' + CM_PACKAGE_URL: https://cloud.mlcommons.org/index.php/s/XzfSeLgW8FYfR3S/download + rclone: + ad: + dae: + tags: _rclone + group: download-tool + weight_sharded: + default: true + env: + CM_DLRM_MULTIHOT_MODEL: 'yes' + group: type + wget: + ad: + dae: + tags: _wget + group: download-tool diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/run.sh new file mode 100644 index 000000000..d2595b32f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-dlrm-terabyte/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash +if [[ ${CM_TMP_MODEL_ADDITIONAL_NAME} ]]; then + ln -s ${CM_ML_MODEL_FILE} ${CM_TMP_MODEL_ADDITIONAL_NAME} +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/README.md new file mode 100644 index 000000000..372afe3c5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-efficientnet-lite](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-efficientnet-lite) for the documentation of this CM script.
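The EfficientNet-Lite meta that follows uses `valid_variation_combinations` to tie each model kind to one input resolution (lite0→224 through lite4→300). Illustrative sketches under that constraint, assuming the tags resolve as declared below:

```bash
# lite0 already defaults to resolution-224, so only the precision group needs choosing.
cmr "get ml-model efficientnet raw _lite0 _uint8"

# An explicit resolution must match the kind, or the combination is rejected.
cmr "get ml-model efficientnet raw _lite4 _resolution-300 _fp32"
```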
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/_cm.yaml new file mode 100644 index 000000000..e40dd196c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/_cm.yaml @@ -0,0 +1,142 @@ +alias: get-ml-model-efficientnet-lite +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +default_env: + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_PRECISION: fp32 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 +env: + CM_EXTRACT_FOLDER: efficientnet-<<>> + CM_ML_MODEL: efficientnet-lite + CM_ML_MODEL_DATASET: imagenet2012-val + CM_ML_MODEL_DATA_LAYOUT: NHWC + CM_ML_MODEL_FILE: efficientnet-<<>>-<<>>.tflite + CM_ML_MODEL_FULL_NAME: efficientnet-<<>>-<<>> + CM_ML_MODEL_GIVEN_CHANNEL_MEANS: '' + CM_ML_MODEL_INPUT_LAYER_NAME: images + CM_ML_MODEL_INPUT_SHAPES: '\"input\": (BATCH_SIZE, 224, 224, 3)' + CM_ML_MODEL_MOBILENET_NAME_SUFFIX: '' + CM_ML_MODEL_NORMALIZE_DATA: 'yes' + CM_ML_MODEL_OUTPUT_LAYER_NAME: Softmax + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_SUBTRACT_MEANS: '0' + CM_ML_MODEL_WEIGHTS_ARE_CHECKPOINTS: 'yes' + CM_ML_MODEL_WEIGHTS_FILE: model.ckpt.data-00000-of-00001 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' + CM_PACKAGE_URL: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/lite/efficientnet-<<>>.tar.gz + CM_UNTAR: 'yes' +new_env_keys: +- CM_ML_MODEL_* +- CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS +print_env_at_the_end: + CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model +tags: +- get +- ml-model +- efficientnet +- raw +- ml-model-efficientnet +- ml-model-efficientnet-lite +- lite +- tflite +- image-classification +uid: 1041f681977d4b7c +valid_variation_combinations: +- - lite0 + - resolution-224 +- - lite1 + - resolution-240 +- - lite2 + - resolution-260 +- - lite3 + - resolution-280 +- - lite4 + - resolution-300 +variations: + fp32: + default: true + env: + CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION: fp32 + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_PRECISION: fp32 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + group: precision + int8: + alias: uint8 + lite0: + base: + - resolution-224 + default: true + env: + CM_ML_MODEL_EFFICIENTNET_LITE_KIND: lite0 + group: kind + lite1: + base: + - resolution-240 + env: + CM_ML_MODEL_EFFICIENTNET_LITE_KIND: lite1 + group: kind + lite2: + base: + - resolution-260 + env: + CM_ML_MODEL_EFFICIENTNET_LITE_KIND: lite2 + group: kind + lite3: + base: + - resolution-280 + env: + CM_ML_MODEL_EFFICIENTNET_LITE_KIND: lite3 + group: kind + lite4: + base: + - resolution-300 + env: + CM_ML_MODEL_EFFICIENTNET_LITE_KIND: lite4 + group: kind + resolution-224: + default: true + env: + CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.224 + CM_ML_MODEL_IMAGE_HEIGHT: '224' + CM_ML_MODEL_IMAGE_WIDTH: '224' + CM_ML_MODEL_MOBILENET_RESOLUTION: '224' + group: resolution + resolution-240: + env: + CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.240 + CM_ML_MODEL_IMAGE_HEIGHT: '240' + CM_ML_MODEL_IMAGE_WIDTH: '240' + CM_ML_MODEL_MOBILENET_RESOLUTION: '240' + group: resolution + resolution-260: + env: + CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.260 + CM_ML_MODEL_IMAGE_HEIGHT: '260' + CM_ML_MODEL_IMAGE_WIDTH: '260' + CM_ML_MODEL_MOBILENET_RESOLUTION: '260' + group: resolution + resolution-280: + env: + CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.280 + CM_ML_MODEL_IMAGE_HEIGHT: '280' + CM_ML_MODEL_IMAGE_WIDTH: '280' + CM_ML_MODEL_MOBILENET_RESOLUTION: '280' + group: 
resolution + resolution-300: + env: + CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.300 + CM_ML_MODEL_IMAGE_HEIGHT: '300' + CM_ML_MODEL_IMAGE_WIDTH: '300' + CM_ML_MODEL_MOBILENET_RESOLUTION: '300' + group: resolution + tflite: {} + uint8: + env: + CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION: int8 + CM_ML_MODEL_INPUTS_DATA_TYPE: uint8 + CM_ML_MODEL_PRECISION: uint8 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: uint8 + group: precision diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/customize.py new file mode 100644 index 000000000..dbeedee41 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-efficientnet-lite/customize.py @@ -0,0 +1,67 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + cm = automation.cmind + + path = os.getcwd() + + url = env['CM_PACKAGE_URL'] + env['CM_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url + + print('Downloading from {}'.format(url)) + + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': url}) + if r['return'] > 0: + return r + + filename = r['filename'] + + if env.get('CM_UNZIP') == "yes" or env.get('CM_UNTAR') == "yes": + if env.get('CM_UNZIP') == "yes": + cmd = "unzip " + elif env.get('CM_UNTAR') == "yes": + cmd = "tar -xvzf " + os.system(cmd + filename) + + filename = env['CM_ML_MODEL_FILE'] + + extract_folder = env.get('CM_EXTRACT_FOLDER', '') + + if extract_folder: + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + path, extract_folder, filename) + else: + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename) + else: + env['CM_ML_MODEL_FILE'] = filename + env['CM_ML_MODEL_FILE_WITH_PATH'] = r['path'] + + env['CM_ML_MODEL_PATH'] = path + + if not os.path.exists(env['CM_ML_MODEL_FILE_WITH_PATH']): + return { + 'return': 1, 'error': f"Model file path {env['CM_ML_MODEL_FILE_WITH_PATH']} does not exist. The model name {env['CM_ML_MODEL_FILE']} in the model meta is probably wrong"} + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/README.md new file mode 100644 index 000000000..f45622846 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-gptj](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-gptj) for the documentation of this CM script.
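The GPT-J meta below maps the `--checkpoint`, `--download_path` and `--to` command-line inputs onto env keys and defaults to the `_rclone` download tool. A hedged sketch (the target directory is a placeholder):

```bash
# --to maps to CM_DOWNLOAD_PATH through input_mapping in the _cm.yaml below.
cmr "get raw ml-model gptj _pytorch _fp32 _rclone" --to=$HOME/models/gptj
```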
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/_cm.yaml new file mode 100644 index 000000000..25e8deca4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/_cm.yaml @@ -0,0 +1,220 @@ +alias: get-ml-model-gptj +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +docker: + run: false +env: + CM_ML_MODEL: GPTJ + CM_ML_MODEL_DATASET: cnndm + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' +input_mapping: + checkpoint: GPTJ_CHECKPOINT_PATH + download_path: CM_DOWNLOAD_PATH + to: CM_DOWNLOAD_PATH +new_env_keys: +- CM_ML_MODEL_* +- GPTJ_CHECKPOINT_PATH +prehook_deps: +- enable_if_env: + CM_TMP_REQUIRE_DOWNLOAD: + - 'yes' + env: + CM_DOWNLOAD_FINAL_ENV_NAME: GPTJ_CHECKPOINT_PATH + CM_EXTRACT_FINAL_ENV_NAME: GPTJ_CHECKPOINT_PATH + CM_EXTRACT_TO_FOLDER: gpt-j + extra_cache_tags: gptj,model + force_cache: true + names: + - dae + tags: download-and-extract + update_tags_from_env_with_prefix: + _url.: + - CM_DOWNLOAD_URL +print_env_at_the_end: + CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model +tags: +- get +- raw +- ml-model +- gptj +- gpt-j +- large-language-model +uid: a41166210f294fbf +variations: + batch_size.#: + env: + CM_ML_MODEL_BATCH_SIZE: '#' + fp32: + env: + CM_ML_MODEL_INPUT_DATA_TYPES: fp32 + CM_ML_MODEL_PRECISION: fp32 + CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + group: precision + fp8: + env: + CM_ML_MODEL_INPUT_DATA_TYPES: fp8 + CM_ML_MODEL_WEIGHT_DATA_TYPES: fp8 + group: precision + int4: + env: + CM_ML_MODEL_INPUT_DATA_TYPES: int4 + CM_ML_MODEL_WEIGHT_DATA_TYPES: int4 + group: precision + int8: + env: + CM_ML_MODEL_INPUT_DATA_TYPES: int8 + CM_ML_MODEL_PRECISION: int8 + CM_ML_MODEL_WEIGHT_DATA_TYPES: int8 + group: precision + intel: + default_variations: + framework: pytorch + group: model-provider + mlcommons: + default: true + default_variations: + precision: fp32 + group: model-provider + nvidia: + default_variations: + framework: pytorch + env: + CM_TMP_ML_MODEL_PROVIDER: nvidia + group: model-provider + pytorch: + default: true + env: + CM_ML_MODEL_DATA_LAYOUT: NCHW + CM_ML_MODEL_FRAMEWORK: pytorch + CM_ML_STARTING_WEIGHTS_FILENAME: <<>> + group: framework + pytorch,fp32: + env: + CM_DOWNLOAD_CHECKSUM_NOT_USED: e677e28aaf03da84584bb3073b7ee315 + CM_DOWNLOAD_EXTRA_OPTIONS: ' --output-document checkpoint.zip' + CM_PACKAGE_URL: https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download + CM_RCLONE_CONFIG_NAME: mlc-inference + CM_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/gpt-j + CM_UNZIP: 'yes' + required_disk_space: 22700 + pytorch,fp32,wget: + add_deps_recursive: + dae: + tags: _extract + pytorch,int4,intel: {} + pytorch,int8,intel: {} + pytorch,intel: + adr: + conda-package: + tags: _name.gptj-pt + default_variations: + precision: int8 + deps: + - tags: get,mlperf,inference,results + version: v3.1 + - env: + CM_GPTJ_INTEL_MODEL: '' + force_new_env_keys: + - GPTJ_CHECKPOINT_PATH + tags: get,ml-model,gpt-j,_fp32,_pytorch + - tags: get,conda,_name.gptj-pt + - tags: get,python,_conda.gptj-pt + - names: + - conda-package + - intel-openmp + tags: get,generic,conda-package,_package.intel-openmp,_source.intel + version: 2023.1.0 + - names: + - conda-package + - jemalloc + tags: get,generic,conda-package,_package.jemalloc,_source.conda-forge + - tags: install,ipex,from.src,_for-intel-mlperf-inference-v3.1-gptj + - tags: get,dataset,cnndm,_calibration + env: + CM_GPTJ_INTEL_MODEL: 'yes' + pytorch,nvidia: + default_variations: + precision: fp8 + 
deps: + - env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_TENSORRT_LLM_CHECKOUT_PATH + extra_cache_tags: tensorrt-llm + tags: get,git,repo,_lfs,_repo.https://github.com/NVIDIA/TensorRT-LLM.git,_sha.0ab9d17a59c284d2de36889832fe9fc7c8697604 + - names: + - cuda + tags: get,cuda + - tags: get,nvidia,scratch,space + - tags: get,cuda-devices,_with-pycuda + - env: {} + force_new_env_keys: + - GPTJ_CHECKPOINT_PATH + tags: get,ml-model,gpt-j,_fp32,_pytorch + - names: + - nvidia-inference-common-code + tags: get,nvidia,inference,common-code + - names: + - python + - python3 + tags: get,python3 + - tags: get,generic-python-lib,_package.safetensors + rclone: + add_deps_recursive: + dae: + tags: _rclone + default: true + env: + CM_DOWNLOAD_FILENAME: checkpoint + CM_DOWNLOAD_URL: <<>> + group: download-tool + saxml: + group: framework + saxml,fp32: + deps: + - tags: get,ml-model,gptj,_pytorch,_fp32 + - names: + - python + - python3 + tags: get,python3 + - tags: get,generic-python-lib,_package.jax[cpu] + - tags: get,generic-python-lib,_package.paxml + - tags: get,generic-python-lib,_package.praxis + - tags: get,generic-python-lib,_package.transformers + - tags: get,generic-python-lib,_package.accelerate + env: + CM_TMP_MODEL_SAXML: fp32 + new_env_keys: + - GPTJ_SAXML_CHECKPOINT_PATH + saxml,int8: + deps: + - tags: get,ml-model,gptj,_saxml,_fp32 + - names: + - python + - python3 + tags: get,python3 + version: 3.10.0 + - tags: get,generic-python-lib,_package.praxis + - tags: get,generic-python-lib,_package.apache-beam + - env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_SAXML_REPO_PATH + extra_cache_tags: saxml + names: + - saxml + tags: get,git,repo,_repo.https://github.com/google/saxml + env: + CM_TMP_MODEL_SAXML: int8 + uint8: + env: + CM_ML_MODEL_INPUT_DATA_TYPES: uint8 + CM_ML_MODEL_PRECISION: uint8 + CM_ML_MODEL_WEIGHT_DATA_TYPES: uint8 + group: precision + wget: + add_deps_recursive: + dae: + tags: _wget + env: + CM_DOWNLOAD_FILENAME: checkpoint.zip + CM_DOWNLOAD_URL: <<>> + group: download-tool diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/convert_gptj_ckpt.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/convert_gptj_ckpt.py new file mode 100644 index 000000000..544caac2b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/convert_gptj_ckpt.py @@ -0,0 +1,184 @@ +"""Convert weights from a gpt-j-6b model to a pax one. 
+ +Usage: + +# Install the latest main branch of huggingface/transformers +pip3 install git+https://github.com/huggingface/transformers + +# Get a checkpoint from the GPTJ family +https://huggingface.co/EleutherAI/gpt-j-6b + +This points to +https://github.com/huggingface/transformers/blob/v4.30.2/src/transformers/models/gptj/modeling_flax_gptj.py +and in the default config, use_parallel_residual is true + +# Example cmd: +python3 -m convert_gptj_ckpt --base EleutherAI/gpt-j-6b --pax pax_3b +""" +import argparse +import jax +from jax.experimental import pjit +import numpy as np +from paxml import checkpoints +from paxml import train_states +from praxis import py_utils +from transformers import AutoModelForCausalLM + +# 6B example +num_layers = 28 +num_heads = 16 +dims_per_head = 256 +vocab = 50401 +num_gpus = 1 + + +def convert(base_model_path, pax_model_path): + """Convert from gpt-j-6b to pax.""" + print(f'Loading the base model from {base_model_path}') + + base = AutoModelForCausalLM.from_pretrained( + base_model_path, low_cpu_mem_usage=True + ) + for key, value in base.state_dict().items(): + print('%s %s' % (key, value.data.numpy().shape)) + + jax_weights = { + 'lm': { + 'embedding_lookup': { + 'emb_var': base.state_dict()[ + 'transformer.wte.weight' + ].data.numpy()[:vocab, :] + }, + 'softmax': { + 'logits_ffn': { + 'linear': { + 'w': ( + base.state_dict()['lm_head.weight'] + .data.numpy() + .transpose()[:, :vocab] + ), + }, + 'bias': {'b': base.state_dict()['lm_head.bias'].data.numpy()}, + } + }, + 'final_ln': { + 'scale': base.state_dict()[ + 'transformer.ln_f.weight' + ].data.numpy(), + 'bias': base.state_dict()['transformer.ln_f.bias'].data.numpy(), + }, + 'transformer': {}, + } + } + + for layer_idx in range(num_layers): + query = base.state_dict()[ + 'transformer.h.%d.attn.q_proj.weight' % layer_idx + ].data.numpy() + key = base.state_dict()[ + 'transformer.h.%d.attn.k_proj.weight' % layer_idx + ].data.numpy() + value = base.state_dict()[ + 'transformer.h.%d.attn.v_proj.weight' % layer_idx + ].data.numpy() + wc = np.stack((query, key, value)) + wc = np.reshape( + wc, [3, num_heads, dims_per_head, num_heads * dims_per_head] + ) + wc = np.transpose(wc, (0, 3, 1, 2)) + + w_post = base.state_dict()[ + 'transformer.h.%d.attn.out_proj.weight' % layer_idx + ].data.numpy() + w_post = np.reshape( + w_post, [num_heads * dims_per_head, num_heads, dims_per_head] + ) + layer_weight = { + 'self_attention': { + 'combined_qkv': { + 'w': wc, + }, + 'post': { + 'w': w_post, + }, + }, + 'ff_layer': { + 'ffn_layer1': { + 'linear': { + 'w': ( + base.state_dict()[ + 'transformer.h.%d.mlp.fc_in.weight' % layer_idx + ] + .data.numpy() + .transpose() + ), + }, + 'bias': { + 'b': base.state_dict()[ + 'transformer.h.%d.mlp.fc_in.bias' % layer_idx + ].data.numpy(), + }, + }, + 'ffn_layer2': { + 'linear': { + 'w': ( + base.state_dict()[ + 'transformer.h.%d.mlp.fc_out.weight' % layer_idx + ] + .data.numpy() + .transpose() + ), + }, + 'bias': { + 'b': base.state_dict()[ + 'transformer.h.%d.mlp.fc_out.bias' % layer_idx + ].data.numpy(), + }, + }, + }, + 'layer_norm': { + 'scale': base.state_dict()[ + 'transformer.h.%d.ln_1.weight' % layer_idx + ].data.numpy(), + 'bias': base.state_dict()[ + 'transformer.h.%d.ln_1.bias' % layer_idx + ].data.numpy(), + }, + } + jax_weights['lm']['transformer']['x_layers_%d' % + layer_idx] = layer_weight + + print(f'Saving the pax model to {pax_model_path}') + jax_states = train_states.TrainState( + step=0, mdl_vars={'params': jax_weights}, opt_states={} + ) + device_mesh =
py_utils.create_device_mesh([1, 1, num_gpus]) + global_mesh = jax.sharding.Mesh( + device_mesh, ['replica', 'data_mdl2', 'mdl']) + + # Identity pjit is needed to output a GDA model_states. + def identity(x): + return x + + pjitted_identity = pjit.pjit( + identity, + in_shardings=None, + out_shardings=None) + with global_mesh: + jax_states_gda = pjitted_identity(jax_states) + + checkpoints.save_checkpoint( + jax_states_gda, + pax_model_path, + checkpoint_type=checkpoints.CheckpointType.GDA, + ) + print('done') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--base-model-path', type=str, required=True) + parser.add_argument('--pax-model-path', type=str, required=True) + args = parser.parse_args() + + convert(args.base_model_path, args.pax_model_path) diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/customize.py new file mode 100644 index 000000000..22ef4bd42 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/customize.py @@ -0,0 +1,110 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if env.get('CM_GPTJ_INTEL_MODEL', '') == 'yes': + i['run_script_input']['script_name'] = 'run-intel' + harness_root = os.path.join( + env['CM_MLPERF_INFERENCE_RESULTS_PATH'], + 'closed', + 'Intel', + 'code', + 'gptj-99', + 'pytorch-cpu') + print(f"Harness Root: {harness_root}") + env['CM_HARNESS_CODE_ROOT'] = harness_root + env['CM_CALIBRATION_CODE_ROOT'] = os.path.join( + env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'calibration') + + env['CHECKPOINT_DIR'] = env['GPTJ_CHECKPOINT_PATH'] + + env['QUANTIZED_MODEL_DIR'] = os.getcwd() + + if env['CM_ML_MODEL_WEIGHT_DATA_TYPES'] == "int8": + env['INT8_MODEL_DIR'] = os.getcwd() + else: + env['INT4_MODEL_DIR'] = os.getcwd() + + elif env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'nvidia': + i['run_script_input']['script_name'] = 'run-nvidia' + if str(env.get('CM_DOCKER_DETACHED_MODE', '') + ).lower() in ['yes', 'true', "1"]: + env['DOCKER_RUN_OPTS'] = "--rm --ipc=host --ulimit memlock=-1 --ulimit stack=67108864" + gpu_arch = int( + float( + env['CM_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY']) * + 10) + env['CM_GPU_ARCH'] = gpu_arch + env['CM_TMP_REQUIRE_DOWNLOAD'] = 'no' + + else: + is_saxml = env.get('CM_TMP_MODEL_SAXML', '') + if is_saxml == "fp32": + i['run_script_input']['script_name'] = 'run-saxml' + elif is_saxml == "int8": + i['run_script_input']['script_name'] = 'run-saxml-quantized' + else: + path = env.get('GPTJ_CHECKPOINT_PATH', '').strip() + + if path == '' or not os.path.exists(path): + env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + if os.path.exists(os.path.join( + env['GPTJ_CHECKPOINT_PATH'], "checkpoint-final")): + env['GPTJ_CHECKPOINT_PATH'] = os.path.join( + env['GPTJ_CHECKPOINT_PATH'], "checkpoint-final") + + is_saxml = env.get('CM_TMP_MODEL_SAXML', '') + if is_saxml == "fp32": + if os.path.exists("pax_gptj_checkpoint"): + 
env['GPTJ_SAXML_CHECKPOINT_PATH'] = os.path.join( + os.getcwd(), "pax_gptj_checkpoint") + env['CM_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_SAXML_CHECKPOINT_PATH'] + else: + return {'return': 1, 'error': 'pax_gptj_checkpoint generation failed'} + + elif is_saxml == "int8": + if os.path.exists("int8_ckpt"): + env['GPTJ_SAXML_INT8_CHECKPOINT_PATH'] = os.path.join( + os.getcwd(), "int8_ckpt") + env['CM_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_SAXML_INT8_CHECKPOINT_PATH'] + else: + return {'return': 1, 'error': 'int8_ckpt generation failed'} + elif env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'nvidia': + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + env['CM_NVIDIA_MLPERF_SCRATCH_PATH'], + 'models', + 'GPTJ-6B', + 'fp8-quantized-ammo', + 'GPTJ-FP8-quantized') + else: + env['CM_ML_MODEL_FILE_WITH_PATH'] = env['GPTJ_CHECKPOINT_PATH'] + + env['CM_ML_MODEL_FILE'] = os.path.basename( + env['CM_ML_MODEL_FILE_WITH_PATH']) + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-int4-calibration.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-int4-calibration.sh new file mode 100644 index 000000000..45c3669e5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-int4-calibration.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +echo ${CM_CALIBRATION_CODE_ROOT} +cd ${CM_CALIBRATION_CODE_ROOT}/gpt-j/pytorch-cpu/INT4 +pip install -r requirements.txt +bash run_calibration_int4.sh + +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-intel.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-intel.sh new file mode 100644 index 000000000..f6cb2134d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-intel.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +export CALIBRATION_DATA_JSON=${CM_CALIBRATION_DATASET_CNNDM_PATH} + + +if [[ ${CM_ML_MODEL_WEIGHT_DATA_TYPES} == "int4" ]]; then + export INT4_CALIBRATION_DIR=${PWD}/quantized-int4-model + bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-int4-calibration.sh + cd ${CM_HARNESS_CODE_ROOT} + bash run_quantization_int4.sh +else + cd ${CM_HARNESS_CODE_ROOT} + bash run_quantization.sh +fi + +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-nvidia.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-nvidia.sh new file mode 100644 index 000000000..27e5a675c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-nvidia.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +if [[ ! -e ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/checkpoint-final ]]; then + mkdir -p ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/ + cp -r ${GPTJ_CHECKPOINT_PATH} ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/checkpoint-final + test $? -eq 0 || exit $? +fi + +echo "cd ${CM_TENSORRT_LLM_CHECKOUT_PATH}" +cd ${CM_TENSORRT_LLM_CHECKOUT_PATH} + +make -C docker build +test $? -eq 0 || exit $? + +export RUN_CMD="bash -c 'python3 scripts/build_wheel.py -a=${CM_GPU_ARCH} --clean --install --trt_root /usr/local/tensorrt/ && python examples/quantization/quantize.py --dtype=float16 --output_dir=/mnt/models/GPTJ-6B/fp8-quantized-ammo/GPTJ-FP8-quantized --model_dir=/mnt/models/GPTJ-6B/checkpoint-final --qformat=fp8 --kv_cache_dtype=fp8 '" +export DOCKER_RUN_ARGS=" -v ${CM_NVIDIA_MLPERF_SCRATCH_PATH}:/mnt" +make -C docker run LOCAL_USER=1 +test $? -eq 0 || exit $?
+ +${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH}/code/gptj/tensorrt/onnx_tune.py --fp8-scalers-path=${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/GPTJ-6B/fp8-quantized-ammo/GPTJ-FP8-quantized/rank0.safetensors --scaler 1.005 --index 15 +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-saxml-quantized.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-saxml-quantized.sh new file mode 100644 index 000000000..e74862be0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-saxml-quantized.sh @@ -0,0 +1,6 @@ +#!/bin/bash +CUR=$PWD +${CM_PYTHON_BIN_WITH_PATH} -m pip install jaxlib==0.4.24 +cd ${CM_TMP_CURRENT_SCRIPT_PATH} +${CM_PYTHON_BIN_WITH_PATH} ${CM_SAXML_REPO_PATH}/saxml/tools/offline_quantize.py --input_dir ${CM_ML_MODEL_FILE_WITH_PATH}/checkpoint_00000000/state --output_dir ${CUR}/int8_ckpt/checkpoint_00000000/state --quantization_configs "gptj" > offline_quantize2.log +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-saxml.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-saxml.sh new file mode 100644 index 000000000..031d736c0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-gptj/run-saxml.sh @@ -0,0 +1,8 @@ +#!/bin/bash +CUR=$PWD +rm -rf pax_gptj_checkpoint +cd ${CM_TMP_CURRENT_SCRIPT_PATH} +${CM_PYTHON_BIN_WITH_PATH} -m convert_gptj_ckpt --base ${GPTJ_CHECKPOINT_PATH} --pax ${CUR}/pax_gptj_checkpoint +test $? -eq 0 || exit $? + +cd "$CUR" diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/README-extra.md new file mode 100644 index 000000000..b7ec3407b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/README-extra.md @@ -0,0 +1,21 @@ +# Examples + +```bash +cmr "get ml-model huggingface zoo _model-stub.alpindale/Llama-2-13b-ONNX" --model_filename=FP32/LlamaV2_13B_float32.onnx --full_subfolder=FP32 +``` + +```bash +cmr "get ml-model huggingface zoo _model-stub.microsoft/Mistral-7B-v0.1-onnx" --model_filename=Mistral-7B-v0.1.onnx,Mistral-7B-v0.1.onnx.data +``` + +```bash +cmr "get ml-model huggingface zoo _model-stub.Intel/gpt-j-6B-int8-static" --model_filename=model.onnx --full_subfolder=. 
+``` + +```bash +cmr "get ml-model huggingface zoo _model-stub.runwayml/stable-diffusion-v1-5" --revision=onnx --model_filename=unet/model.onnx,unet/weights.pb +``` + +```bash +cmr "get ml-model huggingface zoo _model-stub.ctuning/mlperf-inference-bert-onnx-fp32-squad-v1.1" --model_filename=model.onnx +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/README.md new file mode 100644 index 000000000..82557d314 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-huggingface-zoo](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-huggingface-zoo) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/_cm.yaml new file mode 100644 index 000000000..d10c3f448 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/_cm.yaml @@ -0,0 +1,58 @@ +alias: get-ml-model-huggingface-zoo +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +deps: +- names: + - python3 + - python + tags: get,python3 +- tags: get,generic-python-lib,_huggingface_hub +env: {} +input_mapping: + download_path: CM_DOWNLOAD_PATH + env_key: CM_MODEL_ZOO_ENV_KEY + full_subfolder: CM_HF_FULL_SUBFOLDER + model_filename: CM_MODEL_ZOO_FILENAME + revision: CM_HF_REVISION + subfolder: CM_HF_SUBFOLDER +new_env_keys: +- CM_ML_MODEL* +- CM_MODEL_ZOO_STUB +print_env_at_the_end: + CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model +tags: +- get +- ml-model +- model +- zoo +- raw +- model-zoo +- huggingface +tags_help: get ml-model huggingface zoo +uid: 53cf8252a443446a +variations: + clone-repo: + deps: + - env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_ML_MODEL_PATH + tags: get,git,repo,_lfs + update_tags_from_env_with_prefix: + _repo.https://huggingface.co/: + - CM_MODEL_ZOO_STUB + env: + CM_GIT_CLONE_REPO: 'yes' + group: download-type + model-stub.#: + env: + CM_MODEL_ZOO_STUB: '#' + onnx-subfolder: + env: + CM_HF_SUBFOLDER: onnx + pierreguillou_bert_base_cased_squad_v1.1_portuguese: + env: + CM_MODEL_ZOO_STUB: pierreguillou/bert-base-cased-squad-v1.1-portuguese + prune: + env: + CM_MODEL_TASK: prune diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/customize.py new file mode 100644 index 000000000..6f80ad11f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/customize.py @@ -0,0 +1,64 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + cm = automation.cmind + + script_path = i['run_script_input']['path'] + + path = env.get('CM_DOWNLOAD_PATH', '') + if path == '': + path = os.getcwd() + + if env.get('CM_GIT_CLONE_REPO', '') != 
'yes': + run_cmd = env.get('CM_PYTHON_BIN_WITH_PATH') + " " + \ + os.path.join(script_path, 'download_model.py') + else: + run_cmd = '' + + env['CM_RUN_CMD'] = run_cmd + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env_key = env.get('CM_MODEL_ZOO_ENV_KEY', '') + + path_file = env.get('CM_ML_MODEL_FILE_WITH_PATH', '') + if path_file != '': + path_dir = os.path.dirname(path_file) + + env['CM_ML_MODEL_PATH'] = path_dir + + if env_key != '': + env['CM_ML_MODEL_' + env_key + '_PATH'] = path_dir + + else: + path_dir = env['CM_ML_MODEL_PATH'] + + if env_key != '': + env['CM_ML_MODEL_' + env_key + '_FILE_WITH_PATH'] = path_dir + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/download_model.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/download_model.py new file mode 100644 index 000000000..2f3584278 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/download_model.py @@ -0,0 +1,105 @@ +from huggingface_hub import hf_hub_download +import os + +model_stub = os.environ.get('CM_MODEL_ZOO_STUB', '') +model_task = os.environ.get('CM_MODEL_TASK', '') + +revision = os.environ.get('CM_HF_REVISION', '') + +if model_task == "prune": + print("Downloading model: " + model_stub) + + for filename in ["pytorch_model.bin", "config.json"]: + + downloaded_model_path = hf_hub_download(repo_id=model_stub, + filename=filename, + cache_dir=os.getcwd()) + + with open('tmp-run-env.out', 'w') as f: + f.write(f"CM_ML_MODEL_FILE_WITH_PATH={os.path.join(os.getcwd(),'')}") + +else: + subfolder = os.environ.get('CM_HF_SUBFOLDER', '') + full_subfolder = os.environ.get('CM_HF_FULL_SUBFOLDER', '') + + model_filename = os.environ.get('CM_MODEL_ZOO_FILENAME', '') + if model_filename == '': + model_filename = 'model.onnx' + + model_filenames = model_filename.split( + ',') if ',' in model_filename else [model_filename] + + base_model_filepath = None + + files = [] + if full_subfolder != '': + + from huggingface_hub import HfFileSystem + fs = HfFileSystem() + + # List all files in a directory + path = model_stub + '/' + full_subfolder + + print('') + print('Listing files in {} ...'.format(path)) + + def list_hf_files(path): + all_files = [] + + xrevision = None if revision == '' else revision + files = fs.ls(path, revision=xrevision) # , detail=False) + + for f in files: + fname = f['name'] + fdir = f['type'] == 'directory' + + if fdir: + all_files += list_hf_files(fname) + else: + all_files.append(fname) + + return all_files + + files = list_hf_files(path) + + print('') + print('Found {} files'.format(len(files))) + + for f in files: + + remove = len(model_stub) + 1 + + if revision != '': + remove += len(revision) + 1 + + ff = f[remove:] + + if ff not in model_filenames: + model_filenames.append(ff) + + print('') + for model_filename in model_filenames: + + print("Downloading file {} / {} ...".format(model_stub, model_filename)) + + extra_dir = os.path.dirname(model_filename) + + if extra_dir != '' and not os.path.exists(extra_dir): + os.makedirs(extra_dir) + + xrevision = None if revision == '' else revision + xsubfolder = None if subfolder == '' else subfolder + + downloaded_path = hf_hub_download(repo_id=model_stub, + subfolder=xsubfolder, + filename=model_filename, + revision=xrevision, + cache_dir=os.getcwd()) + print(downloaded_path) + if not base_model_filepath: + base_model_filepath = downloaded_path + + print('') + + with open('tmp-run-env.out', 'w') as f: + 
f.write(f"CM_ML_MODEL_FILE_WITH_PATH={base_model_filepath}") diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/run.bat new file mode 100644 index 000000000..6a4faa929 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/run.bat @@ -0,0 +1,3 @@ +echo %CM_RUN_CMD% +call %CM_RUN_CMD% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/run.sh new file mode 100644 index 000000000..111f4f2c8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-huggingface-zoo/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash +echo ${CM_RUN_CMD} +eval ${CM_RUN_CMD} +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/README.md new file mode 100644 index 000000000..4137062c1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-llama2](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-llama2) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/_cm.yaml new file mode 100644 index 000000000..2ff45866b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/_cm.yaml @@ -0,0 +1,176 @@ +alias: get-ml-model-llama2 +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +docker: + real_run: false +env: + CM_ML_MODEL_DATASET: openorca + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' +input_mapping: + checkpoint: LLAMA2_CHECKPOINT_PATH +new_env_keys: +- CM_ML_MODEL_* +- LLAMA2_CHECKPOINT_PATH +- CM_NVIDIA_TP_SIZE +- CM_LLAMA2_FINAL_SAFE_TENSORS_PATH +prehook_deps: +- enable_if_env: + CM_TMP_REQUIRE_DOWNLOAD: + - 'yes' + env: {} + extra_cache_tags: llama2,llama-2 + force_env_keys: + - CM_GIT_CHECKOUT_FOLDER + names: + - hf-zoo + tags: get,ml-model,huggingface,zoo,_clone-repo +print_env_at_the_end: + LLAMA2_CHECKPOINT_PATH: LLAMA2 checkpoint path +tags: +- get +- raw +- ml-model +- language-processing +- llama2 +- llama2-70b +- text-summarization +uid: 5db97be9f61244c6 +variations: + L40s: + env: + CM_NVIDIA_TP_SIZE: 4 + group: gpu + amd: + default_env: + CM_LLAMA2_QUANTIZATION_DEVICE: '' + default_variations: + framework: pytorch + precision: fp8 + env: + CM_TMP_ML_MODEL_PROVIDER: amd + group: model-provider + new_env_keys: + - CM_LLAMA2_FINAL_SAFE_TENSORS_ROOT + - CM_LLAMA2_FINAL_SAFE_TENSORS_PATH + batch_size.#: + env: + CM_ML_MODEL_BATCH_SIZE: '#' + fp32: + default: true + env: + CM_ML_MODEL_INPUT_DATA_TYPES: fp32 + CM_ML_MODEL_PRECISION: fp32 + CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + group: precision + fp8: + env: + CM_ML_MODEL_INPUT_DATA_TYPES: fp8 + CM_ML_MODEL_PRECISION: fp8 + CM_ML_MODEL_WEIGHT_DATA_TYPES: fp8 + 
group: precision + generic: + env: + CM_NVIDIA_TP_SIZE: 2 + group: gpu + int8: + env: + CM_ML_MODEL_INPUT_DATA_TYPES: int8 + CM_ML_MODEL_PRECISION: int8 + CM_ML_MODEL_WEIGHT_DATA_TYPES: int8 + group: precision + meta-llama/Llama-2-70b-chat-hf: + adr: + hf-zoo: + tags: _model-stub.meta-llama/Llama-2-70b-chat-hf + default: true + env: + CM_GIT_CHECKOUT_FOLDER: Llama-2-70b-chat-hf + CM_MODEL_ZOO_ENV_KEY: LLAMA2 + group: huggingface-stub + meta-llama/Llama-2-7b-chat-hf: + adr: + hf-zoo: + tags: _model-stub.meta-llama/Llama-2-7b-chat-hf + env: + CM_GIT_CHECKOUT_FOLDER: Llama-2-7b-chat-hf + CM_MODEL_ZOO_ENV_KEY: LLAMA2 + group: huggingface-stub + nvidia: + default_variations: + framework: pytorch + env: + CM_TMP_ML_MODEL_PROVIDER: nvidia + group: model-provider + pytorch: + default: true + env: + CM_ML_MODEL_FRAMEWORK: pytorch + group: framework + pytorch,amd: + default_variations: + gpu: generic + precision: fp8 + deps: + - names: + - python + - python3 + tags: get,python3 + - env: {} + force_new_env_keys: + - LLAMA2_CHECKPOINT_PATH + tags: get,ml-model,llama2-70b,_fp32,_pytorch + - tags: get,preprocessed,dataset,openorca,_calibration,_mlc + - env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_RESULTS_PATH + extra_cache_tags: inference,results + tags: get,git,repo,_repo.https://github.com/mlcommons/inference_results_v4.1,_branch.cm-code-only + - tags: get,generic-python-lib,_quark-amd + - tags: get,generic-python-lib,_package.nltk + - tags: get,generic-python-lib,_torch_cuda + - tags: get,generic-python-lib,_package.compressed_tensors + pytorch,fp32: + env: {} + pytorch,nvidia: + default_variations: + gpu: generic + precision: fp8 + deps: + - env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_TENSORRT_LLM_CHECKOUT_PATH + extra_cache_tags: tensorrt-llm + tags: get,git,repo,_repo.https://github.com/NVIDIA/TensorRT-LLM.git,_sha.0ab9d17a59c284d2de36889832fe9fc7c8697604 + - names: + - cuda + tags: get,cuda + - tags: get,nvidia,scratch,space + - tags: get,cuda-devices,_with-pycuda + - env: {} + force_new_env_keys: + - LLAMA2_CHECKPOINT_PATH + tags: get,ml-model,llama2-70b,_fp32,_pytorch + - names: + - nvidia-inference-common-code + tags: get,nvidia,inference,common-code + - names: + - python + - python3 + tags: get,python3 + stub.#: + adr: + hf-zoo: + tags: _model-stub.# + env: + CM_MODEL_ZOO_ENV_KEY: LLAMA2 + group: huggingface-stub + tp-size.#: + env: + CM_NVIDIA_TP_SIZE: '#' + group: gpu + uint8: + env: + CM_ML_MODEL_INPUT_DATA_TYPES: uint8 + CM_ML_MODEL_PRECISION: uint8 + CM_ML_MODEL_WEIGHT_DATA_TYPES: uint8 + group: precision diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/customize.py new file mode 100644 index 000000000..a463f7797 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/customize.py @@ -0,0 +1,57 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'nvidia': + i['run_script_input']['script_name'] = 'run-nvidia' + gpu_arch = int( 
+            float(
+                env['CM_CUDA_DEVICE_PROP_GPU_COMPUTE_CAPABILITY']) *
+            10)
+        env['CM_GPU_ARCH'] = gpu_arch
+        env['CM_TMP_REQUIRE_DOWNLOAD'] = 'no'
+    else:
+        path = env.get('LLAMA2_CHECKPOINT_PATH', '').strip()
+
+        if env.get('CM_TMP_ML_MODEL_PROVIDER', '') == 'amd':
+            env['CM_TMP_REQUIRE_DOWNLOAD'] = 'no'
+            i['run_script_input']['script_name'] = 'run-amd'
+            env['AMD_CODE_DIR'] = os.path.join(
+                env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'AMD', 'code')
+            env['CM_LLAMA2_FINAL_SAFE_TENSORS_ROOT'] = os.getcwd()
+            env['CM_LLAMA2_FINAL_SAFE_TENSORS_PATH'] = os.path.join(
+                env['CM_LLAMA2_FINAL_SAFE_TENSORS_ROOT'], "llama.safetensors")
+        else:
+            if path == '' or not os.path.exists(path):
+                env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes'
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+    if env.get('LLAMA2_CHECKPOINT_PATH', '') == '':
+        env['LLAMA2_CHECKPOINT_PATH'] = env['CM_ML_MODEL_PATH']
+    else:
+        env['CM_ML_MODEL_PATH'] = env['LLAMA2_CHECKPOINT_PATH']
+    env['CM_ML_MODEL_LLAMA2_FILE_WITH_PATH'] = env['LLAMA2_CHECKPOINT_PATH']
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH']
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/run-amd.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/run-amd.sh
new file mode 100644
index 000000000..6f3ee48e9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/run-amd.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+code_dir=$AMD_CODE_DIR
+model_dir=${LLAMA2_CHECKPOINT_PATH}
+output_dir=$PWD
+calib_dataset=${CM_DATASET_OPENORCA_CALIBRATION_PATH}
+
+cmd="cd $code_dir/llama2-70b-99.9/tools/quark-0.1.0+a9827f5-mlperf/examples/torch/language_modeling/"
+echo $cmd
+eval $cmd
+test $? -eq 0 || exit $?
+
+if [[ "x$CM_LLAMA2_QUANTIZATION_DEVICE" == "x" ]]; then
+  device_str=""
+else
+  device_str="--device $CM_LLAMA2_QUANTIZATION_DEVICE"
+fi
+cmd="${CM_PYTHON_BIN_WITH_PATH} quantize_quark.py --model_dir $model_dir \
+  --output_dir $output_dir \
+  --quant_scheme w_fp8_a_fp8_o_fp8 \
+  --dataset $calib_dataset \
+  --num_calib_data 1000 \
+  --model_export vllm_adopted_safetensors \
+  --no_weight_matrix_merge $device_str"
+echo "$cmd"
+eval "$cmd"
+
+test $? -eq 0 || exit $?
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/run-nvidia.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/run-nvidia.sh
new file mode 100644
index 000000000..2e576280b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-llama2/run-nvidia.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+echo "TP size is set to ${CM_NVIDIA_TP_SIZE}"
+
+if [[ ! -e ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/Llama2/Llama-2-70b-chat-hf ]]; then
+  mkdir -p ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/Llama2/Llama-2-70b-chat-hf
+  cd ${LLAMA2_CHECKPOINT_PATH}
+  cp -r ${LLAMA2_CHECKPOINT_PATH}/* ${CM_NVIDIA_MLPERF_SCRATCH_PATH}/models/Llama2/Llama-2-70b-chat-hf
+  test $? -eq 0 || exit $?
+fi
+
+echo "cd ${CM_TENSORRT_LLM_CHECKOUT_PATH}"
+cd ${CM_TENSORRT_LLM_CHECKOUT_PATH}
+
+make -C docker build
+test $? -eq 0 || exit $?
+ +if [ "${CM_NVIDIA_TP_SIZE}" -eq 1 ]; then + RUN_CMD="bash -c 'python3 scripts/build_wheel.py -a=${CM_GPU_ARCH} --clean --install --trt_root /usr/local/tensorrt/ && python examples/quantization/quantize.py --dtype=float16 --output_dir=/mnt/models/Llama2/fp8-quantized-ammo/llama2-70b-chat-hf-tp${CM_NVIDIA_TP_SIZE}pp1-fp8-02072024 --model_dir=/mnt/models/Llama2/Llama-2-70b-chat-hf --qformat=fp8 --kv_cache_dtype=fp8 --tp_size ${CM_NVIDIA_TP_SIZE}'" +else + RUN_CMD="bash -c 'python3 scripts/build_wheel.py -a=${CM_GPU_ARCH} --clean --install --trt_root /usr/local/tensorrt/ && python examples/quantization/quantize.py --dtype=float16 --output_dir=/mnt/models/Llama2/fp8-quantized-ammo/llama2-70b-chat-hf-tp${CM_NVIDIA_TP_SIZE}pp1-fp8 --model_dir=/mnt/models/Llama2/Llama-2-70b-chat-hf --qformat=fp8 --kv_cache_dtype=fp8 --tp_size ${CM_NVIDIA_TP_SIZE}'" +fi +DOCKER_RUN_ARGS=" -v ${CM_NVIDIA_MLPERF_SCRATCH_PATH}:/mnt" +export DOCKER_RUN_ARGS="$DOCKER_RUN_ARGS" +export RUN_CMD="$RUN_CMD" +make -C docker run LOCAL_USER=1 +test $? -eq 0 || exit $? + +echo "MLPerf Nvidia scratch path is:${CM_NVIDIA_MLPERF_SCRATCH_PATH}" diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mixtral/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mixtral/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mixtral/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mixtral/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mixtral/_cm.yaml new file mode 100644 index 000000000..2542d4dc7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mixtral/_cm.yaml @@ -0,0 +1,67 @@ +alias: get-ml-model-mixtral +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +env: + CM_ML_MODEL_DATASET: '' + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' +input_mapping: + checkpoint: MIXTRAL_CHECKPOINT_PATH +new_env_keys: +- CM_ML_MODEL_* +- MIXTRAL_CHECKPOINT_PATH +prehook_deps: +- enable_if_env: + CM_TMP_REQUIRE_DOWNLOAD: + - 'yes' + env: {} + extra_cache_tags: mixtral + force_env_keys: + - CM_GIT_CHECKOUT_FOLDER + names: + - hf-zoo + tags: get,ml-model,huggingface,zoo,_clone-repo +print_env_at_the_end: + MIXTRAL_CHECKPOINT_PATH: MIXTRAL checkpoint path +tags: +- get +- raw +- ml-model +- language-processing +- mixtral +- mixtral-8x7b +uid: 0c14127677f34ea2 +variations: + batch_size.#: + env: + CM_ML_MODEL_BATCH_SIZE: '#' + fp32: + default: true + env: + CM_ML_MODEL_INPUT_DATA_TYPES: fp32 + CM_ML_MODEL_PRECISION: fp32 + CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + group: precision + mistralai/Mixtral-8x7B-Instruct-v0.1: + adr: + hf-zoo: + tags: _model-stub.mistralai/Mixtral-8x7B-Instruct-v0.1 + default: true + env: + CM_GIT_CHECKOUT_FOLDER: Mixtral-8x7B-Instruct-v0.1 + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1 + CM_MODEL_ZOO_ENV_KEY: MIXTRAL + group: huggingface-stub + pytorch: + default: true + env: + CM_ML_MODEL_FRAMEWORK: pytorch + group: framework + stub.#: + adr: + hf-zoo: + tags: _model-stub.# + env: + CM_MODEL_ZOO_ENV_KEY: MIXTRAL + group: huggingface-stub diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mixtral/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mixtral/customize.py new file mode 100644 index 000000000..18b0a8ecf --- 
/dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mixtral/customize.py
@@ -0,0 +1,39 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+    env = i['env']
+
+    path = env.get('MIXTRAL_CHECKPOINT_PATH', '').strip()
+
+    if path == '' or not os.path.exists(path):
+        env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes'
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    if env.get('MIXTRAL_CHECKPOINT_PATH', '') == '':
+        env['MIXTRAL_CHECKPOINT_PATH'] = env['CM_ML_MODEL_PATH']
+    else:
+        env['CM_ML_MODEL_PATH'] = env['MIXTRAL_CHECKPOINT_PATH']
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH']
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/README-extra.md
new file mode 100644
index 000000000..63766e960
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/README-extra.md
@@ -0,0 +1,15 @@
+# Get ML Model MobileNet
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the MobileNet model and adds it to the CM cache with the relevant metadata.
+
+## How To
+```bash
+cm run script --tags=get,ml-model,mobilenet,_[VARIATION]
+```
+where
+* `[VARIATION]` is one of `tf-fp32`, `tf-int8`, `onnx-v1-opset-8`, `onnx-v1-opset-11`, `onnx-int8`.
+
+## Exported Variables
+* `CM_ML_MODEL_FILE:` Model filename
+* `CM_ML_MODEL_FILE_WITH_PATH:` Full path to model file
+* `CM_ML_MODEL_PATH:` Path to folder containing the model file
+* More exported env variables are listed in the [_cm.yaml file](_cm.yaml)
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/README.md
new file mode 100644
index 000000000..4b040cd78
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-mobilenet](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-mobilenet) for the documentation of this CM script.
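[Editor's note] The README above shows the CLI invocation; the same retrieval can also be driven from Python, which is handy when wiring this script into a larger pipeline. Below is a minimal sketch assuming the standard `cmind` package API; the tag set `_tf,_fp32,_v3` is just one illustrative combination of the variations listed above, and the `new_env` key is assumed to carry the exported variables, as documented for other CM scripts.

```python
import cmind

# Run the CM script programmatically; the tags mirror the CLI syntax
# `cm run script --tags=get,ml-model,mobilenet,_[VARIATION]`.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,ml-model,mobilenet,_tf,_fp32,_v3',
                  'out': 'con'})
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'CM script run failed'))

# The exported variables from README-extra.md should appear in 'new_env'.
new_env = r.get('new_env', {})
print(new_env.get('CM_ML_MODEL_FILE_WITH_PATH'))
```

Because the script is cached (`cache: true`), a second call with the same variation tags should return the cached path instead of downloading again.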
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/_cm.yaml new file mode 100644 index 000000000..d690ffa69 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/_cm.yaml @@ -0,0 +1,280 @@ +alias: get-ml-model-mobilenet +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +default_env: + CM_ML_MODEL: mobilenet + CM_ML_MODEL_DATASET: imagenet2012-val + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_MOBILENET_NAME_SUFFIX: '' + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' +new_env_keys: +- CM_ML_MODEL_* +- CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS +print_env_at_the_end: + CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model +tags: +- get +- ml-model +- mobilenet +- raw +- ml-model-mobilenet +- image-classification +uid: ce46675a3ab249e4 +variations: + fp32: + default: true + env: + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_MOBILENET_PRECISION: float + CM_ML_MODEL_PRECISION: fp32 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + group: precision + from.google: + env: + CM_DOWNLOAD_SOURCE: google + group: source + from.zenodo: + env: + CM_DOWNLOAD_SOURCE: zenodo + group: source + int8: + base: + - quantized_ + env: + CM_ML_MODEL_INPUTS_DATA_TYPE: int8 + CM_ML_MODEL_MOBILENET_PRECISION: int8 + CM_ML_MODEL_PRECISION: int8 + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + group: precision + large: + env: + CM_ML_MODEL_MOBILENET_KIND: large + group: kind + large-minimalistic: + env: + CM_ML_MODEL_MOBILENET_KIND: large-minimalistic + group: kind + multiplier-0.25: + env: + CM_ML_MODEL_MOBILENET_MULTIPLIER: '0.25' + CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '25' + group: multiplier + multiplier-0.35: + env: + CM_ML_MODEL_MOBILENET_MULTIPLIER: '0.35' + CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '35' + group: multiplier + multiplier-0.5: + env: + CM_ML_MODEL_MOBILENET_MULTIPLIER: '0.5' + CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '50' + group: multiplier + multiplier-0.75: + env: + CM_ML_MODEL_MOBILENET_MULTIPLIER: '0.75' + CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '75' + group: multiplier + multiplier-1.0: + env: + CM_ML_MODEL_MOBILENET_MULTIPLIER: '1.0' + CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: '100' + group: multiplier + onnx: + env: + CM_ML_MODEL_DATA_LAYOUT: NCHW + CM_ML_MODEL_FRAMEWORK: onnx + group: framework + onnx,fp32,v1: + env: + CM_ML_MODEL_INPUT_LAYER_NAME: input:0 + CM_ML_MODEL_NORMALIZE_DATA: 'yes' + CM_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV1/Predictions/Reshape_1:0 + CM_ML_MODEL_SUBTRACT_MEANS: 'no' + CM_ML_MODEL_VER: '1_1.0_224' + onnx,int8,v1: + env: + CM_ML_MODEL_FILE: mobilenet_sym_no_bn.onnx + CM_ML_MODEL_GIVEN_CHANNEL_MEANS: 128.0 128.0 128.0 + CM_ML_MODEL_INPUT_LAYER_NAME: '0' + CM_ML_MODEL_NORMALIZE_DATA: 'no' + CM_ML_MODEL_OUTPUT_LAYER_NAME: '169' + CM_ML_MODEL_SUBTRACT_MEANS: 'yes' + CM_ML_MODEL_VER: 1_1.0_224_quant + CM_PACKAGE_URL: https://zenodo.org/record/3353417/files/Quantized%20MobileNet.zip + CM_UNZIP: 'yes' + onnx,opset-11,fp32,v1: + env: + CM_PACKAGE_URL: https://zenodo.org/record/4735651/files/mobilenet_v1_1.0_224.onnx + onnx,opset-8,fp32,v1: + env: + CM_PACKAGE_URL: https://zenodo.org/record/3157894/files/mobilenet_v1_1.0_224.onnx + opset-11: + env: + CM_ML_MODEL_ONNX_OPSET: '11' + group: opset-version + opset-8: + env: + CM_ML_MODEL_ONNX_OPSET: '8' + group: opset-version + quantized_: + env: + CM_ML_MODEL_MOBILENET_NAME_SUFFIX: _quant + 
CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'yes' + resolution-128: + env: + CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.128 + CM_ML_MODEL_IMAGE_HEIGHT: '128' + CM_ML_MODEL_IMAGE_WIDTH: '128' + CM_ML_MODEL_MOBILENET_RESOLUTION: '128' + group: resolution + resolution-160: + env: + CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.160 + CM_ML_MODEL_IMAGE_HEIGHT: '160' + CM_ML_MODEL_IMAGE_WIDTH: '160' + CM_ML_MODEL_MOBILENET_RESOLUTION: '160' + group: resolution + resolution-192: + env: + CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.192 + CM_ML_MODEL_IMAGE_HEIGHT: '192' + CM_ML_MODEL_IMAGE_WIDTH: '192' + CM_ML_MODEL_MOBILENET_RESOLUTION: '192' + group: resolution + resolution-224: + env: + CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: _resolution.224 + CM_ML_MODEL_IMAGE_HEIGHT: '224' + CM_ML_MODEL_IMAGE_WIDTH: '224' + CM_ML_MODEL_MOBILENET_RESOLUTION: '224' + group: resolution + small: + env: + CM_ML_MODEL_MOBILENET_KIND: small + group: kind + small-minimalistic: + default_variations: + precision: fp32 + env: + CM_ML_MODEL_MOBILENET_KIND: small-minimalistic + group: kind + tf: + default: true + default_variations: + source: from.google + env: + CM_ML_MODEL_DATA_LAYOUT: NHWC + CM_ML_MODEL_INPUT_LAYER_NAME: input + CM_ML_MODEL_NORMALIZE_DATA: 'yes' + CM_ML_MODEL_SUBTRACT_MEANS: 'no' + group: framework + tf,fp32,v1,resolution-224,multiplier-1.0: + env: + CM_ML_MODEL_ACCURACY: '71.676' + tf,from.google,v1: + env: + CM_PACKAGE_URL: http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_<<>>_<<>><<>>.tgz + CM_UNTAR: 'yes' + tf,from.google,v2,fp32: + env: + CM_ML_MODEL_FILE: mobilenet_v2_<<>>_<<>>.tflite + CM_ML_MODEL_WEIGHTS_FILE: mobilenet_v2_<<>>_<<>>.ckpt.data-00000-of-00001 + CM_PACKAGE_URL: https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_<<>>_<<>>.tgz + CM_UNTAR: 'yes' + tf,from.google,v2,quantized_: + env: + CM_EXTRACT_FOLDER: v2_<<>>_<<>> + CM_ML_MODEL_FILE: model.tflite + CM_ML_MODEL_WEIGHTS_FILE: <<>>_v2_<<>>_<<>>.ckpt.data-00000-of-00001 + CM_PACKAGE_URL: https://storage.googleapis.com/mobilenet_v2/checkpoints/<<>>_v2_<<>>_<<>>.tgz + CM_UNTAR: 'yes' + tf,from.google,v3: + env: + CM_EXTRACT_FOLDER: v3-<<>>_<<>>_<<>>_<<>> + CM_ML_MODEL_FILE: v3-<<>>_<<>>_<<>>_<<>>.tflite + CM_PACKAGE_URL: https://storage.googleapis.com/mobilenet_v3/checkpoints/v3-<<>>_<<>>_<<>>_<<>>.tgz + CM_UNTAR: 'yes' + tf,from.zenodo,v1: + env: + CM_PACKAGE_URL: https://zenodo.org/record/2269307/files/mobilenet_v1_<<>>_<<>><<>>.tgz + CM_UNTAR: 'yes' + tf,int8,v1,resolution-224,multiplier-1.0: + env: + CM_ML_MODEL_ACCURACY: '70.762' + tf,v1: + env: + CM_ML_MODEL_FILE: mobilenet_v1_<<>>_<<>><<>>.tflite + CM_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV1/Predictions/Reshape_1 + CM_ML_MODEL_VER: 1_<<>>_<<>><<>>_2018_08_02 + CM_ML_MODEL_WEIGHTS_FILE: mobilenet_v1_<<>>_<<>><<>>.ckpt.data-00000-of-00001 + tf,v1,fp32: + env: + CM_ML_MODEL_MOBILENET_NAME_PREFIX: '' + tf,v1,int8: + env: + CM_ML_MODEL_MOBILENET_NAME_SUFFIX: _quant + tf,v1,uint8: + env: + CM_ML_MODEL_MOBILENET_NAME_SUFFIX: _quant + tf,v2,fp32: + env: + CM_ML_MODEL_MOBILENET_NAME_PREFIX: '' + CM_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV2/Predictions/Reshape_1 + CM_ML_MODEL_VER: 2_<<>>_<<>> + tf,v2,int8: + env: + CM_ML_MODEL_MOBILENET_NAME_PREFIX: quantized + CM_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV2/Predictions/Softmax + CM_ML_MODEL_VER: 2_<<>>_<<>> + tf,v2,uint8: + env: + CM_ML_MODEL_MOBILENET_NAME_PREFIX: quantized + CM_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV2/Predictions/Softmax + CM_ML_MODEL_VER: 2_<<>>_<<>> + tf,v3: 
+    env:
+      CM_ML_MODEL_OUTPUT_LAYER_NAME: MobilenetV3/Predictions/Softmax
+      CM_ML_MODEL_VER: 3_<<>>_<<>>
+  tflite:
+    base:
+    - tf
+  uint8:
+    base:
+    - quantized_
+    env:
+      CM_ML_MODEL_INPUTS_DATA_TYPE: uint8
+      CM_ML_MODEL_MOBILENET_PRECISION: uint8
+      CM_ML_MODEL_PRECISION: uint8
+      CM_ML_MODEL_WEIGHTS_DATA_TYPE: uint8
+    group: precision
+  v1:
+    default_variations:
+      multiplier: multiplier-1.0
+      resolution: resolution-224
+    env:
+      CM_ML_MODEL_FULL_NAME: mobilenet-v1-precision_<<>>-<<>>-<<>>
+      CM_ML_MODEL_MOBILENET_VERSION: '1'
+    group: version
+  v2:
+    default_variations:
+      multiplier: multiplier-1.0
+      resolution: resolution-224
+    env:
+      CM_ML_MODEL_FULL_NAME: mobilenet-v2-precision_<<>>-<<>>-<<>>
+      CM_ML_MODEL_MOBILENET_VERSION: '2'
+      CM_ML_MODEL_VER: '2'
+    group: version
+  v3:
+    default: true
+    default_variations:
+      multiplier: multiplier-1.0
+      resolution: resolution-224
+    env:
+      CM_ML_MODEL_FULL_NAME: mobilenet-v3-precision_<<>>-<<>>-<<>>
+      CM_ML_MODEL_MOBILENET_VERSION: '3'
+      CM_ML_MODEL_VER: '3'
+    group: version
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/customize.py
new file mode 100644
index 000000000..dbeedee41
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-mobilenet/customize.py
@@ -0,0 +1,67 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    automation = i['automation']
+
+    cm = automation.cmind
+
+    path = os.getcwd()
+
+    url = env['CM_PACKAGE_URL']
+    env['CM_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url
+
+    print('Downloading from {}'.format(url))
+
+    r = cm.access({'action': 'download_file',
+                   'automation': 'utils,dc2743f8450541e3',
+                   'url': url})
+    if r['return'] > 0:
+        return r
+
+    filename = r['filename']
+
+    if env.get('CM_UNZIP') == "yes" or env.get('CM_UNTAR') == "yes":
+        if env.get('CM_UNZIP') == "yes":
+            cmd = "unzip "
+        elif env.get('CM_UNTAR') == "yes":
+            cmd = "tar -xvzf "
+        os.system(cmd + filename)
+
+        filename = env['CM_ML_MODEL_FILE']
+
+        extract_folder = env.get('CM_EXTRACT_FOLDER', '')
+
+        if extract_folder:
+            env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(
+                path, extract_folder, filename)
+        else:
+            env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename)
+    else:
+        env['CM_ML_MODEL_FILE'] = filename
+        env['CM_ML_MODEL_FILE_WITH_PATH'] = r['path']
+
+    env['CM_ML_MODEL_PATH'] = path
+
+    if not os.path.exists(env['CM_ML_MODEL_FILE_WITH_PATH']):
+        return {
+            'return': 1, 'error': f"Model file path {env['CM_ML_MODEL_FILE_WITH_PATH']} does not exist. The model name {env['CM_ML_MODEL_FILE']} in the model meta is probably wrong"}
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+ +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/README.md new file mode 100644 index 000000000..ab33d6504 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-neuralmagic-zoo](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-neuralmagic-zoo) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/_cm.yaml new file mode 100644 index 000000000..3c78a1ce9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/_cm.yaml @@ -0,0 +1,223 @@ +alias: get-ml-model-neuralmagic-zoo +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +deps: +- names: + - python3 + - python + tags: get,python3 +- tags: get,generic-python-lib,_package.protobuf + version_max: 3.20.1 +- tags: get,generic-python-lib,_sparsezoo +env: {} +new_env_keys: +- CM_ML_MODEL* +- CM_MODEL_ZOO_STUB +- CM_MLPERF_CUSTOM_MODEL_PATH +- CM_GET_DEPENDENT_CACHED_PATH +print_env_at_the_end: + CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model +tags: +- get +- ml-model +- model +- zoo +- deepsparse +- model-zoo +- sparse-zoo +- neuralmagic +- neural-magic +uid: adbb3f2525a14f97 +variations: + bert-base-pruned90-none: + env: + CM_ML_MODEL_FULL_NAME: bert-base-pruned90-none-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-base-uncased + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none + bert-base-pruned95_obs_quant-none: + env: + CM_ML_MODEL_FULL_NAME: bert-base-pruned95_obs_quant-none-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: int64 + CM_ML_MODEL_RETRAINING: 'yes' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-base-uncased + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none + bert-base_cased-pruned90-none: + env: + CM_ML_MODEL_FULL_NAME: bert-base_cased-pruned90-none-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-base-cased + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none + bert-large-base-none: + env: + CM_ML_MODEL_FULL_NAME: bert-large-base-none-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none + bert-large-pruned80_quant-none-vnni: + env: + CM_ML_MODEL_FULL_NAME: bert-large-pruned80_quant-none-vnni-bert-99 + 
CM_ML_MODEL_INPUTS_DATA_TYPE: int64 + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni + mobilebert-14layer_pruned50-none-vnni: + env: + CM_ML_MODEL_FULL_NAME: mobilebert-14layer_pruned50-none-vnni-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni + mobilebert-14layer_pruned50_quant-none-vnni: + env: + CM_ML_MODEL_FULL_NAME: mobilebert-14layer_pruned50_quant-none-vnni-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: int64 + CM_ML_MODEL_RETRAINING: 'yes' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni + mobilebert-base_quant-none: + env: + CM_ML_MODEL_FULL_NAME: mobilebert-base_quant-none-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: int64 + CM_ML_MODEL_RETRAINING: 'yes' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none + mobilebert-none-base-none: + env: + CM_ML_MODEL_FULL_NAME: mobilebert-none-base-none-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none + model-stub.#: + env: + CM_MODEL_ZOO_STUB: '#' + model-stub.zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none: + alias: bert-base-pruned90-none + model-stub.zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none: + alias: bert-base-pruned95_obs_quant-none + model-stub.zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none: + alias: bert-base_cased-pruned90-none + model-stub.zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none: + alias: bert-large-base-none + model-stub.zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni: + alias: bert-large-pruned80_quant-none-vnni + model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni: + alias: mobilebert-14layer_pruned50-none-vnni + model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni: + alias: 
mobilebert-14layer_pruned50_quant-none-vnni + model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none: + alias: mobilebert-none-base-none + model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none: + alias: mobilebert-base_quant-none + model-stub.zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none: + alias: obert-base-pruned90-none + model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none: + alias: obert-large-base-none + model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni: + alias: obert-large-pruned95-none-vnni + model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni: + alias: obert-large-pruned95_quant-none-vnni + model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none: + alias: obert-large-pruned97-none + model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none: + alias: obert-large-pruned97-quant-none + model-stub.zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none: + alias: oberta-base-pruned90-quant-none + model-stub.zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none: + alias: roberta-base-pruned85-quant-none + obert-base-pruned90-none: + env: + CM_ML_MODEL_FULL_NAME: obert-base-pruned90-none-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none + obert-large-base-none: + env: + CM_ML_MODEL_FULL_NAME: obert-large-base-none-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none + obert-large-pruned95-none-vnni: + env: + CM_ML_MODEL_FULL_NAME: obert-large-pruned95-none-vnni-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni + obert-large-pruned95_quant-none-vnni: + env: + CM_ML_MODEL_FULL_NAME: obert-large-pruned95_quant-none-vnni-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: int64 + CM_ML_MODEL_RETRAINING: 'yes' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni + obert-large-pruned97-none: + env: + CM_ML_MODEL_FULL_NAME: obert-large-pruned97-none-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: fp32 + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + CM_ML_MODEL_WEIGHTS_DATA_TYPE: fp32 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: unstructured pruning 
+ CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none + obert-large-pruned97-quant-none: + env: + CM_ML_MODEL_FULL_NAME: obert-large-pruned97-quant-none-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: int64 + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/bert-large-uncased + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none + oberta-base-pruned90-quant-none: + env: + CM_ML_MODEL_FULL_NAME: oberta-base-pruned90-quant-none-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: int64 + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/roberta-base + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none + roberta-base-pruned85-quant-none: + env: + CM_ML_MODEL_FULL_NAME: roberta-base-pruned85-quant-none-bert-99 + CM_ML_MODEL_INPUTS_DATA_TYPE: int64 + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://huggingface.co/roberta-base + CM_ML_MODEL_WEIGHTS_DATA_TYPE: int8 + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: quantization, unstructured pruning + CM_MODEL_ZOO_STUB: zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/customize.py new file mode 100644 index 000000000..11dd18453 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/customize.py @@ -0,0 +1,57 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + cm = automation.cmind + + path = os.getcwd() + + model_stub = env.get('CM_MODEL_ZOO_STUB', '') + if model_stub == '': + + variations = list(i.get('meta', {}).get('variations', {}).keys()) + + variation_models = [] + for v in variations: + if '#' not in v: + variation_models.append(v) + + return {'return': 1, 'error': 'ENV CM_MODEL_ZOO_STUB is not set. 
Please select a variation from {}'.format(
+            str(variation_models))}
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH']
+
+    onnx_path = os.path.join(env['CM_ML_MODEL_FILE_WITH_PATH'], "model.onnx")
+
+    if os.path.exists(onnx_path):
+        env['CM_MLPERF_CUSTOM_MODEL_PATH'] = onnx_path
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/download_sparse.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/download_sparse.py
new file mode 100644
index 000000000..b2c9de607
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/download_sparse.py
@@ -0,0 +1,10 @@
+from sparsezoo import Model
+import os
+
+model_stub = os.environ.get('CM_MODEL_ZOO_STUB', '')
+print(f"Downloading model {model_stub}")
+stub = f"{model_stub}"
+model = Model(stub)
+
+with open('tmp-run-env.out', 'w') as f:
+    f.write(f"CM_ML_MODEL_FILE_WITH_PATH={model.path}")
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/run.bat
new file mode 100644
index 000000000..854e9b668
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/run.bat
@@ -0,0 +1,2 @@
+%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\download_sparse.py
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/run.sh
new file mode 100644
index 000000000..9d7d529be
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-neuralmagic-zoo/run.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/download_sparse.py
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/README-extra.md
new file mode 100644
index 000000000..42809e535
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/README-extra.md
@@ -0,0 +1,15 @@
+# Get ML Model ResNet50
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the ResNet50 model and adds it to the CM cache with the relevant metadata.
+
+## How To
+```bash
+cm run script --tags=get,ml-model,resnet50,_[VARIATION]
+```
+where
+* `[VARIATION]` is one of `onnx` (alias `onnxruntime`), `pytorch`, `tensorflow` (alias `tf`), `fp32`, `onnx-1.5-opset-8`, `onnx-1.5-opset-11`.
+
+## Exported Variables
+* `CM_ML_MODEL_FILE:` Model filename
+* `CM_ML_MODEL_FILE_WITH_PATH:` Full path to model file
+* `CM_ML_MODEL_PATH:` Path to folder containing the model file
+* More exported env variables are listed in the [_cm.yaml file](_cm.yaml)
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/README.md
new file mode 100644
index 000000000..d81061fbc
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-resnet50](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-resnet50) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/_cm.yaml
new file mode 100644
index 000000000..d8637acbb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/_cm.yaml
@@ -0,0 +1,239 @@
+alias: get-ml-model-resnet50
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: AI/ML models
+env:
+  CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
+  CM_ML_MODEL: RESNET50
+  CM_ML_MODEL_DATASET: imagenet2012-val
+  CM_ML_MODEL_IMAGE_HEIGHT: '224'
+  CM_ML_MODEL_IMAGE_WIDTH: '224'
+  CM_ML_MODEL_NORMALIZE_DATA: '0'
+  CM_ML_MODEL_RETRAINING: 'no'
+  CM_ML_MODEL_SUBTRACT_MEANS: 'YES'
+  CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
+new_env_keys:
+- CM_ML_MODEL_*
+prehook_deps:
+- env:
+    CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
+    CM_EXTRACT_EXTRACTED_FILENAME: <<>>
+    CM_EXTRACT_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
+  extra_cache_tags: ml-model,resnet50,raw,ml-model-resnet50,_<<>>
+  force_cache: true
+  force_env_keys:
+  - CM_OUTDIRNAME
+  names:
+  - model-downloader
+  tags: download-and-extract
+  update_tags_from_env_with_prefix:
+    _url.:
+    - CM_PACKAGE_URL
+print_env_at_the_end:
+  CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model
+tags:
+- get
+- raw
+- ml-model
+- resnet50
+- ml-model-resnet50
+- image-classification
+uid: 56203e4e998b4bc0
+variations:
+  argmax:
+    default: true
+    env:
+      CM_ML_MODEL_OUTPUT_LAYER_ARGMAX: 'yes'
+    group: model-output
+  batch_size.#:
+    env:
+      CM_ML_MODEL_BATCH_SIZE: '#'
+  batch_size.1:
+    env:
+      CM_ML_MODEL_BATCH_SIZE: '1'
+  fix-input-shape:
+    deps:
+    - names:
+      - python
+      - python3
+      tags: get,python3
+  fp32:
+    default: true
+    env:
+      CM_ML_MODEL_INPUT_DATA_TYPES: fp32
+      CM_ML_MODEL_PRECISION: fp32
+      CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32
+    group: precision
+  from-tf: {}
+  huggingface_default:
+    env:
+      CM_DOWNLOAD_CHECKSUM: f6a4da60cd5f084d97efc2c1ddb10beb
+      CM_PACKAGE_URL: https://huggingface.co/ctuning/mlperf-inference-resnet50-onnx-fp32-imagenet2012-v1.0/resolve/main/resnet50_v1.onnx
+  int8:
+    env:
+      CM_ML_MODEL_INPUT_DATA_TYPES: int8
+      CM_ML_MODEL_PRECISION: int8
+      CM_ML_MODEL_WEIGHT_DATA_TYPES: int8
+    group: precision
+  ncnn:
+    env:
+      CM_ML_MODEL_FRAMEWORK: ncnn
+    group: framework
+  ncnn,fp32:
+    env:
+      CM_DOWNLOAD_CHECKSUM: 0360777ab2178a65a8f78c35a7d619e0
+      CM_PACKAGE_URL: https://zenodo.org/record/8073420/files/resnet50_v1.bin?download=1
+    post_deps:
+    - env:
+        CM_DOWNLOAD_CHECKSUM: f9ba6c4d7f66348e6d24c06bfe3f4ae8
+        CM_EXTRACT_EXTRACTED_FILENAME: <<>>
+      extra_cache_tags: ml-model-params,params,resnet50,ncnn,model-params
+      tags: download-and-extract,_url.https://zenodo.org/record/8073420/files/resnet50_v1.param?download=
+  no-argmax:
+    env:
+      CM_ML_MODEL_OUTPUT_LAYER_ARGMAX: 'no'
+ group: model-output + onnx: + default: true + default_variations: + opset-version: opset-11 + env: + CM_ML_MODEL_DATA_LAYOUT: NCHW + CM_ML_MODEL_FRAMEWORK: onnx + CM_ML_MODEL_INPUT_LAYERS: input_tensor:0 + CM_ML_MODEL_INPUT_LAYER_NAME: input_tensor:0 + CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor:0\": (BATCH_SIZE, 3, 224, 224)' + CM_ML_MODEL_OUTPUT_LAYERS: softmax_tensor:0 + CM_ML_MODEL_OUTPUT_LAYER_NAME: softmax_tensor:0 + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>> + CM_ML_MODEL_VER: '1.5' + group: framework + onnx,from-tf: + env: + CM_DOWNLOAD_CHECKSUM: 7b94a2da05dd30f6c0af23a46bc08886 + CM_ML_MODEL_DATA_LAYOUT: NHWC + CM_ML_MODEL_FRAMEWORK: onnx + CM_ML_MODEL_INPUT_LAYERS: input_tensor + CM_ML_MODEL_INPUT_LAYER_NAME: input_tensor + CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor\": (BATCH_SIZE, 224, 224, 3)' + CM_ML_MODEL_OUTPUT_LAYERS: softmax_tensor + CM_ML_MODEL_OUTPUT_LAYER_NAME: softmax_tensor + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://zenodo.org/record/2535873/files/resnet50_v1.pb + onnx,from-tf,fp32: + adr: + model-downloader: + tags: _gdown + env: + CM_DOWNLOAD_CHECKSUM: 04a510152d9eded924883bdfcf85dd4a + CM_DOWNLOAD_FILENAME: resnet50_v1_modified.onnx + CM_PACKAGE_URL: https://drive.google.com/uc?id=15wZ_8Vt12cb10IEBsln8wksD1zGwlbOM + onnx,opset-11: + env: + CM_DOWNLOAD_CHECKSUM: f6a4da60cd5f084d97efc2c1ddb10beb + CM_PACKAGE_URL: https://zenodo.org/record/4735647/files/resnet50_v1.onnx + onnx,opset-8: + env: + CM_DOWNLOAD_CHECKSUM: a638cf028b5870da29e09ccc2f7182e7 + CM_PACKAGE_URL: https://zenodo.org/record/2592612/files/resnet50_v1.onnx + onnxruntime: + alias: onnx + opset-11: + env: + CM_ML_MODEL_ONNX_OPSET: '11' + group: opset-version + opset-8: + env: + CM_ML_MODEL_ONNX_OPSET: '8' + group: opset-version + pytorch: + env: + CM_ML_MODEL_DATA_LAYOUT: NCHW + CM_ML_MODEL_FRAMEWORK: pytorch + CM_ML_MODEL_GIVEN_CHANNEL_MEANS: '?' + CM_ML_MODEL_INPUT_LAYER_NAME: input_tensor:0 + CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor:0\": [BATCH_SIZE, 3, 224, 224]' + CM_ML_MODEL_OUTPUT_LAYERS: output + CM_ML_MODEL_OUTPUT_LAYER_NAME: '?' 
+ CM_ML_STARTING_WEIGHTS_FILENAME: <<>> + group: framework + pytorch,fp32: + env: + CM_DOWNLOAD_CHECKSUM: 9e9c86b324d80e65229fab49b8d9a8e8 + CM_PACKAGE_URL: https://zenodo.org/record/4588417/files/resnet50-19c8e357.pth + pytorch,int8: + base: + - int8 + - pytorch + env: + CM_DOWNLOAD_CHECKSUM: 6893ea9769b0afce65bb0ddf002f4438 + CM_PACKAGE_URL: https://zenodo.org/record/4589637/files/resnet50_INT8bit_quantized.pt + tensorflow: + env: + CM_DOWNLOAD_CHECKSUM: 7b94a2da05dd30f6c0af23a46bc08886 + CM_ML_MODEL_ACCURACY: '76.456' + CM_ML_MODEL_DATA_LAYOUT: NHWC + CM_ML_MODEL_FRAMEWORK: tensorflow + CM_ML_MODEL_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94 + CM_ML_MODEL_INPUT_LAYERS: input_tensor + CM_ML_MODEL_INPUT_LAYER_NAME: input_tensor + CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor:0\": (BATCH_SIZE, 3, 224, 224)' + CM_ML_MODEL_NORMALIZE_DATA: '0' + CM_ML_MODEL_OUTPUT_LAYERS: softmax_tensor + CM_ML_MODEL_OUTPUT_LAYER_NAME: softmax_tensor + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>> + CM_ML_MODEL_SUBTRACT_MEANS: 'YES' + CM_PACKAGE_URL: https://zenodo.org/record/2535873/files/resnet50_v1.pb + group: framework + tensorflow,fix-input-shape: + deps: + - names: + - tensorflow + tags: get,generic-python-lib,_package.tensorflow + env: + CM_ML_MODEL_TF_FIX_INPUT_SHAPE: 'yes' + tf: + alias: tensorflow + tflite: + env: + CM_ML_MODEL_ACCURACY: '76.456' + CM_ML_MODEL_DATA_LAYOUT: NHWC + CM_ML_MODEL_FRAMEWORK: tflite + CM_ML_MODEL_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94 + CM_ML_MODEL_INPUT_LAYERS: input_tensor + CM_ML_MODEL_INPUT_LAYER_NAME: input_tensor + CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)' + CM_ML_MODEL_NORMALIZE_DATA: '0' + CM_ML_MODEL_OUTPUT_LAYERS: softmax_tensor + CM_ML_MODEL_OUTPUT_LAYER_NAME: softmax_tensor + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<>> + CM_ML_MODEL_SUBTRACT_MEANS: 'YES' + group: framework + tflite,argmax: + env: + CM_DAE_EXTRACT_DOWNLOADED: 'yes' + CM_DOWNLOAD_CHECKSUM: 92b5ae497e0de5c2d487507953b6e5cc + CM_DOWNLOAD_FINAL_ENV_NAME: '' + CM_EXTRACT_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH + CM_ML_MODEL_FILE: resnet50_v1.tflite + CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)' + CM_PACKAGE_URL: https://www.dropbox.com/s/cvv2zlfo80h54uz/resnet50_v1.tflite.gz?dl=1 + tflite,int8,no-argmax: + env: + CM_DOWNLOAD_CHECKSUM: a657cf1f97545aefd058c1c718cc0e17 + CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH + CM_ML_MODEL_FILE: resnet50_quant_full_mlperf_edgetpu.tflite + CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)' + CM_PACKAGE_URL: https://zenodo.org/record/8234946/files/resnet50_quant_full_mlperf_edgetpu.tflite?download=1 + tflite,no-argmax: + env: + CM_DOWNLOAD_CHECKSUM: 53e81f9f9b459ecfb6d64add3da91564 + CM_ML_MODEL_FILE: resnet50_v1.no-argmax.tflite + CM_ML_MODEL_INPUT_SHAPES: '\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)' + CM_PACKAGE_URL: https://www.dropbox.com/s/vhuqo0wc39lky0a/resnet50_v1.no-argmax.tflite?dl=1 + uint8: + env: + CM_ML_MODEL_INPUT_DATA_TYPES: uint8 + CM_ML_MODEL_PRECISION: uint8 + CM_ML_MODEL_WEIGHT_DATA_TYPES: uint8 + group: precision diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/customize.py new file mode 100644 index 000000000..0e2b986ea --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/customize.py @@ -0,0 +1,44 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: 
https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if env.get('CM_ML_MODEL_TF_FIX_INPUT_SHAPE', '') == "yes": + i['run_script_input']['script_name'] = "run-fix-input" + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + if env.get('CM_ML_MODEL_TF_FIX_INPUT_SHAPE', '') == "yes": + env['CM_ML_MODEL_STARTING_FILE_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + os.getcwd(), "resnet50_v1.pb") + + env['CM_ML_MODEL_FILE'] = os.path.basename( + env['CM_ML_MODEL_FILE_WITH_PATH']) + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + + env['CM_DOWNLOAD_PATH'] = os.path.dirname( + env['CM_ML_MODEL_FILE_WITH_PATH']) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/run-fix-input.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/run-fix-input.sh new file mode 100644 index 000000000..5364b1233 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/run-fix-input.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +wget -nc https://raw.githubusercontent.com/krai/ck-mlperf/master/package/model-tf-mlperf-resnet/fix_input_shape.py +test $? -eq 0 || exit $? +${CM_PYTHON_BIN_WITH_PATH} "fix_input_shape.py" \ +--input_name "input_tensor" \ +--input_graph "${CM_ML_MODEL_FILE_WITH_PATH}" \ +--output_graph "resnet50_v1.pb" \ +--type b +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/run_config.yml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/run_config.yml new file mode 100644 index 000000000..938e3b641 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-resnet50/run_config.yml @@ -0,0 +1,6 @@ +docker: + build: true + docker_os: ubuntu + docker_os_version: "22.04" + +run_with_default_inputs: true #if false the script won't run automatic tests diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/README.md new file mode 100644 index 000000000..5a9c7c6f6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-retinanet-nvidia](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-retinanet-nvidia) for the documentation of this CM script. 
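[Editor's note] The `CM_DOWNLOAD_CHECKSUM` values scattered through the `_cm.yaml` files above are plain MD5 digests that CM's download-and-extract dependency verifies after fetching. The standalone sketch below reproduces the same check outside CM; the digest is taken from the `onnx,opset-11` variation of `get-ml-model-resnet50`, and the local file name `resnet50_v1.onnx` is an assumption based on that variation's URL.

```python
import hashlib


def md5_of(path, chunk_size=1 << 20):
    """Stream the file in chunks so large model files need not fit in memory."""
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()


# Digest copied from the onnx,opset-11 variation above.
expected = 'f6a4da60cd5f084d97efc2c1ddb10beb'
actual = md5_of('resnet50_v1.onnx')
if actual != expected:
    raise ValueError(f'Checksum mismatch: {actual} != {expected}')
```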
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/_cm.yaml new file mode 100644 index 000000000..4e114e43d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/_cm.yaml @@ -0,0 +1,45 @@ +alias: get-ml-model-retinanet-nvidia +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +default_env: + CM_TORCH_DEVICE: cpu +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python3 +- tags: get,mlperf,training,src,_nvidia-retinanet +- tags: get,mlperf,inference,src +- tags: get,ml-model,retinanet,_pytorch,_fp32,_weights +- enable_if_env: + CM_TORCH_DEVICE: cpu + tags: get,generic-python-lib,_torch +- tags: get,generic-python-lib,_torchvision +- tags: get,generic-python-lib,_mlperf_logging +- enable_if_env: + CM_TORCH_DEVICE: cuda + tags: get,cuda +- enable_if_env: + CM_TORCH_DEVICE: cuda + tags: get,generic-python-lib,_torch_cuda +- tags: get,nvidia,mlperf,inference,common-code,-_custom +new_env_keys: +- CM_NVIDIA_RETINANET_* +- CM_ML_MODEL_* +print_env_at_the_end: + CM_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH: Path to the ML model +tags: +- get +- ml-model +- nvidia-retinanet +- nvidia +uid: f059d249fac843ba +variations: + efficient-nms: + deps: + - tags: get,generic-python-lib,_polygraphy + env: + CM_NVIDIA_EFFICIENT_NMS: 'yes' diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/customize.py new file mode 100644 index 000000000..3fe50548f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/customize.py @@ -0,0 +1,49 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + if '+PYTHONPATH' not in env: + env['+PYTHONPATH'] = [] + env['+PYTHONPATH'].append( + os.path.join( + env['CM_MLPERF_TRAINING_SOURCE'], + "single_stage_detector", + "ssd")) + env['CM_ML_MODEL_DYN_BATCHSIZE_PATH'] = os.path.join( + os.getcwd(), "retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx") + if "CM_NVIDIA_EFFICIENT_NMS" in env: + env['CM_NVIDIA_MODEL_PATCHED_PATH'] = os.path.join( + os.getcwd(), "fpn_efficientnms_concatall.onnx") + env['CM_ML_MODEL_ANCHOR_PATH'] = os.path.join( + env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'], + "code", + "retinanet", + "tensorrt", + "onnx_generator", + "retinanet_anchor_xywh_1x1.npy") + return {'return': 0} + + +def postprocess(i): + env = i['env'] + env['CM_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH'] = os.path.join( + os.getcwd(), "test_fpn_efficientnms_concatall.onnx") + if "CM_NVIDIA_EFFICIENT_NMS" in env: + env['CM_NVIDIA_RETINANET_EFFICIENT_NMS_CONCAT_MODEL_WITH_PATH'] = env['CM_NVIDIA_MODEL_PATCHED_PATH'] + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py 
b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py new file mode 100644 index 000000000..d445ef01c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py @@ -0,0 +1,123 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import onnx +import argparse +import json +import re + +import onnx_graphsurgeon as gs +import numpy as np +import os + + +# in_onnx = "/work/code/retinanet/tensorrt/onnx_retina/ref_fpn_transreshapeconcat.onnx" +in_onnx = os.environ.get( + "CM_ML_MODEL_DYN_BATCHSIZE_PATH", + "build/models/retinanet-resnext50-32x4d/new/retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx") +out_onnx = os.environ.get( + "CM_NVIDIA_MODEL_PATCHED_PATH", + "/work/code/retinanet/tensorrt/onnx_generator/test_fpn_efficientnms_concatall.onnx") +# Anchor at [1, 1] +anchor_xywh_1x1_npy = os.environ.get( + "CM_ML_MODEL_ANCHOR_PATH", + "/work/code/retinanet/tensorrt/onnx_generator/retinanet_anchor_xywh_1x1.npy") + +graph = gs.import_onnx(onnx.load(in_onnx)) + +op = 'EfficientNMS_TRT' +node_name = 'efficientNMS' + +# (PluginField("score_threshold", nullptr, PluginFieldType::kFLOAT32, 1)); +# (PluginField("iou_threshold", nullptr, PluginFieldType::kFLOAT32, 1)); +# (PluginField("max_output_boxes", nullptr, PluginFieldType::kINT32, 1)); +# (PluginField("background_class", nullptr, PluginFieldType::kINT32, 1)); +# (PluginField("score_activation", nullptr, PluginFieldType::kINT32, 1)); +# (PluginField("box_coding", nullptr, PluginFieldType::kINT32, 1)); + +node_attrs = { + "background_class": -1, + "score_threshold": 0.05, + "iou_threshold": 0.5, + "max_output_boxes": 1000, + "score_activation": True, + "box_coding": 1, +} +attrs = { + "plugin_version": "1", + "plugin_namespace": "", +} +attrs.update(node_attrs) + +anchors = np.load(anchor_xywh_1x1_npy) +print(f"anchors shape: {anchors.shape}, top 4: {anchors[0, :]}") +anchors = np.expand_dims(anchors, axis=0) +print(f"anchors shape: {anchors.shape}") + +anchor_tensor = gs.Constant(name="anchor", values=anchors) + +tensors = graph.tensors() + +# Add EfficientNMS layer +# output tensors +num_detections = gs.Variable(name="num_detections", + dtype=np.int32, + shape=["batch_size", 1]) +detection_boxes = gs.Variable(name="detection_boxes", + dtype=np.float32, + shape=["batch_size", 1000, 4]) +detection_scores = gs.Variable(name="detection_scores", + dtype=np.float32, + shape=["batch_size", 1000]) +detection_classes = gs.Variable(name="detection_classes", + dtype=np.int32, + shape=["batch_size", 1000]) + +nms_inputs = [tensors["bbox_regression"], tensors["cls_logits"], anchor_tensor] +nms_outputs = [ + num_detections, + detection_boxes, + detection_scores, + detection_classes] + +graph.layer(op="EfficientNMS_TRT", + name="EfficientNMS", + inputs=nms_inputs, + outputs=nms_outputs, + attrs=attrs) + +# Add Concat plugin to concat all 4 tensors +concat_final_output = 
gs.Variable(name="concat_final_output", + dtype=np.float32, + shape=["batch_size", 7001]) +attrs = { + "plugin_version": "1", + "plugin_namespace": "", +} +graph.layer(op="RetinanetConcatNmsOutputsPlugin", + name="RetinanetConcatNmsOutputsPlugin", + inputs=[ + num_detections, + detection_boxes, + detection_scores, + detection_classes], + outputs=[concat_final_output], + attrs=attrs) + +graph.outputs = [concat_final_output] + +graph.cleanup().toposort() + +onnx.save_model(gs.export_onnx(graph), out_onnx) diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/polygraphy_script.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/polygraphy_script.sh new file mode 100644 index 000000000..b992aa171 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/polygraphy_script.sh @@ -0,0 +1,24 @@ +# Set these parameters +RAW_ONNX_PATH=$1 +FOLDED_ONNX_PATH=$2 +BACKEND_ONNX_PATH=$3 +NMS_ONNX_PATH=$4 + +bbox_concat_node="bbox_regression" +classification_concat_node="cls_logits" + + +# Run once to install the dependencies. For some reason, this messes up Polygraphy's auto-fold loop, so we need to run a +# second time. +POLYGRAPHY_AUTOINSTALL_DEPS=1 polygraphy surgeon sanitize --fold-constants $RAW_ONNX_PATH -o $FOLDED_ONNX_PATH +polygraphy surgeon sanitize --fold-constants $RAW_ONNX_PATH -o $FOLDED_ONNX_PATH + +# Extract backend +polygraphy surgeon extract $FOLDED_ONNX_PATH \ + --outputs ${bbox_concat_node}:auto ${classification_concat_node}:auto \ + -o $BACKEND_ONNX_PATH + +# Extract NMS head +polygraphy surgeon extract $FOLDED_ONNX_PATH \ + --inputs ${classification_concat_node}:[batch_size,120087,264]:auto ${bbox_concat_node}:[batch_size,120087,4]:auto \ + -o $NMS_ONNX_PATH diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/run.sh new file mode 100644 index 000000000..592509b67 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet-nvidia/run.sh @@ -0,0 +1,16 @@ +#!/bin/bash +#${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_INFERENCE_VISION_PATH}/tools/retinanet_pytorch_to_onnx.py --weights ${CM_ML_MODEL_FILE_WITH_PATH} +cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_TRAINING_SOURCE}/single_stage_detector/ssd/pth_to_onnx.py --num-classes 264 --image-size 800 800 --input ${CM_ML_MODEL_FILE_WITH_PATH} --output retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx --device ${CM_TORCH_DEVICE}" +echo $cmd +eval $cmd +test $? -eq 0 || exit $? +if [[ ${CM_NVIDIA_EFFICIENT_NMS} == "yes" ]]; then + cmd="bash ${CM_TMP_CURRENT_SCRIPT_PATH}/polygraphy_script.sh retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx folded.onnx backend.onnx nms.onnx" + echo $cmd + eval $cmd + test $? -eq 0 || exit $? + cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/nvidia_patch_retinanet_efficientnms.py" + echo $cmd + eval $cmd + test $? -eq 0 || exit $? +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/README-extra.md
new file mode 100644
index 000000000..db25a8657
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/README-extra.md
@@ -0,0 +1,16 @@
+# Get ML Model Retinanet
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) downloads the RetinaNet model and adds it to the CM cache with the relevant metadata.
+
+## How To
+```bash
+cm run script --tags=get,ml-model,retinanet,_[VARIATION]
+```
+where:
+* `[VARIATION]` is one of `onnx-fp32`, `pytorch-fp32` or `pytorch-fp32-weights`.
+
+## Exported Variables
+* `CM_ML_MODEL_FILE:` Model filename
+* `CM_ML_MODEL_FILE_WITH_PATH:` Full path to the model file
+* `CM_ML_MODEL_PATH:` Path to the folder containing the model file
+* Additional exported environment variables are listed in the [_cm.yaml file](_cm.yaml)
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/README.md
new file mode 100644
index 000000000..12a4c7f5a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-retinanet](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-retinanet) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/_cm.yaml
new file mode 100644
index 000000000..90e937000
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/_cm.yaml
@@ -0,0 +1,123 @@
+alias: get-ml-model-retinanet
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: AI/ML models
+env:
+  CM_ML_MODEL: retinanet
+  CM_ML_MODEL_DATASET: open-images
+  CM_ML_MODEL_IMAGE_HEIGHT: '800'
+  CM_ML_MODEL_IMAGE_WIDTH: '800'
+  CM_ML_MODEL_NORMALIZE_DATA: 'yes'
+  CM_ML_MODEL_RETRAINING: 'no'
+  CM_ML_MODEL_SUBTRACT_MEANS: 'yes'
+  CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
+new_env_keys:
+- CM_ML_MODEL_*
+- <<<CM_ENV_NAME_ML_MODEL_FILE>>>
+prehook_deps:
+- env:
+    CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
+    CM_EXTRACT_EXTRACTED_FILENAME: <<<CM_ML_MODEL_FILE>>>
+    CM_EXTRACT_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
+  extra_cache_tags: get,ml-model,model-retinanet
+  force_cache: true
+  names:
+  - dae
+  skip_if_env:
+    CM_TMP_ML_MODEL_RETINANET_NO_NMS:
+    - 'yes'
+  tags: download-and-extract
+  update_tags_from_env_with_prefix:
+    _url.:
+    - CM_PACKAGE_URL
+print_env_at_the_end:
+  CM_ML_MODEL_ACCURACY: Model accuracy
+  CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model
+tags:
+- get
+- ml-model
+- raw
+- resnext50
+- retinanet
+- object-detection
+uid: 427bc5665e4541c2
+variations:
+  fp32:
+    default: true
+    env:
+      CM_ML_MODEL_INPUT_DATA_TYPES: fp32
+      CM_ML_MODEL_PRECISION: fp32
+      CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32
+    group: precision
+  no-nms:
+    env:
+      CM_ML_MODEL_RETINANET_NO_NMS: 'yes'
+      CM_QAIC_PRINT_NODE_PRECISION_INFO: 'yes'
+      CM_TMP_ML_MODEL_RETINANET_NO_NMS: 'yes'
+  onnx:
+    default: true
+    env:
+      CM_ML_MODEL_DATA_LAYOUT: NCHW
+      CM_ML_MODEL_FRAMEWORK: onnx
+    group: framework
+  onnx,fp32:
+    env:
+      CM_DOWNLOAD_CHECKSUM: 4544f4e56e0a4684215831cc937ea45c
+      CM_ML_MODEL_ACCURACY: '0.3757'
+      CM_PACKAGE_URL: https://zenodo.org/record/6617879/files/resnext50_32x4d_fpn.onnx
+    required_disk_space: 150
+    warning: This model is downloaded from Zenodo.org
+  onnx,no-nms:
+    deps:
+    - names:
+      - python, python3
+      tags: get,python3
+    - tags: get,generic-python-lib,_package.onnx
+    - tags: get,generic-python-lib,_package.onnxsim
+    - env:
+        CM_DOWNLOAD_FINAL_ENV_NAME: CM_RETINANET_NO_NMS_PATCH_FILE_PATH
+      extra_cache_tags: retinanet,training,patch,file
+      force_cache: true
+      tags: download,file,_url.https://raw.githubusercontent.com/arjunsuresh/ck-qaic/main/package/model-onnx-mlperf-retinanet-no-nms/remove-nms-and-extract-priors.patch
+    - env:
+        CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_TRAINING_REPO_PATCHED_PATH
+        CM_GIT_PATCH_FILEPATHS: <<<CM_RETINANET_NO_NMS_PATCH_FILE_PATH>>>
+      extra_cache_tags: training,src,mlperf,patched
+      names:
+      - mlperf-training-src
+      tags: get,git,repo,_repo.https://github.com/mlcommons/training.git,_patch
+    - env:
+        CM_ENV_NAME_ML_MODEL_FILE: CM_ML_MODEL_RETINANET_PYTORCH_WEIGHTS_FILE_PATH
+      names:
+      - pytorch-weights
+      tags: get,ml-model,retinanet,_pytorch,_fp32,_weights
+    - names:
+      - torch
+      - pytorch
+      tags: get,generic-python-lib,_package.torch
+      version_min: 1.13.1
+    env: {}
+  pytorch:
+    env:
+      CM_ML_MODEL_DATA_LAYOUT: NCHW
+      CM_ML_MODEL_FRAMEWORK: pytorch
+    group: framework
+  pytorch,fp32:
+    env:
+      CM_DOWNLOAD_CHECKSUM: a55f6bec3464f605ce8d686da8ac1533
+      CM_ML_MODEL_ACCURACY: '0.3755'
+      CM_PACKAGE_URL: https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth
+  pytorch,fp32,weights:
+    add_deps_recursive:
+      dae:
+        tags: _extract
+    env:
+      CM_DOWNLOAD_CHECKSUM: '2037c152a6be18e371ebec654314f7e0'
+      CM_ML_MODEL_ACCURACY: '0.3755'
+      CM_ML_MODEL_FILE: retinanet_model_10.pth
+      CM_PACKAGE_URL: https://zenodo.org/record/6605272/files/retinanet_model_10.zip?download=1
+      CM_UNZIP: 'yes'
+  weights:
+    env:
+      CM_MODEL_WEIGHTS_FILE: 'yes'
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/customize.py
new file mode 100644
index 000000000..3e9d2b511
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/customize.py
@@ -0,0 +1,45 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    if env.get('CM_TMP_ML_MODEL_RETINANET_NO_NMS', '') == 'yes':
+        i['run_script_input']['script_name'] = "run-no-nms"
+        env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(
+            os.getcwd(), "retinanet.onnx")
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    env['CM_ML_MODEL_FILE'] = os.path.basename(
+        env['CM_ML_MODEL_FILE_WITH_PATH'])
+    if env.get('CM_ENV_NAME_ML_MODEL_FILE', '') != '':
+        env[env['CM_ENV_NAME_ML_MODEL_FILE']] = env['CM_ML_MODEL_FILE_WITH_PATH']
+
+    if env.get("CM_QAIC_PRINT_NODE_PRECISION_INFO", '') == 'yes':
+        env['CM_ML_MODEL_RETINANET_QAIC_NODE_PRECISION_INFO_FILE_PATH'] = os.path.join(
+            os.getcwd(), 'node-precision-info.yaml')
+
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH']
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/node-precision-info.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/node-precision-info.py
new file mode 100644
index 000000000..15d5213b9
--- /dev/null
+++
b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/node-precision-info.py @@ -0,0 +1,88 @@ +import onnx +import os +import sys +import argparse +import yaml + + +def parse_args(add_help=True): + parser = argparse.ArgumentParser( + description='Print node precision info for the onnx file', + add_help=add_help) + parser.add_argument( + '--input', + default="retinanet.onnx", + help='input onnx file') + parser.add_argument( + '--output', + default="node-precision.yaml", + help='output node precision file') + args = parser.parse_args() + + return args + + +def main(args): + + onnx_model = onnx.load(args.input) + list1 = [ + "/backbone/fpn/inner_blocks.0/Conv_output_0", + "/head/classification_head/Sigmoid_output_0", + "/head/classification_head/Sigmoid_1_output_0", + "/head/classification_head/Sigmoid_2_output_0", + "/head/classification_head/Sigmoid_3_output_0", + "/head/classification_head/Sigmoid_4_output_0" + ] + list2 = [ + "1421", + "1481", + "1517", + "1553", + "1589", + "1625", + ] + + # check which list of node names is valid + node_names = [] + valid_list = None + + # for n in enumerate_model_node_outputs(onnx_model): + for n in onnx_model.graph.node: + node_names.append(n.output[0]) + + if set(list1) < set(node_names): + valid_list = list1 + elif set(list2) < set(node_names): + valid_list = list2 + else: + print("Node names are not matching with the expected ones in the input onnx file.") + sys.exit(1) + + node_precision_info = {} + node_precision_info['FP16NodeInstanceNames'] = [] + + fp16nodes = valid_list + fp16nodes += ["boxes_1", + "boxes_2", + "boxes_3", + "boxes_4", + "boxes_5", + "scores_1", + "scores_2", + "scores_3", + "scores_4", + "scores_5"] + + # node_precision_info['FP16NodeInstanceNames'] = "["+", ".join(fp16nodes)+"]" + node_precision_info['FP16NodeInstanceNames'] = fp16nodes + + yaml_output = yaml.safe_dump(node_precision_info, default_style=None) + with open(args.output, "w") as f: + f.write(yaml_output) + + print(f"Node precision info successfully printed out to {args.output}") + + +if __name__ == "__main__": + args = parse_args() + main(args) diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/run-no-nms.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/run-no-nms.sh new file mode 100644 index 000000000..48be9d1e6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-retinanet/run-no-nms.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... 
+# run "$CM_RUN_CMD" + +cmd="PYTHONPATH=${PYTHONPATH}:${CM_MLPERF_TRAINING_REPO_PATCHED_PATH}/single_stage_detector/ssd/ ${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_TRAINING_REPO_PATCHED_PATH}/single_stage_detector/scripts/pth_to_onnx.py --input ${CM_ML_MODEL_RETINANET_PYTORCH_WEIGHTS_FILE_PATH} --output $PWD/retinanet.onnx --image-size 800 800" +run "$cmd" + +if [[ ${CM_QAIC_PRINT_NODE_PRECISION_INFO} == "yes" ]]; then + cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/node-precision-info.py --input $PWD/retinanet.onnx --output $PWD/node-precision-info.yaml" + run "$cmd" +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rgat/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rgat/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rgat/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rgat/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rgat/_cm.yaml new file mode 100644 index 000000000..0bc4b1eab --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rgat/_cm.yaml @@ -0,0 +1,65 @@ +alias: get-ml-model-rgat +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +env: + CM_ML_MODEL: RGAT + CM_ML_MODEL_DATASET: ICBH +input_mapping: + checkpoint: RGAT_CHECKPOINT_PATH + download_path: CM_DOWNLOAD_PATH + to: CM_DOWNLOAD_PATH +new_env_keys: +- CM_ML_MODEL_* +- RGAT_CHECKPOINT_PATH +prehook_deps: +- enable_if_env: + CM_DOWNLOAD_TOOL: + - rclone + CM_TMP_REQUIRE_DOWNLOAD: + - 'yes' + env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_PATH + extra_cache_tags: rgat,gnn,model + force_cache: true + names: + - dae + tags: download-and-extract + update_tags_from_env_with_prefix: + _url.: + - CM_DOWNLOAD_URL +print_env_at_the_end: + RGAT_CHECKPOINT_PATH: R-GAT checkpoint path +tags: +- get +- raw +- ml-model +- rgat +uid: b409fd66c5ad4ed5 +variations: + pytorch: + default: true + fp32: + default: true + env: + CM_ML_MODEL_INPUT_DATA_TYPES: fp32 + CM_ML_MODEL_PRECISION: fp32 + CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + group: precision + mlcommons: + default: true + default_variations: + download-tool: rclone + group: download-source + rclone: + adr: + dae: + tags: _rclone + env: + CM_DOWNLOAD_TOOL: rclone + CM_RCLONE_CONFIG_NAME: mlc-inference + group: download-tool + rclone,fp32: + env: + CM_DOWNLOAD_URL: mlc-inference:mlcommons-inference-wg-public/R-GAT/RGAT.pt diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rgat/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rgat/customize.py new file mode 100644 index 000000000..dbecb0d8a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rgat/customize.py @@ -0,0 +1,41 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + path = env.get('RGAT_CHECKPOINT_PATH', '').strip() + 
+ if path == '' or not os.path.exists(path): + env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + if env.get('RGAT_CHECKPOINT_PATH', '') == '': + env['RGAT_CHECKPOINT_PATH'] = os.path.join( + env['CM_ML_MODEL_PATH'], "RGAT.pt") + elif env.get('CM_ML_MODEL_PATH', '') == '': + env['CM_ML_MODEL_PATH'] = env['RGAT_CHECKPOINT_PATH'] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['RGAT_CHECKPOINT_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/README.md new file mode 100644 index 000000000..36ebdd624 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-rnnt](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-rnnt) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/_cm.yaml new file mode 100644 index 000000000..913508aaf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/_cm.yaml @@ -0,0 +1,52 @@ +alias: get-ml-model-rnnt +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +env: + CM_ML_MODEL: rnnt + CM_ML_MODEL_DATASET: librispeech + CM_ML_MODEL_RETRAINING: 'no' + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' +new_env_keys: +- CM_ML_MODEL_* +print_env_at_the_end: + CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model +tags: +- get +- ml-model +- rnnt +- raw +- librispeech +- speech-recognition +uid: 8858f18b89774d28 +variations: + amazon-s3: + group: download-src + fp32: + default: true + env: + CM_ML_MODEL_INPUT_DATA_TYPES: fp32 + CM_ML_MODEL_PRECISION: fp32 + CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + group: precision + pytorch: + default: true + env: + CM_ML_MODEL_FRAMEWORK: pytorch + group: framework + pytorch,fp32: + env: + CM_ML_MODEL_ACCURACY: '0.07452253714852645' + CM_PACKAGE_URL: https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt?download=1 + pytorch,fp32,amazon-s3: + env: {} + pytorch,fp32,zenodo: + env: + CM_PACKAGE_URL: https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt?download=1 + weights: + env: + CM_MODEL_WEIGHTS_FILE: 'yes' + zenodo: + default: true + group: download-src diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/customize.py new file mode 100644 index 000000000..15fec7cf5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-rnnt/customize.py @@ -0,0 +1,51 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: 
https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + cm = automation.cmind + + path = os.getcwd() + + url = env['CM_PACKAGE_URL'] + + print('Downloading from {}'.format(url)) + + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': url}) + if r['return'] > 0: + return r + + filename = r['filename'] + + if env.get('CM_UNZIP') == "yes": + os.system("unzip " + filename) + filename = env['CM_ML_MODEL_FILE'] + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join(path, filename) + else: + # Add to path + env['CM_ML_MODEL_FILE'] = filename + env['CM_ML_MODEL_FILE_WITH_PATH'] = r['path'] + + env['CM_ML_MODEL_PATH'] = path + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/README.md new file mode 100644 index 000000000..c85c2bdce --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-stable-diffusion](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-stable-diffusion) for the documentation of this CM script. 
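For context, the `get-ml-model-rnnt` customize.py above fetches the checkpoint through the `utils` automation rather than a prehook dependency. A hedged sketch of driving the same action directly from Python (the URL below is illustrative only, not a real artifact location):

```python
import cmind

# Sketch: call the 'utils' automation's download_file action, as the
# rnnt customize.py above does via automation.cmind.access().
r = cmind.access({'action': 'download_file',
                  'automation': 'utils,dc2743f8450541e3',
                  'url': 'https://example.com/checkpoint.pt'})  # placeholder URL
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'download failed'))

# The result exposes the same keys customize.py consumes.
print(r['filename'], r['path'])
```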
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/_cm.yaml new file mode 100644 index 000000000..b2326daff --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/_cm.yaml @@ -0,0 +1,129 @@ +alias: get-ml-model-stable-diffusion +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +env: + CM_ML_MODEL: SDXL + CM_ML_MODEL_DATASET: openorca + CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' +input_mapping: + checkpoint: SDXL_CHECKPOINT_PATH + download_path: CM_DOWNLOAD_PATH + to: CM_DOWNLOAD_PATH +new_env_keys: +- CM_ML_MODEL_* +- SDXL_CHECKPOINT_PATH +prehook_deps: +- enable_if_env: + CM_DOWNLOAD_TOOL: + - git + CM_TMP_REQUIRE_DOWNLOAD: + - 'yes' + env: + CM_GIT_CHECKOUT_FOLDER: stable-diffusion-xl-base-1.0 + CM_MODEL_ZOO_ENV_KEY: SDXL + force_env_keys: + - CM_GIT_CHECKOUT_FOLDER + names: + - hf-zoo + tags: get,ml-model,huggingface,zoo,_clone-repo,_model-stub.stabilityai/stable-diffusion-xl-base-1.0 +- enable_if_env: + CM_DOWNLOAD_TOOL: + - rclone + CM_TMP_REQUIRE_DOWNLOAD: + - 'yes' + env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_PATH + extra_cache_tags: stable-diffusion,sdxl,model + force_cache: true + names: + - dae + tags: download-and-extract + update_tags_from_env_with_prefix: + _url.: + - CM_DOWNLOAD_URL +print_env_at_the_end: + SDXL_CHECKPOINT_PATH: Stable diffusion checkpoint path +tags: +- get +- raw +- ml-model +- stable-diffusion +- sdxl +- text-to-image +uid: 22c6516b2d4d4c23 +variations: + batch_size.#: + env: + CM_ML_MODEL_BATCH_SIZE: '#' + fp16: + env: + CM_ML_MODEL_INPUT_DATA_TYPES: fp16 + CM_ML_MODEL_PRECISION: fp16 + CM_ML_MODEL_WEIGHT_DATA_TYPES: fp16 + group: precision + fp32: + default: true + env: + CM_ML_MODEL_INPUT_DATA_TYPES: fp32 + CM_ML_MODEL_PRECISION: fp32 + CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32 + group: precision + git: + env: + CM_DOWNLOAD_TOOL: git + group: download-tool + huggingface: + default_variations: + download-tool: git + group: download-source + int8: + env: + CM_ML_MODEL_INPUT_DATA_TYPES: int8 + CM_ML_MODEL_PRECISION: int8 + CM_ML_MODEL_WEIGHT_DATA_TYPES: int8 + group: precision + mlcommons: + default: true + default_variations: + download-tool: rclone + group: download-source + pytorch: + default: true + env: + CM_ML_MODEL_FRAMEWORK: pytorch + CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: https://github.com/mlcommons/inference/tree/master/text_to_image#download-model + group: framework + pytorch,fp16: + required_disk_space: 6500 + pytorch,fp32: + env: {} + required_disk_space: 13000 + rclone: + adr: + dae: + tags: _rclone + env: + CM_DOWNLOAD_TOOL: rclone + CM_RCLONE_CONFIG_NAME: mlc-inference + group: download-tool + rclone,fp16: + env: + CM_DOWNLOAD_URL: mlc-inference:mlcommons-inference-wg-public/stable_diffusion_fp16 + rclone,fp32: + env: + CM_DOWNLOAD_URL: mlc-inference:mlcommons-inference-wg-public/stable_diffusion_fp32 + uint8: + env: + CM_ML_MODEL_INPUT_DATA_TYPES: uint8 + CM_ML_MODEL_PRECISION: uint8 + CM_ML_MODEL_WEIGHT_DATA_TYPES: uint8 + group: precision + wget: + adr: + dae: + tags: _wget + env: + CM_DOWNLOAD_TOOL: wget + group: download-tool diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/customize.py new file mode 100644 index 000000000..828e87cfb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-stable-diffusion/customize.py @@ -0,0 +1,40 @@ +# +# Copyright: 
https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + path = env.get('SDXL_CHECKPOINT_PATH', '').strip() + + if path == '' or not os.path.exists(path): + env['CM_TMP_REQUIRE_DOWNLOAD'] = 'yes' + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + if env.get('SDXL_CHECKPOINT_PATH', '') == '': + env['SDXL_CHECKPOINT_PATH'] = env['CM_ML_MODEL_PATH'] + elif env.get('CM_ML_MODEL_PATH', '') == '': + env['CM_ML_MODEL_PATH'] = env['SDXL_CHECKPOINT_PATH'] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['SDXL_CHECKPOINT_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/README.md new file mode 100644 index 000000000..b8072357c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-tiny-resnet](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-tiny-resnet) for the documentation of this CM script. 
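The stable-diffusion `customize.py` above follows the same reuse-or-download contract as the R-GAT script: download only when no usable checkpoint is supplied, then keep the checkpoint and model-path variables in sync. A standalone sketch of that logic, using a plain dict in place of the CM env:

```python
import os

def needs_download(env):
    # Mirrors preprocess() above: request a download unless a checkpoint exists.
    path = env.get('SDXL_CHECKPOINT_PATH', '').strip()
    return path == '' or not os.path.exists(path)

def reconcile(env):
    # Mirrors postprocess() above: whichever of the two variables is set wins.
    if env.get('SDXL_CHECKPOINT_PATH', '') == '':
        env['SDXL_CHECKPOINT_PATH'] = env['CM_ML_MODEL_PATH']
    elif env.get('CM_ML_MODEL_PATH', '') == '':
        env['CM_ML_MODEL_PATH'] = env['SDXL_CHECKPOINT_PATH']
    return env

env = {'SDXL_CHECKPOINT_PATH': '', 'CM_ML_MODEL_PATH': '/tmp/sdxl'}
assert needs_download(env)
assert reconcile(env)['SDXL_CHECKPOINT_PATH'] == '/tmp/sdxl'
```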
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/_cm.yaml
new file mode 100644
index 000000000..4f8406e29
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/_cm.yaml
@@ -0,0 +1,108 @@
+alias: get-ml-model-tiny-resnet
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: AI/ML models
+env:
+  CM_DOWNLOAD_FINAL_ENV_NAME: CM_ML_MODEL_FILE_WITH_PATH
+  CM_ML_MODEL: RESNET
+  CM_ML_MODEL_DATASET: cifar-10
+  CM_ML_MODEL_IMAGE_HEIGHT: '32'
+  CM_ML_MODEL_IMAGE_WIDTH: '32'
+  CM_ML_MODEL_NORMALIZE_DATA: '0'
+  CM_ML_MODEL_RETRAINING: 'no'
+  CM_ML_MODEL_SUBTRACT_MEANS: 'YES'
+  CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no'
+new_env_keys:
+- CM_ML_MODEL_*
+prehook_deps:
+- enable_if_env:
+    CM_PACKAGE_URL:
+    - 'on'
+  env:
+    CM_EXTRACT_EXTRACTED_FILENAME: <<<CM_ML_MODEL_FILE>>>
+  tags: download-and-extract
+  update_tags_from_env_with_prefix:
+    _url.:
+    - CM_PACKAGE_URL
+print_env_at_the_end:
+  CM_ML_MODEL_FILE_WITH_PATH: Path to the ML model
+tags:
+- get
+- raw
+- ml-model
+- resnet
+- pretrained
+- tiny
+- model
+- ic
+- ml-model-tiny-resnet
+- image-classification
+uid: dd5ec11c3f6e49eb
+variations:
+  batch_size.#:
+    env:
+      CM_ML_MODEL_BATCH_SIZE: '#'
+  fp32:
+    add_deps_tags:
+      dependent-model:
+        tags: _int8
+    env:
+      CM_ML_MODEL_INPUT_DATA_TYPES: fp32
+      CM_ML_MODEL_PRECISION: fp32
+      CM_ML_MODEL_WEIGHT_DATA_TYPES: fp32
+    group: precision
+  int8:
+    add_deps_recursive:
+      dependent-model:
+        tags: _int8
+    default: true
+    env:
+      CM_ML_MODEL_INPUT_DATA_TYPES: int8
+      CM_ML_MODEL_PRECISION: int8
+      CM_ML_MODEL_WEIGHT_DATA_TYPES: int8
+    group: precision
+  onnx:
+    deps:
+    - names:
+      - python,python3
+      tags: get,python3
+    - names:
+      - tflite-resnet-model
+      - dependent-model
+      tags: get,tiny,model,resnet,_tflite
+    - names:
+      - tf2onnx
+      tags: get,generic-python-lib,_package.tf2onnx
+    env:
+      CM_TMP_ML_MODEL_TF2ONNX: 'yes'
+    group: framework
+  tflite:
+    default: true
+    env:
+      CM_ML_MODEL_ACCURACY: '85'
+      CM_ML_MODEL_DATA_LAYOUT: NHWC
+      CM_ML_MODEL_FRAMEWORK: tflite
+      CM_ML_MODEL_GIVEN_CHANNEL_MEANS: ''
+      CM_ML_MODEL_INPUT_LAYERS: ''
+      CM_ML_MODEL_INPUT_LAYER_NAME: ''
+      CM_ML_MODEL_INPUT_SHAPES: ''
+      CM_ML_MODEL_NORMALIZE_DATA: '0'
+      CM_ML_MODEL_OUTPUT_LAYERS: ''
+      CM_ML_MODEL_OUTPUT_LAYER_NAME: ''
+      CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: <<<CM_PACKAGE_URL>>>
+      CM_ML_MODEL_SUBTRACT_MEANS: 'YES'
+    group: framework
+  tflite,int8:
+    env:
+      CM_DOWNLOAD_CHECKSUM: 2d6dd48722471313e4c4528249205ae3
+      CM_PACKAGE_URL: https://github.com/mlcommons/tiny/raw/master/benchmark/training/image_classification/trained_models/pretrainedResnet_quant.tflite
+  uint8:
+    add_deps_tags:
+      dependent-model:
+        tags: _int8
+    env:
+      CM_ML_MODEL_INPUT_DATA_TYPES: uint8
+      CM_ML_MODEL_PRECISION: uint8
+      CM_ML_MODEL_WEIGHT_DATA_TYPES: uint8
+    group: precision
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/customize.py
new file mode 100644
index 000000000..36c10bb48
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/customize.py
@@ -0,0 +1,41 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors:
https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if env.get("CM_TMP_ML_MODEL_TF2ONNX", "") == "yes": + outputfile = env.get('CM_ML_MODEL_OUTFILE', 'model_quant.onnx') + env['CM_RUN_CMD'] = env['CM_PYTHON_BIN_WITH_PATH'] + " -m tf2onnx.convert --tflite " + \ + env['CM_ML_MODEL_FILE_WITH_PATH'] + " --output " + \ + outputfile + " --inputs-as-nchw \"input_1_int8\"" + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + os.getcwd(), outputfile) + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['CM_ML_MODEL_FILE'] = os.path.basename( + env['CM_ML_MODEL_FILE_WITH_PATH']) + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/run.sh new file mode 100644 index 000000000..e935cf158 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash +echo ${CM_RUN_CMD} +eval ${CM_RUN_CMD} +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/run_config.yml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/run_config.yml new file mode 100644 index 000000000..938e3b641 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-tiny-resnet/run_config.yml @@ -0,0 +1,6 @@ +docker: + build: true + docker_os: ubuntu + docker_os_version: "22.04" + +run_with_default_inputs: true #if false the script won't run automatic tests diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/README.md b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/README.md new file mode 100644 index 000000000..32cabd3d7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-using-imagenet-from-model-zoo](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-ml-model-using-imagenet-from-model-zoo) for the documentation of this CM script. 
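For reference, the `CM_RUN_CMD` assembled by the tiny-resnet `customize.py` above shells out to tf2onnx. A standalone equivalent, assuming placeholder file names (the `input_1_int8` input name comes from the script above):

```python
import subprocess
import sys

# Sketch of the TFLite-to-ONNX conversion composed in customize.py above.
# File names are placeholders; --inputs-as-nchw matches the NCHW layout
# expected by the ONNX variation.
cmd = [sys.executable, '-m', 'tf2onnx.convert',
       '--tflite', 'pretrainedResnet_quant.tflite',
       '--output', 'model_quant.onnx',
       '--inputs-as-nchw', 'input_1_int8']
subprocess.run(cmd, check=True)
```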
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/_cm.yaml new file mode 100644 index 000000000..3f5b3f648 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/_cm.yaml @@ -0,0 +1,37 @@ +alias: get-ml-model-using-imagenet-from-model-zoo +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +env: + CM_ML_MODEL: resnet + CM_ML_MODEL_DATASET: imagenet +new_env_keys: +- CM_ML_MODEL* +tags: +- get +- ml-model +- model-zoo +- zoo +- imagenet +- image-classification +uid: 153e08828c4e45cc +variations: + model.#: + deps: + - names: + - neural-magic-zoo-downloader + tags: get,ml-model,zoo,deepsparse,_model-stub.# + group: model-source + model.resnet101-pytorch-base: + deps: + - names: + - neural-magic-zoo-downloader + tags: get,ml-model,zoo,deepsparse,_model-stub.zoo:cv/classification/resnet_v1-101/pytorch/sparseml/imagenet/base-none + group: model-source + model.resnet50-pruned95-uniform-quant: + deps: + - names: + - neural-magic-zoo-downloader + tags: get,ml-model,zoo,deepsparse,_model-stub.zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned95_uniform_quant-none + group: model-source diff --git a/cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/customize.py new file mode 100644 index 000000000..571c4ea25 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-ml-model-using-imagenet-from-model-zoo/customize.py @@ -0,0 +1,33 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + cm = automation.cmind + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/README.md new file mode 100644 index 000000000..7dbb7fe19 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-intel-scratch-space](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-intel-scratch-space) for the documentation of this CM script. 
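In the `get-ml-model-using-imagenet-from-model-zoo` meta above, the `model.#` variation forwards its `#` suffix into a DeepSparse model-zoo stub. A hedged sketch of selecting one of the concrete variations through the CM Python API:

```python
import cmind

# Sketch: pull a SparseZoo ResNet variant via the script defined above.
# The tags mirror its '_model.resnet101-pytorch-base' variation.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,ml-model,model-zoo,imagenet,_model.resnet101-pytorch-base'})
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'script run failed'))
```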
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/_cm.yaml new file mode 100644 index 000000000..1ddab564e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/_cm.yaml @@ -0,0 +1,37 @@ +alias: get-mlperf-inference-intel-scratch-space +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +deps: [] +docker: + run: false +input_description: {} +input_mapping: + scratch_path: MLPERF_INTEL_SCRATCH_PATH +new_env_keys: +- CM_INTEL_MLPERF_SCRATCH_PATH +- CM_INTEL_SCRATCH_SPACE_VERSION +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- get +- mlperf +- inference +- intel +- scratch +- space +uid: e83fca30851f45ef +variations: + version.#: + env: + CM_INTEL_SCRATCH_SPACE_VERSION: '#' + group: version + version.4_0: + default: true + env: + CM_INTEL_SCRATCH_SPACE_VERSION: '4_0' + group: version +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/customize.py new file mode 100644 index 000000000..a25109453 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/customize.py @@ -0,0 +1,40 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_INTEL_MLPERF_SCRATCH_PATH', '') == '': + env['CM_INTEL_MLPERF_SCRATCH_PATH'] = os.getcwd() + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_INTEL_MLPERF_SCRATCH_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/run.bat new file mode 100644 index 000000000..648302ca7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/run.sh new file mode 100644 index 000000000..eb5ce2456 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-intel-scratch-space/run.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? 
+}
+
+function run() {
+  echo "Running: "
+  echo "$1"
+  echo ""
+  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+    eval "$1"
+    exit_if_error
+  fi
+}
+
+#Add your run commands here...
+# run "$CM_RUN_CMD"
+
+scratch_path=${CM_INTEL_MLPERF_SCRATCH_PATH}
+mkdir -p ${scratch_path}/data
+mkdir -p ${scratch_path}/preprocessed_data
+mkdir -p ${scratch_path}/models
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/README-extra.md
new file mode 100644
index 000000000..7af6a0e4a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/README-extra.md
@@ -0,0 +1,26 @@
+# Get MLCommons Inference Loadgen
+
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) builds and installs
+the Loadgen library from the [MLCommons Inference repository](https://github.com/mlcommons/inference).
+
+## Commands
+To install:
+```
+cm run script --tags=get,mlperf,inference,loadgen --version=[VERSION]
+```
+where
+[VERSION] is one of:
+* `master:` Uses the master branch of the inference source repository to build loadgen
+* `r2.1:` Uses the release branch used for the MLCommons inference 2.1 round to build loadgen
+
+## Exported Variables
+* `C_INCLUDE_PATH`
+* `CPLUS_INCLUDE_PATH`
+* `LD_LIBRARY_PATH`
+* `DYLD_FALLBACK_LIBRARY_PATH`
+* `PYTHONPATH`
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. RHEL 9
+3. Windows (installs directly into the Python distribution)
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/README.md
new file mode 100644
index 000000000..96fbe4733
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-loadgen](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-loadgen) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/_cm.yaml new file mode 100644 index 000000000..7b879a448 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/_cm.yaml @@ -0,0 +1,193 @@ +alias: get-mlperf-inference-loadgen +uid: 64c3d98d0ba04950 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: MLPerf benchmark support + +default_env: + CM_SHARED_BUILD: 'no' + +default_version: master + +deps: +- tags: detect,os +- names: + - python3 + - python + tags: get,python3 +- force_env_keys: + - CM_GIT_URL + - CM_GIT_CHECKOUT + names: + - inference-src-loadgen + skip_if_env: + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: + - 'YES' + tags: get,mlcommons,inference,src +- enable_if_env: + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: + - 'YES' + force_cache: true + names: + - inference-src-loadgen-download + tags: download-and-extract,file,_wget,_extract + update_tags_from_env_with_prefix: + _url.: + - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL +- names: + - compiler + skip_if_any_env: + CM_HOST_OS_TYPE: + - windows + CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP: + - 'yes' + tags: get,compiler +- enable_if_env: + CM_HOST_OS_TYPE: + - windows + skip_if_env: + CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP: + - 'yes' + names: + - compiler + tags: get,cl +- names: + - cmake + tags: get,cmake + version_min: '3.12' +- names: + - pip-package + - wheel + tags: get,generic-python-lib,_package.wheel +- names: + - pip-package + - pip + tags: get,generic-python-lib,_pip +- names: + - pip-package + - pybind11 + tags: get,generic-python-lib,_package.pybind11 +- names: + - pip-package + - setuputils + tags: get,generic-python-lib,_package.setuptools + +extra_cache_tags_from_env: +- env: CM_PYTHON_CACHE_TAGS + prefix: python- +- env: CM_COMPILER_CACHE_TAGS + prefix: compiler- + +new_env_keys: +- +PYTHONPATH +- +C_INCLUDE_PATH +- +CPLUS_INCLUDE_PATH +- +LD_LIBRARY_PATH +- +DYLD_FALLBACK_LIBRARY_PATH +- CM_MLPERF_INFERENCE_LOADGEN_* + +tags: +- get +- loadgen +- inference +- inference-loadgen +- mlperf +- mlcommons + +variations: + from-pip: + env: + CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP: 'yes' + deps: + - tags: get,generic-python-lib,_package.mlcommons-loadgen + copy: + add_deps: + inference-src-loadgen: + env: + CM_GIT_URL: https://github.com/cknowledge/mlperf-inference-loadgen-copy + # You still need to add --version=main since it's forced here to + custom-python: + ad: + pip-package: + tags: _custom-python + python3: + skip_if_env: + CM_TMP_USE_CUSTOM_PYTHON: + - 'on' + env: + CM_TMP_USE_CUSTOM_PYTHON: 'on' + keep-build: + group: clean-build + env: + CM_MLPERF_INFERENCE_LOADGEN_BUILD_CLEAN: 'no' + clean-build: + group: clean-build + default: true + env: + CM_MLPERF_INFERENCE_LOADGEN_BUILD_CLEAN: 'yes' + download: + env: + CM_DOWNLOAD_CHECKSUM: af3f9525965b2c1acc348fb882a5bfd1 + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: 'YES' + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 + CM_MLPERF_INFERENCE_LOADGEN_VERSION: v3.1 + CM_VERIFY_SSL: false + download_v3.1: + env: + CM_DOWNLOAD_CHECKSUM: af3f9525965b2c1acc348fb882a5bfd1 + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: 'YES' + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0 + CM_MLPERF_INFERENCE_LOADGEN_VERSION: v3.1 + 
CM_VERIFY_SSL: false + download_v4.0: + env: + CM_DOWNLOAD_CHECKSUM: b4d97525d9ad0539a64667f2a3ca20c5 + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: 'YES' + CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: https://www.dropbox.com/scl/fi/gk5e9kziju5t56umxyzyx/loadgen.zip?rlkey=vsie4xnzml1inpjplm5cg7t54&dl=0 + CM_MLPERF_INFERENCE_LOADGEN_VERSION: v4.0 + CM_VERIFY_SSL: false + no-compilation-warnings: + env: + '+ CXXFLAGS': + - '-Werror' + - '-Wno-unused-parameter' + +versions: + custom: + add_deps: + inference-src-loadgen: + version: custom + main: + add_deps: + inference-src-loadgen: + version: main + master: + add_deps: + inference-src-loadgen: + version: master + pybind_fix: + add_deps: + inference-src-loadgen: + version: pybind_fix + r2.1: + add_deps: + inference-src-loadgen: + tags: _pybind + version: r2.1 + r3.0: + add_deps: + inference-src-loadgen: + tags: _pybind + version: r3.0 + r3.1: + add_deps: + inference-src-loadgen: + tags: _pybind + version: r3.1 + +print_env_at_the_end: + CM_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH: "Path to the tool" diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/customize.py new file mode 100644 index 000000000..de6d6299b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/customize.py @@ -0,0 +1,68 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if env.get('CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP', '') == 'yes': + i['run_script_input']['script_name'] = "donotrun" + + return {'return': 0} + + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if env.get('CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP', '') == 'yes': + return {'return': 0} + + for key in ['+PYTHONPATH', '+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', + '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + # 20221024: we save and restore env in the main script and can clean env here for determinism + # if key not in env: + env[key] = [] + + # On Windows installs directly into Python distro for simplicity +# if os_info['platform'] != 'windows': + + cur_path = os.getcwd() + install_path = os.path.join(cur_path, 'install') + + env['CM_MLPERF_INFERENCE_LOADGEN_INSTALL_PATH'] = install_path + + build_path = os.path.join(cur_path, 'build') + if os.path.exists(build_path): + env['CM_MLPERF_INFERENCE_LOADGEN_BUILD_PATH'] = build_path + + include_path = os.path.join(install_path, 'include') + lib_path = os.path.join(install_path, 'lib') + python_path = os.path.join(install_path, 'python') + + env['+C_INCLUDE_PATH'].append(include_path) + env['+CPLUS_INCLUDE_PATH'].append(include_path) + env['CM_MLPERF_INFERENCE_LOADGEN_INCLUDE_PATH'] = include_path + + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + env['CM_MLPERF_INFERENCE_LOADGEN_LIBRARY_PATH'] = lib_path + + env['+PYTHONPATH'].append(python_path) + env['CM_MLPERF_INFERENCE_LOADGEN_PYTHON_PATH'] = python_path + + return {'return': 0} diff --git 
a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/run.bat
new file mode 100644
index 000000000..6d97f12b4
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/run.bat
@@ -0,0 +1,39 @@
+@echo off
+
+echo =======================================================
+
+set CUR_DIR=%cd%
+echo Current path in CM script: %CUR_DIR%
+
+if "%CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD%" == "YES" (
+   set CM_MLPERF_INFERENCE_SOURCE=%CM_EXTRACT_EXTRACTED_PATH%
+)
+
+set INSTALL_DIR=%CUR_DIR%\install
+
+echo.
+echo Switching to %CM_MLPERF_INFERENCE_SOURCE%\loadgen
+
+cd %CM_MLPERF_INFERENCE_SOURCE%\loadgen
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+echo.
+echo Running %CM_PYTHON_BIN% setup.py develop
+
+%CM_PYTHON_BIN% setup.py develop
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+echo =======================================================
+cmake ^
+    -DCMAKE_INSTALL_PREFIX=%INSTALL_DIR% ^
+    %CM_MLPERF_INFERENCE_SOURCE%\loadgen ^
+    -DPYTHON_EXECUTABLE:FILEPATH=%CM_PYTHON_BIN_WITH_PATH%
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+echo =======================================================
+cmake --build . --target install
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+del /Q /S build
+
+echo =======================================================
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/run.sh
new file mode 100644
index 000000000..ac61ad329
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/run.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+CUR_DIR=$PWD
+
+mkdir -p install
+mkdir -p build
+
+INSTALL_DIR="${CUR_DIR}/install"
+
+echo "******************************************************"
+
+cd build
+
+if [ "${CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD}" == "YES" ]; then
+  export CM_MLPERF_INFERENCE_SOURCE="${CM_EXTRACT_EXTRACTED_PATH}"
+fi
+
+
+if [ -z "${CM_MLPERF_INFERENCE_SOURCE}" ]; then
+  echo "Error: env CM_MLPERF_INFERENCE_SOURCE is not defined - something is wrong with script automation!"
+  exit 1
+fi
+
+cmake \
+    -DCMAKE_INSTALL_PREFIX="${INSTALL_DIR}" \
+    "${CM_MLPERF_INFERENCE_SOURCE}/loadgen" \
+    -DPYTHON_EXECUTABLE:FILEPATH="${CM_PYTHON_BIN_WITH_PATH}" -B .
+ret=$?; if [ ${ret} -ne 0 ]; then exit ${ret}; fi
+
+echo "******************************************************"
+CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}}
+CM_MAKE_CORES=${CM_MAKE_CORES:-2}
+
+cmake --build . --target install -j "${CM_MAKE_CORES}"
+ret=$?; if [ ${ret} -ne 0 ]; then exit ${ret}; fi
+
+# Clean build directory (too large)
+cd "${CUR_DIR}"
+if [[ $CM_MLPERF_INFERENCE_LOADGEN_BUILD_CLEAN == "yes" ]]; then
+  rm -rf build
+fi
+
+
+cd "${CM_MLPERF_INFERENCE_SOURCE}/loadgen"
+${CM_PYTHON_BIN_WITH_PATH} -m pip install . --target="${MLPERF_INFERENCE_PYTHON_SITE_BASE}"
+ret=$?; if [ ${ret} -ne 0 ]; then exit ${ret}; fi
+
+# Clean the built wheel
+#find . -name 'mlcommons_loadgen*.whl' | xargs rm
+
+echo "******************************************************"
+echo "Loadgen is built and installed to ${INSTALL_DIR} ..."
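Once the build above completes, the Python bindings can be smoke-tested with a no-op SUT and QSL. A minimal sketch, assuming a recent loadgen release (older releases also took a `process_latencies` callback in `ConstructSUT`):

```python
import mlperf_loadgen as lg

# No-op SUT: complete every query immediately with an empty response.
def issue_queries(query_samples):
    lg.QuerySamplesComplete(
        [lg.QuerySampleResponse(q.id, 0, 0) for q in query_samples])

def flush_queries():
    pass

settings = lg.TestSettings()
settings.scenario = lg.TestScenario.Offline
settings.mode = lg.TestMode.PerformanceOnly
settings.min_query_count = 10
settings.min_duration_ms = 0

sut = lg.ConstructSUT(issue_queries, flush_queries)
qsl = lg.ConstructQSL(10, 10, lambda samples: None, lambda samples: None)
lg.StartTest(sut, qsl, settings)
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
```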
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/tests/download-and-install.bat b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/tests/download-and-install.bat
new file mode 100644
index 000000000..868f0296c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-loadgen/tests/download-and-install.bat
@@ -0,0 +1,2 @@
+cmr "get loadgen _download"
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/README-extra.md
new file mode 100644
index 000000000..411a2248c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/README-extra.md
@@ -0,0 +1,9 @@
+# Get MLPerf Nvidia Common Code
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) exports the PYTHONPATH to the common code used by Nvidia for MLPerf submissions.
+
+## Exported Variables
+* `+PYTHONPATH`
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. RHEL 9
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/README.md
new file mode 100644
index 000000000..b104518bb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-common-code](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-common-code) for the documentation of this CM script.
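A hedged illustration of what the exported `+PYTHONPATH` enables: once `CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH` points at `<results-repo>/closed/NVIDIA`, NVIDIA's shared helpers become importable. The `code.common` module name follows NVIDIA's usual submission layout and may differ between rounds:

```python
import os
import sys

# Extend sys.path the same way the exported +PYTHONPATH entry does.
sys.path.insert(0, os.environ['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'])

# Hypothetical import: NVIDIA submissions usually ship a code/common package,
# but the exact module layout varies across MLPerf rounds.
from code.common import logging
logging.info("NVIDIA common code is importable")
```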
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/_cm.yaml new file mode 100644 index 000000000..bb3828b00 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/_cm.yaml @@ -0,0 +1,55 @@ +alias: get-mlperf-inference-nvidia-common-code +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +clean_files: [] +default_version: r3.1 +deps: +- inherit_variation_tags: true + names: + - mlperf-inference-results + tags: get,mlperf,inference,results,official,_code-only +new_env_keys: +- +PYTHONPATH +- CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH +tags: +- get +- nvidia +- mlperf +- inference +- common-code +uid: 26b78bf3ffdc4926 +variations: + ctuning: + group: repo-owner + custom: + group: repo-owner + go: + group: repo-owner + mlcommons: + default: true + group: repo-owner + nvidia-only: + group: repo-owner +versions: + r2.1: + add_deps_recursive: + mlperf-inference-results: + version: v2.1 + r3.0: + add_deps_recursive: + mlperf-inference-results: + version: v3.0 + r3.1: + add_deps_recursive: + mlperf-inference-results: + version: v3.1 + r4.0: + add_deps_recursive: + mlperf-inference-results: + version: v4.0 + r4.1: + add_deps_recursive: + mlperf-inference-results: + version: v4.1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/customize.py new file mode 100644 index 000000000..61a2bfe4b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-common-code/customize.py @@ -0,0 +1,32 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + + env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH'] = os.path.join( + env['CM_MLPERF_INFERENCE_RESULTS_PATH'], "closed", "NVIDIA") + env['+PYTHONPATH'] = [env['CM_MLPERF_INFERENCE_NVIDIA_CODE_PATH']] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/README-extra.md new file mode 100644 index 000000000..582991f6d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/README-extra.md @@ -0,0 +1 @@ +# CM script diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/README.md new file mode 100644 index 000000000..c87b6269a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-scratch-space](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-scratch-space) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/_cm.yaml new file mode 100644 index 000000000..b6612265f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/_cm.yaml @@ -0,0 +1,46 @@ +alias: get-mlperf-inference-nvidia-scratch-space +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +deps: [] +docker: + run: false +input_description: {} +input_mapping: + scratch_path: CM_NVIDIA_MLPERF_SCRATCH_PATH +new_env_keys: +- CM_NVIDIA_MLPERF_SCRATCH_PATH +- MLPERF_SCRATCH_PATH +- CM_NVIDIA_SCRATCH_SPACE_VERSION +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- get +- mlperf +- inference +- nvidia +- scratch +- space +uid: 0b2bec8b29fb4ab7 +variations: + version.#: + env: + CM_NVIDIA_SCRATCH_SPACE_VERSION: '#' + group: version + version.4_0: + env: + CM_NVIDIA_SCRATCH_SPACE_VERSION: '4_0' + group: version + version.4_1: + env: + CM_NVIDIA_SCRATCH_SPACE_VERSION: '4_1' + group: version + version.4_1-dev: + default: true + env: + CM_NVIDIA_SCRATCH_SPACE_VERSION: 4_1-dev + group: version +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/customize.py new file mode 100644 index 000000000..90a733a7f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/customize.py @@ -0,0 +1,44 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_NVIDIA_MLPERF_SCRATCH_PATH', '') == '': + if env.get('MLPERF_SCRATCH_PATH', '') != '': + env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] = env['MLPERF_SCRATCH_PATH'] + else: + env['CM_NVIDIA_MLPERF_SCRATCH_PATH'] = os.getcwd() + + return {'return': 
0}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    env['MLPERF_SCRATCH_PATH'] = env['CM_NVIDIA_MLPERF_SCRATCH_PATH']
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_NVIDIA_MLPERF_SCRATCH_PATH']
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/run.bat
new file mode 100644
index 000000000..648302ca7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/run.bat
@@ -0,0 +1 @@
+rem native script
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/run.sh
new file mode 100644
index 000000000..eb5ce2456
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-nvidia-scratch-space/run.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+#To export any variable
+#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
+
+#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+
+
+
+function exit_if_error() {
+  r=$?; test ${r} -eq 0 || exit ${r}
+}
+
+function run() {
+  echo "Running: "
+  echo "$1"
+  echo ""
+  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+    eval "$1"
+    exit_if_error
+  fi
+}
+
+#Add your run commands here...
+# run "$CM_RUN_CMD"
+
+scratch_path=${CM_NVIDIA_MLPERF_SCRATCH_PATH}
+mkdir -p ${scratch_path}/data
+mkdir -p ${scratch_path}/preprocessed_data
+mkdir -p ${scratch_path}/models
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/README.md
new file mode 100644
index 000000000..bf0b86082
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-results-dir](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-results-dir) for the documentation of this CM script.
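Editor's note: the scratch-space script above is a typical CM "get" artifact: `preprocess()` resolves the directory (via the `scratch_path` input mapping, falling back to `MLPERF_SCRATCH_PATH` or the current cache directory), `run.sh` creates the `data`, `preprocessed_data` and `models` subfolders, and `postprocess()` exports the path for dependent scripts. Below is a minimal sketch (not part of the patch) of how a downstream workflow might call it through the CM Python API, assuming `cmind` is installed and this repository is registered with CM; the target path is a hypothetical example.

```python
# Minimal sketch: resolve the NVIDIA MLPerf scratch space via the CM Python API
# and read back the exported environment keys. Assumes `pip install cmind` and
# that this repository has been pulled; '/data/mlperf-scratch' is a placeholder.
import cmind

r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,mlperf,inference,nvidia,scratch,space,_version.4_1-dev',
                  'scratch_path': '/data/mlperf-scratch'})  # mapped to CM_NVIDIA_MLPERF_SCRATCH_PATH
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'CM script failed'))

# new_env is expected to expose the keys listed under new_env_keys in _cm.yaml
print(r.get('new_env', {}).get('MLPERF_SCRATCH_PATH'))
```

The same pattern should apply to the `get-mlperf-inference-results-dir` script that follows, with its `results_dir` input mapping instead of `scratch_path`.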
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/_cm.yaml
new file mode 100644
index 000000000..4aad78007
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/_cm.yaml
@@ -0,0 +1,45 @@
+alias: get-mlperf-inference-results-dir
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: MLPerf benchmark support
+deps: []
+docker:
+  run: false
+input_description: {}
+input_mapping:
+  results_dir: CM_MLPERF_INFERENCE_RESULTS_DIR
+new_env_keys:
+- CM_MLPERF_INFERENCE_RESULTS_DIR
+- CM_MLPERF_INFERENCE_RESULTS_VERSION
+new_state_keys: []
+post_deps: []
+posthook_deps: []
+prehook_deps: []
+tags:
+- get
+- mlperf
+- inference
+- local
+- results
+- dir
+- directory
+uid: 84f3c5aad5e1444b
+variations:
+  path.#:
+    env:
+      CM_MLPERF_INFERENCE_RESULTS_DIR: '#'
+  version.#:
+    env:
+      CM_MLPERF_INFERENCE_RESULTS_VERSION: '#'
+    group: version
+  version.4_1:
+    env:
+      CM_MLPERF_INFERENCE_RESULTS_VERSION: '4_1'
+    group: version
+  version.4_1-dev:
+    default: true
+    env:
+      CM_MLPERF_INFERENCE_RESULTS_VERSION: 4_1-dev
+    group: version
+versions: {}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/customize.py
new file mode 100644
index 000000000..978e89c5b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results-dir/customize.py
@@ -0,0 +1,40 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    meta = i['meta']
+
+    automation = i['automation']
+
+    quiet = (env.get('CM_QUIET', False) == 'yes')
+
+    if env.get('CM_MLPERF_INFERENCE_RESULTS_DIR', '') == '':
+        env['CM_MLPERF_INFERENCE_RESULTS_DIR'] = os.getcwd()
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_RESULTS_DIR']
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/README-extra.md
new file mode 100644
index 000000000..8ed3bed39
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/README-extra.md
@@ -0,0 +1,18 @@
+# Get MLCommons Inference Results
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones the [MLCommons Inference results repository](https://github.com/mlcommons/inference_results_v2.1).
+ +## Commands +To install +``` +cm run script --tags=get,mlperf,inference,results --version=[VERSION] +``` + +[VERSION] is one of +* `v2.1:` MLCommons inference 2.1 round results + +## Exported Variables +* `CM_MLPERF_INFERENCE_RESULTS_PATH`: Directory path to the inference results repository + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/README.md new file mode 100644 index 000000000..543d83380 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-results](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-results) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/_cm.yaml new file mode 100644 index 000000000..22ceaa92a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/_cm.yaml @@ -0,0 +1,83 @@ +alias: get-mlperf-inference-results +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +default_env: + CM_GIT_CHECKOUT: master + CM_GIT_DEPTH: --depth 1 + CM_GIT_PATCH: 'no' +default_version: v4.0 +deps: [] +new_env_keys: +- CM_MLPERF_INFERENCE_RESULTS_* +prehook_deps: +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_RESULTS_PATH + extra_cache_tags: mlperf,inference,results,official + force_env_keys: + - CM_GIT_* + names: + - inference-results-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _repo.: + - CM_GIT_URL +tags: +- get +- results +- inference +- official +- inference-results +- mlcommons +- mlperf +uid: 36bae5b25dbe41da +variations: + code-only: + adr: + inference-results-repo: + tags: _branch.cm-code-only + group: repo-branch + ctuning: + env: + GITHUB_REPO_OWNER: ctuning + group: source-repo + custom: + env: + GITHUB_REPO_OWNER: arjunsuresh + group: source-repo + go: + env: + GITHUB_REPO_OWNER: GATEOverflow + group: source-repo + mlcommons: + default: true + env: + GITHUB_REPO_OWNER: mlcommons + group: source-repo + nvidia-only: + env: + GITHUB_REPO_OWNER: GATEOverflow + NVIDIA_ONLY: 'yes' + group: source-repo +versions: + v2.1: + env: + CM_GIT_URL: https://github.com/<<>>/inference_results_v2.1.git + CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v2.1 + v3.0: + env: + CM_GIT_URL: https://github.com/<<>>/inference_results_v3.0.git + CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v3.0 + v3.1: + env: + CM_GIT_URL: https://github.com/<<>>/inference_results_v3.1.git + CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v3.1 + v4.0: + env: + CM_GIT_URL: https://github.com/<<>>/inference_results_v4.0.git + CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v4.0 + v4.1: + env: + CM_GIT_URL: https://github.com/<<>>/inference_results_v4.1.git + CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v4.1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/customize.py new file mode 100644 index 000000000..093e95040 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-results/customize.py @@ -0,0 +1,59 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: 
https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + meta = i['meta'] + + if env.get('NVIDIA_ONLY', '') == 'yes': + env['CM_GIT_URL'] = "https://github.com/GATEOverflow/nvidia-inference-code.git" + + if 'GITHUB_REPO_OWNER' in env and '<<>>' in env['CM_GIT_URL']: + env['CM_GIT_URL'] = env['CM_GIT_URL'].replace( + '<<>>', env['GITHUB_REPO_OWNER']) + + if 'CM_GIT_DEPTH' not in env: + env['CM_GIT_DEPTH'] = '' + + if 'CM_GIT_RECURSE_SUBMODULES' not in env: + env['CM_GIT_RECURSE_SUBMODULES'] = '' + + need_version = env.get('CM_VERSION', '') + versions = meta['versions'] + + if need_version != '' and not need_version in versions: + env['CM_GIT_CHECKOUT'] = need_version + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + if env.get('CM_GIT_REPO_CURRENT_HASH', '') != '': + env['CM_VERSION'] += "-git-" + env['CM_GIT_REPO_CURRENT_HASH'] + +# env['CM_MLPERF_INFERENCE_RESULTS_PATH'] = os.path.join(os.getcwd(), "inference_results_"+env['CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME']) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/README-extra.md new file mode 100644 index 000000000..a96611831 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/README-extra.md @@ -0,0 +1,29 @@ +# Get MLCommons Inference Source +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones the [MLCommons Inference repository](https://github.com/mlcommons/inference). + +## Commands +To install +``` +cm run script --tags=get,mlperf,inference,src,[VARIATION] --version=[VERSION] +``` +where [VARIATION] is one of +* `default:` Works with the official MLCommons inference repository. Uses `short-history` variation +* `patch:` Applies the `git.patch` to the cloned git repository +* `octoml:` Works with the OctoML fork of the MLCommons inference repository. 
Uses the `short-history` variation
+* `short-history:` Uses a git depth of the last 10 commits (significantly reduces the download size)
+* `full-history:` Uses the full git history
+* `no-recurse-submodules:` Only downloads the main repository
+
+[VERSION] is one of
+* `master:` Uses the master branch
+* `r2.1:` Uses the release branch used for the MLCommons inference 2.1 round
+
+## Exported Variables
+* `CM_MLPERF_INFERENCE_SOURCE`: Directory path of the cloned inference repository
+* `CM_MLPERF_INFERENCE_VISION_PATH`: Directory path to the vision folder inside the inference repository
+* `PYTHONPATH`: Appended with the paths to the vision module and the submission tools module
+* `CM_MLPERF_INFERENCE_MODELS`: This `state` variable contains the configuration of the MLPerf models for the selected version
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. RHEL 9
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/README.md
new file mode 100644
index 000000000..afdac3a83
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-src](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-src) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/_cm.yaml
new file mode 100644
index 000000000..e19e65378
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/_cm.yaml
@@ -0,0 +1,173 @@
+alias: get-mlperf-inference-src
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: MLPerf benchmark support
+default_env:
+  CM_GIT_CHECKOUT_FOLDER: inference
+  CM_GIT_DEPTH: --depth 4
+  CM_GIT_PATCH: 'no'
+  CM_GIT_RECURSE_SUBMODULES: ''
+default_version: master
+deps:
+- tags: detect,os
+- names:
+  - python
+  - python3
+  tags: get,python3
+new_env_keys:
+- CM_MLPERF_INFERENCE_3DUNET_PATH
+- CM_MLPERF_INFERENCE_BERT_PATH
+- CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH
+- CM_MLPERF_INFERENCE_CONF_PATH
+- CM_MLPERF_INFERENCE_DLRM_PATH
+- CM_MLPERF_INFERENCE_DLRM_V2_PATH
+- CM_MLPERF_INFERENCE_GPTJ_PATH
+- CM_MLPERF_INFERENCE_RNNT_PATH
+- CM_MLPERF_INFERENCE_SOURCE
+- CM_MLPERF_INFERENCE_SOURCE_VERSION
+- CM_MLPERF_INFERENCE_VERSION
+- CM_MLPERF_INFERENCE_VISION_PATH
+- CM_MLPERF_LAST_RELEASE
+- +PYTHONPATH
+prehook_deps:
+- env:
+    CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_INFERENCE_SOURCE
+  extra_cache_tags: inference,src
+  force_env_keys:
+  - CM_GIT_*
+  names:
+  - inference-git-repo
+  tags: get,git,repo
+  update_tags_from_env_with_prefix:
+    _branch.:
+    - CM_GIT_CHECKOUT
+    _repo.:
+    - CM_GIT_URL
+    _sha.:
+    - CM_GIT_SHA
+    _submodules.:
+    - CM_GIT_SUBMODULES
+print_env_at_the_end_disabled:
+  CM_MLPERF_INFERENCE_CONF_PATH: Path to the MLPerf inference benchmark configuration
+    file
+  CM_MLPERF_INFERENCE_SOURCE: Path to MLPerf inference benchmark sources
+tags:
+- get
+- src
+- source
+- inference
+- inference-src
+- inference-source
+- mlperf
+- mlcommons
+uid: 4b57186581024797
+variations:
+  3d-unet:
+    env:
+      CM_SUBMODULE_3D_UNET: 'yes'
+  branch.#:
+    default_version: custom
+    env:
+      CM_GIT_CHECKOUT: '#'
+    group: checkout
+  deeplearningexamples:
+    env:
+      CM_SUBMODULE_DEEPLEARNINGEXAMPLES: 'yes'
+  deepsparse:
+    base:
+    - _branch.deepsparse
+    - 
_repo.https://github.com/neuralmagic/inference + full-history: + env: + CM_GIT_DEPTH: '' + group: git-history + gn: + env: + CM_SUBMODULE_GN: 'yes' + no-recurse-submodules: + env: + CM_GIT_RECURSE_SUBMODULES: '' + nvidia-pycocotools: + base: + - patch + env: + CM_GIT_PATCH_FILENAME: coco.patch + octoml: + base: + - short-history + - _repo.https://github.com/octoml/inference + env: + CM_GIT_URL: https://github.com/octoml/inference + openimages-nvidia-pycocotools: + base: + - patch + env: + CM_GIT_PATCH_FILENAME: openimages-pycocotools.patch + patch: + ad: + inference-git-repo: + tags: _patch + env: + CM_GIT_PATCH: 'yes' + pybind: + env: + CM_SUBMODULE_PYBIND: 'yes' + recurse-submodules: + env: + CM_GIT_RECURSE_SUBMODULES: ' --recurse-submodules' + repo.#: + env: + CM_GIT_URL: '#' + sha.#: + env: + CM_GIT_SHA: '#' + group: checkout + short-history: + default: true + env: + CM_GIT_DEPTH: --depth 10 + group: git-history + submodules.#: + env: + CM_GIT_SUBMODULES: '#' +versions: + custom: + env: + CM_MLPERF_LAST_RELEASE: v4.1 + deepsparse: + env: + CM_MLPERF_LAST_RELEASE: v4.1 + CM_TMP_GIT_CHECKOUT: deepsparse + CM_TMP_GIT_URL: https://github.com/neuralmagic/inference + main: + env: + CM_MLPERF_LAST_RELEASE: v4.1 + CM_TMP_GIT_CHECKOUT: main + master: + env: + CM_MLPERF_LAST_RELEASE: v4.1 + CM_TMP_GIT_CHECKOUT: master + r2.1: + env: + CM_MLPERF_LAST_RELEASE: v2.1 + CM_TMP_GIT_CHECKOUT: v2.1 + r3.0: + adr: + inference-git-repo: + tags: _tag.v3.0 + env: + CM_MLPERF_LAST_RELEASE: v3.0 + CM_TMP_GIT_CHECKOUT: '' + r3.1: + adr: + inference-git-repo: + tags: _tag.v3.1 + env: + CM_MLPERF_LAST_RELEASE: v3.1 + CM_TMP_GIT_CHECKOUT: '' + tvm: + env: + CM_MLPERF_LAST_RELEASE: v3.1 + CM_TMP_GIT_CHECKOUT: tvm + CM_TMP_GIT_URL: https://github.com/mlcommons/inference diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/customize.py new file mode 100644 index 000000000..0f6f10e2b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/customize.py @@ -0,0 +1,183 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + +# if os_info['platform'] == 'windows': +# return {'return':1, 'error': 'Windows is not supported in this script +# yet'} + + env = i['env'] + meta = i['meta'] + + script_path = i['run_script_input']['path'] + + if env.get('CM_GIT_CHECKOUT', '') == '' and env.get( + 'CM_GIT_URL', '') == '' and env.get('CM_VERSION', '') == '': + # if custom checkout and url parameters are not set and CM_VERSION is + # not specified + env['CM_VERSION'] = "master" + env["CM_GIT_CHECKOUT"] = "master" + env["CM_GIT_URL"] = "https://github.com/mlcommons/inference" + elif env.get('CM_GIT_CHECKOUT', '') != '' and env.get('CM_TMP_GIT_CHECKOUT', '') != '' and env.get('CM_GIT_CHECKOUT', '') != env.get('CM_TMP_GIT_CHECKOUT', ''): + # if checkout branch is assigned inside version and custom branch is + # also specified + return { + "return": 1, "error": "Conflicting branches between version assigned and user 
specified."}
+    elif env.get('CM_GIT_URL', '') != '' and env.get('CM_TMP_GIT_URL', '') != '' and env.get('CM_GIT_URL', '') != env.get('CM_TMP_GIT_URL', ''):
+        # if the git URL is assigned inside the version and a custom URL is
+        # also specified
+        return {
+            "return": 1, "error": "Conflicting URLs between version assigned and user specified."}
+
+    if env.get('CM_VERSION', '') == '':
+        env['CM_VERSION'] = "custom"
+
+    # check whether the branch and URL are specified;
+    # if not, try to assign the values specified in the version parameters;
+    # if the version parameters do not provide a value, set the
+    # default one
+    if env.get('CM_GIT_CHECKOUT', '') == '':
+        if env.get('CM_TMP_GIT_CHECKOUT', '') != '':
+            env["CM_GIT_CHECKOUT"] = env["CM_TMP_GIT_CHECKOUT"]
+        else:
+            env["CM_GIT_CHECKOUT"] = "master"
+
+    if env.get('CM_GIT_URL', '') == '':
+        if env.get('CM_TMP_GIT_URL', '') != '':
+            env["CM_GIT_URL"] = env["CM_TMP_GIT_URL"]
+        else:
+            env["CM_GIT_URL"] = "https://github.com/mlcommons/inference"
+
+    if env.get("CM_MLPERF_LAST_RELEASE", '') == '':
+        env["CM_MLPERF_LAST_RELEASE"] = "v4.1"
+
+    if 'CM_GIT_DEPTH' not in env:
+        env['CM_GIT_DEPTH'] = ''
+
+    if 'CM_GIT_RECURSE_SUBMODULES' not in env:
+        env['CM_GIT_RECURSE_SUBMODULES'] = ''
+    submodules = []
+    possible_submodules = {
+        "gn": "third_party/gn",
+        "pybind": "third_party/pybind",
+        "deeplearningexamples": "language/bert/DeepLearningExamples",
+        "3d-unet": "vision/medical_imaging/3d-unet-brats19/nnUnet"
+    }
+    for submodule in possible_submodules:
+        env_name = submodule.upper().replace("-", "_")
+        if env.get("CM_SUBMODULE_" + env_name) == "yes":
+            submodules.append(possible_submodules[submodule])
+
+    env['CM_GIT_SUBMODULES'] = ",".join(submodules)
+
+    if env.get('CM_GIT_PATCH_FILENAME', '') != '':
+        patch_file_name = env['CM_GIT_PATCH_FILENAME']
+        env['CM_GIT_PATCH_FILEPATHS'] = os.path.join(
+            script_path, 'patch', patch_file_name)
+
+    need_version = env.get('CM_VERSION', '')
+    versions = meta['versions']
+
+    if need_version != '' and need_version not in versions:
+        env['CM_GIT_CHECKOUT'] = need_version
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+    state = i['state']
+
+    inference_root = env['CM_MLPERF_INFERENCE_SOURCE']
+    env['CM_MLPERF_INFERENCE_VISION_PATH'] = os.path.join(
+        inference_root, 'vision')
+    env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'] = os.path.join(
+        inference_root, 'vision', 'classification_and_detection')
+    env['CM_MLPERF_INFERENCE_BERT_PATH'] = os.path.join(
+        inference_root, 'language', 'bert')
+    env['CM_MLPERF_INFERENCE_GPTJ_PATH'] = os.path.join(
+        inference_root, 'language', 'gpt-j')
+    env['CM_MLPERF_INFERENCE_RNNT_PATH'] = os.path.join(
+        inference_root, 'speech_recognition', 'rnnt')
+    env['CM_MLPERF_INFERENCE_DLRM_PATH'] = os.path.join(
+        inference_root, 'recommendation', 'dlrm')
+    env['CM_MLPERF_INFERENCE_DLRM_V2_PATH'] = os.path.join(
+        inference_root, 'recommendation', 'dlrm_v2')
+    env['CM_MLPERF_INFERENCE_3DUNET_PATH'] = os.path.join(
+        inference_root, 'vision', 'medical_imaging', '3d-unet-kits19')
+
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = inference_root
+
+# 20221024: we save and restore env in the main script and can clean env here for determinism
+#    if '+PYTHONPATH' not in env: env['+PYTHONPATH'] = []
+    env['+PYTHONPATH'] = []
+    env['+PYTHONPATH'].append(
+        os.path.join(
+            env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'],
+            'python'))
+
+    if os.path.exists(os.path.join(inference_root, "loadgen", "VERSION.txt")):
+        with open(os.path.join(inference_root, 
"loadgen", "VERSION.txt")) as f: + version_info = f.read().strip() + env['CM_MLPERF_INFERENCE_SOURCE_VERSION'] = version_info + + if env.get('CM_GET_MLPERF_IMPLEMENTATION_ONLY', '') == "yes": + return {'return': 0} + + env['CM_MLPERF_INFERENCE_CONF_PATH'] = os.path.join( + inference_root, 'mlperf.conf') + env['+PYTHONPATH'].append( + os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + 'tools', + 'submission')) + + valid_models = get_valid_models( + env['CM_MLPERF_LAST_RELEASE'], + env['CM_MLPERF_INFERENCE_SOURCE']) + + state['CM_MLPERF_INFERENCE_MODELS'] = valid_models + + if env.get('CM_GIT_REPO_CURRENT_HASH', '') != '': + env['CM_VERSION'] += "-git-" + env['CM_GIT_REPO_CURRENT_HASH'] + + return {'return': 0, 'version': env['CM_VERSION']} + + +def get_valid_models(mlperf_version, mlperf_path): + + import sys + + submission_checker_dir = os.path.join(mlperf_path, "tools", "submission") + + sys.path.append(submission_checker_dir) + + if not os.path.exists(os.path.join( + submission_checker_dir, "submission_checker.py")): + shutil.copy(os.path.join(submission_checker_dir, "submission-checker.py"), os.path.join(submission_checker_dir, + "submission_checker.py")) + + import submission_checker as checker + + config = checker.MODEL_CONFIG + + valid_models = config[mlperf_version]["models"] + + return valid_models diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/coco.patch b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/coco.patch new file mode 100644 index 000000000..191155206 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/coco.patch @@ -0,0 +1,24 @@ +diff --git a/vision/classification_and_detection/tools/accuracy-openimages.py b/vision/classification_and_detection/tools/accuracy-openimages.py +index 0192dde..7fb0dd6 100644 +--- a/vision/classification_and_detection/tools/accuracy-openimages.py ++++ b/vision/classification_and_detection/tools/accuracy-openimages.py +@@ -34,7 +34,7 @@ def get_args(): + def main(): + args = get_args() + +- cocoGt = COCO(os.path.join(args.openimages_dir, "annotations/openimages-mlperf.json")) ++ cocoGt = COCO(os.path.join(args.openimages_dir, "annotations/openimages-mlperf.json"), use_ext=True) + + if args.use_inv_map: + inv_map = [0] + cocoGt.getCatIds() # First label in inv_map is not used +@@ -98,8 +98,8 @@ def main(): + with open(args.output_file, "w") as fp: + json.dump(detections, fp, sort_keys=True, indent=4) + +- cocoDt = cocoGt.loadRes(args.output_file) # Load from file to bypass error with Python3 +- cocoEval = COCOeval(cocoGt, cocoDt, iouType='bbox') ++ cocoDt = cocoGt.loadRes(args.output_file,use_ext=True) # Load from file to bypass error with Python3 ++ cocoEval = COCOeval(cocoGt, cocoDt, iouType='bbox',use_ext=True) + cocoEval.params.imgIds = list(image_ids) + cocoEval.evaluate() + cocoEval.accumulate() diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/git.patch b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/git.patch new file mode 100644 index 000000000..b822563c9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/git.patch @@ -0,0 +1,1925 @@ +diff --git a/tools/submission/submission_checker.py b/tools/submission/submission_checker.py +new file mode 100755 +index 0000000..d28fb03 +--- /dev/null ++++ b/tools/submission/submission_checker.py +@@ -0,0 +1,1906 @@ ++""" ++A checker for mlperf inference submissions ++""" ++ ++from __future__ import division ++from __future__ import print_function 
++from __future__ import unicode_literals ++ ++import argparse ++import datetime ++import json ++import logging ++import os ++import re ++import sys ++ ++from log_parser import MLPerfLog ++ ++# pylint: disable=missing-docstring ++ ++ ++logging.basicConfig(level=logging.INFO) ++log = logging.getLogger("main") ++ ++submission_checker_dir = os.path.dirname(os.path.realpath(__file__)) ++ ++MODEL_CONFIG = { ++ "v0.5": { ++ "models": ["ssd-small", "ssd-large", "mobilenet", "resnet", "gnmt"], ++ "required-scenarios-datacenter": { ++ # anything goes ++ }, ++ "optional-scenarios-datacenter": { ++ # anything goes ++ }, ++ "required-scenarios-edge": { ++ # anything goes ++ }, ++ "optional-scenarios-edge": { ++ # anything goes ++ }, ++ "accuracy-target": { ++ "mobilenet": ("acc", 71.68 * 0.98), ++ "resnet": ("acc", 76.46 * 0.99), ++ "ssd-small": ("mAP", 22 * 0.99), ++ "ssd-large": ("mAP", 20 * 0.99), ++ "gnmt": ("bleu", 23.9 * 0.99), ++ }, ++ "performance-sample-count": { ++ "mobilenet": 1024, ++ "resnet": 1024, ++ "ssd-small": 256, ++ "ssd-large": 64, ++ "gnmt": 3903900, ++ }, ++ "model_mapping": { ++ # map model names to the official mlperf model class ++ "ssd-mobilenet": "ssd-small", ++ "ssd-resnet34": "ssd-large", ++ "resnet50": "resnet", ++ }, ++ "seeds": { ++ "qsl_rng_seed": 3133965575612453542, ++ "sample_index_rng_seed": 665484352860916858, ++ "schedule_rng_seed": 3622009729038561421, ++ }, ++ "test05_seeds": { ++ "qsl_rng_seed" : 195, ++ "sample_index_rng_seed" : 235, ++ "schedule_rng_seed" : 634, ++ }, ++ "ignore_errors": [ ++ "check for ERROR in detailed", ++ "Loadgen built with uncommitted changes", ++ "Ran out of generated queries to issue before the minimum query count and test duration were reached", ++ "CAS failed", ++ ], ++ }, ++ "v0.7": { ++ "models": [ ++ "ssd-small", "ssd-large", "resnet", "rnnt", ++ "bert-99", "bert-99.9", ++ "dlrm-99", "dlrm-99.9", ++ "3d-unet-99", "3d-unet-99.9", ++ ], ++ "required-scenarios-datacenter": { ++ "resnet": ["Offline"], ++ "ssd-large": ["Offline"], ++ "rnnt": ["Offline"], ++ "bert-99": ["Offline"], ++ "bert-99.9": ["Offline"], ++ "dlrm-99": ["Offline"], ++ "dlrm-99.9": ["Offline"], ++ "3d-unet-99": ["Offline"], ++ "3d-unet-99.9": ["Offline"], ++ }, ++ "optional-scenarios-datacenter": { ++ "resnet": ["Server"], ++ "ssd-large": ["Server"], ++ "rnnt": ["Server"], ++ "bert-99": ["Server"], ++ "bert-99.9": ["Server"], ++ "dlrm-99": ["Server"], ++ "dlrm-99.9": ["Server"], ++ }, ++ "required-scenarios-edge": { ++ "resnet": ["SingleStream", "Offline"], ++ "ssd-small": ["SingleStream", "Offline"], ++ "ssd-large": ["SingleStream", "Offline"], ++ "rnnt": ["SingleStream", "Offline"], ++ "bert-99": ["SingleStream", "Offline"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-edge": { ++ "resnet": ["MultiStream"], ++ "ssd-small": ["MultiStream"], ++ "ssd-large": ["MultiStream"], ++ }, ++ "accuracy-target": { ++ "resnet": ("acc", 76.46 * 0.99), ++ "ssd-small": ("mAP", 22 * 0.99), ++ "ssd-large": ("mAP", 20 * 0.99), ++ "rnnt": ("WER", (100 - 7.452) * 0.99), ++ "bert-99": ("F1", 90.874 * 0.99), ++ "bert-99.9": ("F1", 90.874 * 0.999), ++ "dlrm-99": ("AUC", 80.25 * 0.99), ++ "dlrm-99.9": ("AUC", 80.25 * 0.999), ++ "3d-unet-99": ("DICE", 0.853 * 0.99), ++ "3d-unet-99.9": ("DICE", 0.853 * 0.999), ++ }, ++ "performance-sample-count": { ++ "ssd-small": 256, ++ "ssd-large": 64, ++ "resnet": 1024, ++ "rnnt": 2513, ++ "bert-99": 10833, ++ "bert-99.9": 10833, ++ "dlrm-99": 204800, ++ "dlrm-99.9": 204800, 
++ "3d-unet-99": 16, ++ "3d-unet-99.9": 16, ++ }, ++ "model_mapping": { ++ # map model names to the official mlperf model class ++ "ssd-mobilenet": "ssd-small", ++ "ssd-resnet34": "ssd-large", ++ "mobilenet": "resnet", ++ "resnet50": "resnet", ++ }, ++ "seeds": { ++ "qsl_rng_seed": 12786827339337101903, ++ "sample_index_rng_seed": 12640797754436136668, ++ "schedule_rng_seed": 3135815929913719677, ++ }, ++ "test05_seeds": { ++ "qsl_rng_seed" : 313588358309856706, ++ "sample_index_rng_seed" : 471397156132239067, ++ "schedule_rng_seed" : 413914573387865862, ++ }, ++ "ignore_errors": [ ++ "CAS failed", ++ ], ++ "latency-constraint": { ++ "resnet": {"Server": 15000000, "MultiStream": 50000000}, ++ "ssd-small": {"MultiStream": 50000000}, ++ "ssd-large": {"Server": 100000000, "MultiStream": 66000000}, ++ "rnnt": {"Server": 1000000000}, ++ "bert-99": {"Server": 130000000}, ++ "bert-99.9": {"Server": 130000000}, ++ "dlrm-99": {"Server": 30000000}, ++ "dlrm-99.9": {"Server": 30000000}, ++ }, ++ "min-queries": { ++ "resnet": {"SingleStream": 1024, "Server": 270336, "MultiStream": 270336, "Offline": 1}, ++ "ssd-small": {"SingleStream": 1024, "MultiStream": 270336, "Offline": 1}, ++ "ssd-large": {"SingleStream": 1024, "Server": 270336, "MultiStream": 270336, "Offline": 1}, ++ "rnnt": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99.9": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "dlrm-99": {"Server": 270336, "Offline": 1}, ++ "dlrm-99.9": {"Server": 270336, "Offline": 1}, ++ "3d-unet-99": {"SingleStream": 1024, "Offline": 1}, ++ "3d-unet-99.9": {"SingleStream": 1024, "Offline": 1}, ++ }, ++ }, ++ "v1.0": { ++ "models": [ ++ "ssd-small", "ssd-large", "resnet", "rnnt", ++ "bert-99", "bert-99.9", ++ "dlrm-99", "dlrm-99.9", ++ "3d-unet-99", "3d-unet-99.9", ++ ], ++ "required-scenarios-datacenter": { ++ "resnet": ["Offline"], ++ "ssd-large": ["Offline"], ++ "rnnt": ["Offline"], ++ "bert-99": ["Offline"], ++ "bert-99.9": ["Offline"], ++ "dlrm-99": ["Offline"], ++ "dlrm-99.9": ["Offline"], ++ "3d-unet-99": ["Offline"], ++ "3d-unet-99.9": ["Offline"], ++ }, ++ "optional-scenarios-datacenter": { ++ "resnet": ["Server"], ++ "ssd-large": ["Server"], ++ "rnnt": ["Server"], ++ "bert-99": ["Server"], ++ "bert-99.9": ["Server"], ++ "dlrm-99": ["Server"], ++ "dlrm-99.9": ["Server"], ++ }, ++ "required-scenarios-edge": { ++ "resnet": ["SingleStream", "Offline"], ++ "ssd-small": ["SingleStream", "Offline"], ++ "ssd-large": ["SingleStream", "Offline"], ++ "rnnt": ["SingleStream", "Offline"], ++ "bert-99": ["SingleStream", "Offline"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-edge": { ++ "resnet": ["MultiStream"], ++ "ssd-small": ["MultiStream"], ++ "ssd-large": ["MultiStream"], ++ }, ++ "required-scenarios-datacenter-edge": { ++ "resnet": ["SingleStream", "Offline"], ++ "ssd-small": ["SingleStream", "Offline"], ++ "ssd-large": ["SingleStream", "Offline"], ++ "rnnt": ["SingleStream", "Offline"], ++ "bert-99": ["SingleStream", "Offline"], ++ "bert-99.9": ["Offline"], ++ "dlrm-99": ["Offline"], ++ "dlrm-99.9": ["Offline"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-datacenter-edge": { ++ "resnet": ["MultiStream", "Server"], ++ "ssd-small": ["MultiStream"], ++ "ssd-large": ["MultiStream", "Server"], ++ "rnnt": ["Server"], ++ "bert-99": ["Server"], ++ "bert-99.9": 
["Server"], ++ "dlrm-99": ["Server"], ++ "dlrm-99.9": ["Server"], ++ }, ++ "accuracy-target": { ++ "resnet": ("acc", 76.46 * 0.99), ++ "ssd-small": ("mAP", 22 * 0.99), ++ "ssd-large": ("mAP", 20 * 0.99), ++ "rnnt": ("WER", (100 - 7.452) * 0.99), ++ "bert-99": ("F1", 90.874 * 0.99), ++ "bert-99.9": ("F1", 90.874 * 0.999), ++ "dlrm-99": ("AUC", 80.25 * 0.99), ++ "dlrm-99.9": ("AUC", 80.25 * 0.999), ++ "3d-unet-99": ("DICE", 0.853 * 0.99), ++ "3d-unet-99.9": ("DICE", 0.853 * 0.999), ++ }, ++ "performance-sample-count": { ++ "ssd-small": 256, ++ "ssd-large": 64, ++ "resnet": 1024, ++ "rnnt": 2513, ++ "bert-99": 10833, ++ "bert-99.9": 10833, ++ "dlrm-99": 204800, ++ "dlrm-99.9": 204800, ++ "3d-unet-99": 16, ++ "3d-unet-99.9": 16, ++ }, ++ "model_mapping": { ++ # map model names to the official mlperf model class ++ "ssd-mobilenet": "ssd-small", ++ "ssd-resnet34": "ssd-large", ++ "mobilenet": "resnet", ++ "resnet50": "resnet", ++ }, ++ "seeds": { ++ "qsl_rng_seed": 7322528924094909334, ++ "sample_index_rng_seed": 1570999273408051088, ++ "schedule_rng_seed": 3507442325620259414, ++ }, ++ "test05_seeds": { ++ "qsl_rng_seed" : 313588358309856706, ++ "sample_index_rng_seed" : 471397156132239067, ++ "schedule_rng_seed" : 413914573387865862, ++ }, ++ "ignore_errors": [ ++ ], ++ "latency-constraint": { ++ "resnet": {"Server": 15000000, "MultiStream": 50000000}, ++ "ssd-small": {"MultiStream": 50000000}, ++ "ssd-large": {"Server": 100000000, "MultiStream": 66000000}, ++ "rnnt": {"Server": 1000000000}, ++ "bert-99": {"Server": 130000000}, ++ "bert-99.9": {"Server": 130000000}, ++ "dlrm-99": {"Server": 30000000}, ++ "dlrm-99.9": {"Server": 30000000}, ++ }, ++ "min-queries": { ++ "resnet": {"SingleStream": 1024, "Server": 270336, "MultiStream": 270336, "Offline": 1}, ++ "ssd-small": {"SingleStream": 1024, "MultiStream": 270336, "Offline": 1}, ++ "ssd-large": {"SingleStream": 1024, "Server": 270336, "MultiStream": 270336, "Offline": 1}, ++ "rnnt": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99.9": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "dlrm-99": {"Server": 270336, "Offline": 1}, ++ "dlrm-99.9": {"Server": 270336, "Offline": 1}, ++ "3d-unet-99": {"SingleStream": 1024, "Offline": 1}, ++ "3d-unet-99.9": {"SingleStream": 1024, "Offline": 1}, ++ }, ++ }, ++ "v1.1": { ++ "models": [ ++ "ssd-small", "ssd-large", "resnet", "rnnt", ++ "bert-99", "bert-99.9", ++ "dlrm-99", "dlrm-99.9", ++ "3d-unet-99", "3d-unet-99.9", ++ ], ++ "required-scenarios-datacenter": { ++ "resnet": ["Offline"], ++ "ssd-large": ["Offline"], ++ "rnnt": ["Offline"], ++ "bert-99": ["Offline"], ++ "bert-99.9": ["Offline"], ++ "dlrm-99": ["Offline"], ++ "dlrm-99.9": ["Offline"], ++ "3d-unet-99": ["Offline"], ++ "3d-unet-99.9": ["Offline"], ++ }, ++ "optional-scenarios-datacenter": { ++ "resnet": ["Server"], ++ "ssd-large": ["Server"], ++ "rnnt": ["Server"], ++ "bert-99": ["Server"], ++ "bert-99.9": ["Server"], ++ "dlrm-99": ["Server"], ++ "dlrm-99.9": ["Server"], ++ }, ++ "required-scenarios-edge": { ++ "resnet": ["SingleStream", "Offline"], ++ "ssd-small": ["SingleStream", "Offline"], ++ "ssd-large": ["SingleStream", "Offline"], ++ "rnnt": ["SingleStream", "Offline"], ++ "bert-99": ["SingleStream", "Offline"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "required-scenarios-datacenter-edge": { ++ "resnet": ["SingleStream", "Offline"], ++ "ssd-small": ["SingleStream", "Offline"], 
++ "ssd-large": ["SingleStream", "Offline"], ++ "rnnt": ["SingleStream", "Offline"], ++ "bert-99": ["SingleStream", "Offline"], ++ "bert-99.9": ["Offline"], ++ "dlrm-99": ["Offline"], ++ "dlrm-99.9": ["Offline"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-datacenter-edge": { ++ "resnet": ["Server"], ++ "ssd-large": ["Server"], ++ "rnnt": ["Server"], ++ "bert-99": ["Server"], ++ "bert-99.9": ["Server"], ++ "dlrm-99": ["Server"], ++ "dlrm-99.9": ["Server"], ++ }, ++ "accuracy-target": { ++ "resnet": ("acc", 76.46 * 0.99), ++ "ssd-small": ("mAP", 22 * 0.99), ++ "ssd-large": ("mAP", 20 * 0.99), ++ "rnnt": ("WER", (100 - 7.452) * 0.99), ++ "bert-99": ("F1", 90.874 * 0.99), ++ "bert-99.9": ("F1", 90.874 * 0.999), ++ "dlrm-99": ("AUC", 80.25 * 0.99), ++ "dlrm-99.9": ("AUC", 80.25 * 0.999), ++ "3d-unet-99": ("DICE", 0.853 * 0.99), ++ "3d-unet-99.9": ("DICE", 0.853 * 0.999), ++ }, ++ "performance-sample-count": { ++ "ssd-small": 256, ++ "ssd-large": 64, ++ "resnet": 1024, ++ "rnnt": 2513, ++ "bert-99": 10833, ++ "bert-99.9": 10833, ++ "dlrm-99": 204800, ++ "dlrm-99.9": 204800, ++ "3d-unet-99": 16, ++ "3d-unet-99.9": 16, ++ }, ++ "model_mapping": { ++ # map model names to the official mlperf model class ++ "ssd-mobilenet": "ssd-small", ++ "ssd-resnet34": "ssd-large", ++ "mobilenet": "resnet", ++ "resnet50": "resnet", ++ }, ++ "seeds": { ++ "qsl_rng_seed": 1624344308455410291, ++ "sample_index_rng_seed": 517984244576520566, ++ "schedule_rng_seed": 10051496985653635065, ++ }, ++ "test05_seeds": { ++ "qsl_rng_seed" : 313588358309856706, ++ "sample_index_rng_seed" : 471397156132239067, ++ "schedule_rng_seed" : 413914573387865862, ++ }, ++ "ignore_errors": [ ++ ], ++ "latency-constraint": { ++ "resnet": {"Server": 15000000, "MultiStream": 50000000}, ++ "ssd-small": {"MultiStream": 50000000}, ++ "ssd-large": {"Server": 100000000, "MultiStream": 66000000}, ++ "rnnt": {"Server": 1000000000}, ++ "bert-99": {"Server": 130000000}, ++ "bert-99.9": {"Server": 130000000}, ++ "dlrm-99": {"Server": 30000000}, ++ "dlrm-99.9": {"Server": 30000000}, ++ }, ++ "min-queries": { ++ "resnet": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "ssd-small": {"SingleStream": 1024, "Offline": 1}, ++ "ssd-large": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "rnnt": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99.9": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "dlrm-99": {"Server": 270336, "Offline": 1}, ++ "dlrm-99.9": {"Server": 270336, "Offline": 1}, ++ "3d-unet-99": {"SingleStream": 1024, "Offline": 1}, ++ "3d-unet-99.9": {"SingleStream": 1024, "Offline": 1}, ++ }, ++ }, ++ "v2.0": { ++ "models": [ ++ "ssd-small", "ssd-large", "resnet", "rnnt", ++ "bert-99", "bert-99.9", ++ "dlrm-99", "dlrm-99.9", ++ "3d-unet-99", "3d-unet-99.9", ++ ], ++ # FIXME: required/optional scenarios for v2.0 needs to be filled up correctly; below lists are temporary ++ "required-scenarios-datacenter": { ++ "resnet": ["Server", "Offline"], ++ "ssd-large": ["Server", "Offline"], ++ "rnnt": ["Server", "Offline"], ++ "bert-99": ["Server", "Offline"], ++ "bert-99.9": ["Server", "Offline"], ++ "dlrm-99": ["Server", "Offline"], ++ "dlrm-99.9": ["Server", "Offline"], ++ "3d-unet-99": ["Offline"], ++ "3d-unet-99.9": ["Offline"], ++ }, ++ "optional-scenarios-datacenter": { ++ }, ++ "required-scenarios-edge": { ++ "resnet": ["SingleStream", "MultiStream", 
"Offline"], ++ "ssd-small": ["SingleStream", "MultiStream", "Offline"], ++ "ssd-large": ["SingleStream", "MultiStream", "Offline"], ++ "rnnt": ["SingleStream", "Offline"], ++ "bert-99": ["SingleStream", "Offline"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-edge": { ++ }, ++ "required-scenarios-datacenter-edge": { ++ "resnet": ["SingleStream", "Offline", "MultiStream", "Server"], ++ "ssd-small": ["SingleStream", "Offline", "MultiStream"], ++ "ssd-large": ["SingleStream", "Offline", "MultiStream", "Server"], ++ "rnnt": ["SingleStream", "Offline", "Server"], ++ "bert-99": ["SingleStream", "Offline", "Server"], ++ "bert-99.9": ["Offline", "Server"], ++ "dlrm-99": ["Offline", "Server"], ++ "dlrm-99.9": ["Offline", "Server"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-datacenter-edge": { ++ }, ++ "accuracy-target": { ++ "resnet": ("acc", 76.46 * 0.99), ++ "ssd-small": ("mAP", 22 * 0.99), ++ "ssd-large": ("mAP", 20 * 0.99), ++ "rnnt": ("WER", (100 - 7.452) * 0.99), ++ "bert-99": ("F1", 90.874 * 0.99), ++ "bert-99.9": ("F1", 90.874 * 0.999), ++ "dlrm-99": ("AUC", 80.25 * 0.99), ++ "dlrm-99.9": ("AUC", 80.25 * 0.999), ++ "3d-unet-99": ("DICE", 0.86331 * 0.99), ++ "3d-unet-99.9": ("DICE", 0.86331 * 0.999), ++ }, ++ "performance-sample-count": { ++ "ssd-small": 256, ++ "ssd-large": 64, ++ "resnet": 1024, ++ "rnnt": 2513, ++ "bert-99": 10833, ++ "bert-99.9": 10833, ++ "dlrm-99": 204800, ++ "dlrm-99.9": 204800, ++ "3d-unet-99": 42, ++ "3d-unet-99.9": 42, ++ }, ++ "model_mapping": { ++ # map model names to the official mlperf model class ++ "ssd-mobilenet": "ssd-small", ++ "ssd-resnet34": "ssd-large", ++ "mobilenet": "resnet", ++ "resnet50": "resnet", ++ "ssd_resnet101_v1_fpn_640x640": "ssd-small", ++ "ssd_resnet101_v1_fpn_1024x1024": "ssd-large", ++ "ssd_resnet152_v1_fpn_640x640": "ssd-small", ++ "ssd_resnet152_v1_fpn_1024x1024": "ssd-large", ++ "rcnn-resnet50-lowproposals-coco": "ssd-large", ++ "rcnn-inception-resnet-v2-lowproposals-coco": "ssd-large", ++ "rcnn-inception-v2-coco": "ssd-large", ++ "rcnn-nas-lowproposals-coco": "ssd-large", ++ "rcnn-resnet101-lowproposals-coco": "ssd-large", ++ "ssd_mobilenet_v1_coco": "ssd-small", ++ "ssd_mobilenet_v1_fpn_640x640": "ssd-small", ++ "ssd_mobilenet_v1_quantized_coco": "ssd-small", ++ "ssd_mobilenet_v2_320x320": "ssd-small", ++ "ssd_mobilenet_v2_fpnlite_320x320": "ssd-small", ++ "ssd_mobilenet_v2_fpnlite_640x640": "ssd-small", ++ "ssd_resnet50_v1_fpn_640x640": "ssd-small", ++ "ssd_resnet50_v1_fpn_1024x1024": "ssd-large", ++ }, ++ "seeds": { ++ "qsl_rng_seed": 6655344265603136530, ++ "sample_index_rng_seed": 15863379492028895792, ++ "schedule_rng_seed": 12662793979680847247, ++ }, ++ "test05_seeds": { ++ "qsl_rng_seed" : 313588358309856706, ++ "sample_index_rng_seed" : 471397156132239067, ++ "schedule_rng_seed" : 413914573387865862, ++ }, ++ "ignore_errors": [ ++ ], ++ "latency-constraint": { ++ "resnet": {"Server": 15000000}, ++ "ssd-large": {"Server": 100000000}, ++ "rnnt": {"Server": 1000000000}, ++ "bert-99": {"Server": 130000000}, ++ "bert-99.9": {"Server": 130000000}, ++ "dlrm-99": {"Server": 30000000}, ++ "dlrm-99.9": {"Server": 30000000}, ++ }, ++ "min-queries": { ++ "resnet": {"SingleStream": 1024, "MultiStream": 270336, "Server": 270336, "Offline": 1}, ++ "ssd-small": {"SingleStream": 1024, "MultiStream": 270336, "Offline": 1}, ++ "ssd-large": {"SingleStream": 1024, "MultiStream": 270336, 
"Server": 270336, "Offline": 1}, ++ "rnnt": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99.9": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "dlrm-99": {"Server": 270336, "Offline": 1}, ++ "dlrm-99.9": {"Server": 270336, "Offline": 1}, ++ "3d-unet-99": {"SingleStream": 1024, "Offline": 1}, ++ "3d-unet-99.9": {"SingleStream": 1024, "Offline": 1}, ++ }, ++ }, ++ "v2.1": { ++ "models": [ ++ "resnet", "retinanet", "rnnt", ++ "bert-99", "bert-99.9", ++ "dlrm-99", "dlrm-99.9", ++ "3d-unet-99", "3d-unet-99.9", ++ ], ++ "required-scenarios-datacenter": { ++ "resnet": ["Server", "Offline"], ++ "retinanet": ["Server", "Offline"], ++ "rnnt": ["Server", "Offline"], ++ "bert-99": ["Server", "Offline"], ++ "bert-99.9": ["Server", "Offline"], ++ "dlrm-99": ["Server", "Offline"], ++ "dlrm-99.9": ["Server", "Offline"], ++ "3d-unet-99": ["Offline"], ++ "3d-unet-99.9": ["Offline"], ++ }, ++ "optional-scenarios-datacenter": { ++ }, ++ "required-scenarios-edge": { ++ "resnet": ["SingleStream", "MultiStream", "Offline"], ++ "retinanet": ["SingleStream", "MultiStream", "Offline"], ++ "rnnt": ["SingleStream", "Offline"], ++ "bert-99": ["SingleStream", "Offline"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-edge": { ++ }, ++ "required-scenarios-datacenter-edge": { ++ "resnet": ["SingleStream", "Offline", "MultiStream", "Server"], ++ "retinanet": ["SingleStream", "Offline", "MultiStream", "Server"], ++ "rnnt": ["SingleStream", "Offline", "Server"], ++ "bert-99": ["SingleStream", "Offline", "Server"], ++ "bert-99.9": ["Offline", "Server"], ++ "dlrm-99": ["Offline", "Server"], ++ "dlrm-99.9": ["Offline", "Server"], ++ "3d-unet-99": ["SingleStream", "Offline"], ++ "3d-unet-99.9": ["SingleStream", "Offline"], ++ }, ++ "optional-scenarios-datacenter-edge": { ++ }, ++ "accuracy-target": { ++ "resnet": ("acc", 76.46 * 0.99), ++ "retinanet": ("mAP", 37.55 * 0.99), ++ "rnnt": ("WER", (100 - 7.452) * 0.99), ++ "bert-99": ("F1", 90.874 * 0.99), ++ "bert-99.9": ("F1", 90.874 * 0.999), ++ "dlrm-99": ("AUC", 80.25 * 0.99), ++ "dlrm-99.9": ("AUC", 80.25 * 0.999), ++ "3d-unet-99": ("DICE", 0.86170 * 0.99), ++ "3d-unet-99.9": ("DICE", 0.86170 * 0.999), ++ }, ++ "performance-sample-count": { ++ "resnet": 1024, ++ # TODO: Update perf sample count for retinanet ++ "retinanet": 64, ++ "rnnt": 2513, ++ "bert-99": 10833, ++ "bert-99.9": 10833, ++ "dlrm-99": 204800, ++ "dlrm-99.9": 204800, ++ "3d-unet-99": 42, ++ "3d-unet-99.9": 42, ++ }, ++ # TODO: Update this list. 
++ "model_mapping": { ++ # map model names to the official mlperf model class ++ "ssd-mobilenet": "ssd-small", ++ "ssd-resnet34": "ssd-large", ++ "mobilenet": "resnet", ++ "resnet50": "resnet", ++ "ssd_resnet101_v1_fpn_640x640": "ssd-small", ++ "ssd_resnet101_v1_fpn_1024x1024": "ssd-large", ++ "ssd_resnet152_v1_fpn_640x640": "ssd-small", ++ "ssd_resnet152_v1_fpn_1024x1024": "ssd-large", ++ "rcnn-resnet50-lowproposals-coco": "ssd-large", ++ "rcnn-inception-resnet-v2-lowproposals-coco": "ssd-large", ++ "rcnn-inception-v2-coco": "ssd-large", ++ "rcnn-nas-lowproposals-coco": "ssd-large", ++ "rcnn-resnet101-lowproposals-coco": "ssd-large", ++ "ssd_mobilenet_v1_coco": "ssd-small", ++ "ssd_mobilenet_v1_fpn_640x640": "ssd-small", ++ "ssd_mobilenet_v1_quantized_coco": "ssd-small", ++ "ssd_mobilenet_v2_320x320": "ssd-small", ++ "ssd_mobilenet_v2_fpnlite_320x320": "ssd-small", ++ "ssd_mobilenet_v2_fpnlite_640x640": "ssd-small", ++ "ssd_resnet50_v1_fpn_640x640": "ssd-small", ++ "ssd_resnet50_v1_fpn_1024x1024": "ssd-large", ++ }, ++ "seeds": { ++ "qsl_rng_seed": 14284205019438841327, ++ "sample_index_rng_seed": 4163916728725999944, ++ "schedule_rng_seed": 299063814864929621, ++ }, ++ "test05_seeds": { ++ "qsl_rng_seed" : 313588358309856706, ++ "sample_index_rng_seed" : 471397156132239067, ++ "schedule_rng_seed" : 413914573387865862, ++ }, ++ "ignore_errors": [ ++ ], ++ "latency-constraint": { ++ "resnet": {"Server": 15000000}, ++ "retinanet": {"Server": 100000000}, ++ "rnnt": {"Server": 1000000000}, ++ "bert-99": {"Server": 130000000}, ++ "bert-99.9": {"Server": 130000000}, ++ "dlrm-99": {"Server": 30000000}, ++ "dlrm-99.9": {"Server": 30000000}, ++ }, ++ "min-queries": { ++ "resnet": {"SingleStream": 1024, "MultiStream": 270336, "Server": 270336, "Offline": 1}, ++ "retinanet": {"SingleStream": 1024, "MultiStream": 270336, "Server": 270336, "Offline": 1}, ++ "rnnt": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "bert-99.9": {"SingleStream": 1024, "Server": 270336, "Offline": 1}, ++ "dlrm-99": {"Server": 270336, "Offline": 1}, ++ "dlrm-99.9": {"Server": 270336, "Offline": 1}, ++ "3d-unet-99": {"SingleStream": 1024, "Offline": 1}, ++ "3d-unet-99.9": {"SingleStream": 1024, "Offline": 1}, ++ }, ++ }, ++} ++ ++VALID_DIVISIONS = ["open", "closed", "network"] ++VALID_AVAILABILITIES = ["available", "preview", "rdi"] ++REQUIRED_PERF_FILES = ["mlperf_log_summary.txt", "mlperf_log_detail.txt"] ++OPTIONAL_PERF_FILES = ["mlperf_log_accuracy.json"] ++REQUIRED_PERF_POWER_FILES = ["spl.txt"] ++REQUIRED_POWER_FILES = ["client.json", "client.log", "ptd_logs.txt", "server.json", "server.log"] ++REQUIRED_ACC_FILES = ["mlperf_log_summary.txt", "mlperf_log_detail.txt", "accuracy.txt", "mlperf_log_accuracy.json"] ++REQUIRED_MEASURE_FILES = ["mlperf.conf", "user.conf", "README.md"] ++MS_TO_NS = 1000 * 1000 ++S_TO_MS = 1000 ++MAX_ACCURACY_LOG_SIZE = 10 * 1024 ++OFFLINE_MIN_SPQ = 24576 ++TEST_DURATION_MS_PRE_1_0 = 60000 ++TEST_DURATION_MS = 600000 ++REQUIRED_COMP_PER_FILES = ["mlperf_log_summary.txt", "mlperf_log_detail.txt"] ++REQUIRED_TEST01_ACC_FILES_1 = ["mlperf_log_accuracy.json", "accuracy.txt"] ++REQUIRED_TEST01_ACC_FILES = REQUIRED_TEST01_ACC_FILES_1 + ["baseline_accuracy.txt", "compliance_accuracy.txt"] ++ ++SCENARIO_MAPPING = { ++ "singlestream": "SingleStream", ++ "multistream": "MultiStream", ++ "server": "Server", ++ "offline": "Offline", ++} ++ ++RESULT_FIELD = { ++ "Offline": "Samples per second", ++ "SingleStream": "90th 
percentile latency (ns)", ++ "MultiStream": "Samples per query", ++ "Server": "Scheduled samples per second" ++} ++ ++RESULT_FIELD_NEW = { ++ "v0.5": { ++ "Offline": "result_samples_per_second", ++ "SingleStream": "result_90.00_percentile_latency_ns", ++ "MultiStreamLegacy": "effective_samples_per_query", ++ "MultiStream": "result_99.00_percentile_per_query_latency_ns", ++ "Server": "result_scheduled_samples_per_sec" ++ }, ++ "v0.7": { ++ "Offline": "result_samples_per_second", ++ "SingleStream": "result_90.00_percentile_latency_ns", ++ "MultiStreamLegacy": "effective_samples_per_query", ++ "MultiStream": "result_99.00_percentile_per_query_latency_ns", ++ "Server": "result_scheduled_samples_per_sec" ++ }, ++ "v1.0": { ++ "Offline": "result_samples_per_second", ++ "SingleStream": "result_90.00_percentile_latency_ns", ++ "MultiStreamLegacy": "effective_samples_per_query", ++ "MultiStream": "result_99.00_percentile_per_query_latency_ns", ++ "Server": "result_scheduled_samples_per_sec" ++ }, ++ "v1.1": { ++ "Offline": "result_samples_per_second", ++ "SingleStream": "result_90.00_percentile_latency_ns", ++ "MultiStreamLegacy": "effective_samples_per_query", ++ "MultiStream": "result_99.00_percentile_per_query_latency_ns", ++ "Server": "result_scheduled_samples_per_sec" ++ }, ++ "v2.0": { ++ "Offline": "result_samples_per_second", ++ "SingleStream": "early_stopping_latency_ss", ++ "MultiStreamLegacy": "effective_samples_per_query", ++ "MultiStream": "early_stopping_latency_ms", ++ "Server": "result_scheduled_samples_per_sec" ++ }, ++ "v2.1": { ++ "Offline": "result_samples_per_second", ++ "SingleStream": "early_stopping_latency_ss", ++ "MultiStreamLegacy": "effective_samples_per_query", ++ "MultiStream": "early_stopping_latency_ms", ++ "Server": "result_scheduled_samples_per_sec" ++ }, ++} ++ ++ACC_PATTERN = { ++ "acc": ++ r"^accuracy=([\d\.]+).*", ++ "AUC": ++ r"^AUC=([\d\.]+).*", ++ "mAP": ++ r"^mAP=([\d\.]+).*", ++ "bleu": ++ r"^BLEU\:\s*([\d\.]+).*", ++ "F1": ++ r"^{[\"\']exact_match[\"\']\:\s*[\d\.]+,\s*[\"\']f1[\"\']\:\s*([\d\.]+)}", ++ "WER": ++ r"Word Error Rate\:.*, accuracy=([0-9\.]+)%", ++ "DICE": ++ r"Accuracy\:\s*mean\s*=\s*([\d\.]+).*", ++} ++ ++SYSTEM_DESC_REQUIRED_FIELDS = [ ++ "division", "submitter", "status", "system_name", "number_of_nodes", "host_processor_model_name", ++ "host_processors_per_node", "host_processor_core_count", "host_memory_capacity", "host_storage_capacity", ++ "host_storage_type", "accelerators_per_node", "accelerator_model_name", "accelerator_memory_capacity", ++ "framework", "operating_system" ++] ++ ++SYSTEM_DESC_REQUIED_FIELDS_SINCE_V1 = [ ++ "system_type", "other_software_stack", "host_processor_frequency", "host_processor_caches", ++ "host_memory_configuration", "host_processor_interconnect", "host_networking", "host_networking_topology", ++ "accelerator_frequency", "accelerator_host_interconnect", "accelerator_interconnect", ++ "accelerator_interconnect_topology", "accelerator_memory_configuration", ++ "accelerator_on-chip_memories", "cooling", "hw_notes", "sw_notes" ++] ++ ++SYSTEM_DESC_REQUIED_FIELDS_POWER = [ ++ "power_management", "filesystem", "boot_firmware_version", "management_firmware_version", "other_hardware", ++ "number_of_type_nics_installed", "nics_enabled_firmware", "nics_enabled_os", "nics_enabled_connected", ++ "network_speed_mbit", "power_supply_quantity_and_rating_watts", "power_supply_details", "disk_drives", ++ "disk_controllers" ++] ++ ++SYSTEM_DESC_IS_NETWORK_MODE = "is_network" ++SYSTEM_DESC_REQUIRED_FIELDS_NETWORK_MODE = [ 
++ SYSTEM_DESC_IS_NETWORK_MODE, "network_type", "network_media", "network_rate", "nic_loadgen", ++ "number_nic_loadgen", "net_software_stack_loadgen", "network_protocol", "number_connections", "nic_sut", ++ "number_nic_sut", "net_software_stack_sut", "network_topology" ++] ++NETWORK_MODE_REQUIRED_SUBSTRING_IN_SUT_NAME = "Network SUT" ++ ++SYSTEM_IMP_REQUIRED_FILES = [ ++ "input_data_types", "retraining", "starting_weights_filename", "weight_data_types", ++ "weight_transformations", ++] ++ ++ ++class Config(): ++ """Select config value by mlperf version and submission type.""" ++ def __init__(self, version, extra_model_benchmark_map, ignore_uncommited=False, more_power_check=False): ++ self.base = MODEL_CONFIG.get(version) ++ self.set_extra_model_benchmark_map(extra_model_benchmark_map) ++ self.version = version ++ self.models = self.base["models"] ++ self.seeds = self.base["seeds"] ++ self.test05_seeds = self.base["test05_seeds"] ++ self.accuracy_target = self.base["accuracy-target"] ++ self.performance_sample_count = self.base["performance-sample-count"] ++ self.latency_constraint = self.base.get("latency-constraint", {}) ++ self.min_queries = self.base.get("min-queries", {}) ++ self.required = None ++ self.optional = None ++ self.ignore_uncommited = ignore_uncommited ++ self.more_power_check = more_power_check ++ ++ def set_extra_model_benchmark_map(self, extra_model_benchmark_map): ++ if extra_model_benchmark_map: ++ for mapping in extra_model_benchmark_map.split(';'): ++ model_name, mlperf_model = mapping.split(':') ++ self.base['model_mapping'][model_name] = mlperf_model ++ ++ def set_type(self, submission_type): ++ if submission_type is None and self.version in ["v0.5"]: ++ return ++ elif submission_type == "datacenter": ++ self.required = self.base["required-scenarios-datacenter"] ++ self.optional = self.base["optional-scenarios-datacenter"] ++ elif submission_type == "edge": ++ self.required = self.base["required-scenarios-edge"] ++ self.optional = self.base["optional-scenarios-edge"] ++ elif submission_type == "datacenter,edge" or submission_type == "edge,datacenter": ++ self.required = self.base["required-scenarios-datacenter-edge"] ++ self.optional = self.base["optional-scenarios-datacenter-edge"] ++ else: ++ raise ValueError("invalid system type") ++ ++ def get_mlperf_model(self, model): ++ # preferred - user is already using the official name ++ if model in self.models: ++ return model ++ ++ # simple mapping, ie resnet50->resnet ? 
++ mlperf_model = self.base["model_mapping"].get(model) ++ if mlperf_model: ++ return mlperf_model ++ ++ # try to guess ++ if "ssdlite" in model or "ssd-inception" in model or "yolo" in model or \ ++ "ssd-mobilenet" in model or "ssd-resnet50" in model: ++ model = "ssd-small" ++ elif "mobilenet" in model: ++ model = "mobilenet" ++ elif "efficientnet" in model or "resnet50" in model: ++ model = "resnet" ++ elif "rcnn" in model: ++ model = "ssd-small" ++ elif "bert-99.9" in model: ++ model = "bert-99.9" ++ elif "bert-99" in model: ++ model = "bert-99" ++ # map again, for example v0.7 does not have mobilenet so it needs to be mapped to resnet ++ mlperf_model = self.base["model_mapping"].get(model, model) ++ return mlperf_model ++ ++ def get_required(self, model): ++ if self.version in ["v0.5"]: ++ return set() ++ model = self.get_mlperf_model(model) ++ if model not in self.required: ++ return None ++ return set(self.required[model]) ++ ++ def get_optional(self, model): ++ if self.version in ["v0.5"]: ++ return set(["SingleStream", "MultiStream", "Server", "Offline"]) ++ model = self.get_mlperf_model(model) ++ if model not in self.optional: ++ return set() ++ return set(self.optional[model]) ++ ++ def get_accuracy_target(self, model): ++ if model not in self.accuracy_target: ++ raise ValueError("model not known: " + model) ++ return self.accuracy_target[model] ++ ++ def get_performance_sample_count(self, model): ++ model = self.get_mlperf_model(model) ++ if model not in self.performance_sample_count: ++ raise ValueError("model not known: " + model) ++ return self.performance_sample_count[model] ++ ++ def ignore_errors(self, line): ++ for error in self.base["ignore_errors"]: ++ if error in line: ++ return True ++ if self.ignore_uncommited and "ERROR : Loadgen built with uncommitted changes!" 
in line: ++ return True ++ return False ++ ++ def get_min_query_count(self, model, scenario): ++ model = self.get_mlperf_model(model) ++ if model not in self.min_queries: ++ raise ValueError("model not known: " + model) ++ return self.min_queries[model].get(scenario) ++ ++ def has_new_logging_format(self): ++ return self.version not in ["v0.5", "v0.7"] ++ ++ def uses_legacy_multistream(self): ++ return self.version in ["v0.5", "v0.7", "v1.0", "v1.1"] ++ ++ ++ def uses_early_stopping(self, scenario): ++ return (self.version not in ["v0.5", "v0.7", "v1.0", "v1.1"]) and ( ++ scenario in ["Server", "SingleStream", "MultiStream"] ++ ) ++ ++ def has_query_count_in_log(self): ++ return self.version not in ["v0.5", "v0.7", "v1.0", "v1.1"] ++ ++ ++ def has_power_utc_timestamps(self): ++ return self.version not in ["v0.5", "v0.7", "v1.0"] ++ ++ ++ ++def get_args(): ++ """Parse commandline.""" ++ parser = argparse.ArgumentParser() ++ parser.add_argument("--input", required=True, help="submission directory") ++ parser.add_argument("--version", default="v2.1", choices=list(MODEL_CONFIG.keys()), help="mlperf version") ++ parser.add_argument("--submitter", help="filter to submitter") ++ parser.add_argument("--csv", default="summary.csv", help="csv file with results") ++ parser.add_argument("--skip_compliance", action="store_true", help="Pass this cmdline option to skip checking compliance/ dir") ++ parser.add_argument("--extra-model-benchmark-map", help="extra model name to benchmark mapping") ++ parser.add_argument("--debug", action="store_true", help="extra debug output") ++ parser.add_argument("--submission-exceptions", action="store_true", help="ignore certain errors for submission") ++ parser.add_argument("--more-power-check", action="store_true", help="apply Power WG's check.py script on each power submission. Requires Python 3.7+") ++ args = parser.parse_args() ++ return args ++ ++ ++def list_dir(*path): ++ path = os.path.join(*path) ++ return [f for f in os.listdir(path) if os.path.isdir(os.path.join(path, f))] ++ ++ ++def list_files(*path): ++ path = os.path.join(*path) ++ return [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))] ++ ++ ++def split_path(m): ++ return m.replace("\\", "/").split("/") ++ ++ ++def find_error_in_detail_log(config, fname): ++ is_valid = True ++ if not os.path.exists(fname): ++ log.error("%s is missing", fname) ++ is_valid = False ++ else: ++ if config.has_new_logging_format(): ++ mlperf_log = MLPerfLog(fname) ++ if mlperf_log.has_error(): ++ if config.ignore_uncommited: ++ has_other_errors = False ++ for error in mlperf_log.get_errors(): ++ if "Loadgen built with uncommitted changes!" not in error["value"]: ++ has_other_errors = True ++ ++ log.error("%s contains errors:", fname) ++ for error in mlperf_log.get_errors(): ++ log.error("%s", error["value"]) ++ ++ if not config.ignore_uncommited or has_other_errors: ++ is_valid = False ++ else: ++ with open(fname, "r") as f: ++ for line in f: ++ # look for: ERROR ++ if "ERROR" in line: ++ if config.ignore_errors(line): ++ if "ERROR : Loadgen built with uncommitted changes!" 
in line: ++ log.warning("%s contains error: %s", fname, line) ++ continue ++ log.error("%s contains error: %s", fname, line) ++ is_valid = False ++ return is_valid ++ ++ ++def check_accuracy_dir(config, model, path, verbose): ++ is_valid = False ++ acc = None ++ hash_val = None ++ acc_type, acc_target = config.get_accuracy_target(model) ++ pattern = ACC_PATTERN[acc_type] ++ with open(os.path.join(path, "accuracy.txt"), "r", encoding="utf-8") as f: ++ for line in f: ++ m = re.match(pattern, line) ++ if m: ++ acc = m.group(1) ++ m = re.match(r"^hash=([\w\d]+)$", line) ++ if m: ++ hash_val = m.group(1) ++ if hash_val and acc: ++ break ++ ++ if acc and float(acc) >= acc_target: ++ is_valid = True ++ elif verbose: ++ log.warning("%s accuracy not met: expected=%f, found=%s", path, acc_target, acc) ++ ++ if not hash_val: ++ log.error("%s not hash value for mlperf_log_accuracy.json", path) ++ is_valid = False ++ ++ # check mlperf_log_accuracy.json ++ fname = os.path.join(path, "mlperf_log_accuracy.json") ++ if not os.path.exists(fname): ++ log.error("%s is missing", fname) ++ is_valid = False ++ else: ++ if os.stat(fname).st_size > MAX_ACCURACY_LOG_SIZE: ++ log.error("%s is not truncated", fname) ++ is_valid = False ++ ++ # check if there are any errors in the detailed log ++ fname = os.path.join(path, "mlperf_log_detail.txt") ++ if not find_error_in_detail_log(config, fname): ++ is_valid = False ++ ++ return is_valid, acc ++ ++ ++def check_performance_dir(config, model, path, scenario_fixed, division, system_json): ++ is_valid = False ++ rt = {} ++ ++ # look for: Result is: VALID ++ if config.has_new_logging_format(): ++ fname = os.path.join(path, "mlperf_log_detail.txt") ++ mlperf_log = MLPerfLog(fname) ++ if "result_validity" in mlperf_log.get_keys() and mlperf_log["result_validity"] == "VALID": ++ is_valid = True ++ performance_sample_count = mlperf_log["effective_performance_sample_count"] ++ qsl_rng_seed = mlperf_log["effective_qsl_rng_seed"] ++ sample_index_rng_seed = mlperf_log["effective_sample_index_rng_seed"] ++ schedule_rng_seed = mlperf_log["effective_schedule_rng_seed"] ++ scenario = mlperf_log["effective_scenario"] ++ scenario_for_res = "MultiStreamLegacy" if scenario == "MultiStream" and config.uses_legacy_multistream() else\ ++ scenario ++ res = float(mlperf_log[RESULT_FIELD_NEW[config.version][scenario_for_res]]) ++ latency_99_percentile = mlperf_log["result_99.00_percentile_latency_ns"] ++ latency_mean = mlperf_log["result_mean_latency_ns"] ++ if scenario in ["MultiStream"]: ++ latency_99_percentile = mlperf_log["result_99.00_percentile_per_query_latency_ns"] ++ latency_mean = mlperf_log["result_mean_query_latency_ns"] ++ min_query_count = mlperf_log["effective_min_query_count"] ++ samples_per_query = mlperf_log["effective_samples_per_query"] ++ min_duration = mlperf_log["effective_min_duration_ms"] ++ if scenario == "SingleStream": ++ # qps_wo_loadgen_overhead is only used for inferring Offline from SingleStream; only for old submissions ++ qps_wo_loadgen_overhead = mlperf_log["result_qps_without_loadgen_overhead"] ++ sut_name = mlperf_log["sut_name"] ++ else: ++ fname = os.path.join(path, "mlperf_log_summary.txt") ++ with open(fname, "r") as f: ++ for line in f: ++ m = re.match(r"^Result\s+is\s*\:\s+VALID", line) ++ if m: ++ is_valid = True ++ m = re.match(r"^\s*([\w\s.\(\)\/]+)\s*\:\s*([\w\+\.][\w\+\.\s]*)", line) ++ if m: ++ rt[m.group(1).strip()] = m.group(2).strip() ++ performance_sample_count = int(rt['performance_sample_count']) ++ qsl_rng_seed = int(rt["qsl_rng_seed"]) 
++        sample_index_rng_seed = int(rt["sample_index_rng_seed"])
++        schedule_rng_seed = int(rt["schedule_rng_seed"])
++        scenario = rt["Scenario"].replace(" ","")
++        res = float(rt[RESULT_FIELD[scenario]])
++        latency_99_percentile = int(rt['99.00 percentile latency (ns)'])
++        latency_mean = int(rt['Mean latency (ns)'])
++        min_query_count = int(rt['min_query_count'])
++        samples_per_query = int(rt['samples_per_query'])
++        min_duration = int(rt["min_duration (ms)"])
++        if scenario == "SingleStream":
++            qps_wo_loadgen_overhead = float(rt["QPS w/o loadgen overhead"])
++        sut_name = str(rt['System Under Test (SUT) name: '])
++
++    # check if there are any errors in the detailed log
++    fname = os.path.join(path, "mlperf_log_detail.txt")
++    if not find_error_in_detail_log(config, fname):
++        is_valid = False
++
++    required_performance_sample_count = config.get_performance_sample_count(model)
++    if performance_sample_count < required_performance_sample_count:
++        log.error("%s performance_sample_count, found %d, needs to be >= %d",
++                  fname, performance_sample_count, required_performance_sample_count)
++        is_valid = False
++
++    config_seeds = config.seeds if "TEST05" not in fname else config.test05_seeds
++    if qsl_rng_seed != config_seeds["qsl_rng_seed"]:
++        log.error("%s qsl_rng_seed is wrong, expected=%s, found=%s", fname, config_seeds["qsl_rng_seed"], qsl_rng_seed)
++    if sample_index_rng_seed != config_seeds["sample_index_rng_seed"]:
++        log.error("%s sample_index_rng_seed is wrong, expected=%s, found=%s", fname, config_seeds["sample_index_rng_seed"], sample_index_rng_seed)
++    if schedule_rng_seed != config_seeds["schedule_rng_seed"]:
++        log.error("%s schedule_rng_seed is wrong, expected=%s, found=%s", fname, config_seeds["schedule_rng_seed"], schedule_rng_seed)
++
++    if scenario == "SingleStream" or (scenario == "MultiStream" and not config.uses_legacy_multistream()):
++        res /= MS_TO_NS
++
++    # Check if the current scenario (and version) uses early stopping
++    uses_early_stopping = config.uses_early_stopping(scenario)
++
++    if config.version != "v0.5":
++        # FIXME: for open we skip this because open can submit in all scenarios
++        # not supported for v0.5
++
++        if uses_early_stopping:
++            # check if the early stopping condition was met
++            if not mlperf_log["early_stopping_met"]:
++                early_stopping_result = mlperf_log["early_stopping_result"]
++                log.error("Early stopping condition was not met, msg=%s", early_stopping_result)
++
++            # If the scenario has a target latency (Server scenario), check
++            # that the effective target latency passed to early stopping does
++            # not exceed that constraint.
++            target_latency = config.latency_constraint.get(model, dict()).get(scenario)
++            if target_latency:
++                early_stopping_latency_ns = mlperf_log["effective_target_latency_ns"]
++                log.info("Target latency: %s, Early Stopping Latency: %s, Scenario: %s",
++                         target_latency, early_stopping_latency_ns, scenario)
++                if early_stopping_latency_ns > target_latency:
++                    log.error("%s Latency constraint with early stopping not met, expected=%s, found=%s",
++                              fname, target_latency, early_stopping_latency_ns)
++
++        else:
++            # check if the benchmark meets the latency constraint
++            target_latency = config.latency_constraint.get(model, dict()).get(scenario)
++            log.info("Target latency: %s, Latency: %s, Scenario: %s", target_latency, latency_99_percentile, scenario)
++            if target_latency:
++                if latency_99_percentile > target_latency:
++                    log.error("%s Latency constraint not met, expected=%s, found=%s",
++                              fname, target_latency, latency_99_percentile)
++
++    # Check that the minimum number of queries was issued to meet the test duration.
++    # If this run uses early stopping, the min_queries come from the detail log,
++    # otherwise this value comes from the config.
++    if not uses_early_stopping:
++        required_min_query_count = config.get_min_query_count(model, scenario)
++        if required_min_query_count and min_query_count < required_min_query_count:
++            log.error("%s Required minimum Query Count not met by user config, Expected=%s, Found=%s",
++                      fname, required_min_query_count, min_query_count)
++
++    if scenario == "Offline" and (samples_per_query < OFFLINE_MIN_SPQ):
++        log.error("%s Required minimum samples per query not met by user config, Expected=%s, Found=%s",
++                  fname, OFFLINE_MIN_SPQ, samples_per_query)
++
++    # Check that the test duration of 600s is met
++    required_min_duration = TEST_DURATION_MS_PRE_1_0 if config.version in ["v0.5", "v0.7"] else TEST_DURATION_MS
++    if min_duration < required_min_duration:
++        log.error("%s Test duration less than 600s in user config. expected=%s, found=%s",
++                  fname, required_min_duration, min_duration)
++
++    inferred = False
++    # special case for results inferred from a different scenario
++    if scenario_fixed in ["Offline"] and scenario in ["SingleStream"]:
++        inferred = True
++        res = qps_wo_loadgen_overhead
++
++    if (scenario_fixed in ["Offline"] and not config.uses_legacy_multistream()) and scenario in ["MultiStream"]:
++        inferred = True
++        res = samples_per_query * S_TO_MS / (latency_mean / MS_TO_NS)
++
++    if (scenario_fixed in ["MultiStream"] and not config.uses_legacy_multistream()) and scenario in ["SingleStream"]:
++        inferred = True
++        # samples_per_query does not match the one reported in the logs
++        # when inferring MultiStream from SingleStream
++        samples_per_query = 8
++        if uses_early_stopping:
++            early_stopping_latency_ms = mlperf_log["early_stopping_latency_ms"]
++            if early_stopping_latency_ms == 0:
++                log.error("Not enough samples were processed for early stopping to make an estimate")
++                is_valid = False
++            res = (early_stopping_latency_ms * samples_per_query) / MS_TO_NS
++        else:
++            res = (latency_99_percentile * samples_per_query) / MS_TO_NS
++
++    is_network_system, is_network_mode_valid = is_system_over_network(division, system_json, path)
++    is_valid &= is_network_mode_valid
++    if is_network_system:
++        # for network mode, verify the SUT name is valid according to the rules (must include "Network SUT" in the name)
++        if NETWORK_MODE_REQUIRED_SUBSTRING_IN_SUT_NAME not in sut_name:
++            log.error(
++                f"{fname} invalid sut name for network mode.
expecting the substring '{NETWORK_MODE_REQUIRED_SUBSTRING_IN_SUT_NAME}' got '{sut_name}'") ++ is_valid = False ++ ++ return is_valid, res, inferred ++ ++ ++def check_power_dir(power_path, ranging_path, testing_path, scenario_fixed, config): ++ ++ more_power_check = config.more_power_check ++ ++ is_valid = True ++ power_metric = 0 ++ ++ # check if all the required files are present ++ required_files = REQUIRED_PERF_FILES + REQUIRED_PERF_POWER_FILES ++ diff = files_diff(list_files(testing_path), required_files, OPTIONAL_PERF_FILES) ++ if diff: ++ log.error("%s has file list mismatch (%s)", testing_path, diff) ++ is_valid = False ++ diff = files_diff(list_files(ranging_path), required_files, OPTIONAL_PERF_FILES) ++ if diff: ++ log.error("%s has file list mismatch (%s)", ranging_path, diff) ++ is_valid = False ++ diff = files_diff(list_files(power_path), REQUIRED_POWER_FILES) ++ if diff: ++ log.error("%s has file list mismatch (%s)", power_path, diff) ++ is_valid = False ++ ++ # parse the power logs ++ if config.has_power_utc_timestamps(): ++ server_timezone = datetime.timedelta(0) ++ client_timezone = datetime.timedelta(0) ++ else: ++ server_json_fname = os.path.join(power_path, "server.json") ++ with open(server_json_fname) as f: ++ server_timezone = datetime.timedelta(seconds=json.load(f)["timezone"]) ++ client_json_fname = os.path.join(power_path, "client.json") ++ with open(client_json_fname) as f: ++ client_timezone = datetime.timedelta(seconds=json.load(f)["timezone"]) ++ detail_log_fname = os.path.join(testing_path, "mlperf_log_detail.txt") ++ mlperf_log = MLPerfLog(detail_log_fname) ++ datetime_format = '%m-%d-%Y %H:%M:%S.%f' ++ power_begin = datetime.datetime.strptime(mlperf_log["power_begin"], datetime_format) + client_timezone ++ power_end = datetime.datetime.strptime(mlperf_log["power_end"], datetime_format) + client_timezone ++ # Obtain the scenario also from logs to check if power is inferred ++ if config.has_new_logging_format(): ++ scenario = mlperf_log["effective_scenario"] ++ else: ++ rt = {} ++ fname = os.path.join(testing_path, "mlperf_log_summary.txt") ++ with open(fname, "r") as f: ++ for line in f: ++ m = re.match(r"^Result\s+is\s*\:\s+VALID", line) ++ if m: ++ is_valid = True ++ m = re.match(r"^\s*([\w\s.\(\)\/]+)\s*\:\s*([\w\+\.][\w\+\.\s]*)", line) ++ if m: ++ rt[m.group(1).strip()] = m.group(2).strip() ++ scenario = rt["Scenario"].replace(" ","") ++ spl_fname = os.path.join(testing_path, "spl.txt") ++ power_list = [] ++ with open(spl_fname) as f: ++ for line in f: ++ timestamp = datetime.datetime.strptime(line.split(",")[1], datetime_format) + server_timezone ++ if timestamp > power_begin and timestamp < power_end: ++ power_list.append(float(line.split(",")[3])) ++ if len(power_list) == 0: ++ log.error("%s has no power samples falling in power range: %s - %s", spl_fname, power_begin, power_end) ++ is_valid = False ++ else: ++ avg_power = sum(power_list) / len(power_list) ++ power_duration = (power_end - power_begin).total_seconds() ++ if scenario_fixed in ["Offline", "Server"]: ++ # In Offline and Server scenarios, the power metric is in W. ++ power_metric = avg_power ++ else: ++ # In SingleStream and MultiStream scenarios, the power metric is in J/query. ++ assert scenario_fixed in ["MultiStream", "SingleStream"], "Unknown scenario: {:}".format(scenario_fixed) ++ if not config.has_query_count_in_log(): ++ # Before v2.0, LoadGen does NOT print out the actual number of queries in detail logs. 
There is a
++                # "generated_query_count", but LoadGen exits early when the min_duration has been met, so it is not equal to
++                # the actual number of queries. To work around it, make use of "result_qps_with_loadgen_overhead", which is
++                # defined as: (sample_count - 1) / pr.final_query_issued_time, where final_query_issued_time can be
++                # approximated by power_duration (off by one query worth of latency, which is in general negligible compared
++                # to 600-sec total runtime and can be offset by removing the "+1" when reconstructing the sample_count).
++                # As for MultiStream, it always runs for 270336 queries, so using "generated_query_count" as above is fine.
++                if scenario_fixed in ["MultiStream"]:
++                    num_queries = mlperf_log["generated_query_count"] * mlperf_log["generated_samples_per_query"]
++                elif scenario_fixed in ["SingleStream"]:
++                    num_queries = mlperf_log["result_qps_with_loadgen_overhead"] * power_duration
++            else:
++                # Starting from v2.0, LoadGen logs the actual number of issued queries.
++                num_queries = int(mlperf_log["result_query_count"])
++            power_metric = avg_power * power_duration / num_queries
++
++            if (scenario_fixed in ["MultiStream"] and not config.uses_legacy_multistream()) and scenario in ["SingleStream"]:
++                samples_per_query = 8
++                power_metric = avg_power * power_duration * samples_per_query / num_queries
++
++    if more_power_check:
++        python_version_major = int(sys.version.split(" ")[0].split(".")[0])
++        python_version_minor = int(sys.version.split(" ")[0].split(".")[1])
++        assert python_version_major == 3 and python_version_minor >= 7, "The --more-power-check only supports Python 3.7+"
++        assert os.path.exists(os.path.join(submission_checker_dir, "power-dev", "compliance", "check.py")), \
++            "Please run 'git submodule update --init tools/submission/power-dev' to get Power WG's check.py."
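++        # The Power WG checker from the power-dev submodule re-validates the power
++        # run; a non-zero return value below invalidates the whole power submission.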
++        sys.path.insert(0, os.path.join(submission_checker_dir, "power-dev"))
++        from compliance.check import check as check_power_more
++        perf_path = os.path.dirname(power_path)
++        check_power_result = check_power_more(perf_path)
++        sys.stdout.flush()
++        sys.stderr.flush()
++        if check_power_result != 0:
++            log.error("Power WG check.py did not pass for: %s", perf_path)
++            is_valid = False
++
++    return is_valid, power_metric
++
++
++def files_diff(list1, list2, optional=None):
++    """Returns a list of files that are missing or added."""
++    if not optional:
++        optional = []
++    optional = optional + ["mlperf_log_trace.json", "results.json", ".gitkeep"]
++    return set(list1).symmetric_difference(set(list2)) - set(optional)
++
++def is_system_over_network(division, system_json, path):
++    """
++    Verify whether the submitted system is over network and whether it is valid for the division.
++
++    for 'network' division, it is mandatory that the system is over-network
++    for 'closed' division, the system must not be over-network
++    for 'open' division, the system may be either local or over-network
++    """
++    is_network_mode_sys_spec_str = system_json.get(SYSTEM_DESC_IS_NETWORK_MODE)
++    is_network_system = is_network_mode_sys_spec_str.lower()=="true" if is_network_mode_sys_spec_str is not None else False
++    # verify that the system corresponds to the division
++    is_valid = True
++    expected_state_by_division = {"network": True, "closed": False}
++    if division in expected_state_by_division:
++        is_valid = expected_state_by_division[division] is is_network_system
++    if not is_valid:
++        log.error(f"{path} incorrect network mode (={is_network_system}) for division '{division}'")
++    return is_network_system, is_valid
++
++def check_results_dir(config, filter_submitter, skip_compliance, csv, debug=False):
++    """
++    Walk the results directory and do the checking.
++
++    We are called with the cwd at the root of the submission directory.
++    level1 division - closed|open|network
++    level2 submitter - for example mlperf_org
++    level3 - results, systems, measurements, code
++
++    For results the structure from here is:
++    results/$system_desc/$benchmark_model/$scenario/performance/run_n
++    and
++    results/$system_desc/$benchmark_model/$scenario/accuracy
++
++    We first walk into results/$system_desc
++    make sure there is a system_desc.json and it's good
++    Next we walk into the model
++    make sure the model is good, make sure all required scenarios are there.
++    Next we walk into each scenario
++    check the performance directory
++    check the accuracy directory
++    if all was good, add the result to the results directory
++    if there are errors, write a None as result so we can report later what failed
++    """
++    head = [
++        "Organization", "Availability", "Division", "SystemType", "SystemName",
++        "Platform", "Model", "MlperfModel", "Scenario", "Result", "Accuracy",
++        "number_of_nodes", "host_processor_model_name",
++        "host_processors_per_node", "host_processor_core_count",
++        "accelerator_model_name", "accelerators_per_node", "Location",
++        "framework", "operating_system", "notes", "compliance", "errors",
++        "version", "inferred", "has_power", "Units"
++    ]
++    fmt = ",".join(["{}"] * len(head)) + "\n"
++    csv.write(",".join(head) + "\n")
++    results = {}
++
++    def log_result(submitter,
++                   available,
++                   division,
++                   system_type,
++                   system_name,
++                   system_desc,
++                   model_name,
++                   mlperf_model,
++                   scenario_fixed,
++                   r,
++                   acc,
++                   system_json,
++                   name,
++                   compliance,
++                   errors,
++                   config,
++                   inferred=0,
++                   power_metric=0):
++
++        notes = system_json.get("hw_notes", "")
++        if system_json.get("sw_notes"):
++            notes = notes + ". " + system_json.get("sw_notes")
++        unit_dict = {
++            "SingleStream": "Latency (ms)",
++            "MultiStream": "Latency (ms)",
++            "Offline": "Samples/s",
++            "Server": "Queries/s",
++        }
++        power_unit_dict = {
++            "SingleStream": "Joules",
++            "MultiStream": "Joules",
++            "Offline": "Watts",
++            "Server": "Watts",
++        }
++        unit = unit_dict[scenario_fixed]
++        power_unit = power_unit_dict[scenario_fixed]
++
++        csv.write(
++            fmt.format(submitter, available, division, '\"' + system_type + '\"',
++                       '\"' + system_name + '\"', system_desc, model_name,
++                       mlperf_model, scenario_fixed, r, acc,
++                       system_json.get("number_of_nodes"),
++                       '"' + system_json.get("host_processor_model_name") + '"',
++                       system_json.get("host_processors_per_node"),
++                       system_json.get("host_processor_core_count"),
++                       '"' + system_json.get("accelerator_model_name") + '"',
++                       system_json.get("accelerators_per_node"),
++                       name.replace("\\", "/"),
++                       '"' + system_json.get("framework", "") + '"',
++                       '"' + system_json.get("operating_system", "") + '"',
++                       '"' + notes + '"', compliance, errors, config.version,
++                       inferred, power_metric > 0, unit))
++
++        if power_metric > 0:
++            csv.write(
++                fmt.format(submitter, available, division, '\"' + system_type + '\"',
++                           '\"' + system_name + '\"', system_desc, model_name,
++                           mlperf_model, scenario_fixed, power_metric, acc,
++                           system_json.get("number_of_nodes"),
++                           '"' + system_json.get("host_processor_model_name") + '"',
++                           system_json.get("host_processors_per_node"),
++                           system_json.get("host_processor_core_count"),
++                           '"' + system_json.get("accelerator_model_name") + '"',
++                           system_json.get("accelerators_per_node"),
++                           name.replace("\\", "/"),
++                           '"' + system_json.get("framework", "") + '"',
++                           '"' + system_json.get("operating_system", "") + '"',
++                           '"' + notes + '"', compliance, errors, config.version,
++                           inferred, power_metric > 0, power_unit))
++
++    # we are at the top of the submission directory
++    for division in list_dir("."):
++        # we are looking at ./$division, ie ./closed
++        if division not in VALID_DIVISIONS:
++            if division not in [".git", ".github", "assets"]:
++                log.error("invalid division in input dir %s", division)
++            continue
++        is_closed_or_network = division in ["closed", "network"]
++
++        for submitter in list_dir(division):
++            # we are looking at ./$division/$submitter, ie ./closed/mlperf_org
++            if filter_submitter and submitter !=
filter_submitter: ++ continue ++ results_path = os.path.join(division, submitter, "results") ++ if not os.path.exists(results_path): ++ continue ++ ++ for system_desc in list_dir(results_path): ++ # we are looking at ./$division/$submitter/results/$system_desc, ie ./closed/mlperf_org/results/t4-ort ++ ++ # ++ # check if system_id is good. ++ # ++ system_id_json = os.path.join(division, submitter, "systems", system_desc + ".json") ++ if not os.path.exists(system_id_json): ++ log.error("no system_desc for %s/%s/%s", division, submitter, system_desc) ++ results[os.path.join(results_path, system_desc)] = None ++ continue ++ ++ name = os.path.join(results_path, system_desc) ++ with open(system_id_json) as f: ++ system_json = json.load(f) ++ available = system_json.get("status").lower() ++ if available not in VALID_AVAILABILITIES: ++ log.error("%s has invalid status (%s)", system_id_json, available) ++ results[name] = None ++ continue ++ system_type = system_json.get("system_type") ++ if config.version not in ["v0.5"]: ++ valid_system_types = ["datacenter", "edge"] ++ if config.version not in ["v0.7"]: ++ valid_system_types += ["datacenter,edge", "edge,datacenter"] ++ if system_type not in valid_system_types: ++ log.error("%s has invalid system type (%s)", system_id_json, system_type) ++ results[name] = None ++ continue ++ config.set_type(system_type) ++ if not check_system_desc_id(name, system_json, submitter, division, config.version): ++ results[name] = None ++ continue ++ ++ # ++ # Look at each model ++ # ++ for model_name in list_dir(results_path, system_desc): ++ ++ # we are looking at ./$division/$submitter/results/$system_desc/$model, ++ # ie ./closed/mlperf_org/results/t4-ort/bert ++ name = os.path.join(results_path, system_desc, model_name) ++ mlperf_model = config.get_mlperf_model(model_name) ++ ++ if is_closed_or_network and mlperf_model not in config.models: ++ # for closed/network divisions we want the model name to match. ++ # for open division the model_name might be different than the task ++ log.error("%s has an invalid model %s for closed/network division", name, ++ model_name) ++ results[name] = None ++ continue ++ ++ # ++ # Look at each scenario ++ # ++ required_scenarios = config.get_required(mlperf_model) ++ if required_scenarios is None: ++ log.error("%s has an invalid model %s, system_type=%s", name, ++ mlperf_model, system_type) ++ results[name] = None ++ continue ++ ++ errors = 0 ++ all_scenarios = set(list(required_scenarios) + list(config.get_optional(mlperf_model))) ++ for scenario in list_dir(results_path, system_desc, model_name): ++ # some submissions in v0.5 use lower case scenarios - map them for now ++ scenario_fixed = SCENARIO_MAPPING.get(scenario, scenario) ++ ++ # we are looking at ./$division/$submitter/results/$system_desc/$model/$scenario, ++ # ie ./closed/mlperf_org/results/t4-ort/bert/Offline ++ name = os.path.join(results_path, system_desc, model_name, scenario) ++ results[name] = None ++ if is_closed_or_network and scenario_fixed not in all_scenarios: ++ log.warning("%s ignoring scenario %s (neither required nor optional)", name, scenario) ++ continue ++ ++ # check if measurement_dir is good. 
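++                        # (illustrative path) ./closed/mlperf_org/measurements/t4-ort/bert/Offline,
++                        # mirroring the results tree layout that was checked above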
++ measurement_dir = os.path.join(division, submitter, "measurements", ++ system_desc, model_name, scenario) ++ if not os.path.exists(measurement_dir): ++ log.error("no measurement_dir for %s", measurement_dir) ++ results[measurement_dir] = None ++ errors += 1 ++ else: ++ if not check_measurement_dir(measurement_dir, name, system_desc, ++ os.path.join(division, submitter), model_name, scenario): ++ log.error("%s measurement_dir has issues", measurement_dir) ++ # results[measurement_dir] = None ++ errors += 1 ++ # FIXME: we should not accept this submission ++ # continue ++ ++ # check accuracy ++ accuracy_is_valid = False ++ acc_path = os.path.join(name, "accuracy") ++ if not os.path.exists(os.path.join(acc_path, "accuracy.txt")): ++ log.error( ++ "%s has no accuracy.txt. Generate it with accuracy-imagenet.py or accuracy-coco.py or " ++ "process_accuracy.py", acc_path) ++ else: ++ diff = files_diff(list_files(acc_path), REQUIRED_ACC_FILES) ++ if diff: ++ log.error("%s has file list mismatch (%s)", acc_path, diff) ++ accuracy_is_valid, acc = check_accuracy_dir(config, mlperf_model, acc_path, debug or is_closed_or_network) ++ if not accuracy_is_valid and not is_closed_or_network: ++ if debug: ++ log.warning("%s, accuracy not valid but taken for open", acc_path) ++ accuracy_is_valid = True ++ if not accuracy_is_valid: ++ # a little below we'll not copy this into the results csv ++ errors += 1 ++ log.error("%s, accuracy not valid", acc_path) ++ ++ inferred = 0 ++ if scenario in ["Server"] and config.version in ["v0.5", "v0.7"]: ++ n = ["run_1", "run_2", "run_3", "run_4", "run_5"] ++ else: ++ n = ["run_1"] ++ ++ # check if this submission has power logs ++ power_path = os.path.join(name, "performance", "power") ++ has_power = os.path.exists(power_path) ++ if has_power: ++ log.info("Detected power logs for %s", name) ++ ++ for i in n: ++ perf_path = os.path.join(name, "performance", i) ++ if not os.path.exists(perf_path): ++ log.error("%s is missing", perf_path) ++ continue ++ if has_power: ++ required_perf_files = REQUIRED_PERF_FILES + REQUIRED_PERF_POWER_FILES ++ else: ++ required_perf_files = REQUIRED_PERF_FILES ++ diff = files_diff(list_files(perf_path), required_perf_files, OPTIONAL_PERF_FILES) ++ if diff: ++ log.error("%s has file list mismatch (%s)", perf_path, diff) ++ ++ try: ++ is_valid, r, is_inferred = check_performance_dir(config, mlperf_model, perf_path, scenario_fixed, division, system_json) ++ if is_inferred: ++ inferred = 1 ++ log.info("%s has inferred results, qps=%s", perf_path, r) ++ except Exception as e: ++ log.error("%s caused exception in check_performance_dir: %s", perf_path, e) ++ is_valid, r = False, None ++ ++ power_metric = 0 ++ if has_power: ++ try: ++ ranging_path = os.path.join(name, "performance", "ranging") ++ power_is_valid, power_metric = check_power_dir(power_path, ranging_path, perf_path, scenario_fixed, ++ config) ++ if not power_is_valid: ++ is_valid = False ++ power_metric = 0 ++ except Exception as e: ++ log.error("%s caused exception in check_power_dir: %s", perf_path, e) ++ is_valid, r, power_metric = False, None, 0 ++ ++ if is_valid: ++ results[name] = r if r is None or power_metric == 0 else "{:f} with power_metric = {:f}".format(r, power_metric) ++ required_scenarios.discard(scenario_fixed) ++ else: ++ log.error("%s has issues", perf_path) ++ errors += 1 ++ ++ # check if compliance dir is good for CLOSED division ++ compliance = 0 if is_closed_or_network else 1 ++ if is_closed_or_network and not skip_compliance: ++ compliance_dir = 
os.path.join(division, submitter, "compliance", ++ system_desc, model_name, scenario) ++ if not os.path.exists(compliance_dir): ++ log.error("no compliance dir for %s", name) ++ results[name] = None ++ else: ++ if not check_compliance_dir(compliance_dir, mlperf_model, scenario_fixed, config, division, system_json): ++ log.error("compliance dir %s has issues", compliance_dir) ++ results[name] = None ++ else: ++ compliance = 1 ++ ++ if results.get(name): ++ if accuracy_is_valid: ++ log_result(submitter, available, division, system_type, system_json.get("system_name"), system_desc, model_name, mlperf_model, ++ scenario_fixed, r, acc, system_json, name, compliance, errors, config, inferred=inferred, power_metric=power_metric) ++ else: ++ results[name] = None ++ log.error("%s is OK but accuracy has issues", name) ++ ++ if required_scenarios: ++ name = os.path.join(results_path, system_desc, model_name) ++ if is_closed_or_network: ++ results[name] = None ++ log.error("%s does not have all required scenarios, missing %s", name, required_scenarios) ++ elif debug: ++ log.warning("%s ignoring missing scenarios in open division (%s)", name, required_scenarios) ++ ++ return results ++ ++ ++def check_system_desc_id(fname, systems_json, submitter, division, version): ++ is_valid = True ++ # check all required fields ++ if version in ["v0.5", "v0.7"]: ++ required_fields = SYSTEM_DESC_REQUIRED_FIELDS ++ else: ++ required_fields = SYSTEM_DESC_REQUIRED_FIELDS + SYSTEM_DESC_REQUIED_FIELDS_SINCE_V1 ++ ++ is_network_system, is_network_mode_valid = is_system_over_network(division, systems_json, fname) ++ is_valid &= is_network_mode_valid ++ if is_network_system: ++ required_fields += SYSTEM_DESC_REQUIRED_FIELDS_NETWORK_MODE ++ ++ for k in required_fields: ++ if k not in systems_json: ++ is_valid = False ++ log.error("%s, field %s is missing", fname, k) ++ ++ if version in ["v0.5", "v0.7"]: ++ all_fields = required_fields + SYSTEM_DESC_REQUIED_FIELDS_SINCE_V1 ++ else: ++ # TODO: SYSTEM_DESC_REQUIED_FIELDS_POWER should be mandatory when a submission has power logs, but since we ++ # check power submission in check_results_dir, the information is not available yet at this stage. 
++ all_fields = required_fields + SYSTEM_DESC_REQUIED_FIELDS_POWER ++ for k in systems_json.keys(): ++ if k not in all_fields: ++ log.warning("%s, field %s is unknown", fname, k) ++ ++ if systems_json.get("submitter").lower() != submitter.lower(): ++ log.error("%s has submitter %s, directory has %s", fname, systems_json.get("submitter"), submitter) ++ is_valid = False ++ if systems_json.get("division") != division: ++ log.error("%s has division %s, division has %s", fname, systems_json.get("division"), division) ++ is_valid = False ++ return is_valid ++ ++ ++def check_measurement_dir(measurement_dir, fname, system_desc, root, model, scenario): ++ files = list_files(measurement_dir) ++ system_file = None ++ is_valid = True ++ for i in REQUIRED_MEASURE_FILES: ++ if i not in files: ++ log.error("%s is missing %s", measurement_dir, i) ++ is_valid = False ++ for i in files: ++ if i.startswith(system_desc) and i.endswith("_" + scenario + ".json"): ++ system_file = i ++ end = len("_" + scenario + ".json") ++ break ++ elif i.startswith(system_desc) and i.endswith(".json"): ++ system_file = i ++ end = len(".json") ++ break ++ if system_file: ++ with open(os.path.join(measurement_dir, system_file), "r") as f: ++ j = json.load(f) ++ for k in SYSTEM_IMP_REQUIRED_FILES: ++ if k not in j: ++ is_valid = False ++ log.error("%s, field %s is missing", fname, k) ++ ++ impl = system_file[len(system_desc) + 1:-end] ++ code_dir = os.path.join(root, "code", model) ++ if os.path.isfile(code_dir): ++ with open(code_dir, "r") as f: ++ line = f.read() ++ code_dir = os.path.join(root, "code", line.strip(), impl) ++ else: ++ code_dir = os.path.join(root, "code", model, impl) ++ ++ if not os.path.exists(code_dir): ++ # see if the code dir is per model ++ if not os.path.exists(os.path.dirname(code_dir)): ++ log.error("%s is missing code_dir %s", fname, code_dir) ++ is_valid = False ++ else: ++ log.error("%s is missing %s*.json", fname, system_desc) ++ is_valid = False ++ ++ return is_valid ++ ++def check_compliance_perf_dir(test_dir): ++ is_valid = False ++ ++ fname = os.path.join(test_dir, "verify_performance.txt") ++ if not os.path.exists(fname): ++ log.error("%s is missing in %s", fname, test_dir) ++ is_valid = False ++ else: ++ with open(fname, "r") as f: ++ for line in f: ++ # look for: TEST PASS ++ if "TEST PASS" in line: ++ is_valid = True ++ break ++ if is_valid == False: ++ log.error("Compliance test performance check in %s failed", test_dir) ++ ++ # Check performance dir ++ test_perf_path = os.path.join(test_dir, "performance", "run_1") ++ if not os.path.exists(test_perf_path): ++ log.error("%s has no performance/run_1 directory", test_dir) ++ is_valid = False ++ else: ++ diff = files_diff( ++ list_files(test_perf_path), REQUIRED_COMP_PER_FILES, ++ ["mlperf_log_accuracy.json"]) ++ if diff: ++ log.error("%s has file list mismatch (%s)", test_perf_path, diff) ++ is_valid = False ++ ++ return is_valid ++ ++def check_compliance_acc_dir(test_dir): ++ is_valid = False ++ acc_passed = False ++ ++ fname = os.path.join(test_dir, "verify_accuracy.txt") ++ if not os.path.exists(fname): ++ log.error("%s is missing in %s", fname, test_dir) ++ else: ++ # Accuracy can fail for TEST01 ++ is_valid = True ++ with open(fname, "r") as f: ++ for line in f: ++ # look for: TEST PASS ++ if "TEST PASS" in line: ++ acc_passed = True ++ break ++ if acc_passed == False: ++ log.info("Compliance test accuracy check in %s failed", test_dir) ++ ++ # Check Accuracy dir ++ test_acc_path = os.path.join(test_dir, "accuracy") ++ if not 
os.path.exists(test_acc_path):
++        log.error("%s has no accuracy directory", test_dir)
++        is_valid = False
++    else:
++        diff = files_diff(list_files(test_acc_path), REQUIRED_TEST01_ACC_FILES_1 if acc_passed else REQUIRED_TEST01_ACC_FILES)
++        if diff:
++            log.error("%s has file list mismatch (%s)", test_acc_path, diff)
++            is_valid = False
++
++    return is_valid
++
++def check_compliance_dir(compliance_dir, model, scenario, config, division, system_json):
++    compliance_perf_pass = True
++    compliance_perf_dir_pass = True
++    compliance_acc_pass = True
++    test_list = ["TEST01", "TEST04", "TEST05"]
++
++    if model in ["rnnt", "bert-99", "bert-99.9", "dlrm-99", "dlrm-99.9", "3d-unet-99", "3d-unet-99.9", "retinanet"]:
++        test_list.remove("TEST04")
++
++    # Check performance of all tests
++    for test in test_list:
++        test_dir = os.path.join(compliance_dir, test)
++        if not os.path.exists(test_dir):
++            log.error("Missing %s in compliance dir %s", test, compliance_dir)
++            compliance_perf_dir_pass = False
++        else:
++            try:
++                compliance_perf_dir = os.path.join(compliance_dir, test, "performance","run_1")
++                compliance_perf_valid, r, is_inferred = check_performance_dir(config, model, compliance_perf_dir, scenario, division, system_json)
++                if is_inferred:
++                    log.info("%s has inferred results, qps=%s", compliance_perf_dir, r)
++            except Exception as e:
++                log.error("%s caused exception in check_performance_dir: %s", compliance_perf_dir, e)
++                compliance_perf_valid, r = False, None
++            compliance_perf_pass = compliance_perf_pass and check_compliance_perf_dir(test_dir) and compliance_perf_valid
++
++    # Check accuracy for TEST01
++    compliance_acc_pass = check_compliance_acc_dir(os.path.join(compliance_dir, "TEST01"))
++
++    return compliance_perf_pass and compliance_acc_pass and compliance_perf_dir_pass
++
++def main():
++    args = get_args()
++
++    config = Config(args.version, args.extra_model_benchmark_map, ignore_uncommited=args.submission_exceptions,
++                    more_power_check=args.more_power_check)
++
++    with open(args.csv, "w") as csv:
++        os.chdir(args.input)
++        # check results directory
++        results = check_results_dir(config, args.submitter, args.skip_compliance, csv, args.debug)
++
++    # log results
++    log.info("---")
++    with_results = 0
++    for k, v in sorted(results.items()):
++        if v:
++            log.info("Results %s %s", k, v)
++            with_results += 1
++    log.info("---")
++    for k, v in sorted(results.items()):
++        if v is None:
++            log.error("NoResults %s", k)
++
++    # print summary
++    log.info("---")
++    log.info("Results=%d, NoResults=%d", with_results, len(results) - with_results)
++    if len(results) != with_results:
++        log.error("SUMMARY: submission has errors")
++        return 1
++    else:
++        log.info("SUMMARY: submission looks OK")
++        return 0
++
++
++if __name__ == "__main__":
++    sys.exit(main())
+diff --git a/vision/classification_and_detection/run_local.sh b/vision/classification_and_detection/run_local.sh
+index e69e3b8..fa5c482 100755
+--- a/vision/classification_and_detection/run_local.sh
++++ b/vision/classification_and_detection/run_local.sh
+@@ -4,7 +4,7 @@ source ./run_common.sh
+
+ common_opt="--mlperf_conf ../../mlperf.conf"
+ dataset="--dataset-path $DATA_DIR"
+-OUTPUT_DIR=`pwd`/output/$name
++OUTPUT_DIR=${OUTPUT_DIR:-`pwd`/output/$name}
+ if [ !
-d $OUTPUT_DIR ]; then + mkdir -p $OUTPUT_DIR + fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/openimages-pycocotools.patch b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/openimages-pycocotools.patch new file mode 100644 index 000000000..7dc312678 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/openimages-pycocotools.patch @@ -0,0 +1,24 @@ +diff --git a/vision/classification_and_detection/tools/accuracy-openimages.py b/vision/classification_and_detection/tools/accuracy-openimages.py +index 655ae5c..497dcbd 100644 +--- a/vision/classification_and_detection/tools/accuracy-openimages.py ++++ b/vision/classification_and_detection/tools/accuracy-openimages.py +@@ -36,7 +36,7 @@ def main(): + annotations_file = os.environ.get('DATASET_ANNOTATIONS_FILE_PATH') + if not annotations_file: + annotations_file = os.path.join(args.openimages_dir, "annotations/openimages-mlperf.json") +- cocoGt = COCO(annotations_file) ++ cocoGt = COCO(annotations_file, use_ext=True) + + if args.use_inv_map: + inv_map = [0] + cocoGt.getCatIds() # First label in inv_map is not used +@@ -100,8 +100,8 @@ def main(): + with open(args.output_file, "w") as fp: + json.dump(detections, fp, sort_keys=True, indent=4) + +- cocoDt = cocoGt.loadRes(args.output_file) # Load from file to bypass error with Python3 +- cocoEval = COCOeval(cocoGt, cocoDt, iouType='bbox') ++ cocoDt = cocoGt.loadRes(args.output_file, use_ext=True) # Load from file to bypass error with Python3 ++ cocoEval = COCOeval(cocoGt, cocoDt, iouType='bbox', use_ext=True) + cocoEval.params.imgIds = list(image_ids) + cocoEval.evaluate() + cocoEval.accumulate() diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/windows-openimages.patch b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/windows-openimages.patch new file mode 100644 index 000000000..5be282ac6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/windows-openimages.patch @@ -0,0 +1,64 @@ +--- a/vision/classification_and_detection/tools/openimages.py Thu May 11 12:46:00 2023 ++++ b/vision/classification_and_detection/tools/openimages.py Thu May 11 13:02:53 2023 +@@ -57,6 +57,44 @@ + MAP_CLASSES_URL = "https://storage.googleapis.com/openimages/v5/class-descriptions-boxable.csv" + MAP_CLASSES_FILE = "class-descriptions-boxable.csv" + CHUNK_SIZE = 1024 * 8 ++MLPERF_CLASSES=['Airplane', 'Antelope', 'Apple', 'Backpack', 'Balloon', 'Banana', ++'Barrel', 'Baseball bat', 'Baseball glove', 'Bee', 'Beer', 'Bench', 'Bicycle', ++'Bicycle helmet', 'Bicycle wheel', 'Billboard', 'Book', 'Bookcase', 'Boot', ++'Bottle', 'Bowl', 'Bowling equipment', 'Box', 'Boy', 'Brassiere', 'Bread', ++'Broccoli', 'Bronze sculpture', 'Bull', 'Bus', 'Bust', 'Butterfly', 'Cabinetry', ++'Cake', 'Camel', 'Camera', 'Candle', 'Candy', 'Cannon', 'Canoe', 'Carrot', 'Cart', ++'Castle', 'Cat', 'Cattle', 'Cello', 'Chair', 'Cheese', 'Chest of drawers', 'Chicken', ++'Christmas tree', 'Coat', 'Cocktail', 'Coffee', 'Coffee cup', 'Coffee table', 'Coin', ++'Common sunflower', 'Computer keyboard', 'Computer monitor', 'Convenience store', ++'Cookie', 'Countertop', 'Cowboy hat', 'Crab', 'Crocodile', 'Cucumber', 'Cupboard', ++'Curtain', 'Deer', 'Desk', 'Dinosaur', 'Dog', 'Doll', 'Dolphin', 'Door', 'Dragonfly', ++'Drawer', 'Dress', 'Drum', 'Duck', 'Eagle', 'Earrings', 'Egg (Food)', 'Elephant', ++'Falcon', 'Fedora', 'Flag', 'Flowerpot', 'Football', 'Football helmet', 'Fork', ++'Fountain', 'French fries', 'French 
horn', 'Frog', 'Giraffe', 'Girl', 'Glasses', ++'Goat', 'Goggles', 'Goldfish', 'Gondola', 'Goose', 'Grape', 'Grapefruit', 'Guitar', ++'Hamburger', 'Handbag', 'Harbor seal', 'Headphones', 'Helicopter', 'High heels', ++'Hiking equipment', 'Horse', 'House', 'Houseplant', 'Human arm', 'Human beard', ++'Human body', 'Human ear', 'Human eye', 'Human face', 'Human foot', 'Human hair', ++'Human hand', 'Human head', 'Human leg', 'Human mouth', 'Human nose', 'Ice cream', ++'Jacket', 'Jeans', 'Jellyfish', 'Juice', 'Kitchen & dining room table', 'Kite', ++'Lamp', 'Lantern', 'Laptop', 'Lavender (Plant)', 'Lemon', 'Light bulb', 'Lighthouse', ++'Lily', 'Lion', 'Lipstick', 'Lizard', 'Man', 'Maple', 'Microphone', 'Mirror', ++'Mixing bowl', 'Mobile phone', 'Monkey', 'Motorcycle', 'Muffin', 'Mug', 'Mule', ++'Mushroom', 'Musical keyboard', 'Necklace', 'Nightstand', 'Office building', ++'Orange', 'Owl', 'Oyster', 'Paddle', 'Palm tree', 'Parachute', 'Parrot', 'Pen', ++'Penguin', 'Personal flotation device', 'Piano', 'Picture frame', 'Pig', 'Pillow', ++'Pizza', 'Plate', 'Platter', 'Porch', 'Poster', 'Pumpkin', 'Rabbit', 'Rifle', ++'Roller skates', 'Rose', 'Salad', 'Sandal', 'Saucer', 'Saxophone', 'Scarf', 'Sea lion', ++'Sea turtle', 'Sheep', 'Shelf', 'Shirt', 'Shorts', 'Shrimp', 'Sink', 'Skateboard', ++'Ski', 'Skull', 'Skyscraper', 'Snake', 'Sock', 'Sofa bed', 'Sparrow', 'Spider', 'Spoon', ++'Sports uniform', 'Squirrel', 'Stairs', 'Stool', 'Strawberry', 'Street light', ++'Studio couch', 'Suit', 'Sun hat', 'Sunglasses', 'Surfboard', 'Sushi', 'Swan', ++'Swimming pool', 'Swimwear', 'Tank', 'Tap', 'Taxi', 'Tea', 'Teddy bear', 'Television', ++'Tent', 'Tie', 'Tiger', 'Tin can', 'Tire', 'Toilet', 'Tomato', 'Tortoise', 'Tower', ++'Traffic light', 'Train', 'Tripod', 'Truck', 'Trumpet', 'Umbrella', 'Van', 'Vase', ++'Vehicle registration plate', 'Violin', 'Wall clock', 'Waste container', 'Watch', ++'Whale', 'Wheel', 'Wheelchair', 'Whiteboard', 'Window', 'Wine', 'Wine glass', 'Woman', ++'Zebra', 'Zucchini'] + + + def get_args(): +@@ -70,14 +108,14 @@ + ) + parser.add_argument( + "--classes", +- default=None, ++ default=MLPERF_CLASSES, + nargs="+", + type=str, + help="Classes to download. 
default to all classes", + ) + parser.add_argument( + "--output-labels", +- default="labels.json", ++ default="openimages-mlperf.json", + type=str, + help="Name of the file to output output the labels", + ) diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/windows-openimages2.patch b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/windows-openimages2.patch new file mode 100644 index 000000000..fa0e43fcd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-src/patch/windows-openimages2.patch @@ -0,0 +1,11 @@ +--- a/vision/classification_and_detection/python/openimages.py Thu May 11 13:56:13 2023 ++++ b/vision/classification_and_detection/python/openimages.py Thu May 11 13:02:53 2023 +@@ -85,7 +85,7 @@ + not_found += 1 + continue + else: +- src = os.path.join(data_path, image_name) ++ src = os.path.join(data_path, "validation", "data", image_name) + if not os.path.exists(src): + # if the image does not exists ignore it + not_found += 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/README.md new file mode 100644 index 000000000..c74f6d8a2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-submission-dir](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-submission-dir) for the documentation of this CM script. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/_cm.yaml new file mode 100644 index 000000000..84f4b30cc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/_cm.yaml @@ -0,0 +1,38 @@ +alias: get-mlperf-inference-submission-dir +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +deps: [] +docker: + run: false +input_description: {} +input_mapping: + submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR +new_env_keys: +- CM_MLPERF_INFERENCE_SUBMISSION_DIR +- CM_MLPERF_INFERENCE_SUBMISSION_VERSION +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- get +- mlperf +- inference +- submission +- local +- dir +- directory +uid: ddf36a41d6934a7e +variations: + version.#: + env: + CM_MLPERF_INFERENCE_SUBMISSION_VERSION: '#' + group: version + version.4_1-dev: + default: true + env: + CM_MLPERF_INFERENCE_SUBMISSION_VERSION: 4_1-dev + group: version +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/customize.py new file mode 100644 index 000000000..583a94117 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-submission-dir/customize.py @@ -0,0 +1,43 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '') == '': + if not os.path.exists("mlperf-inference-submission"): + os.mkdir("mlperf-inference-submission") + env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] = os.path.join( + os.getcwd(), "mlperf-inference-submission") + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
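To make the customize.py contract of the get-mlperf-inference-submission-dir script above concrete, here is a minimal sketch of how such a hook could be driven, assuming preprocess() is importable from that customize.py and that CM passes a dictionary with at least the keys the function reads; the driver itself is hypothetical:

    from customize import preprocess  # hypothetical import path

    # Minimal input dict with just the keys preprocess() reads.
    i = {'os_info': {}, 'env': {}, 'meta': {}, 'automation': None}
    r = preprocess(i)          # creates ./mlperf-inference-submission if no dir is set
    assert r['return'] == 0    # CM treats a non-zero 'return' as failure
    print(i['env']['CM_MLPERF_INFERENCE_SUBMISSION_DIR'])

Note that preprocess() mutates i['env'] in place, which is how the resolved directory path is handed back to the calling automation.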
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/README-extra.md new file mode 100644 index 000000000..41e6b8cc9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/README-extra.md @@ -0,0 +1,6 @@ +# Get Config SUT MLPerf Inference +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) loads the MLPerf inference performance configuration of a given System Under Test (SUT). + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/README.md new file mode 100644 index 000000000..993c273cf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-configs](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-configs) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/_cm.yaml new file mode 100644 index 000000000..8913bdc29 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/_cm.yaml @@ -0,0 +1,32 @@ +alias: get-mlperf-inference-sut-configs +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: MLPerf benchmark support +default_env: + CM_GIT_URL: '' + CM_SUT_CONFIGS_PATH: '' +deps: +- env: + CM_CACHE_DIR_ENV_NAME: CM_SUT_CONFIGS_PATH + extra_cache_tags: mlperf,inference,sut,configs + tags: get,cache,dir,_name.mlperf-inference-sut-configs +input_mapping: + configs_git_url: CM_GIT_URL + repo_path: CM_SUT_CONFIGS_PATH + run_config: CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX +new_env_keys: +- CM_HW_* +- CM_SUT_* +- CM_MLPERF_INFERENCE_SUT_RUN_CONFIG +new_state_keys: +- CM_SUT_* +tags: +- get +- mlperf +- inference +- sut +- configs +- sut-configs +uid: c2fbf72009e2445b +variations: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/IntelSPR.24c/intel-implementation/cpu-device/pytorch-framework/default-config.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/IntelSPR.24c/intel-implementation/cpu-device/pytorch-framework/default-config.yaml new file mode 100644 index 000000000..040ccbdcc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/IntelSPR.24c/intel-implementation/cpu-device/pytorch-framework/default-config.yaml @@ -0,0 +1,38 @@ +--- + resnet50: + Offline: + target_qps: 1000.0 + Server: + target_qps: 500.0 + retinanet: + Offline: + target_qps: 50.0 + Server: + target_qps: 30 + bert-99: + Offline: + target_qps: 100 + bert-99.9: + Offline: + target_qps: 100 + 3d-unet-99: + Offline: + target_qps: 1.0 + 3d-unet-99.9: + Offline: + target_qps: 1.0 + gptj-99.9: + Offline: + target_qps: 0.5 + Server: + target_qps: 0.3 + gptj-99: + Offline: + target_qps: 0.5 + Server: + target_qps: 0.3 + sdxl: + Offline: + target_qps: 0.1 + Server: + target_qps: 0.1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/IntelSPR.24c/intel-implementation/cpu-device/pytorch-framework/framework-version-default/default-config.yaml 
b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/IntelSPR.24c/intel-implementation/cpu-device/pytorch-framework/framework-version-default/default-config.yaml new file mode 100644 index 000000000..040ccbdcc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/IntelSPR.24c/intel-implementation/cpu-device/pytorch-framework/framework-version-default/default-config.yaml @@ -0,0 +1,38 @@ +--- + resnet50: + Offline: + target_qps: 1000.0 + Server: + target_qps: 500.0 + retinanet: + Offline: + target_qps: 50.0 + Server: + target_qps: 30 + bert-99: + Offline: + target_qps: 100 + bert-99.9: + Offline: + target_qps: 100 + 3d-unet-99: + Offline: + target_qps: 1.0 + 3d-unet-99.9: + Offline: + target_qps: 1.0 + gptj-99.9: + Offline: + target_qps: 0.5 + Server: + target_qps: 0.3 + gptj-99: + Offline: + target_qps: 0.5 + Server: + target_qps: 0.3 + sdxl: + Offline: + target_qps: 0.1 + Server: + target_qps: 0.1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x1/nvidia_original-implementation/gpu-device/tensorrt-framework/default-config.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x1/nvidia_original-implementation/gpu-device/tensorrt-framework/default-config.yaml new file mode 100644 index 000000000..f7a8477a8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x1/nvidia_original-implementation/gpu-device/tensorrt-framework/default-config.yaml @@ -0,0 +1,30 @@ +3d-unet-99: + Offline: + target_qps: 4.0 + SingleStream: + target_latency: 400 +3d-unet-99.9: + Offline: + target_qps: 4.0 + SingleStream: + target_latency: 400 +bert-99: + Offline: + target_qps: 4000.0 + Server: + target_qps: 3800.0 +bert-99.9: + Offline: + target_qps: 2000.0 + Server: + target_qps: 1600.0 +resnet50: + Offline: + target_qps: '42959.4' + Server: + target_qps: 35000.0 +retinanet: + Offline: + target_qps: 850.0 + Server: + target_qps: 630.0 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x1/nvidia_original-implementation/gpu-device/tensorrt-framework/framework-version-default/default-config.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x1/nvidia_original-implementation/gpu-device/tensorrt-framework/framework-version-default/default-config.yaml new file mode 100644 index 000000000..a9ad05a50 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x1/nvidia_original-implementation/gpu-device/tensorrt-framework/framework-version-default/default-config.yaml @@ -0,0 +1,36 @@ +3d-unet-99: + Offline: + target_qps: 4.0 + SingleStream: + target_latency: 400 +3d-unet-99.9: + Offline: + target_qps: 4.0 + SingleStream: + target_latency: 400 +bert-99: + Offline: + target_qps: 4000.0 + Server: + target_qps: 3800.0 +bert-99.9: + Offline: + target_qps: 2000.0 + Server: + target_qps: 1400.0 +resnet50: + Offline: + target_qps: '42959.4' + Server: + target_qps: 35000.0 +retinanet: + Offline: + target_qps: 850.0 + Server: + target_qps: 630.0 +sdxl: + Offline: + target_qps: 0.7 + Server: + target_qps: 0.3 + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x2/nvidia_original-implementation/gpu-device/tensorrt-framework/default-config.yaml 
b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x2/nvidia_original-implementation/gpu-device/tensorrt-framework/default-config.yaml new file mode 100644 index 000000000..7b24138ac --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x2/nvidia_original-implementation/gpu-device/tensorrt-framework/default-config.yaml @@ -0,0 +1,38 @@ +--- + resnet50: + Offline: + target_qps: 43000.0 + Server: + target_qps: 37000.0 + retinanet: + Offline: + target_qps: 650.0 + Server: + target_qps: 600 + bert-99: + Offline: + target_qps: 4000 + bert-99.9: + Offline: + target_qps: 4000 + 3d-unet-99: + Offline: + target_qps: 2.0 + 3d-unet-99.9: + Offline: + target_qps: 2.0 + gptj-99.9: + Offline: + target_qps: 4 + Server: + target_qps: 3.5 + gptj-99: + Offline: + target_qps: 4 + Server: + target_qps: 3.5 + sdxl: + Offline: + target_qps: 2 + Server: + target_qps: 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x2/nvidia_original-implementation/gpu-device/tensorrt-framework/framework-version-default/default-config.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x2/nvidia_original-implementation/gpu-device/tensorrt-framework/framework-version-default/default-config.yaml new file mode 100644 index 000000000..4820e8b52 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/RTX4090x2/nvidia_original-implementation/gpu-device/tensorrt-framework/framework-version-default/default-config.yaml @@ -0,0 +1,42 @@ +--- + resnet50: + Offline: + target_qps: 88000.0 + Server: + target_qps: 73000.0 + retinanet: + Offline: + target_qps: 1700.0 + Server: + target_qps: 1400 + bert-99: + Offline: + target_qps: 8000 + Server: + target_qps: 6000 + bert-99.9: + Offline: + target_qps: 3500 + Server: + target_qps: 3000 + 3d-unet-99: + Offline: + target_qps: 8.0 + 3d-unet-99.9: + Offline: + target_qps: 8.0 + gptj-99.9: + Offline: + target_qps: 8 + Server: + target_qps: 7 + gptj-99: + Offline: + target_qps: 8 + Server: + target_qps: 7 + sdxl: + Offline: + target_qps: 1.3 + Server: + target_qps: 0.6 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/default/config.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/default/config.yaml new file mode 100644 index 000000000..0a30a8cd2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/default/config.yaml @@ -0,0 +1,73 @@ +--- + resnet50: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 0.1 + MultiStream: + target_latency: 0.1 + retinanet: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 1 + MultiStream: + target_latency: 1 + bert-99: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 1 + bert-99.9: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + 3d-unet-99: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 500 + 3d-unet-99.9: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 500 + gpt-j: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 500 + sdxl: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 200 + llama2-70b-99: + Offline: + target_qps: 0.1 + Server: + target_qps: 0.1 + SingleStream: + 
target_latency: 2000 + llama2-70b-99.9: + Offline: + target_qps: 0.1 + Server: + target_qps: 0.1 + SingleStream: + target_latency: 2000 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/default/default/default-config.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/default/default/default-config.yaml new file mode 100644 index 000000000..8fdf44d7d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/configs/default/default/default-config.yaml @@ -0,0 +1,55 @@ +--- + resnet50: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 + retinanet: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 + bert-99: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 + bert-99.9: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 + 3d-unet-99: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 + 3d-unet-99.9: + Offline: + target_qps: 1.0 + Server: + target_qps: 1.0 + SingleStream: + target_latency: 10 + MultiStream: + target_latency: 80 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/customize.py new file mode 100644 index 000000000..b66c9cb20 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-configs/customize.py @@ -0,0 +1,155 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import yaml +import shutil + + +def postprocess(i): + env = i['env'] + state = i['state'] + + if env.get('CM_HW_NAME', '') == '': + host_name = env.get('CM_HOST_SYSTEM_NAME', 'default').replace("-", "_") + env['CM_HW_NAME'] = host_name + + device = env.get('CM_MLPERF_DEVICE', 'cpu') + + backend = env.get('CM_MLPERF_BACKEND', 'default') + if env.get('CM_MLPERF_BACKEND_VERSION', '') != '': + backend_version = "v" + env.get('CM_MLPERF_BACKEND_VERSION') if not env.get( + 'CM_MLPERF_BACKEND_VERSION').startswith("v") else env.get('CM_MLPERF_BACKEND_VERSION') + else: + backend_version = 'vdefault' + + if 'CM_SUT_CONFIG' not in state: + state['CM_SUT_CONFIG'] = {} + if 'CM_SUT_CONFIG_PATH' not in state: + state['CM_SUT_CONFIG_PATH'] = {} + + implementation_string = env['CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX'] if env.get( + 'CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX', '') != '' else env.get( + 'CM_MLPERF_IMPLEMENTATION', 'default') + + run_config = [] + for i in range(1, 6): + if env.get(f'CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX{i}', '') != '': + run_config.append( + env.get(f'CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX{i}')) + + run_config_string = "_".join( + run_config) if run_config else 'default_config' + env['CM_MLPERF_INFERENCE_SUT_RUN_CONFIG'] = run_config_string + + if 
env.get('CM_SUT_NAME', '') == '': + env['CM_SUT_NAME'] = env['CM_HW_NAME'] + "-" + implementation_string + "-" + \ + device + "-" + backend + "-" + backend_version + "-" + run_config_string + + if env.get('CM_SUT_CONFIGS_PATH', '') != '': + path = env['CM_SUT_CONFIGS_PATH'] + elif env.get('CM_SUT_USE_EXTERNAL_CONFIG_REPO', '') == "yes": + path = env.get('CM_GIT_CHECKOUT_PATH') + else: + path = os.path.join(os.getcwd(), "configs") + + config_path = os.path.join( + path, + env['CM_HW_NAME'], + implementation_string + + "-implementation", + device + + "-device", + backend + + "-framework", + "framework-version-" + + backend_version, + run_config_string + + "-config.yaml") + if not os.path.exists(config_path): + os.makedirs(os.path.dirname(config_path), exist_ok=True) + config_path_default = os.path.join( + path, + env['CM_HW_NAME'], + implementation_string + + "-implementation", + device + + "-device", + backend + + "-framework", + "default-config.yaml") + if os.path.exists(config_path_default): + shutil.copy(config_path_default, config_path) + else: + src_config_full = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], + "configs", + env['CM_HW_NAME'], + implementation_string + "-implementation", + device + "-device", + backend + "-framework", + "framework-version-" + backend_version, + run_config_string + "-config.yaml") + src_config_partial1 = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], + "configs", + env['CM_HW_NAME'], + implementation_string + "-implementation", + device + "-device", + backend + "-framework", + "framework-version-" + backend_version, + "default-config.yaml") + src_config_partial2 = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], + "configs", + env['CM_HW_NAME'], + implementation_string + "-implementation", + device + "-device", + backend + "-framework", + "framework-version-default", + "default-config.yaml") + src_config_partial3 = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], + "configs", + env['CM_HW_NAME'], + implementation_string + "-implementation", + device + "-device", + backend + "-framework", + "default-config.yaml") + if os.path.exists(src_config_full): + shutil.copy(src_config_full, config_path) + elif os.path.exists(src_config_partial1): + shutil.copy(src_config_partial1, config_path) + elif os.path.exists(src_config_partial2): + shutil.copy(src_config_partial2, config_path) + elif os.path.exists(src_config_partial3): + shutil.copy(src_config_partial3, config_path) + else: + print( + f"Config file missing for given hw_name: '{env['CM_HW_NAME']}', implementation: '{implementation_string}', device: '{device}, backend: '{backend}', copying from default") + src_config = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], + "configs", + "default", + "config.yaml") + shutil.copy(src_config, config_path) + os.makedirs( + os.path.dirname(config_path_default), + exist_ok=True) + shutil.copy(src_config, config_path_default) + + state['CM_SUT_CONFIG'][env['CM_SUT_NAME']] = yaml.load( + open(config_path), Loader=yaml.SafeLoader) + state['CM_SUT_CONFIG_NAME'] = env['CM_SUT_NAME'] + state['CM_SUT_CONFIG_PATH'][env['CM_SUT_NAME']] = config_path + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. 
+ +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/README.md new file mode 100644 index 000000000..1654ae928 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-description](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-description) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/_cm.yaml new file mode 100644 index 000000000..9d2139869 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/_cm.yaml @@ -0,0 +1,55 @@ +alias: get-mlperf-inference-sut-description +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: MLPerf benchmark support +default_env: + CM_SUT_DESC_CACHE: 'no' +deps: +- tags: detect,os +- tags: detect,cpu +- names: + - python3 + - python + tags: get,python3 +- names: + - compiler + skip_if_env: + CM_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP: + - 'yes' + tags: get,compiler +- enable_if_env: + CM_MLPERF_DEVICE: + - gpu + - cuda + tags: get,cuda-devices,_with-pycuda +- enable_if_env: + CM_DETERMINE_MEMORY_CONFIGURATION: + - 'yes' + CM_HOST_OS_TYPE: + - linux + tags: detect,sudo +- tags: get,generic-python-lib,_package.dmiparser +- env: + CM_CACHE_DIR_ENV_NAME: CM_MLPERF_INFERENCE_SUT_DESC_PATH + extra_cache_tags: mlperf,inference,sut,descriptions + tags: get,cache,dir,_name.mlperf-inference-sut-descriptions +docker: + run: false +input_mapping: + name: CM_HW_NAME + submitter: CM_MLPERF_SUBMITTER +new_env_keys: +- CM_HW_* +- CM_SUT_* +new_state_keys: +- CM_SUT_* +- CM_HW_* +tags: +- get +- mlperf +- sut +- description +- system-under-test +- system-description +uid: e49a3f758b2d4e7b diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/customize.py new file mode 100644 index 000000000..5bc14bc2d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/customize.py @@ -0,0 +1,198 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import json +import shutil + + +def preprocess(i): + env = i['env'] + state = i['state'] + os_info = i['os_info'] + + submitter = env.get('CM_MLPERF_SUBMITTER', 'MLCommons') + + auto_detected_hw_name = False + if env.get('CM_HW_NAME', '') == '': + host_name = env.get('CM_HOST_SYSTEM_NAME', 'default').replace("-", "_") + env['CM_HW_NAME'] = host_name + auto_detected_hw_name = True + + hw_name = env['CM_HW_NAME'] + + backend = env.get('CM_MLPERF_BACKEND', '') + backend_version = env.get('CM_MLPERF_BACKEND_VERSION', '') 
+ sut_suffix = '' + backend_name = '' + backend_desc = '' + if backend: + backend_name = env.get('CM_MLPERF_BACKEND_NAME', backend) + sut_suffix = "-" + backend + backend_desc = backend_name + if backend_version: + sut_suffix += "-" + backend_version + backend_desc += ' v' + backend_version + + sut = hw_name + sut_suffix + script_path = i['run_script_input']['path'] + sut_desc_path = env['CM_MLPERF_INFERENCE_SUT_DESC_PATH'] + + sut_path = os.path.join(sut_desc_path, "suts", sut + ".json") + if os.path.exists(sut_path) and env.get('CM_SUT_DESC_CACHE', '') == "yes": + print(f"Reusing SUT description file {sut}") + state['CM_SUT_META'] = json.load(open(sut_path)) + else: + if not os.path.exists(os.path.dirname(sut_path)): + os.makedirs(os.path.dirname(sut_path)) + + print("Generating SUT description file for " + sut) + hw_path = os.path.join(os.getcwd(), "hardware", hw_name + ".json") + if not os.path.exists(os.path.dirname(hw_path)): + os.makedirs(os.path.dirname(hw_path)) + if not os.path.exists(hw_path): + default_hw_path = os.path.join( + script_path, "hardware", "default.json") + print("HW description file for " + hw_name + + " not found. Copying from default!!!") + shutil.copy(default_hw_path, hw_path) + + state['CM_HW_META'] = json.load(open(hw_path)) + state['CM_SUT_META'] = state['CM_HW_META'] + state['CM_SUT_META']['framework'] = backend_desc + os_name = env.get('CM_HOST_OS_FLAVOR', '').capitalize() + os_version = env.get('CM_HOST_OS_VERSION', '') + if os_name and os_version: + os_name_string = os_name + " " + os_version + else: + os_name_string = '' + os_type = env.get('CM_HOST_OS_TYPE', '') + kernel = env.get('CM_HOST_OS_KERNEL_VERSION', '') + if os_type and kernel: + os_name_string += " (" + os_type + "-" + kernel + glibc_version = env.get('CM_HOST_OS_GLIBC_VERSION', '') + if glibc_version: + os_name_string += '-glibc' + glibc_version + os_name_string += ')' + python_version = env.get('CM_PYTHON_VERSION', '') + compiler = env.get('CM_COMPILER_FAMILY', '') + compiler_version = env.get('CM_COMPILER_VERSION', '') + state['CM_SUT_META']['submitter'] = submitter + + # If Windows and os_name_string is empty, rebuild it: + + if os_name_string == '' and os_info['platform'] == 'windows': + import platform + os_name_string = str(platform.platform()) + + state['CM_SUT_META']['operating_system'] = os_name_string + + state['CM_SUT_META']['other_software_stack'] = "Python: " + \ + python_version + ", " + compiler + "-" + compiler_version + + if env.get('CM_DOCKER_VERSION', '') != '': + state['CM_SUT_META']['other_software_stack'] += " Docker version:" + \ + env['CM_DOCKER_VERSION'] + else: + if os.path.exists('/.dockerenv'): + state['CM_SUT_META']['other_software_stack'] += ", Using Docker " + + if state['CM_SUT_META'].get('system_name', '') == '': + system_name = env.get('CM_MLPERF_SYSTEM_NAME') + if not system_name: + system_name = env.get('CM_HW_NAME') + if system_name: + if auto_detected_hw_name: + system_name += " (auto detected)" + else: + system_name = " (generic)" + state['CM_SUT_META']['system_name'] = system_name + + # Add GPU info + + if env.get('CM_MLPERF_DEVICE', '') == "gpu" or env.get( + 'CM_MLPERF_DEVICE', '') == "cuda": + if env.get('CM_CUDA_VERSION', '') != '': + cuda_version = " , CUDA " + env['CM_CUDA_VERSION'] + state['CM_SUT_META']['other_software_stack'] += cuda_version + + if 'cm_cuda_device_prop' in state: + state['CM_SUT_META']['accelerator_frequency'] = state['cm_cuda_device_prop']['Max clock rate'] + state['CM_SUT_META']['accelerator_memory_capacity'] = str(int( + 
state['cm_cuda_device_prop']['Global memory']) / (1024 * 1024.0 * 1024)) + " GB" + state['CM_SUT_META']['accelerator_model_name'] = state['cm_cuda_device_prop']['GPU Name'] + num_accelerators = env.get('CM_CUDA_NUM_DEVICES', "1") + state['CM_SUT_META']['accelerators_per_node'] = num_accelerators + + if state['CM_SUT_META'].get('host_processor_core_count', '') == '': + physical_cores_per_node = env.get( + 'CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET') + + if physical_cores_per_node is None or physical_cores_per_node == '': + if os_info['platform'] == 'windows': + physical_cores_per_node = '1' + + state['CM_SUT_META']['host_processor_core_count'] = physical_cores_per_node + + if state['CM_SUT_META'].get('host_processor_model_name', '') == '': + state['CM_SUT_META']['host_processor_model_name'] = env.get( + 'CM_HOST_CPU_MODEL_NAME', 'undefined') + if state['CM_SUT_META'].get('host_processors_per_node', '') == '': + x = env.get('CM_HOST_CPU_SOCKETS', '') + if x == '' and os_info['platform'] == 'windows': + x = '1' + state['CM_SUT_META']['host_processors_per_node'] = x + + if state['CM_SUT_META'].get('host_processor_caches', '') == '': + state['CM_SUT_META']['host_processor_caches'] = "L1d cache: " + env.get('CM_HOST_CPU_L1D_CACHE_SIZE', ' ') + \ + ", L1i cache: " + env.get('CM_HOST_CPU_L1I_CACHE_SIZE', ' ') + ", L2 cache: " + \ + env.get('CM_HOST_CPU_L2_CACHE_SIZE', ' ') + \ + ", L3 cache: " + env.get('CM_HOST_CPU_L3_CACHE_SIZE', ' ') + + if state['CM_SUT_META'].get('host_processor_frequency', '') == '': + state['CM_SUT_META']['host_processor_frequency'] = env.get( + 'CM_HOST_CPU_MAX_MHZ') if env.get('CM_HOST_CPU_MAX_MHZ', '') != '' else 'undefined' + if state['CM_SUT_META'].get('host_memory_capacity', '') == '': + state['CM_SUT_META']['host_memory_capacity'] = env.get( + 'CM_HOST_MEMORY_CAPACITY') if env.get('CM_HOST_MEMORY_CAPACITY', '') != '' else 'undefined' + if state['CM_SUT_META'].get('host_storage_capacity', '') == '': + state['CM_SUT_META']['host_storage_capacity'] = env.get( + 'CM_HOST_DISK_CAPACITY') if env.get('CM_HOST_DISK_CAPACITY', '') != '' else 'undefined' + if 'CM_SUT_SW_NOTES' in env: + sw_notes = env['CM_SUT_SW_NOTES'] + else: + sw_notes = '' + state['CM_SUT_META']['sw_notes'] = sw_notes + + if env.get('CM_SUDO_USER', '') == "yes" and env.get( + 'CM_HOST_OS_TYPE', 'linux'): + r = i['automation'].run_native_script( + {'run_script_input': i['run_script_input'], 'env': env, 'script_name': 'detect_memory'}) + if r['return'] > 0: + return r + if env.get('CM_HOST_MEM_INFO', '') != '': + state['CM_SUT_META']['host_memory_configuration'] = env['CM_HOST_MEM_INFO'] + + state['CM_SUT_META'] = dict(sorted(state['CM_SUT_META'].items())) + + sut_file = open(sut_path, "w") + json.dump(state['CM_SUT_META'], sut_file, indent=4) + sut_file.close() + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/detect_memory.sh b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/detect_memory.sh new file mode 100644 index 000000000..8a65daa13 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/detect_memory.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +if [[ ${CM_SUDO_USER} == "yes" ]]; then + ${CM_SUDO} dmidecode -t memory > meminfo.out + ${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/get_memory_info.py +fi +test $? -eq 0 || return $? 
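Before the parser itself, note the contract between `detect_memory.sh` above and the script automation: the helper dumps `dmidecode -t memory` under sudo, and `get_memory_info.py` (next hunk) condenses it into a single `CM_HOST_MEM_INFO=...` line in `tmp-run-env.out`, which CM then folds back into `env` after the native script finishes. A minimal sketch of that `KEY=VALUE` round-trip, assuming one pair per line; the `read_native_script_env` helper is illustrative and not part of this patch:

```
# Illustrative sketch (not part of this patch): how KEY=VALUE lines written
# to tmp-run-env.out by a native script become visible in the CM 'env' dict.
import os

def read_native_script_env(path='tmp-run-env.out'):
    updates = {}
    if not os.path.exists(path):
        return updates
    with open(path, 'r') as f:
        for line in f:
            line = line.strip()
            if not line or '=' not in line:
                continue
            key, _, value = line.partition('=')  # split on the first '=' only
            updates[key] = value
    return updates

env = {}
env.update(read_native_script_env())
print(env.get('CM_HOST_MEM_INFO', 'memory info not detected'))
```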
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/get_memory_info.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/get_memory_info.py new file mode 100644 index 000000000..27d0f870a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/get_memory_info.py @@ -0,0 +1,61 @@ +import os +import json +from dmiparser import DmiParser + +with open("meminfo.out", "r") as f: + text = f.read() + parser = DmiParser(text, sort_keys=True, indent=4) + + parsedStr = str(parser) + parsedObj = json.loads(str(parser)) + memory = [] + + ind = 0 + needed_global_keys = ['Speed', 'Configured Memory Speed', 'Type'] + added_global_keys = [] + needed_keys = ['Size', 'Rank'] + + for item in parsedObj: + if item['name'] == 'Physical Memory Array': + ecc_value = item['props']['Error Correction Type']['values'][0] + if not ecc_value or 'None' in ecc_value: + ecc_value = "No ECC" + memory.append({"info": ['Error Correction Type: ' + ecc_value]}) + ind += 1 + continue + if item['name'] != 'Memory Device': + continue + memory.append({}) + memory[ind]['handle'] = item['handle'] + memory[ind]['info'] = [] + locator = item['props']['Locator']['values'][0] + bank_locator = item['props']['Bank Locator']['values'][0] + + if not "Not Specified" in locator: + memory[ind]['info'].append(locator) + if not "Not Specified" in bank_locator: + memory[ind]['info'].append(bank_locator) + + if item['props']['Size']['values'][0] == "No Module Installed": + memory[ind]['populated'] = False + memory[ind]['info'].append("Unpopulated") + else: + memory[ind]['populated'] = True + + for key in item['props']: + if key in needed_global_keys and key not in added_global_keys: + memory[0]['info'].append( + f'{key}: {";".join(item["props"][key]["values"])}') + added_global_keys.append(key) + elif key in needed_keys: + memory[ind]['info'].append( + f'{key}: {";".join(item["props"][key]["values"])}') + ind += 1 + + meminfo = [] + for item in memory: + meminfo.append("; ".join(item['info'])) + + meminfo_string = ", ".join(meminfo) + with open("tmp-run-env.out", "w") as f: + f.write(f"CM_HOST_MEM_INFO={meminfo_string}") diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/hardware/default.json b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/hardware/default.json new file mode 100644 index 000000000..ad5b1ba32 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-sut-description/hardware/default.json @@ -0,0 +1,26 @@ +{ + "accelerator_frequency": "", + "accelerator_host_interconnect": "N/A", + "accelerator_interconnect": "N/A", + "accelerator_interconnect_topology": "", + "accelerator_memory_capacity": "N/A", + "accelerator_memory_configuration": "N/A", + "accelerator_model_name": "N/A", + "accelerator_on-chip_memories": "", + "accelerators_per_node": "0", + "cooling": "air", + "division": "closed", + "host_memory_configuration": "undefined", + "host_networking": "Gig Ethernet", + "host_network_card_count": "1", + "host_networking_topology": "N/A", + "host_processor_interconnect": "", + "host_storage_type": "SSD", + "hw_notes": "", + "number_of_nodes": "1", + "status": "available", + "submitter": "", + "sw_notes": "", + "system_type": "edge", + "system_type_detail": "edge server" +} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- 
/dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/README.md new file mode 100644 index 000000000..a9ff7535e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts//get-mlperf-inference-utils](https://docs.mlcommons.org/cm4mlops/scripts//get-mlperf-inference-utils) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/_cm.yaml new file mode 100644 index 000000000..bde11ac26 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/_cm.yaml @@ -0,0 +1,18 @@ +alias: get-mlperf-inference-utils +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +tags: +- get +- mlperf +- inference +- util +- utils +- functions +uid: e341e5f86d8342e5 +deps: + - tags: get,mlperf,inference,src + names: + - inference-src +new_env_keys: + - '+PYTHONPATH' diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/customize.py new file mode 100644 index 000000000..ec9fe4ddb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/customize.py @@ -0,0 +1,46 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import sys + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + utils_path = i['run_script_input']['path'] + + env['+PYTHONPATH'] = [utils_path] + + submission_checker_dir = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission") + + sys.path.append(submission_checker_dir) + sys.path.append(utils_path) + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/mlperf_utils.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/mlperf_utils.py new file mode 100644 index 000000000..1e60cafc1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-inference-utils/mlperf_utils.py @@ -0,0 +1,353 @@ +import sys +import os +import submission_checker as checker +from log_parser import MLPerfLog + + +def get_result_from_log(version, model, scenario, + result_path, mode, inference_src_version=None): + + config = checker.Config( + version, + None, + ignore_uncommited=False, + skip_power_check=False, + ) + mlperf_model = config.get_mlperf_model(model) + # scenario = checker.SCENARIO_MAPPING[scenario] + + result = '' + power_result = None + valid = {} + if mode == "performance": + has_power = 
os.path.exists(os.path.join(result_path, "..", "power")) + version_tuple = None + if inference_src_version: + version_tuple = tuple(map(int, inference_src_version.split('.'))) + + if version_tuple and version_tuple >= (4, 1, 22): + result_ = checker.get_performance_metric( + config, mlperf_model, result_path, scenario) + else: + result_ = checker.get_performance_metric( + config, mlperf_model, result_path, scenario, None, None, has_power) + mlperf_log = MLPerfLog( + os.path.join( + result_path, + "mlperf_log_detail.txt")) + if ( + "result_validity" not in mlperf_log.get_keys() + or mlperf_log["result_validity"] != "VALID" + ): + valid['performance'] = False + else: + valid['performance'] = True + + if "stream" in scenario.lower(): + result = result_ / 1000000 # convert to milliseconds + else: + result = result_ + result = str(round(result, 3)) + + if has_power: + power_valid, power_metric, scenario, avg_power_efficiency = checker.get_power_metric( + config, scenario, result_path, True, result_) + power_result = f"{round(power_metric,3)},{round(avg_power_efficiency,3)}" + valid['power'] = power_valid + + elif mode == "accuracy" and os.path.exists(os.path.join(result_path, 'accuracy.txt')): + + acc_valid, acc_results, acc_targets, acc_limits = get_accuracy_metric( + config, mlperf_model, result_path) + valid['accuracy'] = acc_valid + + if len(acc_results) == 1: + for acc in acc_results: + result = str(round(float(acc_results[acc]), 5)) + else: + result = '(' + result_list = [] + for i, acc in enumerate(acc_results): + result_list.append(str(round(float(acc_results[acc]), 5))) + result += ", ".join(result_list) + ")" + + return result, valid, power_result + + +def get_accuracy_metric(config, model, path): + + import re + is_valid = False + all_accuracy_valid = True + acc = None + result_acc = None + target = config.get_accuracy_target(model) + acc_upper_limit = config.get_accuracy_upper_limit(model) + patterns = [] + acc_targets = [] + acc_limits = [None] * (len(target) // 2) + up_patterns = [None] * (len(target) // 2) + acc_types = [] + + if acc_upper_limit is not None: + acc_limit_check = True + + for ii in range(0, len(target), 2): + acc_type1, tmp = target[ii:ii + 2] + for i in range(0, len(acc_upper_limit), 2): + acc_type, acc_target = acc_upper_limit[i:i + 2] + if acc_type != acc_type1: + continue + acc_limits[ii // 2] = acc_target + up_patterns[ii // 2] = checker.ACC_PATTERN[acc_type] + + for i in range(0, len(target), 2): + acc_type, acc_target = target[i:i + 2] + acc_types.append(acc_type) + patterns.append(checker.ACC_PATTERN[acc_type]) + acc_targets.append(acc_target) + + acc_seen = [False for _ in acc_targets] + acc_results = {} + with open(os.path.join(path, "accuracy.txt"), "r", encoding="utf-8") as f: + for line in f: + for i, (pattern, acc_target, acc_type) in enumerate( + zip(patterns, acc_targets, acc_types)): + m = re.match(pattern, line) + if m: + acc = m.group(1) + + acc_results[acc_type] = acc + + if acc is not None and float(acc) >= acc_target: + all_accuracy_valid &= True + acc_seen[i] = True + elif acc is not None: + all_accuracy_valid = False + # log.warning("%s accuracy not met: expected=%f, found=%s", path, acc_target, acc) + if i == 0 and acc: + result_acc = acc + acc = None + if acc_upper_limit is not None: + for i, (pattern, acc_limit) in enumerate( + zip(up_patterns, acc_limits)): + if not pattern: + continue + m = re.match(pattern, line) + if m: + acc = m.group(1) + if acc is not None and acc_upper_limit is not None and float( + acc) > acc_limit: + 
acc_limit_check = False + # log.warning("%s accuracy not met: upper limit=%f, found=%s", path, acc_limit, acc) + acc = None + if all(acc_seen): + break + is_valid = all_accuracy_valid & all(acc_seen) + if acc_upper_limit is not None: + is_valid &= acc_limit_check + + return is_valid, acc_results, acc_targets, acc_limits + + +def get_result_string(version, model, scenario, result_path, has_power, sub_res, + division="open", system_json=None, model_precision="fp32", inference_src_version=None): + + config = checker.Config( + version, + None, + ignore_uncommited=False, + skip_power_check=False, + ) + mlperf_model = config.get_mlperf_model(model) + performance_path = os.path.join(result_path, "performance", "run_1") + accuracy_path = os.path.join(result_path, "accuracy") + scenario = checker.SCENARIO_MAPPING[scenario.lower()] + + fname = os.path.join(performance_path, "mlperf_log_detail.txt") + mlperf_log = MLPerfLog(fname) + effective_scenario = mlperf_log["effective_scenario"] + inferred = False + result = {} + + version_tuple = None + if inference_src_version: + version_tuple = tuple(map(int, inference_src_version.split('.'))) + + if version_tuple and version_tuple >= (4, 1, 22): + performance_result = checker.get_performance_metric( + config, mlperf_model, performance_path, scenario) + else: + performance_result = checker.get_performance_metric( + config, mlperf_model, performance_path, scenario, None, None) + if "stream" in scenario.lower(): + performance_result_ = performance_result / 1000000 # convert to milliseconds + else: + performance_result_ = performance_result + result['performance'] = round(performance_result_, 3) + + if scenario != effective_scenario: + inferred, inferred_result = checker.get_inferred_result( + scenario, effective_scenario, performance_result, mlperf_log, config, False) + + if has_power: + is_valid, power_metric, scenario, avg_power_efficiency = checker.get_power_metric( + config, scenario, performance_path, True, performance_result) + if "stream" in scenario.lower(): + power_metric_unit = "milliJoules" + else: + power_metric_unit = "Watts" + power_result_string = f"`Power consumed`: `{round(power_metric, 3)} {power_metric_unit}`, `Power efficiency`: `{round(avg_power_efficiency * 1000, 3)} samples per Joule`" + + power_result = round(power_metric, 3) + power_efficiency_result = round(avg_power_efficiency, 3) + result['power'] = power_result + result['power_efficiency'] = power_efficiency_result + + compliance_list = ["TEST01", "TEST04", "TEST06"] + if division == "closed": + for test in compliance_list: + test_path = os.path.join(result_path, test) + if os.path.exists( + test_path): # We dont consider missing test folders now - submission checker will do that + # test_pass = checker.check_compliance_dir(test_path, mlperf_model, scenario, config, "closed", system_json, sub_res) + test_pass = checker.check_compliance_perf_dir( + test_path) if test != "TEST06" else True + if test_pass and test in ["TEST01", "TEST06"]: + # test_pass = checker.check_compliance_acc_dir(test_path, mlperf_model, config) + pass # accuracy truncation script is done after submission generation. 
We assume here that it'll pass + if test_pass: + result[test] = "passed" + else: + result[test] = "failed" + + acc_valid, acc_results, acc_targets, acc_limits = get_accuracy_metric( + config, mlperf_model, accuracy_path) + + result_field = checker.RESULT_FIELD[effective_scenario] + + performance_result_string = f"`{result_field}`: `{performance_result}`\n" + if inferred: + inferred_result_field = checker.RESULT_FIELD[scenario] + performance_result_string += f"Inferred result: `{inferred_result_field}`: `{inferred_result}` \n" + + accuracy_result_string = '' + accuracy_results = [] + for i, acc in enumerate(acc_results): + accuracy_results.append(str(round(float(acc_results[acc]), 5))) + accuracy_result_string += f"`{acc}`: `{round(float(acc_results[acc]), 5)}`" + if not acc_limits or not acc_limits[i]: + accuracy_result_string += f", Required accuracy for closed division `>= {round(acc_targets[i], 5)}`" + else: + accuracy_result_string += f", Required accuracy for closed division `>= {round(acc_targets[i], 5)}` and `<= {round(acc_limits[i], 5)}`" + accuracy_result_string += "\n" + + if len(accuracy_results) == 1: + accuracy_result = accuracy_results[0] + else: + accuracy_result = "(" + ", ".join(accuracy_results) + ")" + result['accuracy'] = accuracy_result + + result_string = f"\n\n## Results\n" + result_string += f"\nPlatform: {sub_res}\n" + result_string += f"\nModel Precision: {model_precision}\n" + result_string += "\n### Accuracy Results \n" + accuracy_result_string + result_string += "\n### Performance Results \n" + performance_result_string + if has_power: + result_string += "\n### Power Results \n" + power_result_string + + return result_string, result + + +def get_result_table(results): + + headers = [ + "Model", + "Scenario", + "Accuracy", + "Throughput", + "Latency (in ms)", + "Power Efficiency (in samples/J)", + "TEST01", + "TEST04"] + table = [] + for model in results: + for scenario in results[model]: + row = [] + row.append(model) + row.append(scenario) + if results[model][scenario].get('accuracy'): + val = str(results[model][scenario]['accuracy']) + if not results[model][scenario].get('accuracy_valid', True): + val = "X " + val + row.append(val) + else: + row.append("-") + + if results[model][scenario].get('performance'): + + if "stream" in scenario.lower(): + if float(results[model][scenario]['performance']) == 0: + row.append("-") + elif scenario.lower() == "singlestream": + val_qps = str( + round( + 1000 / + float( + results[model][scenario]['performance']), + 3)) + if not results[model][scenario].get( + 'performance_valid', True): # we explicitly mark invalid results + val_qps = "X " + val_qps + row.append(val_qps) + elif scenario.lower() == "multistream": + val_qps = str( + round( + 8000 / + float( + results[model][scenario]['performance']), + 3)) + if not results[model][scenario].get( + 'performance_valid', True): + val_qps = "X " + val_qps + row.append(val_qps) + val = str(results[model][scenario]['performance']) + if not results[model][scenario].get( + 'performance_valid', True): + val = "X " + val + row.append(val) + else: + val = str(results[model][scenario]['performance']) + if not results[model][scenario].get( + 'performance_valid', True): + val = "X " + val + row.append(val) + row.append("-") + + val1 = results[model][scenario].get('TEST01') + # val2 = results[model][scenario].get('TEST05') + val3 = results[model][scenario].get('TEST04') + + # if results[model][scenario].get('power','') != '': + # row.append(results[model][scenario]['power']) + if 
results[model][scenario].get('power_efficiency', '') != '': + val = str(results[model][scenario]['power_efficiency']) + if not results[model][scenario].get('power_valid', True): + val = "X " + val + row.append(val) + elif val1 or val3: # Don't output unless there is further column data + row.append(None) + + if val1: + row.append(val1) + if val3: + row.append(val3) + + else: + if val3: + row.append("missing") + row.append(val3) + + table.append(row) + + return table, headers diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/README-extra.md new file mode 100644 index 000000000..32392035f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/README-extra.md @@ -0,0 +1,16 @@ +# Get MLCommons Logging Library + +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) +clones and installs the [MLCommons Logging library](https://github.com/mlcommons/logging). + +## Commands + +To install it: +``` +cm run script --tags=get,mlperf,logging +``` +or + +``` +cmr "get mlperf logging" +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/README.md new file mode 100644 index 000000000..d9b3b3d5c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-logging](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-logging) for the documentation of this CM script.
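The `get_result_table` helper in `mlperf_utils.py` above deliberately returns raw rows plus headers and leaves rendering to the caller. One plausible way to turn that output into a Markdown summary, assuming the third-party `tabulate` package, which this patch does not declare as a dependency:

```
# Hypothetical rendering of get_result_table() output; 'tabulate' is an
# assumption here, not a dependency declared by this patch.
from tabulate import tabulate

headers = ['Model', 'Scenario', 'Accuracy', 'Throughput',
           'Latency (in ms)', 'Power Efficiency (in samples/J)',
           'TEST01', 'TEST04']
table = [
    # One illustrative row; None cells render as empty columns.
    ['resnet50', 'Offline', '76.078', '42959.4', '-', None, 'passed', 'passed'],
]
# 'github' produces a Markdown table suitable for README summaries.
print(tabulate(table, headers=headers, tablefmt='github'))
```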
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/_cm.yaml new file mode 100644 index 000000000..c173a906a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/_cm.yaml @@ -0,0 +1,24 @@ +alias: get-mlperf-logging +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +default_env: {} +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python3 +- env: + CM_GIT_CHECKOUT: master + tags: get,git,repo,_repo.https://github.com/mlcommons/logging +new_env_keys: +- CM_MLPERF_LOGGING_* +- +PYTHONPATH +tags: +- get +- mlperf +- logging +- mlperf-logging +uid: c9830dc6f87b4dc6 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/customize.py new file mode 100644 index 000000000..1282175e0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-logging/customize.py @@ -0,0 +1,34 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + meta = i['meta'] + + env['CM_MLPERF_LOGGING_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + + env['+PYTHONPATH'] = [env['CM_MLPERF_LOGGING_SRC_PATH']] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/README.md new file mode 100644 index 000000000..04fb8cef2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-power-dev](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-power-dev) for the documentation of this CM script. 
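The `_cm.yaml` below relies on CM's wildcard variations: a concrete tag such as `_branch.master` matches the `branch.#` pattern, and the text after the dot is substituted for `#` inside that variation's `env` block. A simplified sketch of the substitution; `expand_variation` is illustrative, not CM's actual resolver:

```
# Simplified sketch of CM wildcard variations; not CM's actual resolver.
variations = {
    'branch.#': {'env': {'CM_GIT_CHECKOUT': '#'}},
    'sha.#': {'env': {'CM_GIT_SHA': '#'}},
    'tag.#': {'env': {'CM_GIT_CHECKOUT_TAG': '#'}},
    'repo.#': {'env': {'CM_GIT_URL': '#'}},
}

def expand_variation(tag, variations):
    for pattern, meta in variations.items():
        if not pattern.endswith('#'):
            continue
        prefix = pattern[:-1]  # e.g. 'branch.' from 'branch.#'
        if tag.startswith(prefix):
            suffix = tag[len(prefix):]  # e.g. 'master'
            return {k: (suffix if v == '#' else v)
                    for k, v in meta['env'].items()}
    return {}

print(expand_variation('branch.master', variations))
# -> {'CM_GIT_CHECKOUT': 'master'}
```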
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/_cm.yaml new file mode 100644 index 000000000..c97047225 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/_cm.yaml @@ -0,0 +1,65 @@ +alias: get-mlperf-power-dev +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +default_env: + CM_GIT_CHECKOUT_FOLDER: power-dev + CM_GIT_DEPTH: --depth 1 + CM_GIT_PATCH: 'no' +deps: [] +new_env_keys: +- CM_MLPERF_POWER_SOURCE +prehook_deps: +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_POWER_SOURCE + extra_cache_tags: mlperf,power,power-dev,src + force_env_keys: + - CM_GIT_* + names: + - mlperf-power-dev-git-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +tags: +- get +- src +- source +- power +- power-dev +- mlperf +- mlcommons +uid: 72aa56768c994bcf +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + group: checkout + mlcommons: + default: true + env: + CM_GIT_URL: https://github.com/mlcommons/power-dev.git + group: repo + octoml: + env: + CM_GIT_URL: https://github.com/octoml/power-dev.git + group: repo + repo.#: + env: + CM_GIT_URL: '#' + group: repo + sha.#: + env: + CM_GIT_SHA: '#' + group: checkout + tag.#: + env: + CM_GIT_CHECKOUT_TAG: '#' + group: checkout diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/customize.py new file mode 100644 index 000000000..bedbf96c4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-power-dev/customize.py @@ -0,0 +1,33 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + if env.get('CM_VERSION', '') == '': + env['CM_VERSION'] = "master" + + if env.get('CM_GIT_REPO_CURRENT_HASH', '') != '': + env['CM_VERSION'] += "-git-" + env['CM_GIT_REPO_CURRENT_HASH'] + + return {'return': 0, 'version': env['CM_VERSION']} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
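For reference, the power-dev `customize.py` above reports the detected version as `master`, with the current commit hash appended when the git repo dependency provides one. A short illustration of the resulting strings; the hash shown is hypothetical:

```
# Illustration of the version string built in get-mlperf-power-dev/customize.py.
def derive_version(env):
    version = env.get('CM_VERSION', '') or 'master'
    if env.get('CM_GIT_REPO_CURRENT_HASH', '') != '':
        version += '-git-' + env['CM_GIT_REPO_CURRENT_HASH']
    return version

print(derive_version({}))  # -> 'master'
# Hypothetical commit hash, for illustration only:
print(derive_version({'CM_GIT_REPO_CURRENT_HASH': 'abc1234'}))  # -> 'master-git-abc1234'
```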
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/README.md new file mode 100644 index 000000000..abf631d75 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-tiny-eembc-energy-runner-src](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-tiny-eembc-energy-runner-src) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/_cm.yaml new file mode 100644 index 000000000..32e42c206 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/_cm.yaml @@ -0,0 +1,23 @@ +alias: get-mlperf-tiny-eembc-energy-runner-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +default_env: + CM_GIT_CHECKOUT: main + CM_GIT_PATCH: 'no' + CM_GIT_RECURSE_SUBMODULES: '' + CM_GIT_URL: https://github.com/eembc/energyrunner +new_env_keys: +- CM_EEMBC_ENERGY_RUNNER_* +- +PYTHONPATH +tags: +- get +- src +- source +- eembc +- energyrunner +- energy-runner +- eembc-energy-runner +- tinymlperf-energy-runner +uid: c7da8d1ce4164a4b diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py new file mode 100644 index 000000000..66219c3da --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py @@ -0,0 +1,77 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + +# if os_info['platform'] == 'windows': +# return {'return':1, 'error': 'Windows is not supported in this script +# yet'} + + env = i['env'] + meta = i['meta'] + + if 'CM_GIT_DEPTH' not in env: + env['CM_GIT_DEPTH'] = '' + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + env['CM_EEMBC_ENERGY_RUNNER_SRC'] = os.path.join(os.getcwd(), 'src') + datasets_src_path = os.path.join(os.getcwd(), 'src', 'datasets') + env['CM_EEMBC_ENERGY_RUNNER_SRC_DATASETS'] = datasets_src_path + + # Get user directory for EEMBC runner path + home_directory = os.path.expanduser('~') + + sessions_path = os.path.join(home_directory, 'eembc', 'runner', 'sessions') + + print('') + print('Path to EEMBC runner sessions: {}'.format(sessions_path)) + + env['CM_EEMBC_ENERGY_RUNNER_SESSIONS'] = sessions_path + + if not os.path.isdir(sessions_path): + os.makedirs(sessions_path) + + datasets_path = os.path.join( + home_directory, + 'eembc', + 'runner', + 'benchmarks', + 'ulp-mlperf', + 'datasets') + + print('') + print('Path to EEMBC runner datasets: {}'.format(datasets_path)) + + if not os.path.isdir(datasets_path): + os.makedirs(datasets_path) + 
+ env['CM_EEMBC_ENERGY_RUNNER_DATASETS'] = datasets_path + + print('') + print('Copying datasets to EEMBC user space ...') + + shutil.copytree(datasets_src_path, datasets_path, dirs_exist_ok=True) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/run.bat new file mode 100644 index 000000000..799902b4d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/run.bat @@ -0,0 +1,72 @@ +@echo off + +set CUR_DIR=%cd% +set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH% + +echo ****************************************************** +echo Cloning EEMBC Energy Runner from %CM_GIT_URL% with branch %CM_GIT_CHECKOUT% %CM_GIT_DEPTH% %CM_GIT_RECURSE_SUBMODULES% ... + +set folder=src + +if not exist %folder% ( + + if not "%CM_GIT_SHA%" == "" ( + git clone %CM_GIT_RECURSE_SUBMODULES% -b "%CM_GIT_CHECKOUT%" %CM_GIT_URL% %CM_GIT_DEPTH% %folder% + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + cd %folder% + ) else ( + git clone %CM_GIT_RECURSE_SUBMODULES% %CM_GIT_URL% %CM_GIT_DEPTH% %folder% + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + cd %folder% + + git checkout "%CM_GIT_CHECKOUT%" + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + ) +) else ( + + cd %folder% + +) + + +if not "%CM_GIT_SUBMODULES%" == "" ( + for /F %%s in ("%CM_GIT_SUBMODULES%") do ( + echo. + echo Initializing submodule %%s + git submodule update --init %%s + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + ) +) + + +if "%CM_GIT_PATCH%" == "yes" ( + echo Git patching is not yet implemented in CM script "get-mlperf-tiny-eembc-energy-runner-src" - please add it! + pause + + rem set patch_filename=%CM_GIT_PATCH_FILENAME% + rem if [ ! -n ${CM_GIT_PATCH_FILENAMES} ]; then + rem patchfile=${CM_GIT_PATCH_FILENAME:-"git.patch"} + rem CM_GIT_PATCH_FILENAMES=$patchfile + rem fi + rem + rem IFS=', ' read -r -a patch_files <<< ${CM_GIT_PATCH_FILENAMES} + rem + rem for patch_filename in "${patch_files[@]}" + rem do + rem echo "Applying patch ${SCRIPT_DIR}/patch/$patch_filename" + rem git apply ${SCRIPT_DIR}/patch/"$patch_filename" + rem if [ "${?}" != "0" ]; then exit 1; fi + rem done + +) + +rem Based on https://github.com/mwangistan/inference +for %%f in (%SCRIPT_DIR%\patch\windows-*) do ( + echo %%f + patch -p1 < %%f +) + + +cd %CUR_DIR% + +exit /b 0 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/run.sh new file mode 100644 index 000000000..ea2645f7e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-eembc-energy-runner-src/run.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +CUR_DIR=$PWD +SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} + +echo "******************************************************" +echo "Cloning EEMBC Energy Runner from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES} ..." + +if [ !
-d "src" ]; then + if [ -z ${CM_GIT_SHA} ]; then + git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} src + cd src + else + git clone ${CM_GIT_RECURSE_SUBMODULES} ${CM_GIT_URL} ${CM_GIT_DEPTH} src + cd src + git checkout -b "${CM_GIT_CHECKOUT}" + fi + if [ "${?}" != "0" ]; then exit 1; fi +else + cd src +fi + +IFS=',' read -r -a submodules <<< "${CM_GIT_SUBMODULES}" + +for submodule in "${submodules[@]}" +do + echo "Initializing submodule ${submodule}" + git submodule update --init "${submodule}" + if [ "${?}" != "0" ]; then exit 1; fi +done + +if [ ${CM_GIT_PATCH} == "yes" ]; then + patch_filename=${CM_GIT_PATCH_FILENAME:-git.patch} + echo "Applying patch ${SCRIPT_DIR}/patch/$patch_filename" + git apply ${SCRIPT_DIR}/patch/"$patch_filename" + if [ "${?}" != "0" ]; then exit 1; fi +fi + +cd "$CUR_DIR" diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/README.md new file mode 100644 index 000000000..2c658c4c5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-tiny-src](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-tiny-src) for the documentation of this CM script. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/_cm.yaml new file mode 100644 index 000000000..86e859a73 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/_cm.yaml @@ -0,0 +1,31 @@ +alias: get-mlperf-tiny-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +default_env: + CM_GIT_CHECKOUT: master + CM_GIT_PATCH: 'no' + CM_GIT_RECURSE_SUBMODULES: '' + CM_GIT_URL: https://github.com/mlcommons/tiny.git +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python3 +new_env_keys: +- CM_MLPERF_TINY_* +- +PYTHONPATH +tags: +- get +- src +- source +- tiny +- tiny-src +- tiny-source +- tinymlperf +- tinymlperf-src +- mlperf +- mlcommons +uid: 777843a0bb034524 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/customize.py new file mode 100644 index 000000000..aaec439fe --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/customize.py @@ -0,0 +1,71 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + +# if os_info['platform'] == 'windows': +# return {'return':1, 'error': 'Windows is not supported in this script +# yet'} + + env = i['env'] + meta = i['meta'] + + if 'CM_GIT_DEPTH' not in env: + env['CM_GIT_DEPTH'] = '' + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + env['CM_MLPERF_TINY_SRC'] = os.path.join(os.getcwd(), 'src') + env['CM_MLPERF_TINY_BENCHMARK'] = os.path.join( + os.getcwd(), 'src', 'benchmark') + env['CM_MLPERF_TINY_DATASETS'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets') + env['CM_MLPERF_TINY_DATASETS_AD'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'ad01') + env['CM_MLPERF_TINY_DATASETS_IC'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'ic01') + env['CM_MLPERF_TINY_DATASETS_KWS'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'kws01') + env['CM_MLPERF_TINY_DATASETS_KWS_OPEN'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'kws01-open') + env['CM_MLPERF_TINY_DATASETS_VWW'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'evaluation', 'datasets', 'vww01') + env['CM_MLPERF_TINY_TRAINING'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'training') + env['CM_MLPERF_TINY_TRAINING_AD'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'training', 'anomaly_detection') + env['CM_MLPERF_TINY_TRAINING_IC'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'training', 'image_classification') + env['CM_MLPERF_TINY_TRAINING_KWS'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'training', 'keyword_spotting') + env['CM_MLPERF_TINY_TRAINING_VWW'] = os.path.join( + os.getcwd(), 'src', 'benchmark', 'training', 'visual_wake_words') + +# 20221024: we save and restore env in the main script and can clean env here for 
determinism +# if '+PYTHONPATH' not in env: env['+PYTHONPATH'] = [] +# env['+PYTHONPATH']=[] +# env['+PYTHONPATH'].append(os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], 'python')) +# env['+PYTHONPATH'].append(os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], 'tools', 'submission')) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/run.bat new file mode 100644 index 000000000..e94998ad7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/run.bat @@ -0,0 +1,72 @@ +@echo off + +set CUR_DIR=%cd% +set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH% + +echo ****************************************************** +echo Cloning MLCommons from %CM_GIT_URL% with branch %CM_GIT_CHECKOUT% %CM_GIT_DEPTH% %CM_GIT_RECURSE_SUBMODULES% ... + +set folder=src + +if not exist %folder% ( + + if not "%CM_GIT_SHA%" == "" ( + git clone %CM_GIT_RECURSE_SUBMODULES% -b "%CM_GIT_CHECKOUT%" %CM_GIT_URL% %CM_GIT_DEPTH% %folder% + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + cd %folder% + ) else ( + git clone %CM_GIT_RECURSE_SUBMODULES% %CM_GIT_URL% %CM_GIT_DEPTH% %folder% + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + cd %folder% + + git checkout "%CM_GIT_CHECKOUT%" + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + ) +) else ( + + cd %folder% + +) + + +if not "%CM_GIT_SUBMODULES%" == "" ( + for /F %%s in ("%CM_GIT_SUBMODULES%") do ( + echo. + echo Initializing submodule %%s + git submodule update --init %%s + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + ) +) + + +if "%CM_GIT_PATCH%" == "yes" ( + echo Git patching is not yet implemented in CM script "get-mlperf-tiny-src" - please add it! + pause + + rem set patch_filename=%CM_GIT_PATCH_FILENAME% + rem if [ ! -n ${CM_GIT_PATCH_FILENAMES} ]; then + rem patchfile=${CM_GIT_PATCH_FILENAME:-"git.patch"} + rem CM_GIT_PATCH_FILENAMES=$patchfile + rem fi + rem + rem IFS=', ' read -r -a patch_files <<< ${CM_GIT_PATCH_FILENAMES} + rem + rem for patch_filename in "${patch_files[@]}" + rem do + rem echo "Applying patch ${SCRIPT_DIR}/patch/$patch_filename" + rem git apply ${SCRIPT_DIR}/patch/"$patch_filename" + rem if [ "${?}" != "0" ]; then exit 1; fi + rem done + +) + +rem Based on https://github.com/mwangistan/inference +for %%f in (%SCRIPT_DIR%\patch\windows-*) do ( + echo %%f + patch -p1 < %%f +) + + +cd %CUR_DIR% + +exit /b 0 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/run.sh new file mode 100644 index 000000000..e625891ac --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-tiny-src/run.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +CUR_DIR=$PWD +SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} + +echo "******************************************************" +echo "Cloning MLCommons from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} ${CM_GIT_DEPTH} ${CM_GIT_RECURSE_SUBMODULES} ..." + +if [ ! 
-d "src" ]; then + if [ -z ${CM_GIT_SHA} ]; then + git clone ${CM_GIT_RECURSE_SUBMODULES} -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} ${CM_GIT_DEPTH} src + cd src + else + git clone ${CM_GIT_RECURSE_SUBMODULES} ${CM_GIT_URL} ${CM_GIT_DEPTH} src + cd src + git checkout -b "${CM_GIT_CHECKOUT}" + fi + if [ "${?}" != "0" ]; then exit 1; fi +else + cd src +fi + +IFS=',' read -r -a submodules <<< "${CM_GIT_SUBMODULES}" + +for submodule in "${submodules[@]}" +do + echo "Initializing submodule ${submodule}" + git submodule update --init "${submodule}" + if [ "${?}" != "0" ]; then exit 1; fi +done + +if [ ${CM_GIT_PATCH} == "yes" ]; then + patch_filename=${CM_GIT_PATCH_FILENAME:-git.patch} + echo "Applying patch ${SCRIPT_DIR}/patch/$patch_filename" + git apply ${SCRIPT_DIR}/patch/"$patch_filename" + if [ "${?}" != "0" ]; then exit 1; fi +fi + +cd "$CUR_DIR" diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/README.md new file mode 100644 index 000000000..e7143c85a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-training-nvidia-code](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-training-nvidia-code) for the documentation of this CM script. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/_cm.yaml
new file mode 100644
index 000000000..02e2ca173
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/_cm.yaml
@@ -0,0 +1,53 @@
+alias: get-mlperf-training-nvidia-code
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: MLPerf benchmark support
+clean_files: []
+default_version: r3.0
+deps:
+- env:
+    CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_TRAINING_RESULTS_PATH
+  extra_cache_tags: mlperf,training,results
+  names:
+  - mlperf-training-results
+  tags: get,git,repo
+  update_tags_from_env_with_prefix:
+    _repo.:
+    - CM_NVIDIA_CODE_DOWNLOAD_URL
+new_env_keys:
+- CM_MLPERF_TRAINING_NVIDIA_CODE_PATH
+tags:
+- get
+- nvidia
+- mlperf
+- training
+- code
+- training-code
+uid: fdc630b1d41743c5
+variations:
+  ctuning:
+    env:
+      CM_TMP_TRAINING_SRC: ctuning
+    group: repo-owner
+  custom:
+    group: repo-owner
+  mlcommons:
+    default: true
+    env:
+      CM_TMP_TRAINING_SRC: mlcommons
+    group: repo-owner
+  nvidia-only:
+    env:
+      CM_TMP_TRAINING_SRC: GATEOverflow
+    group: repo-owner
+versions:
+  r2.1:
+    env:
+      CM_NVIDIA_CODE_DOWNLOAD_URL: https://github.com/<<<CM_TMP_TRAINING_SRC>>>/training_results_v2.1
+  r3.0:
+    env:
+      CM_NVIDIA_CODE_DOWNLOAD_URL: https://github.com/<<<CM_TMP_TRAINING_SRC>>>/training_results_v3.0
+  r3.1:
+    env:
+      CM_NVIDIA_CODE_DOWNLOAD_URL: https://github.com/<<<CM_TMP_TRAINING_SRC>>>/training_results_v3.1
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/customize.py
new file mode 100644
index 000000000..11acbe12e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-nvidia-code/customize.py
@@ -0,0 +1,36 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    return {'return': 0}
+
+
+def postprocess(i):
+    env = i['env']
+
+    env['CM_MLPERF_TRAINING_NVIDIA_CODE_PATH'] = os.path.join(
+        env['CM_MLPERF_TRAINING_RESULTS_PATH'], "NVIDIA")
+    if not os.path.exists(env['CM_MLPERF_TRAINING_NVIDIA_CODE_PATH']):
+        return {
+            'return': 1, 'error': f'Nvidia code path not found in the repository: {env["CM_MLPERF_TRAINING_RESULTS_PATH"]}'}
+
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_MLPERF_TRAINING_NVIDIA_CODE_PATH']
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/README-extra.md
new file mode 100644
index 000000000..08293c98b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/README-extra.md
@@ -0,0 +1,27 @@
+# Get MLCommons Training Source
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) git clones the [MLCommons Training repository](https://github.com/mlcommons/training).
+
+## Commands
+To install:
+```
+cm run script --tags=get,mlperf,training,src,[VARIATION] --version=[VERSION]
+```
+where [VARIATION] is one of
+* `default:` Works with the official MLCommons training repository. Uses the `short-history` variation
+* `patch:` Applies `git.patch` to the cloned git repository
+* `octoml:` Works with the OctoML fork of the MLCommons training repository. Uses the `short-history` variation
+* `short-history:` Uses a shallow git clone (`--depth 5`), which significantly reduces the download size
+* `full-history:` Uses the full git history
+* `no-recurse-submodules:` Only downloads the main repository
+
+and [VERSION] is one of
+* `master:` Uses the master branch
+* `r2.1:` Uses the release branch for the MLCommons Training 2.1 round
+
+## Exported Variables
+* `CM_MLPERF_TRAINING_SOURCE`: Directory path of the cloned training repository
+* `PYTHONPATH`: Appended with the paths to the vision module and the submission tools module
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. RHEL 9
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/README.md b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/README.md
new file mode 100644
index 000000000..bcc85e3d8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-training-src](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-mlperf-training-src) for the documentation of this CM script.
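A concrete instantiation of the command template above, combining variation and version tags from the `_cm.yaml` that follows (illustrative):

```bash
cm run script --tags=get,mlperf,training,src,_mlcommons,_short-history --version=master
# _mlcommons selects https://github.com/mlcommons/training.git, _short-history
# clones with --depth 5, and CM_MLPERF_TRAINING_SOURCE is exported on success
```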
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/_cm.yaml new file mode 100644 index 000000000..063e655ff --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/_cm.yaml @@ -0,0 +1,97 @@ +alias: get-mlperf-training-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +default_env: + CM_GIT_CHECKOUT: master + CM_GIT_CHECKOUT_FOLDER: training + CM_GIT_DEPTH: --depth 4 + CM_GIT_PATCH: 'no' + CM_GIT_RECURSE_SUBMODULES: ' --recurse-submodules' +default_version: master +new_env_keys: +- CM_MLPERF_TRAINING_* +- CM_MLPERF_TRAINING_LAST_RELEASE +- +PYTHONPATH +prehook_deps: +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_TRAINING_SOURCE + extra_cache_tags: mlperf,training,src + force_env_keys: + - CM_GIT_* + names: + - mlperf-training-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +tags: +- get +- src +- source +- training +- training-src +- training-source +- mlperf +- mlcommons +uid: dc440bd88e794a28 +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + group: checkout + cknowledge: + default: true + env: + CM_GIT_URL: https://github.com/cknowledge/training.git + group: src + full-history: + env: + CM_GIT_DEPTH: '' + group: git-history + mlcommons: + env: + CM_GIT_URL: https://github.com/mlcommons/training.git + group: src + no-recurse-submodules: + env: + CM_GIT_RECURSE_SUBMODULES: '' + nvidia-retinanet: + base: + - patch + env: + CM_GIT_PATCH_FILENAMES: nvidia-retinanet.patch,cpu_load.patch + patch: + env: + CM_GIT_PATCH: 'yes' + repo.#: + env: + CM_GIT_URL: '#' + group: repo + sha.#: + env: + CM_GIT_SHA: '#' + group: checkout + short-history: + default: true + env: + CM_GIT_DEPTH: --depth 5 + group: git-history + tag.#: + env: + CM_GIT_CHECKOUT_TAG: '#' + group: checkout +versions: + custom: + env: + CM_MLPERF_LAST_RELEASE: custom + master: + env: + CM_MLPERF_LAST_RELEASE: v3.1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/customize.py new file mode 100644 index 000000000..7d0c4699e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/customize.py @@ -0,0 +1,40 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + env = i['env'] + + script_path = i['run_script_input']['path'] + + if env.get('CM_GIT_PATCH_FILENAMES', '') != '': + patch_files = env['CM_GIT_PATCH_FILENAMES'].split(",") + patch_files_full_paths = [] + for patch_file in patch_files: + patch_file_full_path = os.path.join( + script_path, "patch", patch_file) + patch_files_full_paths.append(patch_file_full_path) + env['CM_GIT_PATCH_FILEPATHS'] = ",".join(patch_files_full_paths) + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return': 0} diff --git 
a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/patch/cpu_load.patch b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/patch/cpu_load.patch new file mode 100644 index 000000000..b72537696 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/patch/cpu_load.patch @@ -0,0 +1,16 @@ +diff --git a/single_stage_detector/ssd/pth_to_onnx.py b/single_stage_detector/ssd/pth_to_onnx.py +index 93679cd..6146d49 100755 +--- a/single_stage_detector/ssd/pth_to_onnx.py ++++ b/single_stage_detector/ssd/pth_to_onnx.py +@@ -54,7 +54,10 @@ def main(args): + model.to(device) + + print("Loading model") +- checkpoint = torch.load(args.input) ++ if args.device == "cpu": ++ checkpoint = torch.load(args.input, map_location=torch.device('cpu')) ++ else: ++ checkpoint = torch.load(args.input) + + # For some reason the batchnorms in the checkpoint do not have the same sizes as the module object. The checkpoint + # batchnorms have a size of [1, N, 1, 1], while the model batchnorms just have a size of [N]. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/patch/nvidia-retinanet.patch b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/patch/nvidia-retinanet.patch new file mode 100644 index 000000000..7256a1acc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-mlperf-training-src/patch/nvidia-retinanet.patch @@ -0,0 +1,170 @@ +diff --git a/single_stage_detector/ssd/model/retinanet.py b/single_stage_detector/ssd/model/retinanet.py +index 2f10d96..cdba3be 100644 +--- a/single_stage_detector/ssd/model/retinanet.py ++++ b/single_stage_detector/ssd/model/retinanet.py +@@ -12,6 +12,7 @@ from model.transform import GeneralizedRCNNTransform + from model.backbone_utils import resnet_fpn_backbone, _validate_trainable_layers + from model.feature_pyramid_network import LastLevelP6P7 + from model.focal_loss import sigmoid_focal_loss ++from model.image_list import ImageList + from model.boxes import box_iou, clip_boxes_to_image, batched_nms + from model.utils import Matcher, overwrite_eps, BoxCoder + +@@ -510,7 +511,13 @@ class RetinaNet(nn.Module): + original_image_sizes.append((val[0], val[1])) + + # transform the input +- images, targets = self.transform(images, targets) ++ # images, targets = self.transform(images, targets) ++ _image_sizes = [img.shape[-2:] for img in images] ++ for _size in _image_sizes: ++ assert len(_size) == 2 and _size[0] == 800 and _size[1] == 800 ++ # print(type(images)) ++ # images = ImageList(torch.stack(images), _image_sizes) ++ images = ImageList(images, _image_sizes) + + # Check for degenerate boxes + # TODO: Move this to a function +@@ -539,7 +546,11 @@ class RetinaNet(nn.Module): + + # compute the retinanet heads outputs using the features + head_outputs = self.head(features) ++ for k, v in head_outputs.items(): ++ print(f"{k}: {v.size()}") ++ return head_outputs + ++ """ + # create the set of anchors + anchors = self.anchor_generator(images, features) + +@@ -576,6 +587,7 @@ class RetinaNet(nn.Module): + self._has_warned = True + return losses, detections + return self.eager_outputs(losses, detections) ++ """ + + + model_urls = { +diff --git a/single_stage_detector/scripts/pth_to_onnx.py b/single_stage_detector/ssd/pth_to_onnx.py +similarity index 65% +rename from single_stage_detector/scripts/pth_to_onnx.py +rename to single_stage_detector/ssd/pth_to_onnx.py +index 78945aa..93679cd 100755 +--- a/single_stage_detector/scripts/pth_to_onnx.py ++++ b/single_stage_detector/ssd/pth_to_onnx.py +@@ -8,7 +8,7 @@ from torch.autograd import 
Variable + + from model.retinanet import retinanet_from_backbone + +-def parse_args(add_help=True): ++def parse_args(add_help=True, custom_argv=None): + parser = argparse.ArgumentParser(description='Convert PyTorch detection file to onnx format', add_help=add_help) + + parser.add_argument('--input', required=True, help='input pth file') +@@ -30,11 +30,15 @@ def parse_args(add_help=True): + help="Model data layout") + parser.add_argument('--device', default='cuda', help='device') + +- args = parser.parse_args() ++ if custom_argv is None: ++ args = parser.parse_args() ++ else: ++ args = parser.parse_args(args=custom_argv) + + args.output = args.output or ('retinanet_'+args.backbone+'.onnx') + return args + ++ + def main(args): + batch_size = args.batch_size or 1 + image_size = args.image_size or [800, 800] +@@ -51,6 +55,25 @@ def main(args): + + print("Loading model") + checkpoint = torch.load(args.input) ++ ++ # For some reason the batchnorms in the checkpoint do not have the same sizes as the module object. The checkpoint ++ # batchnorms have a size of [1, N, 1, 1], while the model batchnorms just have a size of [N]. ++ # However, this is fine, since (assuming the README is correct), the batchnorms were frozen and were not modified ++ # during training. ++ target_state_dict = model.state_dict() ++ for k, v in target_state_dict.items(): ++ ckpt_val = checkpoint["model"][k] ++ if v.size() == ckpt_val.size(): ++ continue ++ target_size = torch.tensor(v.size()) ++ actual_size = torch.tensor(ckpt_val.size()) ++ flattened = torch.flatten(actual_size) ++ if all(target_size != flattened): ++ raise ValueError(f"Real size mismatch for {k}: {target_size} vs {actual_size}") ++ checkpoint["model"][k] = checkpoint["model"][k].view(target_size) ++ # Remove unexpected keys ++ for k in [k for k in checkpoint["model"] if k not in target_state_dict]: ++ del checkpoint["model"][k] + model.load_state_dict(checkpoint['model']) + + print("Creating input tensor") +@@ -60,20 +83,31 @@ def main(args): + dtype=torch.float) + inputs = torch.autograd.Variable(rand) + # Output dynamic axes ++ """ + dynamic_axes = { + 'boxes': {0 : 'num_detections'}, + 'scores': {0 : 'num_detections'}, + 'labels': {0 : 'num_detections'}, + } ++ """ ++ + # Input dynamic axes ++ """ + if (args.batch_size is None) or (args.image_size is None): + dynamic_axes['images'] = {} + if args.batch_size is None: +- dynamic_axes['images'][0]: 'batch_size' ++ dynamic_axes['images'][0] = 'batch_size' + if args.image_size is None: + dynamic_axes['images'][2] = 'width' + dynamic_axes['images'][3] = 'height' +- ++ """ ++ # Force dynamic batch_size ++ dynamic_axes = { ++ "images": {0: "batch_size"}, ++ "cls_logits": {0: "batch_size", 1: "num_regions", 2: "num_classes"}, ++ "bbox_regression": {0: "batch_size", 1: "num_regions", 2: "bbox_coord_dim"}, ++ } ++ print(dynamic_axes) + + print("Exporting the model") + model.eval() +@@ -81,10 +115,11 @@ def main(args): + inputs, + args.output, + export_params=True, +- opset_version=13, +- do_constant_folding=False, ++ opset_version=11, ++ do_constant_folding=True, + input_names=['images'], +- output_names=['boxes', 'scores', 'labels'], ++ # output_names=['boxes', 'scores', 'labels'], ++ output_names=['cls_logits', 'bbox_regression'], + dynamic_axes=dynamic_axes) + + +diff --git a/single_stage_detector/ssd/run_pth_to_onnx.sh b/single_stage_detector/ssd/run_pth_to_onnx.sh +new file mode 100644 +index 0000000..e244aed +--- /dev/null ++++ b/single_stage_detector/ssd/run_pth_to_onnx.sh +@@ -0,0 +1,9 @@ ++docker build -t 
mlperf/single_stage_detector . ++docker run -v /home/mlperf_inference_data:/home/mlperf_inference_data \ ++ -v /home/scratch.etcheng_sw/mlperf-training:/mnt/training \ ++ --gpus=0 -e NVIDIA_VISIBLE_DEVICES=0 mlperf/single_stage_detector:latest \ ++ python pth_to_onnx.py \ ++ --num-classes 264 \ ++ --image-size 800 800 \ ++ --input /home/mlperf_inference_data/models/retinanet-resnext50-32x4d/new/retinanet_model_10.pth \ ++ --output /mnt/training/resnext-retinanet-ckpts/onnx/retinanet_resnext50_32x4d_fpn.opset11.dyn_bs.800x800.onnx diff --git a/cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/README.md b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/README.md new file mode 100644 index 000000000..cdc56b7fb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-nvidia-docker](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-nvidia-docker) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/_cm.yaml new file mode 100644 index 000000000..303124799 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/_cm.yaml @@ -0,0 +1,26 @@ +alias: get-nvidia-docker +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +deps: +- tags: detect,os +- tags: get,docker +docker_input_mapping: {} +input_description: {} +input_mapping: {} +new_env_keys: [] +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- get +- install +- nvidia +- nvidia-container-toolkit +- nvidia-docker +- engine +uid: 465ae240998e4779 +variations: {} +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/run-ubuntu.sh b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/run-ubuntu.sh new file mode 100644 index 000000000..34a49d5a4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-docker/run-ubuntu.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +if dpkg -l | grep -q nvidia-container-toolkit; then + exit 0 +fi + +if [[ ! -f /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg ]]; then + cmd="curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg" + echo "$cmd" + eval "$cmd" +fi + +cmd="curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \ + sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ + sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list \ + && \ + sudo apt-get update" +echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit $? + +cmd="sudo apt-get install -y nvidia-container-toolkit" +echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit $? + +cmd="sudo nvidia-ctk runtime configure --runtime=docker" +echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit $? 
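+# (Optional sanity check, an illustrative suggestion rather than part of the
+#  original flow: after the restart below, and assuming an NVIDIA GPU and
+#  driver are present, `docker run --rm --gpus all ubuntu nvidia-smi`
+#  should print the GPU table.)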
+ +cmd="sudo systemctl restart docker" +cmd="sudo service docker restart" +echo "$cmd" +eval "$cmd" +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/README-extra.md new file mode 100644 index 000000000..8c1a21948 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/README-extra.md @@ -0,0 +1 @@ +TBD: compile https://github.com/NVIDIA/mitten diff --git a/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/README.md b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/README.md new file mode 100644 index 000000000..a21224b6f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-nvidia-mitten](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-nvidia-mitten) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/_cm.yaml new file mode 100644 index 000000000..fe0200b5c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/_cm.yaml @@ -0,0 +1,33 @@ +alias: get-nvidia-mitten +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +default_version: master +deps: +- tags: detect,os +- names: + - python3 + - python + tags: get,python3 +- tags: get,generic-python-lib,_pycuda + version: 2022.2.2 +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_NVIDIA_MITTEN_SRC + extra_cache_tags: nvidia,mitten,src + force_env_keys: + - CM_GIT_CHECKOUT + names: + - nvidia-mitten-git-src + tags: get,git,_repo.https://github.com/NVIDIA/mitten +extra_cache_tags_from_env: +- env: CM_PYTHON_CACHE_TAGS + prefix: python- +new_env_keys: +- CM_NVIDIA_MITTEN* +tags: +- get +- nvidia +- mitten +- nvidia-mitten +uid: 1c045f2902374de9 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/customize.py new file mode 100644 index 000000000..29da237a5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/customize.py @@ -0,0 +1,33 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + # TBD + + return {'return': 0} + + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + + # TBD + cur_dir = os.getcwd() + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/run.bat new file 
mode 100644 index 000000000..ceaa88fea --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/run.bat @@ -0,0 +1,3 @@ +@echo off + +echo TBD diff --git a/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/run.sh new file mode 100644 index 000000000..28b1ea4ce --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-nvidia-mitten/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash +cd ${CM_NVIDIA_MITTEN_SRC} +${CM_PYTHON_BIN_WITH_PATH} -m pip install . +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/README.md b/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/README.md new file mode 100644 index 000000000..a45964d95 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/get-onnxruntime-prebuilt](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/get-onnxruntime-prebuilt) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/_cm.yaml new file mode 100644 index 000000000..22bb2b719 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/_cm.yaml @@ -0,0 +1,36 @@ +alias: get-onnxruntime-prebuilt +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML frameworks +clean_files: [] +default_version: 1.16.3 +deps: +- tags: detect,os +new_env_keys: +- CM_ONNXRUNTIME_LIB_PATH +- CM_ONNXRUNTIME_INCLUDE_PATH +- +PATH +- +C_INCLUDE_PATH +- +CPLUS_INCLUDE_PATH +- +LD_LIBRARY_PATH +- +DYLD_FALLBACK_LIBRARY_PATH +tags: +- install +- onnxruntime +- get +- prebuilt +- lib +- lang-c +- lang-cpp +uid: be02c84ff57c4244 +variations: + cpu: + default: true + env: + CM_ONNXRUNTIME_DEVICE: '' + group: device + cuda: + env: + CM_ONNXRUNTIME_DEVICE: gpu + group: device diff --git a/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/customize.py new file mode 100644 index 000000000..b14bc1670 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/customize.py @@ -0,0 +1,95 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + os_info = i['os_info'] + env = i['env'] + + machine = env.get('CM_HOST_OS_MACHINE', '') + if machine == '': + machine = 'x86_64' + if machine == 'x86_64': + machine = 'x64' + + hostos = env['CM_HOST_OS_TYPE'] + + ext = '.tgz' + + if hostos == 'darwin': + 
hostos = 'osx' + elif hostos == 'windows': + hostos = 'win' + ext = '.zip' + + device = env.get('CM_ONNXRUNTIME_DEVICE', '') + if device != '': + machine += '-' + device + + version = env['CM_VERSION'] + + FOLDER = 'onnxruntime-{}-{}-{}'.format(hostos, machine, version) + + FILENAME = FOLDER + ext + + URL = 'https://github.com/microsoft/onnxruntime/releases/download/v{}/{}'.format( + version, FILENAME) + + print('') + print('Downloading from {}'.format(URL)) + print('') + + env['FOLDER'] = FOLDER + env['FILENAME'] = FILENAME + env['URL'] = URL + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + hostos = env['CM_HOST_OS_TYPE'] + + install_folder = env['CM_TMP_INSTALL_FOLDER'] + + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', + '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + # 20221024: we save and restore env in the main script and can clean env here for determinism + # if key not in env: + env[key] = [] + + include_path = os.path.join( + os.getcwd(), + 'install', + install_folder, + 'include') + + env['+C_INCLUDE_PATH'].append(include_path) + env['+CPLUS_INCLUDE_PATH'].append(include_path) + + lib_path = os.path.join(os.getcwd(), 'install', install_folder, 'lib') + + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + if hostos == 'windows': + # For dynamic libraries + env['+PATH'] = [lib_path] + + env['CM_ONNXRUNTIME_LIB_PATH'] = lib_path + env['CM_ONNXRUNTIME_INCLUDE_PATH'] = include_path + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/run.bat new file mode 100644 index 000000000..ea9ebc982 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/run.bat @@ -0,0 +1,10 @@ +del /Q /S install +del /Q %FILENAME% + +wget --no-check-certificate %URL% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +unzip %FILENAME% -d install +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo CM_TMP_INSTALL_FOLDER=%FOLDER% > tmp-run-env.out diff --git a/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/run.sh new file mode 100644 index 000000000..6be34ea8a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-onnxruntime-prebuilt/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +rm -rf install +rm -f ${FILENAME} + +mkdir -p install + +wget --no-check-certificate ${URL} +test $? -eq 0 || exit 1 + +tar -C install -xzf ${FILENAME} +test $? -eq 0 || exit 1 + +echo "CM_TMP_INSTALL_FOLDER=$FOLDER" > tmp-run-env.out diff --git a/cmx4mlops/cmx4mlops/repo/script/get-openssl/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-openssl/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-openssl/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-openssl/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-openssl/README-extra.md new file mode 100644 index 000000000..c4f88f975 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-openssl/README-extra.md @@ -0,0 +1,8 @@ +# Get OpenSSL +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects openssl installed on the system and if not found calls the [install script for openssl](../script/install-openssl). 
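+
+## Commands
+A typical detection run (tags as registered in this script's `_cm.yaml`; illustrative):
+```
+cm run script --tags=get,openssl
+```
+If openssl is not detected, `CM_REQUIRE_INSTALL` is set and the install script is invoked automatically via the prehook dependency.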
+ +## Exported Variables +* `CM_OPENSSL_BIN_WITH_PATH` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-openssl/README.md b/cmx4mlops/cmx4mlops/repo/script/get-openssl/README.md new file mode 100644 index 000000000..d4065c1a5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-openssl/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-openssl](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-openssl) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-openssl/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-openssl/_cm.yaml new file mode 100644 index 000000000..d46266838 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-openssl/_cm.yaml @@ -0,0 +1,23 @@ +alias: get-openssl +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +clean_files: [] +env: + CM_REQUIRE_INSTALL: 'no' +new_env_keys: +- CM_OPENSSL_* +- +LD_LIBRARY_PATH +prehook_deps: +- enable_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + reuse_version: true + tags: install,openssl +tags: +- get +- openssl +- lib +- lib-openssl +uid: febdae70e9e64e30 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-openssl/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-openssl/customize.py new file mode 100644 index 000000000..a824fb14c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-openssl/customize.py @@ -0,0 +1,73 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'openssl' + if 'CM_OPENSSL_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_OPENSSL_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': i['recursion_spaces']}) + if r['return'] > 0: + if r['return'] == 16 and os_info['platform'] != 'windows': + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + return r + + return {'return': 0} + + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'OpenSSL\s*([\d.]+)', + 'group_number': 1, + 'env_key': 'CM_OPENSSL_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} + + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] > 0: + return r + version = r['version'] + found_file_path = env['CM_OPENSSL_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + env['CM_OPENSSL_INSTALLED_PATH'] = found_path + + # Save tags that can be used to specialize further dependencies (such as + # python packages) + tags = 'version-' + version + + return {'return': 0, 
'version': version}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-openssl/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-openssl/run.sh
new file mode 100644
index 000000000..14277c91a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-openssl/run.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+openssl_bin=${CM_OPENSSL_BIN_WITH_PATH}
+${openssl_bin} version > tmp-ver.out 2>/dev/null
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-platform-details/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-platform-details/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-platform-details/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-platform-details/README-EXTRA.md b/cmx4mlops/cmx4mlops/repo/script/get-platform-details/README-EXTRA.md
new file mode 100644
index 000000000..22b4875e8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-platform-details/README-EXTRA.md
@@ -0,0 +1,10 @@
+Please execute the following CM command to obtain the platform details of the System Under Test (SUT):
+
+```
+cm run script --tags=get,platform-details --out_dir_path=[DIRECTORY]
+```
+
+
+The generated details will be saved as a text file in the specified directory. If no directory is specified, the generated text file will be saved in the CM cache.
+
+A sample of the generated text file can be found [here](https://github.com/GATEOverflow/mlperf_inference_test_submissions_v5.0/blob/main/open/MLCommons/measurements/gh_action-reference-gpu-pytorch_v2.5.0-cu124/system_info.txt)
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-platform-details/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-platform-details/_cm.yaml
new file mode 100644
index 000000000..957d72e96
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-platform-details/_cm.yaml
@@ -0,0 +1,62 @@
+alias: get-platform-details
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: false
+category: Platform information
+deps:
+- tags: detect,os
+- skip_if_env:
+    CM_HOST_OS_TYPE:
+    - windows
+  tags: detect,sudo
+- skip_if_any_env:
+    CM_HOST_OS_TYPE:
+    - windows
+  skip_if_env:
+    CM_SUDO_USER:
+    - 'no'
+  tags: get,sys-util,generic,_psmisc
+- enable_if_env:
+    CM_HOST_OS_TYPE:
+    - linux
+  skip_if_env:
+    CM_SUDO_USER:
+    - 'no'
+  tags: get,sys-util,generic,_systemd
+- enable_if_env:
+    CM_HOST_OS_TYPE:
+    - linux
+  skip_if_env:
+    CM_SUDO_USER:
+    - 'no'
+  tags: get,sys-util,generic,_dmidecode
+input_mapping:
+  out_dir_path: CM_PLATFORM_DETAILS_DIR_PATH
+  out_file_name: CM_PLATFORM_DETAILS_FILE_NAME
+prehook_deps:
+- enable_if_env:
+    CM_HOST_OS_TYPE:
+    - linux
+    CM_INSTALL_NUMACTL:
+    - 'True'
+  skip_if_env:
+    CM_SUDO_USER:
+    - 'no'
+  tags: get,sys-util,generic,_numactl
+- enable_if_env:
+    CM_HOST_OS_TYPE:
+    - linux
+    CM_INSTALL_CPUPOWER:
+    - 'True'
+  env:
+    CM_TMP_FAIL_SAFE: 'yes'
+  skip_if_env:
+    CM_SUDO_USER:
+    - 'no'
+  tags: get,sys-util,generic,_linux-tools
+tags:
+- get
+- platform
+- details
+- platform-details
+uid: f0801943c17f4e48
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-platform-details/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-platform-details/customize.py
new file mode 100644
index 000000000..15b761269
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-platform-details/customize.py
@@ -0,0 +1,58 @@
+#
+# Copyright: 
https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import subprocess + + +def check_installation(command, os_info): + if os_info['platform'] == "windows": + return subprocess.call( + [command, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) == 0 + elif os_info['platform'] == "linux": + return subprocess.call(['which', command], stdout=subprocess.PIPE, + stderr=subprocess.PIPE) == 0 # 0 means the package is there + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if not check_installation("numactl", os_info): + env['CM_INSTALL_NUMACTL'] = 'True' + + # if not check_installation("cpupower",os_info): + env['CM_INSTALL_CPUPOWER'] = 'True' + + if env.get('CM_PLATFORM_DETAILS_FILE_PATH', '') == '': + if env.get('CM_PLATFORM_DETAILS_DIR_PATH', '') == '': + env['CM_PLATFORM_DETAILS_DIR_PATH'] = os.getcwd() + if env.get('CM_PLATFORM_DETAILS_FILE_NAME', '') == '': + env['CM_PLATFORM_DETAILS_FILE_NAME'] = "system-info.txt" + env['CM_PLATFORM_DETAILS_FILE_PATH'] = os.path.join( + env['CM_PLATFORM_DETAILS_DIR_PATH'], env['CM_PLATFORM_DETAILS_FILE_NAME']) + + return {'return': 0} + + +def postprocess(i): + + state = i['state'] + + env = i['env'] + + os_info = i['os_info'] + + automation = i['automation'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-platform-details/run-macos.sh b/cmx4mlops/cmx4mlops/repo/script/get-platform-details/run-macos.sh new file mode 100644 index 000000000..fcde181c0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-platform-details/run-macos.sh @@ -0,0 +1 @@ +echo "WARNING: get-platform-details script is fully supported on linux systems only." diff --git a/cmx4mlops/cmx4mlops/repo/script/get-platform-details/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-platform-details/run.bat new file mode 100644 index 000000000..fcde181c0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-platform-details/run.bat @@ -0,0 +1 @@ +echo "WARNING: get-platform-details script is fully supported on linux systems only." diff --git a/cmx4mlops/cmx4mlops/repo/script/get-platform-details/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-platform-details/run.sh new file mode 100644 index 000000000..12b0388a1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-platform-details/run.sh @@ -0,0 +1,138 @@ +#!/bin/bash + +OUTPUT_FILE="$CM_PLATFORM_DETAILS_FILE_PATH" +#set -e +#echo $OUTPUT_FILE +echo "WARNING: sudo permission is needed for some of the below commands" + +echo "Platform Details" > $OUTPUT_FILE +echo "" >> $OUTPUT_FILE +echo "------------------------------------------------------------" >> $OUTPUT_FILE +echo "1. uname -a" >> $OUTPUT_FILE +eval "uname -a" >> $OUTPUT_FILE +test $? -eq 0 || exit $? +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "2. w" >> $OUTPUT_FILE +eval "w" >> $OUTPUT_FILE +test $? -eq 0 || exit $? +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "3. 
Username" >> $OUTPUT_FILE +eval "whoami" >> $OUTPUT_FILE +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "4. ulimit -a" >> $OUTPUT_FILE +eval "ulimit -a" >> $OUTPUT_FILE +test $? -eq 0 || exit $? +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "5. sysinfo process ancestry" >> $OUTPUT_FILE +eval "pstree" >> $OUTPUT_FILE +test $? -eq 0 || exit $? +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "6. /proc/cpuinfo" >> $OUTPUT_FILE +eval "cat /proc/cpuinfo" >> $OUTPUT_FILE +test $? -eq 0 || exit $? +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "7. lscpu" >> $OUTPUT_FILE +eval "lscpu" >> $OUTPUT_FILE +test $? -eq 0 || exit $? +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "8. numactl --hardware" >> $OUTPUT_FILE +if [[ ${CM_SUDO_USER} == "yes" ]]; then + echo "${CM_SUDO} numactl --hardware" + eval "${CM_SUDO} numactl --hardware" >> $OUTPUT_FILE + test $? -eq 0 || exit $? +else + echo "Requires SUDO permission" >> $OUTPUT_FILE +fi +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "9. /proc/meminfo" >> $OUTPUT_FILE +eval "cat /proc/meminfo" >> $OUTPUT_FILE +test $? -eq 0 || exit $? +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "10. who -r" >> $OUTPUT_FILE +eval "who -r" >> $OUTPUT_FILE +test $? -eq 0 || exit $? +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "11. Systemd service manager version" >> $OUTPUT_FILE +eval "systemctl --version | head -n 1" >> $OUTPUT_FILE +test $? -eq 0 || exit $? +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "12. Services, from systemctl list-unit-files" >> $OUTPUT_FILE +eval "systemctl list-unit-files" >> $OUTPUT_FILE +test $? -eq 0 || exit $? +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "13. Linux kernel boot-time arguments, from /proc/cmdline" >> $OUTPUT_FILE +eval "cat /proc/cmdline" >> $OUTPUT_FILE +test $? -eq 0 || exit $? +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "14. cpupower frequency-info" >> $OUTPUT_FILE +eval "cpupower frequency-info" >> $OUTPUT_FILE +test $? -eq 0 || echo "FAILED: cpupower frequency-info" >> $OUTPUT_FILE +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "15. sysctl" >> $OUTPUT_FILE +if [[ ${CM_SUDO_USER} == "yes" ]]; then + echo "${CM_SUDO} sysctl -a" + eval "${CM_SUDO} sysctl -a" >> $OUTPUT_FILE + test $? -eq 0 || exit $? +else + echo "Requires SUDO permission" >> $OUTPUT_FILE +fi +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "16. /sys/kernel/mm/transparent_hugepage" >> $OUTPUT_FILE +eval "cat /sys/kernel/mm/transparent_hugepage/enabled" >> $OUTPUT_FILE +test $? -eq 0 || exit $? +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "17. /sys/kernel/mm/transparent_hugepage/khugepaged" >> $OUTPUT_FILE +eval "cat /sys/kernel/mm/transparent_hugepage/khugepaged/defrag" >> $OUTPUT_FILE +test $? -eq 0 || exit $? +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "18. OS release" >> $OUTPUT_FILE +eval "cat /etc/os-release" >> $OUTPUT_FILE +test $? -eq 0 || exit $? 
+echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "19. Disk information" >> $OUTPUT_FILE +eval "lsblk" >> $OUTPUT_FILE +test $? -eq 0 || exit $? +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "20. /sys/devices/virtual/dmi/id" >> $OUTPUT_FILE +eval "ls /sys/devices/virtual/dmi/id" >> $OUTPUT_FILE +test $? -eq 0 || exit $? +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "21. dmidecode" >> $OUTPUT_FILE +if [[ ${CM_SUDO_USER} == "yes" ]]; then + eval "${CM_SUDO} dmidecode" >> $OUTPUT_FILE + test $? -eq 0 || echo "FAILED: dmidecode" >> $OUTPUT_FILE +else + echo "Requires SUDO permission" >> $OUTPUT_FILE +fi +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "22. BIOS" >> $OUTPUT_FILE +if [[ ${CM_SUDO_USER} == "yes" ]]; then + eval "${CM_SUDO} dmidecode -t bios" >> $OUTPUT_FILE + test $? -eq 0 || echo "FAILED: dmidecode -t bios" >> $OUTPUT_FILE +else + echo "Requires SUDO permission" >> $OUTPUT_FILE +fi +echo "------------------------------------------------------------" >> $OUTPUT_FILE + +echo "System information has been saved to $OUTPUT_FILE" diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/README-extra.md new file mode 100644 index 000000000..7a6f99137 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/README-extra.md @@ -0,0 +1,16 @@ +# Get Preprocessed Criteo Dataset +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) preprocesses the Criteo dataset. + +## How To +```bash +cm run script --tags=get,criteo,preprocessed --threads=[NUM_THREADS] +``` +where, +* `[DIRECTORY]:` is the folder to store the preprocessed dataset. Default is current work directory +* `[NUM_THREADS:]` is the number of threads to do preprocessing. Default is number of host cpus. + + +## Exported Variables +* `[CM_DATASET_PREPROCESSED_PATH]:` Directory where the preprocessed images are stored + + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/README.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/README.md new file mode 100644 index 000000000..df674df78 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-criteo](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-criteo) for the documentation of this CM script. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/_cm.yaml new file mode 100644 index 000000000..2f012605d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/_cm.yaml @@ -0,0 +1,156 @@ +alias: get-preprocessed-dataset-criteo +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +deps: +- names: + - python3 + - python + tags: get,python3 +- names: + - original-dataset + - criteo-dataset + skip_if_env: + CM_DATASET_PREPROCESSED_PATH: + - 'on' + tags: get,dataset,criteo,original +- names: + - dlrm-src + skip_if_env: + CM_DATASET_PREPROCESSED_PATH: + - 'on' + tags: get,dlrm,src +- names: + - inference-src + skip_if_env: + CM_DATASET_PREPROCESSED_PATH: + - 'on' + tags: mlperf,mlcommons,inference,source,src +- skip_if_env: + CM_DATASET_PREPROCESSED_PATH: + - 'on' + tags: get,generic-python-lib,_scikit-learn +- skip_if_env: + CM_DATASET_PREPROCESSED_PATH: + - 'on' + tags: get,generic-python-lib,_torch +- skip_if_env: + CM_DATASET_PREPROCESSED_PATH: + - 'on' + tags: get,generic-python-lib,_opencv-python +- skip_if_env: + CM_DATASET_PREPROCESSED_PATH: + - 'on' + tags: get,generic-python-lib,_decorator +- skip_if_env: + CM_DATASET_PREPROCESSED_PATH: + - 'on' + tags: get,generic-python-lib,_psutil +- skip_if_env: + CM_DATASET_PREPROCESSED_PATH: + - 'on' + tags: get,generic-python-lib,_onnx +- skip_if_env: + CM_DATASET_PREPROCESSED_PATH: + - 'on' + tags: get,generic-python-lib,_tqdm +- skip_if_env: + CM_DATASET_PREPROCESSED_PATH: + - 'on' + tags: get,generic-python-lib,_mlperf_logging +docker: + run: false +input_mapping: + dir: CM_DATASET_PREPROCESSED_PATH + output_dir: CM_DATASET_PREPROCESSED_OUTPUT_PATH + threads: CM_NUM_PREPROCESS_THREADS +new_env_keys: +- CM_DATASET_PREPROCESSED_PATH +- CM_DATASET_CRITEO_MULTIHOT +- CM_CRITEO_PREPROCESSED_PATH +tags: +- get +- dataset +- criteo +- recommendation +- dlrm +- preprocessed +uid: afa59956272a4ba4 +variations: + '1': + env: + CM_DATASET_SIZE: '1' + '50': + env: + CM_DATASET_SIZE: '50' + fake: + add_deps_recursive: + original-dataset: + tags: _fake + env: + CM_CRITEO_FAKE: 'yes' + full: + add_deps_recursive: + original-dataset: + tags: -_fake + env: {} + mlc: + default: true + env: + CM_DATASET_PREPROCESSED_CRITEO_FROM_MLC: 'yes' + CM_DATASET_PREPROCESSED_PATH: 'on' + group: src + multihot: + default: true + deps: + - names: + - mlperf-training + - training-src + skip_if_env: + CM_DATASET_PREPROCESSED_CRITEO_FROM_MLC: + - 'yes' + tags: get,mlperf,training,src + - skip_if_env: + CM_DATASET_PREPROCESSED_CRITEO_FROM_MLC: + - 'yes' + tags: get,generic-python-lib,_package.typing_inspect + - skip_if_env: + CM_DATASET_PREPROCESSED_CRITEO_FROM_MLC: + - 'yes' + tags: get,generic-python-lib,_package.iopath + - skip_if_env: + CM_DATASET_PREPROCESSED_CRITEO_FROM_MLC: + - 'yes' + tags: get,generic-python-lib,_package.fbgemm_gpu + - skip_if_env: + CM_DATASET_PREPROCESSED_CRITEO_FROM_MLC: + - 'yes' + tags: get,generic-python-lib,_package.torchrec + - skip_if_env: + CM_DATASET_PREPROCESSED_CRITEO_FROM_MLC: + - 'yes' + tags: get,generic-python-lib,_package.pyre_extensions + env: + CM_DATASET_CRITEO_MULTIHOT: 'yes' + group: type + multihot,mlc: + deps: + - env: + CM_DOWNLOAD_CHECKSUM_FILE: <<>>/checksums.txt + CM_DOWNLOAD_FINAL_ENV_NAME: CM_DATASET_PREPROCESSED_PATH + CM_EXTRACT_FINAL_ENV_NAME: CM_DATASET_PREPROCESSED_PATH + CM_EXTRACT_TO_FOLDER: criteo-preprocessed + CM_RCLONE_CONFIG_NAME: 
mlc-inference
+      CM_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/dlrm_preprocessed
+    extra_cache_tags: criteo,preprocessed,dataset
+    force_cache: true
+    names:
+    - dae
+    tags: download-and-extract,_rclone,_url.mlc-inference:mlcommons-inference-wg-public/dlrm_preprocessed
+  preprocess:
+    group: src
+  validation:
+    add_deps:
+      original-dataset:
+        tags: _validation
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/checksums.txt b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/checksums.txt
new file mode 100644
index 000000000..f9bea4aed
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/checksums.txt
@@ -0,0 +1,3 @@
+cdf7af87cbc7e9b468c0be46b1767601 day_23_dense.npy
+dd68f93301812026ed6f58dfb0757fa7 day_23_labels.npy
+c46b7e31ec6f2f8768fa60bdfc0f6e40 day_23_sparse_multi_hot.npz
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/customize.py
new file mode 100644
index 000000000..7115e89b9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/customize.py
@@ -0,0 +1,62 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+import shutil
+
+
+def preprocess(i):
+
+    env = i['env']
+
+    skip_preprocessing = False
+    if env.get('CM_DATASET_PREPROCESSED_PATH', '') != '':
+        '''
+        Path with preprocessed dataset given as input
+        '''
+        skip_preprocessing = True
+        print("Using preprocessed criteo dataset from '" +
+              env['CM_DATASET_PREPROCESSED_PATH'] + "'")
+
+    if not skip_preprocessing and env.get(
+            'CM_DATASET_PREPROCESSED_OUTPUT_PATH', '') != '':
+        env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd()
+
+    if not skip_preprocessing and env.get(
+            'CM_DATASET_CRITEO_MULTIHOT', '') == 'yes':
+        i['run_script_input']['script_name'] = "run-multihot"
+        # ${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/preprocess.py
+        output_dir = env['CM_DATASET_PREPROCESSED_PATH']
+        dataset_path = env['CM_DATASET_PATH']
+        tmp_dir = os.path.join(output_dir, "tmp")
+        run_dir = os.path.join(
+            env['CM_MLPERF_TRAINING_SOURCE'],
+            "recommendation_v2",
+            "torchrec_dlrm",
+            "scripts")
+        env['CM_RUN_CMD'] = f'cd {run_dir} && bash ./process_Criteo_1TB_Click_Logs_dataset.sh {dataset_path} {tmp_dir} {output_dir} '
+
+        print("Using MLCommons Training source from '" +
+              env['CM_MLPERF_TRAINING_SOURCE'] + "'")
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    env['CM_CRITEO_PREPROCESSED_PATH'] = env['CM_DATASET_PREPROCESSED_PATH']
+
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_CRITEO_PREPROCESSED_PATH']
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/preprocess.py b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/preprocess.py
new file mode 100644
index 000000000..5a5c429c6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/preprocess.py
@@ -0,0 +1,35 @@
+import os
+import sys
+mlperf_dlrm_path = os.environ['CM_MLPERF_INFERENCE_DLRM_PATH']
+python_path = os.path.join(mlperf_dlrm_path, "pytorch", "python")
+sys.path.insert(0, python_path)
+import dataset  # noqa: E402 (must be imported after the sys.path update above)
+import criteo  # noqa: E402
+
+
+dataset_name = os.environ['CM_DATASET']
+dataset_path = os.environ['CM_DATASET_PATH']
+dataset_list = os.environ.get('CM_DATASET_IMAGES_LIST', None)
+samples_to_aggregate_fix = os.environ.get(
+    'CM_DATASET_SAMPLES_TO_AGGREGATE_FIX', None)
+samples_to_aggregate_min = os.environ.get(
+    'CM_DATASET_SAMPLES_TO_AGGREGATE_MIN', None)
+samples_to_aggregate_max = os.environ.get(
+    'CM_DATASET_SAMPLES_TO_AGGREGATE_MAX', None)
+count = int(os.environ.get('CM_DATASET_SIZE', 0)) or None
+max_ind_range = os.environ.get('CM_DATASET_MAX_IND_RANGE', -1)
+threads = os.environ.get('CM_NUM_THREADS', os.cpu_count())
+threads = os.environ.get('CM_NUM_PREPROCESS_THREADS', threads)
+
+criteo.Criteo(data_path=dataset_path,
+              name=dataset_name,
+              pre_process=criteo.pre_process_criteo_dlrm,
+              use_cache=True,
+              samples_to_aggregate_fix=samples_to_aggregate_fix,
+              samples_to_aggregate_min=samples_to_aggregate_min,
+              samples_to_aggregate_max=samples_to_aggregate_max,
+              max_ind_range=max_ind_range,
+              count=count,
+              mlperf_bin_loader=False,
+              test_num_workers=threads
+              )
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/preprocess_multihot.sh b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/preprocess_multihot.sh
new file mode 100644
index 000000000..058cd76ee
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/preprocess_multihot.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+cd ${CM_MLPERF_TRAINING_SOURCE}/recommendation_v2/torchrec_dlrm/
+${CM_PYTHON_BIN_WITH_PATH} materialize_synthetic_multihot_dataset.py \
+    --in_memory_binary_criteo_path $PREPROCESSED_CRITEO_1TB_CLICK_LOGS_DATASET_PATH \
+    --output_path $MATERIALIZED_DATASET_PATH \
+    --num_embeddings_per_feature 40000000,39060,17295,7424,20265,3,7122,1543,63,40000000,3067956,405282,10,2209,11938,155,4,976,14,40000000,40000000,40000000,590152,12973,108,36 \
+    --multi_hot_sizes 3,2,1,2,6,1,1,1,1,7,3,8,1,6,9,5,1,1,1,12,100,27,10,3,1,1 \
+    --multi_hot_distribution_type uniform
+test $? -eq 0 || exit $?
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/run-multihot.sh b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/run-multihot.sh
new file mode 100644
index 000000000..e4741b41d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/run-multihot.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+CUR=$PWD
+echo ${CM_RUN_CMD}
+eval ${CM_RUN_CMD}
+test $? -eq 0 || exit $?
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/run.sh
new file mode 100644
index 000000000..5c080f4c0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-criteo/run.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+CUR=$PWD
+
+if [[ ${CM_CRITEO_FAKE} == "yes" ]]; then
+  exit 0
+fi
+#${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/preprocess.py
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+ +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/README.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/README.md new file mode 100644 index 000000000..8a6d38917 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-generic](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-generic) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/_cm.yaml new file mode 100644 index 000000000..22b6d7a92 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/_cm.yaml @@ -0,0 +1,14 @@ +alias: get-preprocesser-script-generic +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: AI/ML datasets +new_env_keys: +- +PYTHONPATH +tags: +- get +- preprocessor +- generic +- image-preprocessor +- script +uid: d5e603627e2046eb diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/customize.py new file mode 100644 index 000000000..a4d91ab37 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/customize.py @@ -0,0 +1,22 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + env = i['env'] + path = i['run_script_input']['path'] + env['+PYTHONPATH'] = [os.path.join(path, "src")] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/src/generic_preprocess.py b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/src/generic_preprocess.py new file mode 100644 index 000000000..05d65cdd2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/src/generic_preprocess.py @@ -0,0 +1,221 @@ +#!/usr/bin/env python3 + +import numpy as np +import cv2 +import os +supported_extensions = ['jpeg', 'jpg', 'gif', 'png'] + + +# Load and preprocess image + +def load_image(image_path, # Full path to processing image + target_size, # Desired size of resulting image + intermediate_size=0, # Scale to this size then crop to target size + crop_percentage=87.5, # Crop to this percentage then scale to target size + data_type='uint8', # Data type to store + data_layout='nhwc', # Data layout to store + convert_to_bgr=False, # Swap image channel RGB -> BGR + interpolation_method=cv2.INTER_LINEAR # Interpolation method. + ): + + out_height = target_size + out_width = target_size + + def resize_with_aspectratio(img): + height, width, _ = img.shape + # intermediate oversized image from which to crop + new_height = int(100. 
* out_height / crop_percentage) + # ---------------------- ,, --------------------- + new_width = int(100. * out_width / crop_percentage) + if height > width: + w = new_width + h = int(new_height * height / width) + else: + h = new_height + w = int(new_width * width / height) + img = cv2.resize(img, (w, h), interpolation=interpolation_method) + return img + + def center_crop(img): + height, width, _ = img.shape + left = int((width - out_width) / 2) + right = int((width + out_width) / 2) + top = int((height - out_height) / 2) + bottom = int((height + out_height) / 2) + img = img[top:bottom, left:right] + return img + + img = cv2.imread(image_path) + + if len(img.shape) < 3 or img.shape[2] != 3: + img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) + else: + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + + # Mimic preprocessing steps from the official reference code. + img = resize_with_aspectratio(img) + img = center_crop(img) + + # Convert to BGR. + if convert_to_bgr: + img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) + + return img + + +def preprocess_files(selected_filenames, source_dir, destination_dir, crop_percentage, square_side, inter_size, convert_to_bgr, + data_type, data_layout, new_file_extension, normalize_data, subtract_mean, given_channel_means, given_channel_stds, quantize, quant_scale, quant_offset, convert_to_unsigned, interpolation_method): + "Go through the selected_filenames and preprocess all the files (optionally normalize and subtract mean)" + + output_filenames = [] + + for current_idx in range(len(selected_filenames)): + input_filename = selected_filenames[current_idx] + + full_input_path = os.path.join(source_dir, input_filename) + + image_data = load_image(image_path=full_input_path, + target_size=square_side, + intermediate_size=inter_size, + crop_percentage=crop_percentage, + data_type=data_type, + convert_to_bgr=convert_to_bgr, + interpolation_method=interpolation_method) + + image_data = np.asarray(image_data, dtype=data_type) + + # Normalize. + if normalize_data: + image_data = image_data / 127.5 - 1.0 + + # Subtract mean value. + if subtract_mean: + if len(given_channel_means): + image_data -= given_channel_means + else: + image_data -= np.mean(image_data) + + # Subtract standard deviations. + if len(given_channel_stds): + image_data /= given_channel_stds + + # NHWC -> NCHW. + if data_layout == 'nchw': + image_data = image_data[:, :, 0:3].transpose(2, 0, 1) + + # Value 1 for quantization to int8 + if quantize == 1: + image_data = quantize_to_int8( + image_data, quant_scale, quant_offset) + + # Value 1 to convert from int8 to uint8 + if convert_to_unsigned == 1: + image_data = int8_to_uint8(image_data) + + output_filename = input_filename.rsplit( + '.', 1)[0] + '.' 
+ new_file_extension if new_file_extension else input_filename + + full_output_path = os.path.join(destination_dir, output_filename) + image_data.tofile(full_output_path) + + print("[{}]: Stored {}".format(current_idx + 1, full_output_path)) + + output_filenames.append(output_filename) + + return output_filenames + + +def quantize_to_int8(image, scale, offset): + quant_image = (image / scale + offset).astype(np.float32) + output = np.copy(quant_image) + gtZero = (quant_image > 0).astype(int) + gtZero = gtZero * 0.5 + output = output + gtZero + ltZero = (quant_image < 0).astype(int) + ltZero = ltZero * (-0.5) + output = output + ltZero + return output.astype(np.int8) + + +def int8_to_uint8(image): + image = (image + 128).astype(np.uint8) + return image + + +def preprocess(): + import sys + + source_dir = os.environ['CM_DATASET_PATH'] + destination_dir = os.environ['CM_DATASET_PREPROCESSED_PATH'] + + square_side = int(os.environ['CM_DATASET_INPUT_SQUARE_SIDE']) + crop_percentage = float(os.environ['CM_DATASET_CROP_FACTOR']) + inter_size = int(os.getenv('CM_DATASET_INTERMEDIATE_SIZE', 0)) + convert_to_bgr = int(os.getenv('CM_DATASET_CONVERT_TO_BGR', 0)) + offset = int(os.getenv('CM_DATASET_SUBSET_OFFSET', 0)) + volume = int(os.environ['CM_DATASET_SIZE']) + fof_name = os.getenv('CM_DATASET_SUBSET_FOF', 'files.txt') + data_type = os.getenv('CM_DATASET_DATA_TYPE_INPUT', 'float32') + data_layout = os.getenv('CM_DATASET_DATA_LAYOUT', '').lower() + new_file_extension = os.getenv('CM_DATASET_PREPROCESSED_EXTENSION', '') + normalize_data = int(os.getenv('CM_DATASET_NORMALIZE_DATA', '0')) + subtract_mean = int(os.getenv('CM_DATASET_SUBTRACT_MEANS', '0')) + given_channel_means = os.getenv('CM_DATASET_GIVEN_CHANNEL_MEANS', '') + given_channel_stds = os.getenv('CM_DATASET_GIVEN_CHANNEL_STDS', '') + quant_scale = float(os.environ['CM_DATASET_QUANT_SCALE']) + quant_offset = float(os.environ['CM_DATASET_QUANT_OFFSET']) + quantize = int(os.environ['CM_DATASET_QUANTIZE']) # 1 for quantize to int8 + convert_to_unsigned = int( + os.environ['CM_DATASET_CONVERT_TO_UNSIGNED']) # 1 for int8 to uint8 + + images_list = os.getenv('CM_DATASET_IMAGES_LIST') + + if given_channel_means: + given_channel_means = [float(x) + for x in given_channel_means.split(' ')] + + if given_channel_stds: + given_channel_stds = [float(x) for x in given_channel_stds.split(' ')] + + interpolation_method = os.getenv('CM_DATASET_INTERPOLATION_METHOD', '') + + print(("From: {}, To: {}, Size: {}, Crop: {}, InterSize: {}, 2BGR: {}, OFF: {}, VOL: '{}', FOF: {}," + + " DTYPE: {}, DLAYOUT: {}, EXT: {}, NORM: {}, SMEAN: {}, GCM: {}, GSTD: {}, QUANTIZE: {}, QUANT_SCALE: {}, QUANT_OFFSET: {}, CONV_UNSIGNED: {}, INTER: {}").format( + source_dir, destination_dir, square_side, crop_percentage, inter_size, convert_to_bgr, offset, volume, fof_name, + data_type, data_layout, new_file_extension, normalize_data, subtract_mean, given_channel_means, given_channel_stds, quantize, quant_scale, quant_offset, convert_to_unsigned, interpolation_method)) + + if interpolation_method == 'INTER_AREA': + # Used for ResNet in pre_process_vgg. + interpolation_method = cv2.INTER_AREA + else: + # Default interpolation method. 
+        interpolation_method = cv2.INTER_LINEAR
+
+    filenames = []
+    if images_list:
+        with open(images_list) as f:
+            filenames = f.read().splitlines()
+    else:
+        filenames = sorted(os.listdir(source_dir))
+
+    if os.path.isdir(source_dir):
+        sorted_filenames = [filename for filename in filenames if any(filename.lower().endswith(
+            extension) for extension in supported_extensions) and not filename.startswith(".")]
+
+        total_volume = len(sorted_filenames)
+
+        if offset < 0:  # support offsets "from the right"
+            offset += total_volume
+
+        selected_filenames = sorted_filenames[offset:offset + volume]
+
+        assert len(selected_filenames) == volume
+
+        output_filenames = preprocess_files(
+            selected_filenames, source_dir, destination_dir, crop_percentage, square_side, inter_size, convert_to_bgr,
+            data_type, data_layout, new_file_extension, normalize_data, subtract_mean, given_channel_means, given_channel_stds, quantize, quant_scale, quant_offset, convert_to_unsigned, interpolation_method)
+
+        fof_full_path = os.path.join(destination_dir, fof_name)
+        with open(fof_full_path, 'w') as fof:
+            for filename in output_filenames:
+                fof.write(filename + '\n')
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py
new file mode 100644
index 000000000..4c6a31dc6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py
@@ -0,0 +1,194 @@
+#!/usr/bin/env python3
+
+import errno
+import os
+import json
+import numpy as np
+from PIL import Image
+import torch
+import torchvision
+
+SUPPORTED_EXTENSIONS = ['jpeg', 'jpg', 'gif', 'png']
+
+
+def load_image(image_path, target_size, data_type='uint8', convert_to_bgr=False,
+               normalize_data=False, normalize_lower=-1, normalize_upper=1,
+               subtract_mean=False, given_channel_means='', given_channel_stds='',
+               quantize=0, quant_scale=1, quant_offset=0, convert_to_unsigned=0):
+    if not convert_to_bgr:
+        image = Image.open(image_path).convert('RGB')
+    else:
+        image = Image.open(image_path).convert('BGR')
+
+    tensor_image = torchvision.transforms.functional.to_tensor(image)
+    mean = torch.as_tensor(given_channel_means)
+    std = torch.as_tensor(given_channel_stds)
+    normalized_image = (
+        tensor_image - mean[:, None, None]) / std[:, None, None]
+
+    resized_image = torch.nn.functional.interpolate(normalized_image[None],
+                                                    size=(target_size,
+                                                          target_size),
+                                                    mode='bilinear')[0].numpy()
+
+    if quantize == 1:
+        resized_image = quantize_to_uint8(
+            resized_image, quant_scale, quant_offset)
+
+    original_height, original_width, _ = resized_image.shape
+    batch_shape = (1, target_size, target_size, 3)
+    batch_data = resized_image.reshape(batch_shape)
+
+    return batch_data, original_width, original_height
+
+
+def quantize_to_uint8(image, scale, offset):
+    quantized_image = (
+        image.astype(
+            np.float64) /
+        scale +
+        offset).astype(
+        np.float64)
+    output = np.round(quantized_image)
+    output = np.clip(output, 0, 255)
+    return output.astype(np.uint8)
+
+
+def preprocess_files(selected_filenames, source_dir, destination_dir, square_side,
+                     data_type, convert_to_bgr, normalize_data, normalize_lower,
+                     normalize_upper, subtract_mean, given_channel_means,
+                     given_channel_stds, quantize, quant_scale, quant_offset,
+                     convert_to_unsigned, new_file_extension):
+    output_signatures = []
+
+    for current_idx, input_filename in enumerate(selected_filenames):
+        full_input_path 
= os.path.join(source_dir, input_filename) + image_data, original_width, original_height = load_image( + image_path=full_input_path, + target_size=square_side, + data_type=data_type, + convert_to_bgr=convert_to_bgr, + normalize_data=normalize_data, + normalize_lower=normalize_lower, + normalize_upper=normalize_upper, + subtract_mean=subtract_mean, + given_channel_means=given_channel_means, + given_channel_stds=given_channel_stds, + quantize=quantize, + quant_scale=quant_scale, + quant_offset=quant_offset, + convert_to_unsigned=convert_to_unsigned + ) + + output_filename = f"{input_filename.rsplit('.', 1)[0]}.{new_file_extension}" if new_file_extension else input_filename + full_output_path = os.path.join(destination_dir, output_filename) + image_data.tofile(full_output_path) + + print(f"[{current_idx+1}]: Stored {full_output_path}") + output_signatures.append( + f'{output_filename};{original_width};{original_height}') + + return output_signatures + + +def preprocess(): + source_directory = os.environ['CM_DATASET_PATH'] + destination_directory = os.environ['CM_DATASET_PREPROCESSED_PATH'] + + intermediate_data_type = os.environ.get( + 'CM_DATASET_INTERMEDIATE_DATA_TYPE', np.float32) + square_side = int(os.environ['CM_DATASET_INPUT_SQUARE_SIDE']) + crop_percentage = float(os.environ['CM_DATASET_CROP_FACTOR']) + inter_size = int(os.getenv('CM_DATASET_INTERMEDIATE_SIZE', 0)) + convert_to_bgr = int(os.getenv('CM_DATASET_CONVERT_TO_BGR', 0)) + offset = int(os.getenv('CM_DATASET_SUBSET_OFFSET', 0)) + volume = int(os.environ['CM_DATASET_SIZE']) + fof_name = os.getenv('CM_DATASET_SUBSET_FOF', 'files.txt') + data_type = os.getenv('CM_DATASET_DATA_TYPE_INPUT', 'float32') + input_data_type = os.getenv('CM_DATASET_DATA_TYPE_INPUT', 'float32') + data_layout = os.getenv('CM_DATASET_DATA_LAYOUT', '').lower() + new_file_extension = os.getenv('CM_DATASET_PREPROCESSED_EXTENSION', '') + normalize_data = int(os.getenv('CM_DATASET_NORMALIZE_DATA', '0')) + subtract_mean = int(os.getenv('CM_DATASET_SUBTRACT_MEANS', '0')) + given_channel_means = os.getenv('CM_DATASET_GIVEN_CHANNEL_MEANS', '') + given_channel_stds = os.getenv('CM_DATASET_GIVEN_CHANNEL_STDS', '') + quant_scale = float(os.environ['CM_DATASET_QUANT_SCALE']) + quant_offset = float(os.environ['CM_DATASET_QUANT_OFFSET']) + quantize = int(os.environ['CM_DATASET_QUANTIZE']) # 1 for quantize to int8 + convert_to_unsigned = int( + os.environ['CM_DATASET_CONVERT_TO_UNSIGNED']) # 1 for int8 to uint8 + + images_list = os.getenv('CM_DATASET_IMAGES_LIST') + interpolation_method = os.getenv('CM_DATASET_INTERPOLATION_METHOD', '') + + annotations_filepath = os.environ['CM_DATASET_ANNOTATIONS_FILE_PATH'] + is_calibration = os.environ['CM_DATASET_TYPE'] == "calibration" + image_file = os.getenv('CM_IMAGE_FILE', '') + + normalize_lower = float(os.getenv('CM_DATASET_NORMALIZE_LOWER', -1.0)) + normalize_upper = float(os.getenv('CM_DATASET_NORMALIZE_UPPER', 1.0)) + + if given_channel_means: + given_channel_means = np.fromstring( + given_channel_means, + dtype=np.float32, + sep=' ').astype(intermediate_data_type) + if convert_to_bgr: + given_channel_means = given_channel_means[::-1] + + given_channel_stds = os.getenv('CM_DATASET_GIVEN_CHANNEL_STDS', '') + if given_channel_stds: + given_channel_stds = np.fromstring( + given_channel_stds, + dtype=np.float32, + sep=' ').astype(intermediate_data_type) + if convert_to_bgr: + given_channel_stds = given_channel_stds[::-1] + + print(f"From: {source_directory}, To: {destination_directory}, Size: {square_side}, Crop: 
{crop_percentage}, InterSize: {inter_size}, 2BGR: {convert_to_bgr}, " +
+          f"OFF: {offset}, VOL: '{volume}', FOF: {fof_name}, DTYPE: {data_type}, DLAYOUT: {data_layout}, EXT: {new_file_extension}, " +
+          f"NORM: {normalize_data}, SMEAN: {subtract_mean}, GCM: {given_channel_means}, GSTD: {given_channel_stds}, QUANTIZE: {quantize}, QUANT_SCALE: {quant_scale}, " +
+          f"QUANT_OFFSET: {quant_offset}, CONV_UNSIGNED: {convert_to_unsigned}, INTER: {interpolation_method}")
+
+    if image_file:
+        source_directory = os.path.dirname(image_file)
+        selected_filenames = [os.path.basename(image_file)]
+    else:
+        if annotations_filepath and not is_calibration:
+            with open(annotations_filepath, "r") as annotations_fh:
+                annotations_struct = json.load(annotations_fh)
+            ordered_filenames = [image_entry['file_name']
+                                 for image_entry in annotations_struct['images']]
+        elif os.path.isdir(source_directory):
+            ordered_filenames = [
+                filename for filename in sorted(
+                    os.listdir(source_directory)) if any(
+                    filename.lower().endswith(extension) for extension in SUPPORTED_EXTENSIONS)]
+        else:
+            raise FileNotFoundError(
+                errno.ENOENT, os.strerror(
+                    errno.ENOENT), source_directory)
+
+        total_volume = len(ordered_filenames)
+
+        if offset < 0:
+            offset += total_volume
+
+        if not volume:
+            volume = total_volume - offset
+
+        selected_filenames = ordered_filenames[offset:offset + volume]
+
+    output_signatures = preprocess_files(selected_filenames, source_directory, destination_directory, square_side, data_type,
+                                         convert_to_bgr, normalize_data, normalize_lower, normalize_upper,
+                                         subtract_mean, given_channel_means, given_channel_stds, quantize,
+                                         quant_scale, quant_offset, convert_to_unsigned, new_file_extension)
+
+    fof_full_path = os.path.join(destination_directory, fof_name)
+    with open(fof_full_path, 'w') as fof_file:
+        for filename in output_signatures:
+            fof_file.write(f'{filename}\n')
+
+
+if __name__ == "__main__":
+    preprocess()
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/README-extra.md
new file mode 100644
index 000000000..cc2742fa5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/README-extra.md
@@ -0,0 +1,26 @@
+# Get Preprocessed Imagenet Dataset
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) preprocesses the Imagenet dataset.
+
+## How To
+```bash
+cm run script --tags=get,imagenet,preprocessed,_[VARIATION] --dir=[DIRECTORY] --threads=[NUM_THREADS]
+```
+where
+* `[DIRECTORY]:` is the folder to store the preprocessed dataset. Defaults to the current working directory.
+* `[NUM_THREADS]:` is the number of threads used for preprocessing. Defaults to the number of host CPUs.
+and the supported [VARIATIONS] (comma-separated, each beginning with _) are
+* `[1]:` Preprocess only 1 image
+* `[500]:` Preprocess the first 500 images
+* `[full]:` Preprocess the full dataset
+* `[NHWC]:` Preprocess the dataset with the `Channel` component last
+* `[NCHW]:` Preprocess the dataset with the `Channel` component first
+
+## Input Variables coming from Dependencies
+* `[CM_DATASET_PATH]:` Folder path to the Imagenet dataset
+* `[CM_DATASET_AUX_PATH]:` Folder path to the Imagenet auxiliary dataset (to get the image list)
+* `[CM_DATASET_IMAGES_LIST]:` File path containing the image names
+
+## Exported Variables
+* `[CM_DATASET_PREPROCESSED_PATH]:` Directory where the preprocessed images are stored
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/README.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/README.md
new file mode 100644
index 000000000..6cc290554
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-imagenet](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-imagenet) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/_cm.yaml
new file mode 100644
index 000000000..fcd514b77
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/_cm.yaml
@@ -0,0 +1,272 @@
+alias: get-preprocessed-dataset-imagenet
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: AI/ML datasets
+default_env:
+  CM_DATASET_CONVERT_TO_UNSIGNED: '0'
+  CM_DATASET_CROP_FACTOR: '87.5'
+  CM_DATASET_DATA_LAYOUT: NCHW
+  CM_DATASET_DATA_TYPE: float32
+  CM_DATASET_PREPROCESSED_EXTENSION: npy
+  CM_DATASET_QUANTIZE: '0'
+  CM_DATASET_QUANT_OFFSET: '0'
+  CM_DATASET_QUANT_SCALE: '1'
+  CM_DATASET_REFERENCE_PREPROCESSOR: '1'
+  CM_MODEL: resnet50
+  CM_PREPROCESS_VGG: 'yes'
+deps:
+- names:
+  - python3
+  - python
+  skip_if_env:
+    CM_IMAGENET_PREPROCESSED_PATH:
+    - 'on'
+  tags: get,python3
+- names:
+  - original-dataset
+  skip_if_env:
+    CM_IMAGENET_PREPROCESSED_PATH:
+    - 'on'
+  tags: get,dataset,image-classification,original
+- enable_if_env:
+    CM_DATASET_TYPE:
+    - validation
+  skip_if_env:
+    CM_IMAGENET_PREPROCESSED_PATH:
+    - 'on'
+  tags: get,dataset-aux,image-classification,imagenet-aux
+- enable_if_env:
+    CM_DATASET_TYPE:
+    - calibration
+  tags: get,dataset,imagenet,calibration
+- tags: get,generic-python-lib,_package.opencv-python-headless
+- tags: get,generic-python-lib,_pillow
+- enable_if_env:
+    CM_DATASET_REFERENCE_PREPROCESSOR:
+    - '1'
+  names:
+  - inference-src
+  skip_if_env:
+    CM_IMAGENET_PREPROCESSED_PATH:
+    - 'on'
+  tags: mlperf,mlcommons,inference,source,src
+docker:
+  run: false
+env:
+  CM_DATASET: imagenet
+input_mapping:
+  dir: CM_DATASET_PREPROCESSED_PATH
+  imagenet_path: CM_IMAGENET_PATH
+  imagenet_preprocessed_path: CM_IMAGENET_PREPROCESSED_PATH
+  threads: CM_NUM_PREPROCESS_THREADS
+new_env_keys:
+- CM_DATASET_*
+tags:
+- get
+- dataset
+- imagenet
+- ILSVRC
+- image-classification
+- preprocessed
+uid: f259d490bbaf45f5
+variations:
+  '1':
+    add_deps:
+      original-dataset:
+        tags: _2012-1
+    env:
+      CM_DATASET_SIZE: '1'
+    group: size
+  '500':
+    add_deps:
+      original-dataset:
+        tags: _2012
+    env:
+      CM_DATASET_SIZE: '500'
+    group: size
+  500,validation:
+    add_deps:
original-dataset: + tags: _size.500 + NCHW: + default: true + env: + CM_DATASET_DATA_LAYOUT: NCHW + group: layout + NHWC: + env: + CM_DATASET_DATA_LAYOUT: NHWC + group: layout + calibration: + add_deps: + original-dataset: + tags: _full + default_variations: + calibration-option: mlperf.option1 + preprocessing-source: generic-preprocessor + env: + CM_DATASET_TYPE: calibration + group: dataset-type + default: {} + float32: + env: + CM_DATASET_CONVERT_TO_UNSIGNED: '0' + CM_DATASET_DATA_TYPE: float32 + CM_DATASET_QUANTIZE: '0' + group: precision + for.mobilenet: + base: + - mobilenet_ + env: {} + group: model + for.mobilenet,float32: + env: + CM_DATASET_GIVEN_CHANNEL_MEANS: '' + CM_DATASET_NORMALIZE_DATA: '1' + CM_DATASET_QUANTIZE: '0' + CM_DATASET_SUBTRACT_MEANS: '0' + for.mobilenet,rgb8: + env: + CM_DATASET_DATA_TYPE: uint8 + CM_DATASET_GIVEN_CHANNEL_MEANS: '' + CM_DATASET_NORMALIZE_DATA: '0' + CM_DATASET_QUANTIZE: '0' + CM_DATASET_SUBTRACT_MEANS: '0' + for.resnet50: + base: + - resnet50_ + env: + CM_DATASET_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94 + CM_DATASET_INTERPOLATION_METHOD: INTER_AREA + CM_DATASET_NORMALIZE_DATA: '0' + CM_DATASET_SUBTRACT_MEANS: '1' + group: model + for.resnet50,float32: + env: {} + for.resnet50,rgb8: + env: + CM_DATASET_DATA_TYPE: uint8 + CM_DATASET_GIVEN_CHANNEL_MEANS: '' + CM_DATASET_NORMALIZE_DATA: '0' + CM_DATASET_QUANTIZE: '0' + CM_DATASET_SUBTRACT_MEANS: '0' + for.resnet50,rgb8,uint8: + env: + CM_DATASET_GIVEN_CHANNEL_MEANS: 123.68 116.78 103.94 + CM_DATASET_QUANTIZE: '1' + CM_DATASET_SUBTRACT_MEANS: '1' + for.resnet50,uint8: + env: + CM_DATASET_QUANT_OFFSET: '0' + CM_DATASET_QUANT_SCALE: '1.18944883' + full: + add_deps: + original-dataset: + tags: _full + env: + CM_DATASET_SIZE: '50000' + group: size + generic-preprocessor: + env: + CM_DATASET_REFERENCE_PREPROCESSOR: '0' + group: preprocessing-source + prehook_deps: + - tags: get,generic,image-preprocessor + int8: + env: + CM_DATASET_CONVERT_TO_UNSIGNED: '0' + CM_DATASET_DATA_TYPE: int8 + CM_DATASET_QUANTIZE: '1' + group: precision + inter.area: + env: + CM_DATASET_INTERPOLATION_METHOD: INTER_AREA + group: interpolation-method + inter.linear: + env: + CM_DATASET_INTERPOLATION_METHOD: INTER_LINEAR + group: interpolation-method + mlcommons-reference-preprocessor: + default: true + env: + CM_DATASET_REFERENCE_PREPROCESSOR: '1' + group: preprocessing-source + mlperf.option1: + env: + CM_DATASET_CALIBRATION_OPTION: one + group: calibration-option + mlperf.option2: + env: + CM_DATASET_CALIBRATION_OPTION: two + group: calibration-option + mobilenet_: + default_variations: + extension: rgb32 + interpolation-method: inter.linear + precision: int8 + preprocessing-source: generic-preprocessor + env: + CM_MODEL: mobilenet + pytorch: + default_variations: + preprocessing-source: mlcommons-reference-preprocessor + deps: + - names: + - torchvision + tags: get,generic-python-lib,_torchvision + env: + CM_MODEL: resnet50 + CM_PREPROCESS_PYTORCH: 'yes' + resnet50_: + default_variations: + extension: rgb32 + interpolation-method: inter.area + precision: float32 + preprocessing-source: generic-preprocessor + env: + CM_MODEL: resnet50 + resolution.#: + env: + CM_DATASET_INPUT_SQUARE_SIDE: '#' + group: resolution + resolution.224: + default: true + env: + CM_DATASET_INPUT_SQUARE_SIDE: '224' + group: resolution + rgb32: + env: + CM_DATASET_PREPROCESSED_EXTENSION: rgb32 + group: extension + rgb8: + env: + CM_DATASET_PREPROCESSED_EXTENSION: rgb8 + group: extension + size.#: + add_deps: + original-dataset: + tags: _# + env: + 
CM_DATASET_SIZE: '#' + group: size + tflite_tpu: + default_variations: + preprocessing-source: mlcommons-reference-preprocessor + env: + CM_MODEL: resnet50 + CM_PREPROCESS_TFLITE_TPU: 'yes' + uint8: + env: + CM_DATASET_CONVERT_TO_UNSIGNED: '1' + CM_DATASET_DATA_TYPE: uint8 + CM_DATASET_DATA_TYPE_INPUT: float32 + CM_DATASET_QUANTIZE: '1' + group: precision + validation: + default: 'true' + default_variations: + size: '500' + env: + CM_DATASET_TYPE: validation + group: dataset-type diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/customize.py new file mode 100644 index 000000000..a3727a64c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/customize.py @@ -0,0 +1,84 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +from os.path import exists +import shutil +import glob + + +def preprocess(i): + + env = i['env'] + if 'CM_IMAGENET_PREPROCESSED_PATH' in env: + files = glob.glob( + env['CM_IMAGENET_PREPROCESSED_PATH'] + + "/**/" + + env['CM_IMAGENET_PREPROCESSED_FILENAME'], + recursive=True) + if files: + env['CM_DATASET_PREPROCESSED_PATH'] = env['CM_IMAGENET_PREPROCESSED_PATH'] + else: + return {'return': 1, 'error': 'No preprocessed images found in ' + + env['CM_IMAGENET_PREPROCESSED_PATH']} + else: + if env.get('CM_DATASET_REFERENCE_PREPROCESSOR', "0") == "1": + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + + env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd() + if env['CM_DATASET_TYPE'] == "validation" and not exists( + os.path.join(env['CM_DATASET_PATH'], "val_map.txt")): + shutil.copy(os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt"), os.path.join(env['CM_DATASET_PATH'], + "val_map.txt")) + + preprocessed_path = env['CM_DATASET_PREPROCESSED_PATH'] + + if env.get('CM_DATASET_TYPE', '') == "validation" and not exists( + os.path.join(preprocessed_path, "val_map.txt")): + shutil.copy(os.path.join(env['CM_DATASET_AUX_PATH'], "val.txt"), + os.path.join(preprocessed_path, "val_map.txt")) + + if env.get('CM_DATASET_TYPE', '') == "calibration": + env['CM_DATASET_IMAGES_LIST'] = env['CM_MLPERF_IMAGENET_CALIBRATION_LIST_FILE_WITH_PATH'] + env['CM_DATASET_SIZE'] = 500 + + if env.get('CM_DATASET_DATA_TYPE_INPUT', '') == '': + env['CM_DATASET_DATA_TYPE_INPUT'] = env['CM_DATASET_DATA_TYPE'] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + # finalize path + preprocessed_path = env['CM_DATASET_PREPROCESSED_PATH'] + preprocessed_images_list = [] + preprocessed_imagenames_list = [] + + match_text = "/*." 
+ env.get("CM_DATASET_PREPROCESSED_EXTENSION", "*") + for filename in sorted(glob.glob(preprocessed_path + match_text)): + preprocessed_images_list.append(filename) + preprocessed_imagenames_list.append(os.path.basename(filename)) + with open("preprocessed_files.txt", "w") as f: + f.write("\n".join(preprocessed_images_list)) + with open("preprocessed_filenames.txt", "w") as f: + f.write("\n".join(preprocessed_imagenames_list)) + + env['CM_DATASET_PREPROCESSED_IMAGES_LIST'] = os.path.join( + os.getcwd(), "preprocessed_files.txt") + env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST'] = os.path.join( + os.getcwd(), "preprocessed_filenames.txt") + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/preprocess.py b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/preprocess.py new file mode 100644 index 000000000..beefd1dca --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/preprocess.py @@ -0,0 +1,41 @@ +import os +import sys + +if os.environ.get('CM_DATASET_REFERENCE_PREPROCESSOR', '1') == "0": + import generic_preprocess + generic_preprocess.preprocess() +else: + mlperf_src_path = os.environ['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'] + python_path = os.path.join(mlperf_src_path, "python") + sys.path.insert(0, python_path) + + import imagenet + import dataset + + dataset_path = os.environ['CM_DATASET_PATH'] + dataset_list = os.environ.get('CM_DATASET_IMAGES_LIST', None) + img_format = os.environ.get('CM_DATASET_DATA_LAYOUT', 'NHWC') + count = int(os.environ.get('CM_DATASET_SIZE', 1)) + preprocessed_dir = os.environ.get( + 'CM_DATASET_PREPROCESSED_PATH', os.getcwd()) + threads = os.environ.get('CM_NUM_THREADS', os.cpu_count()) + threads = int(os.environ.get('CM_NUM_PREPROCESS_THREADS', threads)) + + if os.environ.get('CM_MODEL') == 'mobilenet': + pre_process = dataset.pre_process_mobilenet + elif os.environ.get('CM_MODEL', 'resnet50') == 'resnet50' and os.environ.get('CM_PREPROCESS_PYTORCH', '') == "yes": + pre_process = dataset.pre_process_imagenet_pytorch + elif os.environ.get('CM_MODEL', 'resnet50') == 'resnet50' and os.environ.get('CM_PREPROCESS_TFLITE_TPU', '') == "yes": + pre_process = dataset.pre_process_imagenet_tflite_tpu + else: + pre_process = dataset.pre_process_vgg + + imagenet.Imagenet(data_path=dataset_path, + image_list=dataset_list, + name="imagenet", + image_format=img_format, + pre_process=pre_process, + use_cache=True, + count=count, + threads=threads, + preprocessed_dir=preprocessed_dir) diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/run.bat new file mode 100644 index 000000000..7f6036f84 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/run.bat @@ -0,0 +1,4 @@ +@echo off + +%CM_PYTHON_BIN% %CM_TMP_CURRENT_SCRIPT_PATH%\preprocess.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/run.sh new file mode 100644 index 000000000..c6e17411b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-imagenet/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash +if [ ! -z ${CM_IMAGENET_PREPROCESSED_PATH+x} ]; then + exit 0 +fi +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/preprocess.py +test $? 
-eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/README.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/README.md new file mode 100644 index 000000000..fee3d0ae4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/_cm.yaml new file mode 100644 index 000000000..7c7eeda67 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/_cm.yaml @@ -0,0 +1,97 @@ +alias: get-preprocessed-dataset-kits19 +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +default_env: + CM_DATASET: kits19 + CM_DATASET_DTYPE: fp32 +deps: +- names: + - python3 + - python + tags: get,python3 +- names: + - original-dataset + tags: get,dataset,medical-imaging,kits19,original +- names: + - inference-src + tags: mlperf,mlcommons,inference,source,src +- tags: get,generic-python-lib,_scipy +- tags: get,generic-python-lib,_nibabel +- names: + - numpy + tags: get,generic-python-lib,_numpy +input_mapping: + dir: CM_DATASET_PREPROCESSED_PATH + threads: CM_NUM_PREPROCESS_THREADS +new_env_keys: +- CM_DATASET_* +tags: +- get +- dataset +- medical-imaging +- kits19 +- preprocessed +uid: 2094d9b9ab6c4c9e +variations: + '1': + adr: + original-dataset: + tags: _1 + env: + CM_DATASET_SIZE: '1' + group: dataset-count + '5': + adr: + original-dataset: + tags: _5 + env: + CM_DATASET_SIZE: '5' + group: dataset-count + '50': + adr: + original-dataset: + tags: _50 + env: + CM_DATASET_SIZE: '50' + group: dataset-count + '500': + adr: + original-dataset: + tags: _500 + env: + CM_DATASET_SIZE: '500' + group: dataset-count + calibration: + add_deps: + original-dataset: + tags: _calibration + env: + CM_DATASET_PATH: <<>> + group: dataset-type + fp32: + default: true + env: + CM_DATASET_DTYPE: fp32 + group: dataset-precision + full: + adr: + original-dataset: + tags: _full + env: + CM_DATASET_SIZE: '' + group: dataset-count + int8: + env: + CM_DATASET_DTYPE: int8 + group: dataset-precision + nvidia: + env: + CM_PREPROCESSING_BY_NVIDIA: 'yes' + validation: + add_deps: + original-dataset: + tags: _validation + default: true + group: dataset-type diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/customize.py new file mode 100644 index 000000000..1914e0e64 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/customize.py @@ -0,0 +1,41 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: 
https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + env = i['env'] + + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + preprocess_src = os.path.join( + env['CM_MLPERF_INFERENCE_3DUNET_PATH'], + 'preprocess.py') + cmd = 'cd ' + env['CM_MLPERF_INFERENCE_3DUNET_PATH'] + \ + ' && ${CM_PYTHON_BIN_WITH_PATH} preprocess.py --raw_data_dir ' + \ + env['CM_DATASET_PATH'] + ' --results_dir ' + \ + os.getcwd() + ' --mode preprocess' + env['CM_TMP_CMD'] = cmd + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + if 'CM_DATASET_PREPROCESSED_PATH' not in env: + env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd() + env['CM_DATASET_KITS19_PREPROCESSED_PATH'] = env['CM_DATASET_PREPROCESSED_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/run.sh new file mode 100644 index 000000000..a9f248c38 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-kits19/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +cmd=${CM_TMP_CMD} +echo $cmd +eval $cmd +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/README.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/README.md new file mode 100644 index 000000000..c0e885ca5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-librispeech](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-librispeech) for the documentation of this CM script. 
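Both the kits19 script above and the librispeech script below follow the same execution pattern: `customize.py` assembles a complete shell command into `CM_TMP_CMD`, and `run.sh` simply echoes and `eval`s it. As a rough hand-run sketch, the kits19 command reduces to the following (all three paths are illustrative placeholders for the `CM_MLPERF_INFERENCE_3DUNET_PATH`, `CM_DATASET_PATH` and working-directory values resolved at run time, not values exported by the script):

```bash
# Sketch of the CM_TMP_CMD string assembled by the kits19 customize.py above;
# the directories are placeholder assumptions.
cd /path/to/mlperf-inference/vision/medical_imaging/3d-unet-kits19
python3 preprocess.py \
    --raw_data_dir /path/to/kits19/raw_data \
    --results_dir /path/to/preprocessed_output \
    --mode preprocess
```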
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/_cm.yaml
new file mode 100644
index 000000000..07adafaa5
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/_cm.yaml
@@ -0,0 +1,93 @@
+alias: get-preprocessed-dataset-librispeech
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: AI/ML datasets
+default_env:
+  CM_DATASET: librispeech
+  CM_DATASET_DTYPE: fp32
+deps:
+- names:
+  - python3
+  - python
+  tags: get,python3
+- names:
+  - original-dataset
+  tags: get,dataset,speech-recognition,librispeech,original
+- names:
+  - inference-src
+  tags: mlperf,mlcommons,inference,source,src
+- tags: get,generic-python-lib,_sox
+- tags: get,generic-python-lib,_pandas
+- tags: get,generic-python-lib,_tqdm
+- tags: get,sys-util,generic,_sox
+input_mapping:
+  dir: CM_DATASET_PREPROCESSED_PATH
+  threads: CM_NUM_PREPROCESS_THREADS
+new_env_keys:
+- CM_DATASET_*
+tags:
+- get
+- dataset
+- speech-recognition
+- librispeech
+- preprocessed
+uid: e9f62fc969d5483a
+variations:
+  '1':
+    adr:
+      original-dataset:
+        tags: _1
+    env:
+      CM_DATASET_SIZE: '1'
+    group: dataset-count
+  '5':
+    adr:
+      original-dataset:
+        tags: _5
+    env:
+      CM_DATASET_SIZE: '5'
+    group: dataset-count
+  '50':
+    adr:
+      original-dataset:
+        tags: _50
+    env:
+      CM_DATASET_SIZE: '50'
+    group: dataset-count
+  '500':
+    adr:
+      original-dataset:
+        tags: _500
+    env:
+      CM_DATASET_SIZE: '500'
+    group: dataset-count
+  calibration:
+    add_deps:
+      original-dataset:
+        tags: _calibration
+    env:
+      CM_DATASET_PATH: <<>>
+    group: dataset-type
+  fp32:
+    default: true
+    env:
+      CM_DATASET_DTYPE: fp32
+    group: dataset-precision
+  full:
+    adr:
+      original-dataset:
+        tags: _full
+    env:
+      CM_DATASET_SIZE: ''
+    group: dataset-count
+  int8:
+    env:
+      CM_DATASET_DTYPE: int8
+    group: dataset-precision
+  validation:
+    add_deps:
+      original-dataset:
+        tags: _validation
+    default: true
+    group: dataset-type
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/customize.py
new file mode 100644
index 000000000..e10df2196
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/customize.py
@@ -0,0 +1,43 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+import shutil
+
+
+def preprocess(i):
+
+    env = i['env']
+
+    print("Using MLCommons Inference source from '" +
+          env['CM_MLPERF_INFERENCE_SOURCE'] + "'")
+    preprocess_src = os.path.join(
+        env['CM_MLPERF_INFERENCE_RNNT_PATH'],
+        'pytorch',
+        'utils',
+        'convert_librispeech.py')
+    cmd = 'cd ' + env['CM_MLPERF_INFERENCE_RNNT_PATH'] + ' && ${CM_PYTHON_BIN_WITH_PATH} ' + preprocess_src + ' --input_dir ' + env['CM_DATASET_LIBRISPEECH_PATH'] + \
+        ' --dest_dir ' + os.path.join(os.getcwd(), 'dev-clean-wav') + \
+        ' --output_json ' + os.path.join(os.getcwd(), 'dev-clean-wav.json')
+    env['CM_TMP_CMD'] = cmd
+
+    return {'return': 0}
+
+
+def postprocess(i):
+    env = i['env']
+    env['CM_DATASET_PREPROCESSED_PATH'] = os.path.join(
+        os.getcwd(), 'dev-clean-wav')
+    env['CM_DATASET_PREPROCESSED_JSON'] = os.path.join(
+        os.getcwd(), 'dev-clean-wav.json')
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/run.sh
new file mode 100644
index 000000000..a9f248c38
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-librispeech/run.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+cmd=${CM_TMP_CMD}
+echo $cmd
+eval $cmd
+test $? -eq 0 || exit $?
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/README-extra.md
new file mode 100644
index 000000000..f5c013f9a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/README-extra.md
@@ -0,0 +1,28 @@
+# Get Preprocessed Open Images Dataset
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) preprocesses the Open Images dataset.
+
+## How To
+```bash
+cm run script --tags=get,openimages,preprocessed,_[VARIATION] --dir=[DIRECTORY] --threads=[NUM_THREADS]
+```
+where
+* `[DIRECTORY]:` is the folder to store the preprocessed dataset. Defaults to the current working directory.
+* `[NUM_THREADS]:` is the number of threads used for preprocessing. Defaults to the number of host CPUs.
+and the supported [VARIATIONS] (comma-separated, each beginning with _) are
+* `[1]:` Preprocess only 1 image
+* `[500]:` Preprocess the first 500 images
+* `[full]:` Preprocess the full dataset
+* `[validation]:` Preprocess the validation dataset
+* `[calibration]:` Preprocess the calibration dataset
+* `[NHWC]:` Preprocess the dataset with the `Channel` component last
+* `[NCHW]:` Preprocess the dataset with the `Channel` component first
+
+## Input Variables coming from Dependencies
+* `[CM_DATASET_PATH]:` Folder path to the Open Images dataset
+* `[CM_DATASET_IMAGES_LIST]:` File path containing the image names
+* `[CM_DATASET_OPENIMAGES_RESIZE]:` Image width to resize to (default 800)
+
+## Exported Variables
+* `[CM_DATASET_PREPROCESSED_PATH]:` Directory where the preprocessed images are stored
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/README.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/README.md
new file mode 100644
index 000000000..39db81041
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-openimages](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-openimages) for the documentation of this CM script.
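To make the variation syntax concrete before the Open Images `_cm.yaml` that follows, here is one plausible invocation combining the `dataset-type`, `dataset-count` and `dataset-layout` groups it defines (the exact variation mix and the output directory are illustrative assumptions):

```bash
# Illustrative example only: preprocess the 500-image validation subset of
# Open Images in NCHW layout; --dir and --threads use the input mappings
# declared in the _cm.yaml below.
cm run script --tags=get,dataset,openimages,preprocessed,_validation,_500,_NCHW \
    --dir=$HOME/openimages_preprocessed --threads=8
```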
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/_cm.yaml new file mode 100644 index 000000000..75c03137c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/_cm.yaml @@ -0,0 +1,232 @@ +alias: get-preprocessed-dataset-openimages +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +default_env: + CM_DATASET: OPENIMAGES + CM_DATASET_CROP_FACTOR: '100.0' + CM_DATASET_DTYPE: fp32 + CM_DATASET_INPUT_SQUARE_SIDE: '800' + CM_DATASET_QUANTIZE: '0' + CM_DATASET_QUANT_OFFSET: '0' + CM_DATASET_QUANT_SCALE: '1' +deps: +- names: + - python3 + - python + tags: get,python3 +- names: + - original-dataset + tags: get,dataset,object-detection,openimages,original +- names: + - inference-src + tags: mlperf,mlcommons,inference,source,src +- names: + - pycocotools + tags: get,generic-python-lib,_pycocotools +- tags: get,generic-python-lib,_opencv-python +- tags: get,generic-python-lib,_pillow +- tags: get,generic-python-lib,_package.ujson +- names: + - numpy + tags: get,generic-python-lib,_numpy +- names: + - numpy + tags: get,generic-python-lib,_numpy +input_mapping: + dir: CM_DATASET_PREPROCESSED_PATH + threads: CM_NUM_PREPROCESS_THREADS +new_env_keys: +- CM_DATASET_* +tags: +- get +- dataset +- openimages +- open-images +- object-detection +- preprocessed +uid: 9842f1be8cba4c7b +variations: + '50': + ad: + original-dataset: + tags: _50 + default: true + env: + CM_DATASET_SIZE: '50' + group: dataset-count + '500': + ad: + original-dataset: + tags: _500 + env: + CM_DATASET_SIZE: '500' + group: dataset-count + NCHW: + default: true + env: + CM_DATASET_DATA_LAYOUT: NCHW + group: dataset-layout + NHWC: + env: + CM_DATASET_DATA_LAYOUT: NHWC + group: dataset-layout + calibration: + ad: + original-dataset: + tags: _calibration + default_variations: + dataset-count: '500' + env: + CM_DATASET_ANNOTATIONS_FILE_PATH: <<>> + CM_DATASET_PATH: <<>> + CM_DATASET_TYPE: calibration + group: dataset-type + custom-annotations: + ad: + original-dataset: + tags: _custom-annotations + group: annotations + default-annotations: + ad: + original-dataset: + tags: _default-annotations + default: true + group: annotations + filter: + ad: + original-dataset: + tags: _filter + filter,calibration: + env: + CM_DATASET_CALIBRATION_FILTER: 'yes' + filter-size.#: + ad: + original-dataset: + tags: _filter-size.# + group: filter-size + for.retinanet.onnx: + default_variations: + dataset-layout: NCHW + interpolation-method: inter.linear + preprocessing-source: generic-preprocessor + env: + CM_DATASET_CONVERT_TO_BGR: '0' + CM_DATASET_CROP_FACTOR: '100.0' + CM_DATASET_GIVEN_CHANNEL_MEANS: 0.485 0.456 0.406 + CM_DATASET_GIVEN_CHANNEL_STDS: 0.229 0.224 0.225 + CM_DATASET_NORMALIZE_DATA: '0' + CM_DATASET_NORMALIZE_LOWER: '0.0' + CM_DATASET_NORMALIZE_UPPER: '1.0' + CM_DATASET_SUBTRACT_MEANS: '1' + CM_ML_MODEL_NAME: retinanet + for.retinanet.onnx,fp32: + env: {} + for.retinanet.onnx,uint8: + env: + CM_DATASET_QUANT_OFFSET: '114' + CM_DATASET_QUANT_SCALE: '0.0186584499' + fp32: + default: true + default_variations: + extension: raw + env: + CM_DATASET_CONVERT_TO_UNSIGNED: '0' + CM_DATASET_DTYPE: fp32 + CM_DATASET_INPUT_DTYPE: fp32 + CM_DATASET_QUANTIZE: '0' + group: dataset-precision + full: + group: dataset-count + full,validation: + ad: + original-dataset: + tags: _full + env: + CM_DATASET_SIZE: '24781' + generic-preprocessor: + deps: + - names: + - torch + - 
pytorch + tags: get,generic-python-lib,_torch + - names: + - torchvision + tags: get,generic-python-lib,_torchvision + env: + CM_DATASET_REFERENCE_PREPROCESSOR: '0' + group: preprocessing-source + prehook_deps: + - tags: get,generic,image-preprocessor + int8: + default_variations: + extension: rgb8 + env: + CM_DATASET_CONVERT_TO_UNSIGNED: '0' + CM_DATASET_DTYPE: int8 + CM_DATASET_INPUT_DTYPE: fp32 + CM_DATASET_QUANTIZE: '1' + group: dataset-precision + inter.area: + env: + CM_DATASET_INTERPOLATION_METHOD: INTER_AREA + group: interpolation-method + inter.linear: + env: + CM_DATASET_INTERPOLATION_METHOD: INTER_LINEAR + group: interpolation-method + mlcommons-reference-preprocessor: + default: true + env: + CM_DATASET_REFERENCE_PREPROCESSOR: '1' + group: preprocessing-source + npy: + env: + CM_DATASET_PREPROCESSED_EXTENSION: npy + group: extension + nvidia: + env: + CM_PREPROCESSING_BY_NVIDIA: 'yes' + quant-offset.#: + const: + CM_DATASET_QUANT_OFFSET: '#' + quant-scale.#: + const: + CM_DATASET_QUANT_SCALE: '#' + raw: + env: + CM_DATASET_PREPROCESSED_EXTENSION: raw + group: extension + rgb32: + env: + CM_DATASET_PREPROCESSED_EXTENSION: rgb32 + group: extension + rgb8: + env: + CM_DATASET_PREPROCESSED_EXTENSION: rgb8 + group: extension + size.#: + ad: + original-dataset: + tags: _size.# + env: + CM_DATASET_SIZE: '#' + group: dataset-count + uint8: + default_variations: + extension: rgb8 + env: + CM_DATASET_CONVERT_TO_UNSIGNED: '1' + CM_DATASET_DTYPE: uint8 + CM_DATASET_INPUT_DTYPE: fp32 + CM_DATASET_QUANTIZE: '1' + group: dataset-precision + validation: + ad: + original-dataset: + tags: _validation + default: true + env: + CM_DATASET_TYPE: validation + group: dataset-type diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/customize.py new file mode 100644 index 000000000..06b53bfb1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/customize.py @@ -0,0 +1,68 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil +import glob + + +def preprocess(i): + + env = i['env'] + + if 'CM_DATASET_PREPROCESSED_PATH' not in env: + env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd() + + if env.get('CM_DATASET_REFERENCE_PREPROCESSOR', "0") == "1": + print("Using MLCommons Inference source from '" + + env['CM_MLPERF_INFERENCE_SOURCE'] + "'") + + if env.get('CM_ML_MODEL_NAME', '') == 'retinanet': + if env.get('CM_DATASET_QUANTIZE', '') == '1': + if env.get('CM_QAIC_MODEL_RETINANET_IMAGE_SCALE', '') != '': + env['CM_DATASET_QUANT_SCALE'] = env['CM_QAIC_MODEL_RETINANET_IMAGE_SCALE'] + if env.get('CM_QAIC_MODEL_RETINANET_IMAGE_OFFSET', '') != '': + env['CM_DATASET_QUANT_OFFSET'] = env['CM_QAIC_MODEL_RETINANET_IMAGE_OFFSET'] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + if env["CM_DATASET_TYPE"] == "validation": + env['CM_DATASET_ANNOTATIONS_DIR_PATH'] = os.path.join( + env['CM_DATASET_PREPROCESSED_PATH'], "annotations") + env['CM_DATASET_ANNOTATIONS_FILE_PATH'] = os.path.join( + 
env['CM_DATASET_ANNOTATIONS_DIR_PATH'], "openimages-mlperf.json") + + # finalize path + preprocessed_path = env['CM_DATASET_PREPROCESSED_PATH'] + preprocessed_images_list = [] + preprocessed_imagenames_list = [] + + match_text = "/*." + env.get("CM_DATASET_PREPROCESSED_EXTENSION", "*") + for filename in sorted(glob.glob(preprocessed_path + match_text)): + preprocessed_images_list.append(filename) + preprocessed_imagenames_list.append(os.path.basename(filename)) + with open("preprocessed_files.txt", "w") as f: + f.write("\n".join(preprocessed_images_list)) + with open("preprocessed_filenames.txt", "w") as f: + f.write("\n".join(preprocessed_imagenames_list)) + + env['CM_DATASET_PREPROCESSED_IMAGES_LIST'] = os.path.join( + os.getcwd(), "preprocessed_files.txt") + env['CM_DATASET_PREPROCESSED_IMAGENAMES_LIST'] = os.path.join( + os.getcwd(), "preprocessed_filenames.txt") + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/nvidia_preprocess.py b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/nvidia_preprocess.py new file mode 100644 index 000000000..cdafac123 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/nvidia_preprocess.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import math +import os +from PIL import Image +import shutil + +from code.common.fix_sys_path import ScopedRestrictedImport +with ScopedRestrictedImport(): + import numpy as np + import torch + from torchvision.transforms import functional as F + +from code.common import logging +from code.common.image_preprocessor import ImagePreprocessor, center_crop, resize_with_aspectratio + + +def preprocess_openimage_for_retinanet( + data_dir, preprocessed_data_dir, formats, overwrite=False, cal_only=False, val_only=False): + def loader(fpath): + loaded_tensor = F.to_tensor(Image.open(fpath).convert("RGB")) + dtype = torch.float32 + device = torch.device("cpu") + image_size = [800, 800] + image_std = [0.229, 0.224, 0.225] + image_mean = [0.485, 0.456, 0.406] + mean = torch.as_tensor(image_mean, dtype=dtype, device=device) + std = torch.as_tensor(image_std, dtype=dtype, device=device) + img_norm = (loaded_tensor - mean[:, None, None]) / std[:, None, None] + img_resize = torch.nn.functional.interpolate(img_norm[None], size=image_size, scale_factor=None, mode='bilinear', + recompute_scale_factor=None, align_corners=False)[0] + img = img_resize.numpy() + return img + + def quantizer(image): + # Dynamic range of image is [-2.64064, 2.64064] based on calibration cache. + # Calculated by: + # np.uint32(int("3caa54fc", base=16)).view(np.dtype('float32')).item() * 127.0 + max_abs = 2.64064 + image_int8 = image.clip(-max_abs, max_abs) / max_abs * 127.0 + return image_int8.astype(dtype=np.int8, order='C') + + preprocessor = ImagePreprocessor(loader, quantizer) + if not val_only: + # Preprocess calibration set. 
FP32 only because calibrator always takes + # FP32 input. + preprocessor.run(os.path.join(data_dir, "open-images-v6-mlperf", "calibration", "train", "data"), + os.path.join( + preprocessed_data_dir, + "open-images-v6-mlperf", + "calibration", + "Retinanet"), + "data_maps/open-images-v6-mlperf/cal_map.txt", ["fp32"], overwrite) + if not cal_only: + # Preprocess validation set. + preprocessor.run(os.path.join(data_dir, "open-images-v6-mlperf", "validation", "data"), + os.path.join( + preprocessed_data_dir, + "open-images-v6-mlperf", + "validation", + "Retinanet"), + "data_maps/open-images-v6-mlperf/val_map.txt", formats, overwrite) + + +def copy_openimage_annotations(data_dir, preprocessed_data_dir): + src_dir = os.path.join(data_dir, "open-images-v6-mlperf/annotations") + dst_dir = os.path.join( + preprocessed_data_dir, + "open-images-v6-mlperf/annotations") + if not os.path.exists(dst_dir): + shutil.copytree(src_dir, dst_dir) + + +def main(): + # Parse arguments to identify the data directory with the input images + # and the output directory for the preprocessed images. + # The data directory is assumed to have the following structure: + # + # └── open-images-v6-mlperf + # ├── annotations + # ├── calibration + # └── validation + # And the output directory will have the following structure: + # + # └── open-images-v6-mlperf + # ├── annotations + # ├── calibration + # │ └── Retinanet + # │ └── fp32 + # └── validation + # └── Retinanet + # └── int8_linear + parser = argparse.ArgumentParser() + parser.add_argument( + "--data_dir", "-d", + help="Specifies the directory containing the input images.", + default="build/data" + ) + parser.add_argument( + "--preprocessed_data_dir", "-o", + help="Specifies the output directory for the preprocessed data.", + default="build/preprocessed_data" + ) + parser.add_argument( + "--formats", "-t", + help="Comma-separated list of formats. Choices: fp32, int8_linear, int8_chw4.", + default="default" + ) + parser.add_argument( + "--overwrite", "-f", + help="Overwrite existing files.", + action="store_true" + ) + parser.add_argument( + "--cal_only", + help="Only preprocess calibration set.", + action="store_true" + ) + parser.add_argument( + "--val_only", + help="Only preprocess validation set.", + action="store_true" + ) + args = parser.parse_args() + data_dir = args.data_dir + preprocessed_data_dir = args.preprocessed_data_dir + formats = args.formats.split(",") + overwrite = args.overwrite + cal_only = args.cal_only + val_only = args.val_only + default_formats = ["int8_linear"] + + # Now, actually preprocess the input images + logging.info( + "Loading and preprocessing images. This might take a while...") + if args.formats == "default": + formats = default_formats + preprocess_openimage_for_retinanet( + data_dir, + preprocessed_data_dir, + formats, + overwrite, + cal_only, + val_only) + + # Copy annotations from data_dir to preprocessed_data_dir.
+ copy_openimage_annotations(data_dir, preprocessed_data_dir) + + logging.info("Preprocessing done.") + + +if __name__ == '__main__': + main() diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/preprocess.py b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/preprocess.py new file mode 100644 index 000000000..c5af0ff04 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/preprocess.py @@ -0,0 +1,50 @@ +import shutil +import os +import sys +import os.path + +mlperf_src_path = os.environ['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'] +python_path = os.path.join(mlperf_src_path, "python") +sys.path.insert(0, python_path) + +# 'dataset' and 'openimages' live in the MLPerf inference 'python' directory +# added to sys.path above, so they must be imported after the insert. +import dataset +import openimages + + +dataset_path = os.environ['CM_DATASET_PATH'] +preprocessed_dir = os.environ.get('CM_DATASET_PREPROCESSED_PATH', os.getcwd()) + +if os.environ.get('CM_DATASET_REFERENCE_PREPROCESSOR', '1') == "0": + # import generic_preprocess + # generic_preprocess.preprocess() + import preprocess_object_detection_dataset as pp + pp.preprocess() +else: + dataset_list = os.environ.get('CM_DATASET_ANNOTATIONS_FILE_PATH', None) + img_format = os.environ.get('CM_DATASET_DATA_LAYOUT', 'NHWC') + count = int(os.environ.get('CM_DATASET_SIZE', 0)) or None + image_width = int(os.environ.get('CM_DATASET_OPENIMAGES_RESIZE', 800)) + threads = os.environ.get('CM_NUM_THREADS', os.cpu_count()) + threads = os.environ.get('CM_NUM_PREPROCESS_THREADS', threads) + name = "openimages-" + str(image_width) + "-retinanet" + + openimages.OpenImages(data_path=dataset_path, + image_list=dataset_list, + name=name, + image_format=img_format, + pre_process=dataset.pre_process_openimages_retinanet, + use_cache=True, + image_size=[image_width, image_width, 3], + count=count, + threads=threads, + preprocessed_dir=preprocessed_dir) + +if os.environ["CM_DATASET_TYPE"] == "validation": + src_path = os.environ.get( + 'CM_DATASET_ANNOTATIONS_DIR_PATH', + os.path.join( + dataset_path, + "annotations")) + dest_path = os.path.join(preprocessed_dir, "annotations") + + if not os.path.exists(dest_path): + shutil.copytree(src_path, dest_path) diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/run.bat new file mode 100644 index 000000000..f3ccd2da7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/run.bat @@ -0,0 +1 @@ +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\preprocess.py diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/run.sh new file mode 100644 index 000000000..aa660b693 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openimages/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/preprocess.py diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
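The OpenImages preprocessing flow above is normally driven through the CM script automation rather than by calling preprocess.py by hand: the variations in _cm.yaml select the env defaults (layout, precision, subset size) that the script reads from os.environ. A minimal sketch of a programmatic invocation via the cmind API follows; the exact variation mix is illustrative, not prescribed by the script:

```python
import cmind

# Preprocess the 500-image OpenImages validation subset for RetinaNet;
# _500, _validation and _NCHW are variations defined in _cm.yaml above.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,dataset,openimages,preprocessed,_500,_validation,_NCHW',
                  'quiet': True})
if r['return'] > 0:
    cmind.error(r)  # prints the error and exits

print(r['new_env'].get('CM_DATASET_PREPROCESSED_PATH', ''))
```

The same call from the command line would be `cm run script --tags=get,dataset,openimages,preprocessed,_500,_validation,_NCHW`.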
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/README.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/README.md new file mode 100644 index 000000000..b14021903 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-openorca](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-openorca) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/_cm.yaml new file mode 100644 index 000000000..e3055db10 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/_cm.yaml @@ -0,0 +1,110 @@ +alias: get-preprocessed-dataset-openorca +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML datasets +category_sort: 8500 +default_env: + CM_DATASET_CALIBRATION: 'no' +deps: +- tags: get,sys-utils-cm +- names: + - python + - python3 + tags: get,python3 +- names: + - openorca-original + - dataset-original + skip_if_env: + CM_DATASET_PREPROCESSED_BY_MLC: + - 'on' + - 'yes' + tags: get,dataset,original,openorca +- force_env_keys: + - CM_GIT_* + names: + - inference-src + skip_if_env: + CM_DATASET_PREPROCESSED_BY_MLC: + - 'on' + - 'yes' + tags: mlperf,inference,source +- names: + - pyarrow + tags: get,generic-python-lib,_package.pyarrow +- names: + - fastparquet + tags: get,generic-python-lib,_package.fastparquet +- names: + - transformers + tags: get,generic-python-lib,_package.transformers +- skip_if_env: + CM_DATASET_PREPROCESSED_BY_MLC: + - 'on' + - 'yes' + tags: get,ml-model,llama2 +docker: + real_run: false +env: + CM_DATASET: OPENORCA +tags: +- get +- dataset +- openorca +- language-processing +- preprocessed +uid: 5614c39cb1564d72 +variations: + '60': + ad: + dataset-original: + tags: _60 + group: size + calibration: + base: + - mlcommons + env: + CM_DATASET_CALIBRATION: 'yes' + group: dataset-type + new_env_keys: + - CM_DATASET_CALIBRATION_PATH + - CM_DATASET_OPENORCA_CALIBRATION_PATH + full: + ad: + dataset-original: + tags: _full + default: true + group: size + mlc: + alias: mlcommons + mlcommons: + deps: + - env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_OPENORCA_PREPROCESSED_ROOT + CM_EXTRACT_FINAL_ENV_NAME: CM_OPENORCA_PREPROCESSED_ROOT + CM_EXTRACT_TO_FOLDER: openorca-preprocessed + CM_RCLONE_CONFIG_NAME: mlc-inference + extra_cache_tags: openorca,preprocessed,dataset + force_cache: true + names: + - dae + tags: download-and-extract,_rclone + update_tags_from_env_with_prefix: + _url.: + - CM_RCLONE_URL + env: + CM_DATASET_PREPROCESSED_BY_MLC: 'yes' + CM_RCLONE_URL: mlc-inference:mlcommons-inference-wg-public/open_orca + size.#: + ad: + dataset-original: + tags: _size.# + group: size + validation: + default: true + env: + CM_DATASET_CALIBRATION: 'no' + group: dataset-type + new_env_keys: + - CM_DATASET_PREPROCESSED_PATH + - CM_DATASET_OPENORCA_PREPROCESSED_PATH diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/customize.py new file mode 100644 index 000000000..203742534 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/customize.py @@ -0,0 +1,66 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md 
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + env = i['env'] + + if str(env.get('CM_DATASET_PREPROCESSED_BY_MLC', '') + ).lower() in ["yes", "1", "true"]: + run_dir = os.getcwd() + if env.get('CM_DATASET_CALIBRATION', '') == "yes": + env['CM_DATASET_CALIBRATION_PATH'] = os.path.join( + env['CM_OPENORCA_PREPROCESSED_ROOT'], + "open_orca_gpt4_tokenized_llama.calibration_1000.pkl.gz") + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DATASET_CALIBRATION_PATH'] + env['CM_DATASET_OPENORCA_CALIBRATION_PATH'] = env['CM_DATASET_CALIBRATION_PATH'] + else: + env['CM_DATASET_PREPROCESSED_PATH'] = os.path.join( + env['CM_OPENORCA_PREPROCESSED_ROOT'], + "open_orca_gpt4_tokenized_llama.sampled_24576.pkl.gz") + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_DATASET_PREPROCESSED_PATH'] + env['CM_DATASET_OPENORCA_PREPROCESSED_PATH'] = env['CM_DATASET_PREPROCESSED_PATH'] + # run_cmd = f"gunzip -k {env['CM_DATASET_PREPROCESSED_PATH']}" + run_cmd = '' + else: + inference_src = env['CM_MLPERF_INFERENCE_SOURCE'] + run_dir = os.path.join(inference_src, 'language', 'llama2-70b') + model_dir = env['CM_ML_MODEL_PATH'] + if env.get('CM_DATASET_CALIBRATION', '') == "yes": + return {'return': 1, 'error': 'No raw preprocessing information is available for openorca calibration. Please use _mlcommons variation to use the MLCommons shared calibration dataset'} + else: + env['CM_DATASET_PREPROCESSED_PATH'] = os.path.join( + os.path.join( + os.getcwd(), + "processed-openorca", + 'open_orca_gpt4_tokenized_llama.sampled_' + + env['CM_DATASET_SIZE'] + + '.pkl')) + run_cmd = env['CM_PYTHON_BIN_WITH_PATH'] + ' processorca.py --dataset_pq_path=' + env['CM_DATASET_OPENORCA_PARQUET'] + ' --model_dir=' + model_dir + \ + ' --seqlen_limit=2048 --export_dir=' + \ + os.path.join(os.getcwd(), "processed-openorca") + \ + ' --num_total_samples=' + env['CM_DATASET_SIZE'] + + env['CM_RUN_DIR'] = run_dir + env['CM_RUN_CMD'] = run_cmd + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/run.sh new file mode 100644 index 000000000..38fe6d64b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-openorca/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +cd ${CM_RUN_DIR} +echo "${CM_RUN_CMD}" +eval "${CM_RUN_CMD}" diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
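For OpenOrca, customize.py above implements two paths: the _mlcommons variation downloads the already-tokenized pickle via rclone, while the default path builds CM_RUN_CMD around processorca.py from the inference sources. A sketch of the download path, assuming the rclone remote configured by get-rclone-config is reachable:

```python
import cmind

# Fetch the MLCommons-hosted, pre-tokenized OpenOrca subset instead of
# running processorca.py locally (the _mlcommons variation above).
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,dataset,openorca,preprocessed,_mlcommons',
                  'quiet': True})
if r['return'] > 0:
    cmind.error(r)

print(r['new_env']['CM_DATASET_OPENORCA_PREPROCESSED_PATH'])
```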
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/README.md b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/README.md new file mode 100644 index 000000000..b2c4eee56 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-squad](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-datasets/get-preprocessed-dataset-squad) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/_cm.yaml new file mode 100644 index 000000000..cff348c26 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/_cm.yaml @@ -0,0 +1,93 @@ +uid: 7cd1d9b7e8af4788 +alias: get-preprocessed-dataset-squad + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: AI/ML datasets + +deps: + - tags: get,python3 + names: + - python + - python3 + - tags: get,mlperf,inference,src + names: + - inference-src + - tags: get,squad,dataset,original + names: + - squad-dataset + - tags: get,squad,vocab + names: + - squad-vocab + - tags: get,generic-python-lib,_package.tokenization + - tags: get,generic-python-lib,_package.transformers + - tags: get,generic-python-lib,_package.tensorflow + +env: + CM_DATASET_MAX_QUERY_LENGTH: 64 + +new_env_keys: + - CM_DATASET_SQUAD_TOKENIZED_* + +tags: +- get +- dataset +- preprocessed +- tokenized +- squad + +variations: + calib1: + group: calibration-set + env: + CM_DATASET_SQUAD_CALIBRATION_SET: one + calib2: + group: calibration-set + env: + CM_DATASET_SQUAD_CALIBRATION_SET: two + no-calib: + group: calibration-set + default: true + env: + CM_DATASET_SQUAD_CALIBRATION_SET: '' + raw: + group: raw + default: true + env: + CM_DATASET_RAW: "yes" + pickle: + group: raw + env: + CM_DATASET_RAW: "no" + seq-length.#: + group: seq-length + env: + CM_DATASET_MAX_SEQ_LENGTH: "#" + seq-length.384: + group: seq-length + default: true + env: + CM_DATASET_MAX_SEQ_LENGTH: 384 + doc-stride.#: + group: doc-stride + env: + CM_DATASET_DOC_STRIDE: "#" + doc-stride.128: + group: doc-stride + default: true + env: + CM_DATASET_DOC_STRIDE: 128 + packed: + group: packing + env: + CM_DATASET_SQUAD_PACKED: 'yes' + deps: + - tags: get,preprocessed,squad,_pickle + env: + CM_DATASET_SQUAD_PACKED: '' + inherit_variation_tags: true + skip_inherit_variation_groups: + - packing diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/customize.py new file mode 100644 index 000000000..9a4c988d1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/customize.py @@ -0,0 +1,108 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') 
+ + if env.get('CM_DATASET_SQUAD_CALIBRATION_SET', '') == "one": + env['DATASET_CALIBRATION_FILE'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + 'calibration', + 'SQuAD-v1.1', + 'bert_calibration_features.txt') + env['DATASET_CALIBRATION_ID'] = 1 + elif env.get('CM_DATASET_SQUAD_CALIBRATION_SET', '') == "two": + env['DATASET_CALIBRATION_FILE'] = os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + 'calibration', + 'SQuAD-v1.1', + 'bert_calibration_qas_ids.txt') + env['DATASET_CALIBRATION_ID'] = 2 + else: + env['DATASET_CALIBRATION_FILE'] = "''" + env['DATASET_CALIBRATION_ID'] = 0 + + env['CK_ENV_MLPERF_INFERENCE'] = env['CM_MLPERF_INFERENCE_SOURCE'] + + if env.get('CM_DATASET_SQUAD_PACKED', '') == "yes": + i['run_script_input']['script_name'] = "run-packed" + if env.get('+PYTHONPATH', '') == '': + env['+PYTHONPATH'] = [] + + env['+PYTHONPATH'].append(env['CM_MLPERF_INFERENCE_BERT_PATH']) + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + cur = os.getcwd() + + if env.get('CM_DATASET_SQUAD_PACKED', '') != "yes": + env['CM_DATASET_SQUAD_TOKENIZED_ROOT'] = cur + if env.get('CM_DATASET_RAW', '') == "yes": + env['CM_DATASET_SQUAD_TOKENIZED_INPUT_IDS'] = os.path.join( + cur, 'bert_tokenized_squad_v1_1_input_ids.raw') + env['CM_DATASET_SQUAD_TOKENIZED_SEGMENT_IDS'] = os.path.join( + cur, 'bert_tokenized_squad_v1_1_segment_ids.raw') + env['CM_DATASET_SQUAD_TOKENIZED_INPUT_MASK'] = os.path.join( + cur, 'bert_tokenized_squad_v1_1_input_mask.raw') + else: + env['CM_DATASET_SQUAD_TOKENIZED_PICKLE_FILE'] = os.path.join( + cur, 'bert_tokenized_squad_v1_1.pickle') + + env['CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH'] = env['CM_DATASET_MAX_SEQ_LENGTH'] + env['CM_DATASET_SQUAD_TOKENIZED_DOC_STRIDE'] = env['CM_DATASET_DOC_STRIDE'] + env['CM_DATASET_SQUAD_TOKENIZED_MAX_QUERY_LENGTH'] = env['CM_DATASET_MAX_QUERY_LENGTH'] + + else: + with open("packed_filenames.txt", "w") as f: + for dirname in os.listdir(cur): + if os.path.isdir(dirname) and not dirname.startswith("_"): + f.write( + os.path.join( + cur, + dirname, + "input_ids.raw") + + "," + + os.path.join( + cur, + dirname, + "input_mask.raw") + + "," + + os.path.join( + cur, + dirname, + "segment_ids.raw") + + "," + + os.path.join( + cur, + dirname, + "input_position_ids.raw") + + "\n") + env['CM_DATASET_SQUAD_TOKENIZED_PACKED_FILENAMES_FILE'] = os.path.join( + cur, "packed_filenames.txt") + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/run-packed.sh b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/run-packed.sh new file mode 100644 index 000000000..776c35142 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/run-packed.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... 
+# run "$CM_RUN_CMD" +CUR=$PWD + +run "wget --no-check-certificate -nc https://raw.githubusercontent.com/graphcore/examples/v3.2.0/tutorials/blogs_code/packedBERT/spfhp.py" +run "wget --no-check-certificate -nc https://raw.githubusercontent.com/arjunsuresh/ck-qaic/main/package/model-qaic-calibrate-bert/pack.py" +run "${CM_PYTHON_BIN_WITH_PATH} pack.py ${CM_DATASET_SQUAD_TOKENIZED_PICKLE_FILE} ./ ${CM_DATASET_MAX_SEQ_LENGTH}" diff --git a/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/run.sh new file mode 100644 index 000000000..94b008eac --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-preprocessed-dataset-squad/run.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" +CUR=$PWD +run "wget --no-check-certificate -nc https://raw.githubusercontent.com/krai/ck-mlperf/master/package/dataset-squad-tokenized_for_bert/tokenize_and_pack.py" + +run "${CM_PYTHON_BIN_WITH_PATH} tokenize_and_pack.py \ + ${CM_DATASET_SQUAD_VAL_PATH} \ + ${CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH} \ + ${CUR}/bert_tokenized_squad_v1_1 \ + ${CM_DATASET_MAX_SEQ_LENGTH} \ + ${CM_DATASET_MAX_QUERY_LENGTH} \ + ${CM_DATASET_DOC_STRIDE} \ + ${CM_DATASET_RAW} \ + ${DATASET_CALIBRATION_FILE} \ + ${DATASET_CALIBRATION_ID}" + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-python3/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-python3/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-python3/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
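The SQuAD scripts above chain tokenization (run.sh) with optional sequence packing (run-packed.sh), and the seq-length, doc-stride and packing knobs all arrive as variations. A sketch of a typical call, with a tag combination chosen purely for illustration:

```python
import cmind

# Tokenize SQuAD v1.1 for BERT with the default 384/128 geometry and
# keep the raw outputs (see the _raw variation in _cm.yaml above).
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,dataset,preprocessed,tokenized,squad,_seq-length.384,_doc-stride.128,_raw',
                  'quiet': True})
if r['return'] > 0:
    cmind.error(r)

for key, value in r['new_env'].items():
    if key.startswith('CM_DATASET_SQUAD_TOKENIZED'):
        print(f'{key}={value}')
```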
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-python3/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-python3/README-extra.md new file mode 100644 index 000000000..fcf689078 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-python3/README-extra.md @@ -0,0 +1,70 @@ +# Detect or install python + +## New ENV + +* CM_PYTHON_BIN +* CM_PYTHON_BIN_WITH_PATH +* CM_PYTHON_VERSION +* CM_PYTHON_CACHE_TAGS + +* PATH +* LD_LIBRARY_PATH +* C_INCLUDE_PATH + +## New state + + +# CLI + +## Default +```bash +cm run script "get python" +``` +or +```bash +cm run script --tags=get,python +``` + +## Version + +```bash +cm run script "get python" --version=3.10.6 +``` + +## Version min +```bash +cm run script "get python" --version_min=3.9 +``` + +## Version max +```bash +cm run script "get python" --version_max=3.9.999 --version_max_usable=3.9.12 +``` + +## Detect python3 in non-standard path +```bash +cm run script "get python" --path={directory with python3} +``` + +### Detect python with non-standard name +```bash +cm run script "get python" --input={full path to python} +``` + +## Force new detection even if python is already found and cached +```bash +cm run script "get python" --new +``` + +## Test + +```bash +cm run script "print python hello-world" +``` + +## Reproducibility matrix + +*Test detection and installation on different platforms:* + +* Windows, Linux, MacOS + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-python3/README.md b/cmx4mlops/cmx4mlops/repo/script/get-python3/README.md new file mode 100644 index 000000000..2c99444fd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-python3/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Python-automation/get-python3](https://docs.mlcommons.org/cm4mlops/scripts/Python-automation/get-python3) for the documentation of this CM script. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-python3/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-python3/_cm.yaml new file mode 100644 index 000000000..57bc286e1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-python3/_cm.yaml @@ -0,0 +1,54 @@ +alias: get-python3 +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Python automation +clean_files: [] +extra_cache_tags_from_env: +- env: CM_PYTHON_INSTALL_CACHE_TAGS + prefix: python- +new_env_keys: +- CM_PYTHON_* +- +LD_LIBRARY_PATH +- +C_INCLUDE_PATH +- +PATH +new_state_keys: +- script_prefix +prehook_deps: +- enable_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + inherit_variation_tags: true + reuse_version: true + tags: install,python,src +print_env_at_the_end: + CM_PYTHON_BIN_WITH_PATH: Path to Python + CM_PYTHON_VERSION: Python version +tags: +- get +- python +- python3 +- get-python +- get-python3 +uid: d0b5dd74373f4a62 +variations: + conda.#: + adr: + pip-package: + tags: _conda.# + deps: + - names: + - conda-package + - conda-python + tags: get,generic,conda-package,_name.#,_package.python + env: + CM_PYTHON_CONDA: 'yes' + CM_PYTHON_INSTALL_CACHE_TAGS: _conda.# + custom-path.#: + env: + CM_PYTHON_BIN_WITH_PATH: '#' + lto: {} + optimized: {} + shared: {} + with-custom-ssl: {} + with-ssl: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-python3/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-python3/customize.py new file mode 100644 index 000000000..9f715f24c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-python3/customize.py @@ -0,0 +1,154 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if env.get('CM_PYTHON_CONDA', '') == 'yes' and env.get( + 'CM_CONDA_BIN_PATH', '') != '': + env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join( + env['CM_CONDA_BIN_PATH'], "python") + + recursion_spaces = i['recursion_spaces'] + + # we need to understand whether this script is called first and CM_PYTHON_BIN_WITH_PATH is empty + # then we should search for related artifacts (python in our case) + # or this script is called after install-python* and CM_PYTHON_BIN_WITH_PATH is set there + # then we do not search for an artifact (python) but pick it up from the + # installation + + if 'CM_PYTHON_BIN_WITH_PATH' not in env: + # file_name = 'python.exe' if os_info['platform'] == 'windows' else 'python[0-9|\.]*$' + file_name = 'python.exe' if os_info['platform'] == 'windows' else 'python3' + extra_paths = {"include": "+C_INCLUDE_PATH", "lib": "+LD_LIBRARY_PATH"} + + r = i['automation'].find_artifact({'file_name': file_name, + 'default_path_env_key': 'PATH', + 'env': env, + 'os_info': os_info, + # this key defines env key with + # paths where to find an artifact + 'detect_version': True, + # the next key is used in run.sh to + # detect python version + 'env_path_key': 'CM_PYTHON_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': i['recursion_spaces'], + 'extra_paths': extra_paths + }) + if r['return'] > 0: + if r['return'] == 16 and os_info['platform'] != 'windows': 
+ # If artifact is not found and we are not on windows + # we should try to install python from src + # in prehook_deps + env['CM_REQUIRE_INSTALL'] = "yes" + + return {'return': 0} + else: + return r + + return {'return': 0} + + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'Python\s*([\d.]+)', + 'group_number': 1, + 'env_key': 'CM_PYTHON_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} + + +def postprocess(i): + + env = i['env'] + os_info = i['os_info'] + + r = detect_version(i) + if r['return'] > 0: + return r + + version = r['version'] + + found_file_path = env['CM_PYTHON_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + + env['CM_PYTHON_BIN'] = os.path.basename(found_file_path) + env['CM_PYTHON_BIN_PATH'] = os.path.dirname(found_file_path) + + # Save tags that can be used to specialize further dependencies (such as + # python packages) + tags = 'version-' + version + + add_extra_cache_tags = [] + + extra_tags = env.get('CM_EXTRA_CACHE_TAGS', '') + if extra_tags != '': + tags += ',' + extra_tags + + # Check if called from virtual env installer + from_virtual = True if 'virtual' in extra_tags.split(',') else False + + if not from_virtual: + tags += ',non-virtual' + + env['CM_PYTHON_CACHE_TAGS'] = tags + + add_extra_cache_tags = tags.split(',') + + # Check if need to add path, include and lib to env + # (if not in default paths) + default_path_list = i['automation'].get_default_path_list(i) + found_path_root = os.path.dirname(found_path) + + if from_virtual: + # Clean PATH (it will be in activate script) + # but keep LD_LIBRARY_PATH and C_INCLUDE_PATH from the native python + for k in ['+PATH']: + if k in env: + del (env[k]) + + elif os_info['platform'] == 'windows': + extra_path = os.path.join(found_path, 'Scripts') + + if extra_path not in default_path_list and extra_path + \ + os.sep not in default_path_list: + paths = env.get('+PATH', []) + if extra_path not in paths: + paths.append(extra_path) + env['+PATH'] = paths + + version_split = version.split(".") + python_major_version = version_split[0] + python_minor_version = version_split[1] + if len(version_split) > 2: + python_patch_version = version_split[2] + + env['CM_PYTHON_MAJOR_VERSION'] = python_major_version + env['CM_PYTHON_MINOR_VERSION'] = python_minor_version + env['CM_PYTHON_PATCH_VERSION'] = python_patch_version + + return {'return': 0, 'version': version, + 'add_extra_cache_tags': add_extra_cache_tags} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-python3/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-python3/run.bat new file mode 100644 index 000000000..515d6849b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-python3/run.bat @@ -0,0 +1,2 @@ +%CM_PYTHON_BIN_WITH_PATH% --version > tmp-ver.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/get-python3/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-python3/run.sh new file mode 100644 index 000000000..28cf477f2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-python3/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} --version > tmp-ver.out 2>&1 +test $? -eq 0 || exit 1 + +#PYTHON_BIN_PATH="${python_bin%/*}" +# +#if [[ ! 
-f ${PYTHON_BIN_PATH}/python ]]; then +# echo "Creating softlink of python to python3" +# cmd="sudo ln -s ${python_bin} ${PYTHON_BIN_PATH}/python" +# echo $cmd +# eval $cmd +#fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/README.md b/cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/README.md new file mode 100644 index 000000000..005d94fa4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/get-qaic-apps-sdk](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/get-qaic-apps-sdk) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/_cm.yaml new file mode 100644 index 000000000..f448ee4be --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/_cm.yaml @@ -0,0 +1,30 @@ +alias: get-qaic-apps-sdk +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML frameworks +deps: [] +input_description: {} +input_mapping: {} +new_env_keys: +- +PATH +- CM_QAIC_EXEC_PATH +- +C_INCLUDE_PATH +- +CPLUS_INCLUDE_PATH +- +LD_LIBRARY_PATH +- +DYLD_FALLBACK_LIBRARY_PATH +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- get +- detect +- qaic +- apps +- sdk +- apps-sdk +- qaic-apps-sdk +uid: 0a9e206af6764da9 +variations: {} +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/customize.py new file mode 100644 index 000000000..30e424a25 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-qaic-apps-sdk/customize.py @@ -0,0 +1,128 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import xml.etree.ElementTree as et + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + apps_sdk_path = None + + if env.get('CM_INPUT', '').strip() != '': + path = env['CM_INPUT'] + if os.path.exists(os.path.join(path, "exec", "qaic-exec")): + apps_sdk_path = path + else: + return { + 'return': 1, 'error': 'exec/qaic-exec not found in the input path (--input)'} + else: + path = "/opt/qti-aic/" + if os.path.exists(os.path.join(path, "exec", "qaic-exec")): + apps_sdk_path = path + + if not apps_sdk_path: + return {'return': 1, + 'error': f'qaic-exec not found in the default path: {path}'} + + env['CM_QAIC_APPS_SDK_PATH'] = path + env['CM_QAIC_EXEC_PATH'] = os.path.join(path, "exec", "qaic-exec") + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 
0} + + +def detect_version(i): + + env = i['env'] + sdk_path = env['CM_QAIC_APPS_SDK_PATH'] + version = None + version_xml_path = os.path.join(sdk_path, "versions", "apps.xml") + version_info = et.parse(version_xml_path) + + versions = version_info.getroot() + build_id = None + + for child1 in versions: + if child1.tag == "ci_build": + for child2 in child1: + if child2.tag == "base_version": + version = child2.text + if child2.tag == "build_id": + build_id = child2.text + if build_id: + version = version + "." + build_id + + if not version: + return {'return': 1, 'error': f'qaic apps sdk version info not found'} + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} + + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] > 0: + return r + + version = r['version'] + + if "+PATH" not in env: + env["+PATH"] = [] + + env['+PATH'].append(os.path.dirname(env['CM_QAIC_EXEC_PATH'])) + + paths = [ + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ] + + for key in paths: + env[key] = [] + + include_paths = [] + lib_paths = [] + + inc_path = os.path.join(env['CM_QAIC_APPS_SDK_PATH'], "dev", "inc") + if os.path.exists(inc_path): + include_paths.append(inc_path) + + for inc_path in include_paths: + env['+C_INCLUDE_PATH'].append(inc_path) + env['+CPLUS_INCLUDE_PATH'].append(inc_path) + + lib_path = os.path.join( + env['CM_QAIC_APPS_SDK_PATH'], + "dev", + "lib", + "x86_64") + if os.path.exists(lib_path): + lib_paths.append(lib_path) + + for lib_path in lib_paths: + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/README.md b/cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/README.md new file mode 100644 index 000000000..c9cca8baf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/get-qaic-platform-sdk](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/get-qaic-platform-sdk) for the documentation of this CM script. 
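detect_version above stitches the SDK version together from the base_version and build_id elements of versions/apps.xml. A self-contained sketch of the same traversal against a synthetic file (the XML layout mirrors what the code expects; it is not taken from official SDK documentation):

```python
import xml.etree.ElementTree as et

# Synthetic stand-in for /opt/qti-aic/versions/apps.xml
xml_text = """<versions>
  <ci_build>
    <base_version>1.10</base_version>
    <build_id>46</build_id>
  </ci_build>
</versions>"""

root = et.fromstring(xml_text)
version, build_id = None, None
for child1 in root:
    if child1.tag == 'ci_build':
        for child2 in child1:
            if child2.tag == 'base_version':
                version = child2.text
            if child2.tag == 'build_id':
                build_id = child2.text

if version and build_id:
    version = version + '.' + build_id
print(version)  # -> 1.10.46
```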
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/_cm.yaml new file mode 100644 index 000000000..d40a7d624 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/_cm.yaml @@ -0,0 +1,32 @@ +alias: get-qaic-platform-sdk +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML frameworks +deps: +- tags: detect,os +input_description: {} +input_mapping: {} +new_env_keys: +- +PATH +- CM_QAIC_RUNNER_PATH +- CM_QAIC_TOOLS_PATH +- +C_INCLUDE_PATH +- +CPLUS_INCLUDE_PATH +- +LD_LIBRARY_PATH +- +DYLD_FALLBACK_LIBRARY_PATH +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- get +- detect +- qaic +- platform +- sdk +- platform-sdk +- qaic-platform-sdk +uid: a60f86918dc9457d +variations: {} +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/customize.py new file mode 100644 index 000000000..aaeb6e6cb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-qaic-platform-sdk/customize.py @@ -0,0 +1,129 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import xml.etree.ElementTree as et + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + platform_sdk_path = None + + if env.get('CM_INPUT', '').strip() != '': + path = env['CM_INPUT'] + if os.path.exists(os.path.join(path, "exec", "qaic-runner")): + platform_sdk_path = path + else: + return { + 'return': 1, 'error': 'exec/qaic-runner not found in the input path (--input)'} + else: + path = "/opt/qti-aic/" + if os.path.exists(os.path.join(path, "exec", "qaic-runner")): + platform_sdk_path = path + + if not platform_sdk_path: + return {'return': 1, + 'error': f'qaic-runner not found in the default path: {path}'} + + env['CM_QAIC_PLATFORM_SDK_PATH'] = path + env['CM_QAIC_RUNNER_PATH'] = os.path.join(path, "exec", "qaic-runner") + env['CM_QAIC_TOOLS_PATH'] = os.path.join(path, "tools") + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def detect_version(i): + + env = i['env'] + sdk_path = env['CM_QAIC_PLATFORM_SDK_PATH'] + version = None + version_xml_path = os.path.join(sdk_path, "versions", "platform.xml") + version_info = et.parse(version_xml_path) + + versions = version_info.getroot() + build_id = None + + for child1 in versions: + if child1.tag == "ci_build": + for child2 in child1: + if child2.tag == "base_version": + version = child2.text + if child2.tag == "build_id": + build_id = child2.text + if build_id: + version = version + "." 
+ build_id + + if not version: + return {'return': 1, 'error': f'qaic platform sdk version info not found'} + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} + + +def postprocess(i): + + env = i['env'] + r = detect_version(i) + if r['return'] > 0: + return r + + version = r['version'] + + if "+PATH" not in env: + env["+PATH"] = [] + + env['+PATH'].append(os.path.dirname(env['CM_QAIC_RUNNER_PATH'])) + + paths = [ + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ] + + for key in paths: + env[key] = [] + + include_paths = [] + lib_paths = [] + + inc_path = os.path.join(env['CM_QAIC_PLATFORM_SDK_PATH'], "dev", "inc") + if os.path.exists(inc_path): + include_paths.append(inc_path) + + for inc_path in include_paths: + env['+C_INCLUDE_PATH'].append(inc_path) + env['+CPLUS_INCLUDE_PATH'].append(inc_path) + + lib_path = os.path.join( + env['CM_QAIC_PLATFORM_SDK_PATH'], + "dev", + "lib", + env['CM_HOST_PLATFORM_FLAVOR']) + if os.path.exists(lib_path): + lib_paths.append(lib_path) + + for lib_path in lib_paths: + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/README.md b/cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/README.md new file mode 100644 index 000000000..6c692ac5e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/get-qaic-software-kit](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/get-qaic-software-kit) for the documentation of this CM script. 
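Both QAIC SDK detectors publish path fragments through list-valued keys such as +PATH and +LD_LIBRARY_PATH, which the script automation later merges into the real shell environment. A simplified illustration of that merge, assuming prepend semantics (the authoritative logic lives in the script automation, not here):

```python
import os

def fold_list_env(env):
    # Join CM-style '+KEY' lists into plain 'KEY' strings, prepending
    # them to whatever the process environment already holds.
    folded = {}
    for key, value in env.items():
        if key.startswith('+') and isinstance(value, list):
            name = key[1:]
            existing = os.environ.get(name, '')
            parts = value + ([existing] if existing else [])
            folded[name] = os.pathsep.join(parts)
        else:
            folded[key] = value
    return folded

sdk_env = {'+PATH': ['/opt/qti-aic/exec'],
           '+LD_LIBRARY_PATH': ['/opt/qti-aic/dev/lib/x86_64'],
           'CM_QAIC_RUNNER_PATH': '/opt/qti-aic/exec/qaic-runner'}
print(fold_list_env(sdk_env)['PATH'])
```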
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/_cm.yaml new file mode 100644 index 000000000..772fc8b1f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/_cm.yaml @@ -0,0 +1,56 @@ +alias: get-qaic-software-kit +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML frameworks +deps: +- extra_cache_tags: qaic-software-git-repo,qaic-software,qaic,software,kit + names: + - qaic-software-git-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL +- tags: get,generic,sys-util,_libudev-dev +- tags: get,generic,sys-util,_libpci-dev +- tags: get,google,test +- names: + - cmake + tags: get,cmake + version_min: 3.24.0 +- names: + - compiler + tags: get,compiler +input_description: {} +input_mapping: {} +new_env_keys: +- +PATH +- CM_QAIC_SOFTWARE_KIT_PATH +- CM_QAIC_RUNNER_PATH +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- get +- qaic +- software +- kit +- qaic-software-kit +uid: 3344655922694bbb +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + repo.#: + env: + CM_GIT_URL: '#' + group: repo-source + repo.quic: + default: true + env: + CM_GIT_URL: https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100 + group: repo-source +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/customize.py new file mode 100644 index 000000000..76d9e8f58 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/customize.py @@ -0,0 +1,77 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + env['CM_QAIC_SOFTWARE_KIT_PATH'] = env['CM_GIT_CHECKOUT_PATH'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('+ CXXFLAGS', []) == []: + env['+ CXXFLAGS'] = [] + if env.get('+ CFLAGS', []) == []: + env['+ CFLAGS'] = [] + + if env.get('CM_LLVM_CLANG_VERSION', '') != '': + clang_version_split = env['CM_LLVM_CLANG_VERSION'].split(".") + clang_major_version = int(clang_version_split[0]) + + if clang_major_version >= 17: + env['+ CFLAGS'].append("-Wno-error=c2x-extensions") + + if clang_major_version >= 16: + env['+ CFLAGS'].append("-Wno-error=unused-but-set-variable") + env['+ CXXFLAGS'].append("-Wno-error=unused-but-set-variable") + + if clang_major_version >= 13: + env['+ CFLAGS'].append("-Wno-error=unused-const-variable") + env['+ CFLAGS'].append("-Wno-error=unused-but-set-variable") + env['+ CFLAGS'].append("-Wno-error=strict-prototypes") + env['+ CFLAGS'].append("-Wno-error=unused-variable") + env['+ CXXFLAGS'].append("-Wno-error=unused-const-variable") + env['+ CXXFLAGS'].append("-Wno-error=unused-variable") + env['+ CXXFLAGS'].append("-Wno-error=unused-private-field") + env['+ CXXFLAGS'].append("-Wno-error=unused-result") + env['+ CXXFLAGS'].append("-Wno-error=string-concatenation") + 
env['+ CXXFLAGS'].append("-Wno-error=infinite-recursion") + + if clang_major_version == 12: + env['+ CXXFLAGS'].append("-Wno-error=unknown-warning-option") + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + env['CM_QAIC_RUNNER_PATH'] = os.path.join( + env['CM_QAIC_SOFTWARE_KIT_PATH'], "build", "utils", "qaic-runner") + + if '+PATH' not in env: + env['+PATH'] = [] + + env['+PATH'].append(env['CM_QAIC_RUNNER_PATH']) + env['CM_QAIC_RUNNER_PATH'] = os.path.join( + env['CM_QAIC_RUNNER_PATH'], "qaic-runner") + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/run.sh new file mode 100644 index 000000000..a00122a35 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-qaic-software-kit/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +function cmake() { +${CM_CMAKE_BIN_WITH_PATH} $@ +} + +export CC=${CM_C_COMPILER_WITH_PATH} +export CXX=${CM_CXX_COMPILER_WITH_PATH} + +export -f cmake +cd ${CM_QAIC_SOFTWARE_KIT_PATH} +rm -rf build +./bootstrap.sh +test $? -eq 0 || exit $? +cd build +../scripts/build.sh -b Release +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone-config/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-rclone-config/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone-config/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone-config/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-rclone-config/_cm.yaml new file mode 100644 index 000000000..a8fa32218 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone-config/_cm.yaml @@ -0,0 +1,13 @@ +alias: get-rclone-config +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false #keeping cache off as rerunning the command is safe +can_force_cache: true +tags: +- get +- rclone-config +uid: 6c59ddbc6cd046e3 +variations: + mlc-inference: + env: + CM_RCLONE_CONFIG_CMD: 'rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com' diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone-config/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-rclone-config/customize.py new file mode 100644 index 000000000..a556f4b97 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone-config/customize.py @@ -0,0 +1,38 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_RCLONE_CONFIG_CMD', '') != '': + env['CM_RUN_CMD'] = env['CM_RCLONE_CONFIG_CMD'] + + return {'return': 0} + + +def postprocess(i): 
+ + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone-config/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-rclone-config/run.bat new file mode 100644 index 000000000..648302ca7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone-config/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone-config/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-rclone-config/run.sh new file mode 100644 index 000000000..4c23c380e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone-config/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + +echo "Running: " +echo "${CM_RUN_CMD}" +echo "" + +if [[ ${CM_FAKE_RUN} != "yes" ]]; then + eval "${CM_RUN_CMD}" + test $? -eq 0 || exit 1 +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-rclone/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone/README.md b/cmx4mlops/cmx4mlops/repo/script/get-rclone/README.md new file mode 100644 index 000000000..a7e311950 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-rclone](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-rclone) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-rclone/_cm.yaml new file mode 100644 index 000000000..e2d8e02fc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone/_cm.yaml @@ -0,0 +1,30 @@ +alias: get-rclone +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_version: 1.65.2 +deps: +- tags: detect,os +- enable_if_env: + CM_HOST_OS_TYPE: + - windows + tags: get,sys-utils-min +new_env_keys: +- CM_RCLONE_CACHE_TAGS +- CM_RCLONE_BIN_WITH_PATH +- CM_RCLONE_VERSION +- +PATH +tags: +- get +- rclone +uid: 22ffb43c49c9419e +variations: + gdrive: + env: + CM_RCLONE_GDRIVE: 'yes' + system: + env: + CM_RCLONE_SYSTEM: 'yes' + warnings: + - This CM script will install rclone using sudo/brew! 
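get-rclone either detects an existing binary or downloads a pinned release (default_version: 1.65.2); only the _system variation escalates to sudo/brew, as the warning above notes. A sketch of a user-local detect-or-install via the cmind API:

```python
import cmind

# Detect or fetch a user-local rclone; no sudo is involved unless the
# _system variation is requested explicitly.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,rclone',
                  'version': '1.65.2',
                  'quiet': True})
if r['return'] > 0:
    cmind.error(r)

print(r['new_env']['CM_RCLONE_BIN_WITH_PATH'])
```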
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone/configs/rclone.conf b/cmx4mlops/cmx4mlops/repo/script/get-rclone/configs/rclone.conf new file mode 100644 index 000000000..45699a0a2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone/configs/rclone.conf @@ -0,0 +1,8 @@ +[cm-team] +type = drive +scope = drive.readonly +service_account_file = +team_drive = 0AN8R_ThwUNY8Uk9PVA +shared_with_me = true +root_folder_id = 0AN8R_ThwUNY8Uk9PVA + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-rclone/customize.py new file mode 100644 index 000000000..10a975424 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone/customize.py @@ -0,0 +1,158 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import configparser + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'rclone.exe' if os_info['platform'] == 'windows' else 'rclone' + env['FILE_NAME'] = file_name + + run_script_input = i['run_script_input'] + automation = i['automation'] + + need_version = env.get('CM_VERSION', '') + + host_os_machine = '' + if os_info['platform'] != 'windows': + host_os_machine = env['CM_HOST_OS_MACHINE'] # ABI + + r = automation.detect_version_using_script({ + 'env': env, + 'run_script_input': run_script_input, + 'recursion_spaces': recursion_spaces}) + + if r['return'] > 0: + if r['return'] == 16: + install_script = 'install' + if os_info['platform'] != 'windows' and env.get( + 'CM_RCLONE_SYSTEM', '') == 'yes': + install_script += '-system' + else: + if os_info['platform'] != 'windows': + x1 = 'arm64' if host_os_machine.startswith( + 'arm') or host_os_machine.startswith('aarch') else 'amd64' + + filebase = 'rclone-v{}-{}-{}' + urlbase = 'https://downloads.rclone.org/v{}/{}' + + if os_info['platform'] == 'darwin': + filename = filebase.format(need_version, 'osx', x1) + elif os_info['platform'] == 'linux': + filename = filebase.format(need_version, 'linux', x1) + + env['CM_RCLONE_URL'] = urlbase.format( + need_version, filename + '.zip') + env['CM_RCLONE_ARCHIVE'] = filename + env['CM_RCLONE_ARCHIVE_WITH_EXT'] = filename + '.zip' + + print( + recursion_spaces + + 'Downloading {}'.format( + env['CM_RCLONE_URL'])) + + cur_dir = os.getcwd() + path_bin = os.path.join(cur_dir, file_name) + env['CM_RCLONE_BIN_WITH_PATH'] = path_bin + + if not env.get('+PATH', []): + env['+PATH'] = [] + env['+PATH'].append(cur_dir) + + r = automation.run_native_script({'run_script_input': run_script_input, + 'env': env, + 'script_name': install_script}) + if r['return'] > 0: + return r + else: + return r + + return {'return': 0} + + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'rclone v([\d.]+)', + 'group_number': 1, + 'env_key': 'CM_RCLONE_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0,
'version': version} + + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + + gdrive = env.get('CM_RCLONE_GDRIVE', '') + if gdrive == "yes": + config = configparser.ConfigParser() + config_file_path = os.path.join( + env['CM_TMP_CURRENT_SCRIPT_PATH'], "configs", "rclone.conf") + + config.read(config_file_path) + # config['cm-team']['service_account_file'] = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "accessfiles", "rclone-gdrive.json") + + default_config_path = os.path.join( + os.path.expanduser('~'), ".config", "rclone", "rclone.conf") + + default_config = configparser.ConfigParser() + default_config.read(default_config_path) + + for section in config.sections(): + if section not in default_config.sections(): + default_config[section] = config[section] + + with open(default_config_path, 'w') as configfile: + default_config.write(configfile) + print({section: dict(default_config[section]) + for section in default_config.sections()}) + + r = detect_version(i) + + if r['return'] > 0: + return r + + version = r['version'] + + env['CM_RCLONE_CACHE_TAGS'] = 'version-' + version + + file_name = 'rclone.exe' if os_info['platform'] == 'windows' else 'rclone' + + if os_info['platform'] == 'windows' or env.get( + 'CM_RCLONE_SYSTEM', '') != 'yes': + cur_dir = os.getcwd() + path_bin = os.path.join(cur_dir, file_name) + if os.path.isfile(path_bin): + # Was downloaded and extracted by CM + env['CM_RCLONE_BIN_WITH_PATH'] = path_bin + env['+PATH'] = [cur_dir] + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone/install-system-macos.sh b/cmx4mlops/cmx4mlops/repo/script/get-rclone/install-system-macos.sh new file mode 100644 index 000000000..97f8f41ee --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone/install-system-macos.sh @@ -0,0 +1,3 @@ +#!/bin/bash +brew install rclone +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone/install-system.sh b/cmx4mlops/cmx4mlops/repo/script/get-rclone/install-system.sh new file mode 100644 index 000000000..a08dd54fb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone/install-system.sh @@ -0,0 +1,3 @@ +#!/bin/bash +sudo -v ; curl -k https://rclone.org/install.sh | sudo bash +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone/install.bat b/cmx4mlops/cmx4mlops/repo/script/get-rclone/install.bat new file mode 100644 index 000000000..0c12f5c1b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone/install.bat @@ -0,0 +1,12 @@ +del /Q /S rclone-v%CM_VERSION%-windows-amd64.zip > NUL 2>&1 + +wget --no-check-certificate https://downloads.rclone.org/v%CM_VERSION%/rclone-v%CM_VERSION%-windows-amd64.zip +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +unzip -o rclone-v%CM_VERSION%-windows-amd64.zip +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +move /Y rclone-v%CM_VERSION%-windows-amd64\* . + +del /Q /S rclone-v%CM_VERSION%-windows-amd64.zip > NUL 2>&1 + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone/install.sh b/cmx4mlops/cmx4mlops/repo/script/get-rclone/install.sh new file mode 100644 index 000000000..d3f6ede34 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone/install.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +rm -rf ${CM_RCLONE_ARCHIVE_WITH_EXT} +rm -rf rclone + +wget ${CM_RCLONE_URL} --no-check-certificate +test $? -eq 0 || exit 1 + +unzip ${CM_RCLONE_ARCHIVE_WITH_EXT} +test $? -eq 0 || exit 1 + +mv ${CM_RCLONE_ARCHIVE}/rclone . +test $? 
-eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-rclone/run.bat new file mode 100644 index 000000000..e8abbfd95 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone/run.bat @@ -0,0 +1,5 @@ +where rclone.exe > NUL 2>&1 +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +rclone --version > tmp-ver.out +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rclone/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-rclone/run.sh new file mode 100644 index 000000000..4eb6912d7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rclone/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +echo ${PATH} + +if ! command -v rclone &> /dev/null +then + echo "rclone was not detected" + exit 1 +fi +rclone --version > tmp-ver.out +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/README.md b/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/README.md new file mode 100644 index 000000000..7b1f4474c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/README.md @@ -0,0 +1,4 @@ +Run this script +``` +cm run script --tags=get,rocm-devices +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/_cm.yaml new file mode 100644 index 000000000..21a91b373 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/_cm.yaml @@ -0,0 +1,29 @@ +alias: get-rocm-devices +uid: c618239543364753 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +tags: +- get +- rocm-devices + +cache: false + +can_force_cache: true + +category: ROCM automation + +clean_files: +- tmp-run.out +docker: + run: false + all_gpus: 'yes' + skip_run_cmd: 'no' + skip_cm_sys_upgrade: 'yes' + cm_repo_flags: '--checkout=dev' + use_host_group_id: 'yes' + image_tag_extra: '-cm-dev' + +print_files_if_script_error: +- tmp-run.out diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/customize.py new file mode 100644 index 000000000..f6540af1f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/customize.py @@ -0,0 +1,77 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import subprocess + + +def preprocess(i): + + env = i['env'] + + if str(env.get('CM_DETECT_USING_HIP-PYTHON', '') + ).lower() in ["1", "yes", "true"]: + i['run_script_input']['script_name'] = 'detect' + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + os_info = i['os_info'] + + r = utils.load_txt(file_name='tmp-run.out', + check_if_exists=True, + 
split=True) + if r['return'] > 0: + return r + + lst = r['list'] + + # properties + p = {} + gpu = {} + + gpu_id = -1 + + for line in lst: + # print (line) + + j = line.find(':') + + if j >= 0: + key = line[:j].strip() + val = line[j + 1:].strip() + + if key == "GPU Device ID": + gpu_id += 1 + gpu[gpu_id] = {} + + if gpu_id < 0: + continue + + gpu[gpu_id][key] = val + p[key] = val + + key_env = 'CM_ROCM_DEVICE_PROP_' + key.upper().replace(' ', '_') + env[key_env] = val + + state['cm_rocm_num_devices'] = gpu_id + 1 + env['CM_ROCM_NUM_DEVICES'] = gpu_id + 1 + + state['cm_rocm_device_prop'] = p + state['cm_rocm_devices_prop'] = gpu + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/detect.py b/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/detect.py new file mode 100644 index 000000000..8029f0444 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/detect.py @@ -0,0 +1,55 @@ +from hip import hip + +# Defining the value for hipDeviceGetAttribute +STRINGLENGTH = 256 +hipDeviceAttributeClockRate = 5 +hipDeviceAttributeMaxBlockDimX = 26 +hipDeviceAttributeMaxBlockDimY = 27 +hipDeviceAttributeMaxBlockDimZ = 28 +hipDeviceAttributeMaxGridDimX = 29 +hipDeviceAttributeMaxGridDimY = 30 +hipDeviceAttributeMaxGridDimZ = 31 +hipDeviceAttributeMaxThreadsPerBlock = 56 +hipDeviceAttributeMaxThreadsPerMultiProcessor = 57 +hipDeviceAttributeMaxRegistersPerBlock = 71 +hipDeviceAttributeMaxSharedMemoryPerBlock = 74 +hipDeviceAttributeWarpSize = 87 + + +def get_gpu_info(): + num_gpus = hip.hipGetDeviceCount()[1] + all_gpu_info = [] + + for i in range(num_gpus): + gpu_info = { + "GPU Device ID": hip.hipDeviceGetPCIBusId(STRINGLENGTH, i)[1], + "GPU Name": i, + "GPU compute capability": f"{hip.hipDeviceComputeCapability(i)[1]}.{hip.hipDeviceComputeCapability(i)[2]}", + "ROCM driver version": f"{hip.hipDriverGetVersion()[1]}", + "ROCM runtime version": hip.hipRuntimeGetVersion()[1], + "Global memory (GiB)": hip.hipDeviceTotalMem(i)[1] / 1_073_741_824, + "Max clock rate": f"{hip.hipDeviceGetAttribute(hip.hipDeviceAttribute_t(hipDeviceAttributeClockRate), i)[1] / 1000} MHz", + "Total amount of shared memory per block (Bytes)": f"{hip.hipDeviceGetAttribute(hip.hipDeviceAttribute_t(hipDeviceAttributeMaxSharedMemoryPerBlock), i)[1]}", + "Total number of registers available per block (Bytes)": f"{hip.hipDeviceGetAttribute(hip.hipDeviceAttribute_t(hipDeviceAttributeMaxRegistersPerBlock), i)[1]}", + "Warp size": f"{hip.hipDeviceGetAttribute(hip.hipDeviceAttribute_t(hipDeviceAttributeWarpSize), i)[1]}", + "Maximum number of threads per multiprocessor": f"{hip.hipDeviceGetAttribute(hip.hipDeviceAttribute_t(hipDeviceAttributeMaxThreadsPerMultiProcessor), i)[1]}", + "Maximum number of threads per block": f"{hip.hipDeviceGetAttribute(hip.hipDeviceAttribute_t(hipDeviceAttributeMaxThreadsPerBlock), i)[1]}", + "Max dimension size of a thread block X": f"{hip.hipDeviceGetAttribute(hip.hipDeviceAttribute_t(hipDeviceAttributeMaxBlockDimX), i)[1]}", + "Max dimension size of a thread block Y": f"{hip.hipDeviceGetAttribute(hip.hipDeviceAttribute_t(hipDeviceAttributeMaxBlockDimY), i)[1]}", + "Max dimension size of a thread block Z": f"{hip.hipDeviceGetAttribute(hip.hipDeviceAttribute_t(hipDeviceAttributeMaxBlockDimZ), i)[1]}", + "Max dimension size of a grid size X": f"{hip.hipDeviceGetAttribute(hip.hipDeviceAttribute_t(hipDeviceAttributeMaxGridDimX), i)[1]}", + "Max dimension size of a grid size Y": f"{hip.hipDeviceGetAttribute(hip.hipDeviceAttribute_t(hipDeviceAttributeMaxGridDimY), 
i)[1]}", + "Max dimension size of a grid size Z": f"{hip.hipDeviceGetAttribute(hip.hipDeviceAttribute_t(hipDeviceAttributeMaxGridDimZ), i)[1]}", + } + all_gpu_info.append(gpu_info) + + return all_gpu_info + + +if __name__ == "__main__": + gpu_info_list = get_gpu_info() + with open("tmp-run.out", "w") as f: + for idx, gpu_info in enumerate(gpu_info_list): + print(f"GPU {idx}:") + for key, value in gpu_info.items(): + f.write(f"{key}: {value}\n") diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/detect.sh b/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/detect.sh new file mode 100644 index 000000000..8f6b93596 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/detect.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/detect.py +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/run.sh new file mode 100644 index 000000000..7b4fa0386 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rocm-devices/run.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# Compile + +rm a.out + +# Check if hip-python is installed +echo "" +echo "Checking if hip-python is installed..." +echo "" + +if ! python3 -m pip show hip-python > /dev/null 2>&1; then + echo "hip-python not found. Installing hip-python..." + python3 -m pip install --extra-index-url https://test.pypi.org/simple hip-python + if [ $? -ne 0 ]; then + echo "Failed to install hip-python. Please check your Python environment." + exit 1 + fi +else + echo "hip-python is already installed." +fi + +echo "" +echo "Running program ..." +echo "" + +cd ${CM_TMP_CURRENT_PATH} + +python ${CM_TMP_CURRENT_SCRIPT_PATH}/detect.py > tmp-run.out +cat tmp-run.out +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rocm/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-rocm/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rocm/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rocm/README.md b/cmx4mlops/cmx4mlops/repo/script/get-rocm/README.md new file mode 100644 index 000000000..a2f5a0414 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rocm/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/get-rocm](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/get-rocm) for the documentation of this CM script. 
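For reference, a minimal sketch of the contract between the get-rocm-devices `detect.py` and `customize.py` files above: `detect.py` writes one `key: value` line per property to `tmp-run.out`, and the postprocess step starts a new device block whenever the `GPU Device ID` key reappears, later exporting each key as a `CM_ROCM_DEVICE_PROP_*` variable. The helper name and sample values below are illustrative only and not part of the patch.

```python
# Illustrative sketch (not part of the patch): parse "key: value" lines from
# tmp-run.out into one dict per GPU, using "GPU Device ID" as the block marker.
def parse_rocm_properties(lines):
    gpus, gpu_id = {}, -1
    for line in lines:
        j = line.find(':')          # split on the first ':' only
        if j < 0:
            continue
        key, val = line[:j].strip(), line[j + 1:].strip()
        if key == "GPU Device ID":  # a new device block begins here
            gpu_id += 1
            gpus[gpu_id] = {}
        if gpu_id >= 0:
            gpus[gpu_id][key] = val
    return gpus


# Two "GPU Device ID" lines yield two device dicts (sample values are made up).
sample = ["GPU Device ID: 0000:03:00.0", "Warp size: 64",
          "GPU Device ID: 0000:83:00.0", "Warp size: 64"]
assert len(parse_rocm_properties(sample)) == 2
```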
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rocm/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-rocm/_cm.yaml new file mode 100644 index 000000000..0390db5a2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rocm/_cm.yaml @@ -0,0 +1,20 @@ +alias: get-rocm +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML frameworks +clean_files: [] +new_env_keys: +- CM_ROCM_* +- +PATH +prehook_deps: +- enable_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + reuse_version: true + tags: install,rocm +tags: +- get +- rocm +- get-rocm +uid: 23a69f9477cb4dab diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rocm/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-rocm/customize.py new file mode 100644 index 000000000..c6a2252b8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rocm/customize.py @@ -0,0 +1,77 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'rocminfo.exe' if os_info['platform'] == 'windows' else 'rocminfo' + env['FILE_NAME'] = file_name + env['CM_TMP_PATH'] = "/opt/rocm/bin" + + if 'CM_ROCM_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_ROCM_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + if r['return'] == 16: + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return': 0} + + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'([\d.]+[-\d+]*)', + 'group_number': 1, + 'env_key': 'CM_ROCM_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} + + +def postprocess(i): + env = i['env'] + + r = detect_version(i) + + if r['return'] > 0: + return r + + version = r['version'] + found_file_path = env['CM_ROCM_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + env['CM_ROCM_INSTALLED_PATH'] = found_path + + env['CM_ROCM_CACHE_TAGS'] = 'version-' + version + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-rocm/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-rocm/run.sh new file mode 100644 index 000000000..f7c8e888c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-rocm/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +dir="${CM_ROCM_BIN_WITH_PATH%/*}/../" +cat ${dir}/.info/version > tmp-ver.out +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. 
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/README-extra.md
new file mode 100644
index 000000000..4061851ca
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/README-extra.md
@@ -0,0 +1,16 @@
+# Get SPEC Power Daemon
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) clones the [SPEC Power Daemon](https://github.com/mlcommons/power) used by MLPerf for power measurements.
+
+## Commands
+To install:
+```
+cm run script --tags=get,mlperf,power,src
+```
+
+## Exported Variables
+* `CM_SPEC_PTD_PATH`: Path to the PTDaemon binary
+* `CM_MLPERF_PTD_PATH`: Path to the PTDaemon binary (same as `CM_SPEC_PTD_PATH`)
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
+2. RHEL 9
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/README.md b/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/README.md
new file mode 100644
index 000000000..b3a793e90
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-spec-ptd](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/get-spec-ptd) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/_cm.yaml
new file mode 100644
index 000000000..c4d7c8218
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/_cm.yaml
@@ -0,0 +1,49 @@
+alias: get-spec-ptd
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: MLPerf benchmark support
+default_env:
+  CM_GIT_CHECKOUT: main
+  CM_GIT_DEPTH: --depth 1
+  CM_GIT_PATCH: 'no'
+  CM_GIT_RECURSE_SUBMODULES: ' '
+  CM_GIT_URL: https://github.com/mlcommons/power.git
+default_version: main
+deps:
+- tags: detect,os
+- names:
+  - python
+  - python3
+  tags: get,python3
+- env:
+    CM_GIT_AUTH: 'yes'
+    CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_POWER_SOURCE
+  extra_cache_tags: mlperf,power,spec,ptdaemon,ptd
+  force_env_keys:
+  - CM_GIT_*
+  tags: get,git,repo,_repo.https://github.com/mlcommons/power
+input_description:
+  input: Path to SPEC PTDaemon (Optional)
+input_mapping:
+  input: CM_INPUT
+new_env_keys:
+- CM_SPEC_PTD_PATH
+- CM_MLPERF_PTD_PATH
+tags:
+- get
+- spec
+- ptd
+- ptdaemon
+- power
+- daemon
+- power-daemon
+- mlperf
+- mlcommons
+uid: 7423a878e4524136
+versions:
+  custom:
+    env: {}
+  main:
+    env:
+      CM_GIT_CHECKOUT: main
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/customize.py
new file mode 100644
index 000000000..04d1bedae
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/customize.py
@@ -0,0 +1,48 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+import shutil
+import stat
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = 
i['env'] + state = i['state'] + + if env['CM_HOST_OS_TYPE'].lower() == "windows": + binary_name = "ptd-windows-x86.exe" + else: + binary_name = "ptd-linux-x86" + if env.get('CM_MLPERF_PTD_PATH', '') == '': + env['CM_MLPERF_PTD_PATH'] = os.path.join( + env['CM_MLPERF_POWER_SOURCE'], 'PTD', 'binaries', binary_name) + + file_path = env['CM_MLPERF_PTD_PATH'] + current_permissions = os.stat(file_path).st_mode + + # Check if the file already has execute permissions + if not (current_permissions & stat.S_IXUSR): # Check user execute permission + # Add execute permissions for the user + os.chmod(file_path, current_permissions | stat.S_IXUSR) + + env['CM_SPEC_PTD_PATH'] = env['CM_MLPERF_PTD_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/run.sh new file mode 100644 index 000000000..f0f2e7eae --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-spec-ptd/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +if [[ -n "${CM_INPUT}" ]]; then + exit 0 +fi + +cd ${CM_MLPERF_POWER_SOURCE} + +chmod +x "inference_v1.0/ptd-linux-x86" +chmod +x "inference_v1.0/ptd-windows-x86.exe" +cd - diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/README.md b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/README.md new file mode 100644 index 000000000..1c1f06baf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-cm](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-cm) for the documentation of this CM script. 
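The get-spec-ptd postprocess above relies on POSIX mode bits to make the PTDaemon binary executable only when necessary. Below is a hedged, standalone sketch of that check; the function name is illustrative and not part of the patch.

```python
# Sketch of the execute-bit check used for the PTDaemon binary (illustrative):
# os.stat() exposes the current mode bits and stat.S_IXUSR masks the
# user-execute bit, so chmod runs only when that bit is missing.
import os
import stat


def ensure_user_executable(path):
    mode = os.stat(path).st_mode
    if not (mode & stat.S_IXUSR):            # user-execute bit not set
        os.chmod(path, mode | stat.S_IXUSR)  # add it, keep all other bits
```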
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/_cm.yaml
new file mode 100644
index 000000000..4d3e755ed
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/_cm.yaml
@@ -0,0 +1,36 @@
+alias: get-sys-utils-cm
+uid: bc90993277e84b8e
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+cache: true
+
+category: Detection or installation of tools and artifacts
+
+deps:
+- tags: detect,os
+
+env:
+  CM_CLEAN_DIRS: bin
+  CM_PACKAGE_WIN_URL: https://zenodo.org/records/13868077/files/cm-artifact-os-windows-32.zip?download=1
+  CM_SUDO: sudo
+
+input_mapping:
+  skip: CM_SKIP_SYS_UTILS
+
+new_env_keys:
+- +PATH
+
+tags:
+- get
+- sys-utils-cm
+
+variations:
+  user:
+    env:
+      CM_PYTHON_PIP_USER: --user
+
+  skip_python_deps:
+    env:
+      CM_SKIP_PYTHON_DEPS: "yes"
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/customize.py
new file mode 100644
index 000000000..6f052c75b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/customize.py
@@ -0,0 +1,100 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    automation = i['automation']
+    cm = automation.cmind
+
+    if env.get('CM_HOST_OS_FLAVOR', '') == 'amzn':
+        env['CM_PACKAGE_TOOL'] = "yum"
+        i['run_script_input']['script_name'] = "run-rhel"
+
+    # Test (not needed - will be removed)
+    if str(env.get('CM_SKIP_SYS_UTILS', '')).lower() in ['true', 'yes', 'on']:
+        return {'return': 0, 'skip': True}
+
+
+# Windows has moved to get-sys-utils-min and will always be run with
+# "detect,os"!
+ + if os_info['platform'] == 'windows': + print('') + print('This script is not used on Windows') + print('') + + # If windows, download here otherwise use run.sh + +# +# path = os.getcwd() +# +# clean_dirs = env.get('CM_CLEAN_DIRS','').strip() +# if clean_dirs!='': +# import shutil +# for cd in clean_dirs.split(','): +# if cd != '': +# if os.path.isdir(cd): +# print ('Clearning directory {}'.format(cd)) +# shutil.rmtree(cd) +# +# url = env['CM_PACKAGE_WIN_URL'] +# +# urls = [url] if ';' not in url else url.split(';') +# +# print ('') +# print ('Current directory: {}'.format(os.getcwd())) +# +# for url in urls: +# +# url = url.strip() +# +# print ('') +# print ('Downloading from {}'.format(url)) +# +# r = cm.access({'action':'download_file', +# 'automation':'utils,dc2743f8450541e3', +# 'url':url}) +# if r['return']>0: return r +# +# filename = r['filename'] +# +# print ('Unzipping file {}'.format(filename)) +# +# r = cm.access({'action':'unzip_file', +# 'automation':'utils,dc2743f8450541e3', +# 'filename':filename}) +# if r['return']>0: return r +# +# if os.path.isfile(filename): +# print ('Removing file {}'.format(filename)) +# os.remove(filename) +# +# print ('') +# +# # Add to path +# env['+PATH']=[os.path.join(path, 'bin')] +# + else: + print('') + print('***********************************************************************') + print('This script will attempt to install minimal system dependencies for CM.') + print('Note that you may be asked for your SUDO password ...') + print('***********************************************************************') + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/do_pip_installs.sh b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/do_pip_installs.sh new file mode 100644 index 000000000..cbf7e5857 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/do_pip_installs.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +PIP_EXTRA=`python3 -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"` +cmd="python3 -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${CM_PYTHON_PIP_USER} ${CM_PYTHON_PIP_COMMON_EXTRA} ${PIP_EXTRA}" +echo $cmd +eval $cmd diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/do_pip_installs.sh.old b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/do_pip_installs.sh.old new file mode 100644 index 000000000..55a149249 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/do_pip_installs.sh.old @@ -0,0 +1,6 @@ +#!/bin/bash + +PIP_EXTRA=`python3 -c "import pkg_resources; print(' --break-system-packages ' if int(pkg_resources.get_distribution('pip').version.split('.')[0]) >= 23 else '')"` +cmd="python3 -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${CM_PYTHON_PIP_USER} ${CM_PYTHON_PIP_COMMON_EXTRA} ${PIP_EXTRA}" +echo $cmd +eval $cmd diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/requirements.txt b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/requirements.txt new file mode 100644 index 000000000..bb2a50df8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/requirements.txt @@ -0,0 +1,5 @@ +requests +numpy +pandas +wheel +giturlparse diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-arch.sh b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-arch.sh new file mode 100644 index 000000000..eb71848ad --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-arch.sh @@ -0,0 +1,39 @@ +#!/bin/bash 
+ +echo "***************************************************" +echo "Installing some system dependencies via sudo pacman" + + +if [[ "$CM_QUIET" != "yes" ]]; then + echo "Enter skip to skip this step or press enter to continue:" + read DUMMY + + if [[ "$DUMMY" == "skip" ]]; then exit 0; fi +fi + +CM_PACKAGE_TOOL=${CM_PACKAGE_TOOL:-pacman} + +${CM_SUDO} ${CM_PACKAGE_TOOL} -Syu && \ + ${CM_SUDO} ${CM_PACKAGE_TOOL} -Sy \ + acl autoconf \ + bzip2 \ + ca-certificates curl cmake \ + gcc git g++ \ + libtool \ + zlib \ + patch python python-pip \ + rsync \ + sudo \ + tar \ + unzip \ + vim \ + wget which \ + xz \ + zip + +# Install Python deps though preference is to install them +# via cmr "get generic-python-lib _package.{Python PIP package name}" +if [[ "${CM_SKIP_PYTHON_DEPS}" != "yes" ]]; then + . ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh + test $? -eq 0 || exit $? +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-debian.sh b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-debian.sh new file mode 100644 index 000000000..488da76fb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-debian.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +echo "************************************************" +echo "Installing some system dependencies via sudo apt" + + +if [[ "$CM_QUIET" != "yes" ]]; then + echo "Enter skip to skip this step or press enter to continue:" + read DUMMY + + if [[ "$DUMMY" == "skip" ]]; then exit 0; fi +fi + +CM_APT_TOOL=${CM_APT_TOOL:-apt-get} + +${CM_SUDO} ${CM_APT_TOOL} update && \ + ${CM_SUDO} ${CM_APT_TOOL} install -y --no-install-recommends \ + apt-utils \ + git \ + wget \ + curl \ + zip \ + unzip \ + bzip2 \ + zlib1g-dev \ + libbz2-dev \ + openssh-client \ + kmod \ + libmesa-dev \ + libssl-dev \ + vim \ + mc \ + tree \ + gcc \ + g++ \ + tar \ + autoconf \ + autogen \ + libtool \ + make \ + cmake \ + libc6-dev \ + build-essential \ + libbz2-dev \ + libffi-dev \ + liblzma-dev \ + python3 \ + python3-pip \ + python3-dev \ + libtinfo-dev \ + sudo \ + libgl1 \ + libncurses5 + +# Install Python deps though preference is to install them +# via cmr "get generic-python-lib _package.{Python PIP package name}" +if [[ "${CM_SKIP_PYTHON_DEPS}" != "yes" ]]; then + . ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh + test $? -eq 0 || exit $? +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-macos.sh b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-macos.sh new file mode 100644 index 000000000..afb2ef8e6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-macos.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +echo "***************************************************" +echo "Installing some system dependencies via brew" + +if [[ "$CM_QUIET" != "yes" ]]; then + echo "Enter skip to skip this step or press enter to continue:" + read DUMMY + + if [[ "$DUMMY" == "skip" ]]; then exit 0; fi +fi + +brew update && \ + brew install \ + git \ + wget \ + curl \ + zip \ + unzip \ + bzip2 \ + vim \ + mc \ + tree \ + gcc \ + autoconf \ + autogen \ + libtool \ + make \ + cmake \ + openssl \ + readline \ + sqlite3 \ + tar \ + xz \ + zlib \ + python3 + +# Install Python deps though preference is to install them +# via cmr "get generic-python-lib _package.{Python PIP package name}" +if [[ "${CM_SKIP_PYTHON_DEPS}" != "yes" ]]; then + . ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh + test $? -eq 0 || exit $? 
+fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-rhel.sh b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-rhel.sh new file mode 100644 index 000000000..87b03b777 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-rhel.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +echo "************************************************" +echo "Installing some system dependencies via sudo dnf" + + +if [[ "$CM_QUIET" != "yes" ]]; then + echo "Enter skip to skip this step or press enter to continue:" + read DUMMY + + if [[ "$DUMMY" == "skip" ]]; then exit 0; fi +fi + +if [[ "$CM_HOST_OS_FLAVOR" == "amzn" ]]; then + ${CM_SUDO} yum groupinstall "Development Tools" +fi + +CM_PACKAGE_TOOL=${CM_PACKAGE_TOOL:-dnf} + +${CM_SUDO} ${CM_PACKAGE_TOOL} update && \ + ${CM_SUDO} ${CM_PACKAGE_TOOL} --skip-broken install -y \ + acl autoconf \ + bzip2-devel bzip2 \ + ca-certificates curl cmake \ + gcc git g++ \ + libtool libffi-devel libssl-devel\ + zlib-devel \ + libbz2-devel \ + openssh-client \ + make mesa-libGL \ + patch python3 python3-pip python3-devel \ + openssl-devel \ + rsync \ + tar \ + unzip \ + vim \ + wget which \ + xz \ + zip + +# Install Python deps though preference is to install them +# via cmr "get generic-python-lib _package.{Python PIP package name}" +if [[ "${CM_SKIP_PYTHON_DEPS}" != "yes" ]]; then + . ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh + test $? -eq 0 || exit $? +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-sles.sh b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-sles.sh new file mode 100644 index 000000000..845c0b069 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-sles.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +echo "***************************************************" +echo "Installing some system dependencies via sudo zypper" + + +if [[ "$CM_QUIET" != "yes" ]]; then + echo "Enter skip to skip this step or press enter to continue:" + read DUMMY + + if [[ "$DUMMY" == "skip" ]]; then exit 0; fi +fi + +CM_PACKAGE_TOOL=${CM_PACKAGE_TOOL:-zypper} + +${CM_SUDO} ${CM_PACKAGE_TOOL} install -t pattern devel_basis +${CM_SUDO} ${CM_PACKAGE_TOOL} update && \ + ${CM_SUDO} ${CM_PACKAGE_TOOL} install -y \ + bzip2-devel bzip2 \ + ca-certificates curl cmake \ + gcc git \ + libtool libffi-devel \ + zlib-devel \ + libbz2-devel \ + openssh-client \ + make \ + patch python3 python3-pip python3-devel \ + openssl-devel \ + rsync \ + tar \ + unzip \ + vim \ + wget which \ + xz \ + zip + +# Install Python deps though preference is to install them +# via cmr "get generic-python-lib _package.{Python PIP package name}" +if [[ "${CM_SKIP_PYTHON_DEPS}" != "yes" ]]; then + . ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh + test $? -eq 0 || exit $? 
+fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-ubuntu.sh b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-ubuntu.sh new file mode 100644 index 000000000..3fd66a552 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-cm/run-ubuntu.sh @@ -0,0 +1,64 @@ +#!/bin/bash + +echo "************************************************" +echo "Installing some system dependencies via sudo apt" + + +if [[ "$CM_QUIET" != "yes" ]]; then + echo "Enter skip to skip this step or press enter to continue:" + read DUMMY + + if [[ "$DUMMY" == "skip" ]]; then exit 0; fi +fi + +CM_APT_TOOL=${CM_APT_TOOL:-apt-get} + +${CM_SUDO} ${CM_APT_TOOL} update && \ + ${CM_SUDO} DEBIAN_FRONTEND=noninteractive ${CM_APT_TOOL} install -y --no-install-recommends \ + apt-utils \ + git \ + wget \ + curl \ + zip \ + unzip \ + bzip2 \ + libz-dev \ + libbz2-dev \ + openssh-client \ + libssl-dev \ + vim \ + mc \ + tree \ + gcc \ + g++ \ + tar \ + autoconf \ + autogen \ + libtool \ + make \ + cmake \ + libc6-dev \ + build-essential \ + libbz2-dev \ + libffi-dev \ + liblzma-dev \ + python3 \ + python3-pip \ + python3-dev \ + python3-venv \ + libtinfo-dev \ + python-is-python3 \ + sudo \ + libgl1 \ + libncurses5 \ + libjpeg9-dev \ + unzip \ + libgl1 \ + zlib1g-dev + +# Install Python deps though preference is to install them +# via cmr "get generic-python-lib _package.{Python PIP package name}" +if [[ "${CM_SKIP_PYTHON_DEPS}" != "yes" ]]; then + . ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh + test $? -eq 0 || exit $? +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/README.md b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/README.md new file mode 100644 index 000000000..327b0de77 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-min](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-min) for the documentation of this CM script. 
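Each of the `run-*.sh` installers above finishes by sourcing `do_pip_installs.sh`, whose `python3 -c` one-liner gates `--break-system-packages` on the pip version, since pip >= 23 enforces PEP 668 externally-managed environments on recent distributions. A Python sketch of that gate is below; the function name is illustrative and not part of the patch.

```python
# Sketch (illustrative) of the version gate embedded in do_pip_installs.sh:
# pip >= 23 refuses to modify externally managed environments (PEP 668),
# so the extra flag is emitted only for those versions.
import importlib.metadata


def pip_extra_flags():
    major = int(importlib.metadata.version("pip").split(".")[0])
    return " --break-system-packages " if major >= 23 else ""
```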
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/_cm.yaml new file mode 100644 index 000000000..c07f46eb5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/_cm.yaml @@ -0,0 +1,33 @@ +alias: get-sys-utils-min +uid: a9af7714d3d94779 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: true + +category: Detection or installation of tools and artifacts + +deps: + - tags: detect,os + - tags: get,generic,sys-util,_xz + enable_if_env: + CM_HOST_OS_TYPE: + - windows + - tags: get,generic,sys-util,_zlib + enable_if_env: + CM_HOST_OS_TYPE: + - windows + +env: + CM_CLEAN_DIRS: bin + CM_WINDOWS_SYS_UTILS_MIN_INSTALL: yes + CM_PACKAGE_WIN_URL: https://zenodo.org/records/13868077/files/cm-artifact-os-windows-32.zip?download=1 + CM_SUDO: sudo + +new_env_keys: +- +PATH + +tags: +- get +- sys-utils-min diff --git a/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/customize.py new file mode 100644 index 000000000..0230039c2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-sys-utils-min/customize.py @@ -0,0 +1,78 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + cm = automation.cmind + + # If windows, download here otherwise use run.sh + if os_info['platform'] == 'windows': + + path = os.getcwd() + + clean_dirs = env.get('CM_CLEAN_DIRS', '').strip() + if clean_dirs != '': + import shutil + for cd in clean_dirs.split(','): + if cd != '': + if os.path.isdir(cd): + print('Clearning directory {}'.format(cd)) + shutil.rmtree(cd) + + url = env['CM_PACKAGE_WIN_URL'] + + urls = [url] if ';' not in url else url.split(';') + + print('') + print('Current directory: {}'.format(os.getcwd())) + + for url in urls: + + url = url.strip() + + print('') + print('Downloading from {}'.format(url)) + + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': url}) + if r['return'] > 0: + return r + + filename = r['filename'] + + print('Unzipping file {}'.format(filename)) + + r = cm.access({'action': 'unzip_file', + 'automation': 'utils,dc2743f8450541e3', + 'filename': filename}) + if r['return'] > 0: + return r + + if os.path.isfile(filename): + print('Removing file {}'.format(filename)) + os.remove(filename) + + print('') + + # Add to path + env['+PATH'] = [os.path.join(path, 'bin')] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
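On Windows, the get-sys-utils-min customization above accepts one or more archive URLs in `CM_PACKAGE_WIN_URL`, separated by `;`; each archive is downloaded, unzipped into the current directory and removed, and `./bin` is added to `+PATH`. A minimal sketch of the URL-splitting convention, with an illustrative helper name:

```python
# Sketch (illustrative) of how CM_PACKAGE_WIN_URL is interpreted:
# either a single URL, or several URLs separated by ';', each stripped
# of surrounding whitespace before being downloaded in turn.
def split_package_urls(value):
    urls = [value] if ';' not in value else value.split(';')
    return [u.strip() for u in urls]


assert split_package_urls("https://a/x.zip; https://b/y.zip") == [
    "https://a/x.zip", "https://b/y.zip"]
```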
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/README-extra.md
new file mode 100644
index 000000000..925426511
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/README-extra.md
@@ -0,0 +1,11 @@
+# Get TensorRT
+
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) installs TensorRT when the corresponding [tar file](https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#installing-tar) is provided as an input.
+
+## How to Use
+```
+cm run script --tags=get,tensorrt --tar_file=
+```
+
+## Supported and Tested OS
+1. Ubuntu 18.04, 20.04, 22.04
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/README.md b/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/README.md
new file mode 100644
index 000000000..71265b354
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/CUDA-automation/get-tensorrt](https://docs.mlcommons.org/cm4mlops/scripts/CUDA-automation/get-tensorrt) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/_cm.yaml
new file mode 100644
index 000000000..3ded18c17
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/_cm.yaml
@@ -0,0 +1,38 @@
+alias: get-tensorrt
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: CUDA automation
+clean_files: []
+default_env: {}
+deps:
+- tags: detect,os
+- names:
+  - python
+  - python3
+  tags: get,python3
+docker: {}
+input_description:
+  input: Full path to the installed TensorRT library (nvinfer)
+  tar_file: Full path to the TensorRT Tar file downloaded from the Nvidia website
+    (https://developer.nvidia.com/tensorrt)
+input_mapping:
+  input: CM_INPUT
+  tar_file: CM_TENSORRT_TAR_FILE_PATH
+new_env_keys:
+- CM_TENSORRT_*
+- +PATH
+- +C_INCLUDE_PATH
+- +CPLUS_INCLUDE_PATH
+- +LD_LIBRARY_PATH
+- +DYLD_FALLBACK_LIBRARY_PATH
+- + LDFLAGS
+tags:
+- get
+- tensorrt
+- nvidia
+uid: 2a84ca505e4c408d
+variations:
+  dev:
+    env:
+      CM_TENSORRT_REQUIRE_DEV: 'yes'
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/customize.py
new file mode 100644
index 000000000..7f0bbe977
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/customize.py
@@ -0,0 +1,166 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+import tarfile
+
+
+def preprocess(i):
+
+    recursion_spaces = i['recursion_spaces']
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    # Not enforcing dev requirement for now
+    if env.get('CM_TENSORRT_TAR_FILE_PATH', '') == '' and env.get(
+            'CM_TENSORRT_REQUIRE_DEV1', '') != 'yes' and env.get('CM_HOST_PLATFORM_FLAVOR', '') != 'aarch64':
+
+        if os_info['platform'] == 'windows':
+            extra_pre = ''
+            extra_ext = 'lib'
+        else:
+            extra_pre = 'lib'
+            extra_ext = 'so'
+
+        libfilename = extra_pre + 'nvinfer.' 
+ extra_ext + env['CM_TENSORRT_VERSION'] = 'vdetected' + + if env.get('CM_TMP_PATH', '').strip() != '': + path = env.get('CM_TMP_PATH') + if os.path.exists(os.path.join(path, libfilename)): + env['CM_TENSORRT_LIB_PATH'] = path + return {'return': 0} + + if not env.get('CM_TMP_PATH'): + env['CM_TMP_PATH'] = '' + + if os_info['platform'] == 'windows': + if env.get('CM_INPUT', '').strip() == '' and env.get( + 'CM_TMP_PATH', '').strip() == '': + # Check in "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA" + paths = [] + for path in ["C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA", + "C:\\Program Files (x86)\\NVIDIA GPU Computing Toolkit\\CUDA"]: + if os.path.isdir(path): + dirs = os.listdir(path) + for dr in dirs: + path2 = os.path.join(path, dr, 'lib') + if os.path.isdir(path2): + paths.append(path2) + + if len(paths) > 0: + tmp_paths = ';'.join(paths) + tmp_paths += ';' + os.environ.get('PATH', '') + + env['CM_TMP_PATH'] = tmp_paths + env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + else: + # paths to cuda are not always in PATH - add a few typical locations to search for + # (unless forced by a user) + + if env.get('CM_INPUT', '').strip() == '': + if env.get('CM_TMP_PATH', '').strip() != '': + env['CM_TMP_PATH'] += ':' + + env['CM_TMP_PATH_IGNORE_NON_EXISTANT'] = 'yes' + + for lib_path in env.get( + '+CM_HOST_OS_DEFAULT_LIBRARY_PATH', []): + if (os.path.exists(lib_path)): + env['CM_TMP_PATH'] += ':' + lib_path + + r = i['automation'].find_artifact({'file_name': libfilename, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'LD_LIBRARY_PATH', + 'detect_version': False, + 'env_path_key': 'CM_TENSORRT_LIB_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + if os_info['platform'] == 'windows': + return r + else: + return {'return': 0} + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is currently not supported!'} + + if env.get('CM_TENSORRT_TAR_FILE_PATH', '') == '': + tags = ["get", "tensorrt"] + if env.get('CM_TENSORRT_REQUIRE_DEV', '') != 'yes': + tags.append("_dev") + return {'return': 1, 'error': 'Please envoke cmr "' + + " ".join(tags) + '" --tar_file={full path to the TensorRT tar file}'} + + print('Untaring file - can take some time ...') + + file_name = "trtexec" + my_tar = tarfile.open(os.path.expanduser(env['CM_TENSORRT_TAR_FILE_PATH'])) + folder_name = my_tar.getnames()[0] + if not os.path.exists(os.path.join(os.getcwd(), folder_name)): + my_tar.extractall() + my_tar.close() + + import re + version_match = re.match(r'TensorRT-(\d+\.\d+\.\d+\.\d+)', folder_name) + if not version_match: + return {'return': 1, 'error': 'Extracted TensorRT folder does not seem proper - Version information missing'} + version = version_match.group(1) + + env['CM_TENSORRT_VERSION'] = version + env['CM_TENSORRT_INSTALL_PATH'] = os.path.join(os.getcwd(), folder_name) + env['CM_TENSORRT_LIB_PATH'] = os.path.join(os.getcwd(), folder_name, "lib") + env['CM_TMP_PATH'] = os.path.join(os.getcwd(), folder_name, "bin") + env['+CPLUS_INCLUDE_PATH'] = [ + os.path.join( + os.getcwd(), + folder_name, + "include")] + env['+C_INCLUDE_PATH'] = [ + os.path.join( + os.getcwd(), + folder_name, + "include")] + env['+LD_LIBRARY_PATH'] = [os.path.join(os.getcwd(), folder_name, "lib")] + + return {'return': 0} + + +def postprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if '+LD_LIBRARY_PATH' not in env: + env['+LD_LIBRARY_PATH'] = [] + + if '+PATH' not in env: + env['+PATH'] = [] + + if '+ 
LDFLAGS' not in env: + env['+ LDFLAGS'] = [] + + # if 'CM_TENSORRT_LIB_WITH_PATH' in env: + # tensorrt_lib_path = os.path.dirname(env['CM_TENSORRT_LIB_WITH_PATH']) + if 'CM_TENSORRT_LIB_PATH' in env: + env['+LD_LIBRARY_PATH'].append(env['CM_TENSORRT_LIB_PATH']) + env['+PATH'].append(env['CM_TENSORRT_LIB_PATH']) # for cmake + env['+ LDFLAGS'].append("-L" + env['CM_TENSORRT_LIB_PATH']) + + version = env['CM_TENSORRT_VERSION'] + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/run.sh new file mode 100644 index 000000000..ac3b30a9d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-tensorrt/run.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +if [[ ${CM_TENSORRT_VERSION} == 'vdetected' ]]; then + exit 0; +fi + +PIP_EXTRA=`python3 -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"` + +version=${CM_TENSORRT_VERSION} +install_dir=${CM_TENSORRT_INSTALL_PATH} +python_version=${CM_PYTHON_VERSION} +python_version_info=(${python_version//./ }) +python_max_version=${python_version_info[0]} +python_min_version=${python_version_info[1]} + +cd ${install_dir}/python +${CM_PYTHON_BIN_WITH_PATH} -m pip install tensorrt-*-cp${python_max_version}${python_min_version}-none-${CM_HOST_OS_TYPE}_${CM_HOST_OS_MACHINE}.whl $PIP_EXTRA +test $? -eq 0 || exit $? + +cd ${install_dir}/uff +${CM_PYTHON_BIN_WITH_PATH} -m pip install uff-0.6.9-py2.py3-none-any.whl $PIP_EXTRA +test $? -eq 0 || exit $? + +cd ${install_dir}/graphsurgeon +${CM_PYTHON_BIN_WITH_PATH} -m pip install graphsurgeon-0.4.6-py2.py3-none-any.whl $PIP_EXTRA +test $? -eq 0 || exit $? + +cd ${install_dir}/onnx_graphsurgeon +${CM_PYTHON_BIN_WITH_PATH} -m pip install onnx_graphsurgeon-0.3.12-py2.py3-none-any.whl $PIP_EXTRA +test $? -eq 0 || exit $? + +#create softlinks for libnvinfer.so.7 and libnvinfer_plugin.so.7 +# https://forums.developer.nvidia.com/t/could-not-load-dynamic-library-libnvinfer-so-7/231606/5 +if [ ! -f "${install_dir}/lib/libnvinfer.so.7" ]; then + ln -s "${install_dir}/lib/libnvinfer.so" "${install_dir}/lib/libnvinfer.so.7" +fi +test $? -eq 0 || exit $? +if [ ! -f "${install_dir}/lib/libnvinfer_plugin.so.7" ]; then + ln -s "${install_dir}/lib/libnvinfer_plugin.so" "${install_dir}/lib/libnvinfer_plugin.so.7" +fi +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-terraform/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-terraform/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-terraform/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-terraform/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-terraform/README-extra.md new file mode 100644 index 000000000..0fc57d505 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-terraform/README-extra.md @@ -0,0 +1,9 @@ +# Get Terraform +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed Terraform on the system and if not found calls the [install script for Terraform](../script/install-terraform-from-src). + +## Exported Variables +* `CM_TERRAFORM_BIN_WITH_PATH` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. 
RHEL 9 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-terraform/README.md b/cmx4mlops/cmx4mlops/repo/script/get-terraform/README.md new file mode 100644 index 000000000..45451b763 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-terraform/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Cloud-automation/get-terraform](https://docs.mlcommons.org/cm4mlops/scripts/Cloud-automation/get-terraform) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-terraform/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-terraform/_cm.yaml new file mode 100644 index 000000000..236d5b9f6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-terraform/_cm.yaml @@ -0,0 +1,20 @@ +alias: get-terraform +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Cloud automation +clean_files: [] +new_env_keys: +- CM_TERRAFORM_* +- +PATH +prehook_deps: +- enable_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + reuse_version: true + tags: install,terraform +tags: +- get +- terraform +- get-terraform +uid: 66b33c38a4d7461e diff --git a/cmx4mlops/cmx4mlops/repo/script/get-terraform/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-terraform/customize.py new file mode 100644 index 000000000..765e599d9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-terraform/customize.py @@ -0,0 +1,75 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + recursion_spaces = i['recursion_spaces'] + + file_name = 'terraform.exe' if os_info['platform'] == 'windows' else 'terraform' + env['FILE_NAME'] = file_name + if 'CM_TERRAFORM_BIN_WITH_PATH' not in env: + r = i['automation'].find_artifact({'file_name': file_name, + 'env': env, + 'os_info': os_info, + 'default_path_env_key': 'PATH', + 'detect_version': True, + 'env_path_key': 'CM_TERRAFORM_BIN_WITH_PATH', + 'run_script_input': i['run_script_input'], + 'recursion_spaces': recursion_spaces}) + if r['return'] > 0: + if r['return'] == 16: + env['CM_REQUIRE_INSTALL'] = "yes" + return {'return': 0} + else: + return r + + return {'return': 0} + + +def detect_version(i): + r = i['automation'].parse_version({'match_text': r'Terraform\s*v([\d.]+)', + 'group_number': 1, + 'env_key': 'CM_TERRAFORM_VERSION', + 'which_env': i['env']}) + if r['return'] > 0: + return r + + version = r['version'] + + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + return {'return': 0, 'version': version} + + +def postprocess(i): + env = i['env'] + + r = detect_version(i) + + if r['return'] > 0: + return r + + version = r['version'] + found_file_path = env['CM_TERRAFORM_BIN_WITH_PATH'] + + found_path = os.path.dirname(found_file_path) + env['CM_TERRAFORM_INSTALLED_PATH'] = found_path + + env['CM_TERRAFORM_CACHE_TAGS'] = 'version-' + version + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-terraform/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-terraform/run.sh new file mode 100644 index 000000000..7e0438bb9 --- /dev/null +++ 
b/cmx4mlops/cmx4mlops/repo/script/get-terraform/run.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+terraform --version > tmp-ver.out
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/README-extra.md
new file mode 100644
index 000000000..0815c5b53
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/README-extra.md
@@ -0,0 +1,21 @@
+# CM script
+
+This script tunes (if requested) and compiles any supported model using Apache TVM.
+
+## How To
+```bash
+cm run script --tags=get,tvm-model,_[VARIATION]
+```
+where `[VARIATION]` is one of:
+1) Frontend framework name (`onnx`, `pytorch`, `tensorflow`, `tflite`)
+2) Precision (`fp32`, `int8`)
+3) TVM runtime (`virtual_machine` or `graph_executor`)
+4) `tune-model` if you want to tune the model with the TVM MetaScheduler
+5) Model name (`model.#`)
+6) Batch size (`batch_size.#`)
+In 5) and 6) you can insert any suitable value in place of the `#` symbol, e.g. `model.bert` or `batch_size.8`.
+
+## Notes
+
+For the PyTorch and TensorFlow frontends you should set the environment variable `CM_ML_MODEL_INPUT_SHAPES` to the input shapes of the model you want to compile (e.g. `"input": (16, 3, 224, 224)`), or the separate variables `CM_ML_MODEL_IMAGE_NUM_CHANNELS`, `CM_ML_MODEL_IMAGE_WIDTH`, `CM_ML_MODEL_IMAGE_HEIGHT` for 2D CV models and `CM_ML_MODEL_MAX_SEQ_LENGTH` for language models.
+If your model is in ONNX format, all input shapes can be extracted automatically.
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/README.md b/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/README.md
new file mode 100644
index 000000000..1613c09f4
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-tvm-model](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-models/get-tvm-model) for the documentation of this CM script.
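The shape handling described in the README-extra above is implemented by the get-tvm-model `customize.py` and `process.py` later in this patch: `BATCH_SIZE` inside `CM_ML_MODEL_INPUT_SHAPES` is substituted with the concrete batch size before the string is evaluated into a shape dict, and the CV fallback variables are combined into an NCHW shape. A hedged sketch of that interpretation follows; the function name and the `"input"` key in the fallback are illustrative.

```python
# Sketch (illustrative) of how the shape environment variables are turned
# into a TVM shape dict. eval() mirrors what the script itself does with
# this trusted, user-provided string.
def shapes_from_env(env, batch_size):
    raw = env.get("CM_ML_MODEL_INPUT_SHAPES", "")
    if raw:
        # e.g. '"input": (BATCH_SIZE, 3, 224, 224)' -> {"input": (8, 3, 224, 224)}
        return eval("{" + raw.replace("BATCH_SIZE", str(batch_size)) + "}")
    # Fallback for 2D CV models: NCHW shape under an assumed input name.
    return {"input": (batch_size,
                      int(env["CM_ML_MODEL_IMAGE_NUM_CHANNELS"]),
                      int(env["CM_ML_MODEL_IMAGE_HEIGHT"]),
                      int(env["CM_ML_MODEL_IMAGE_WIDTH"]))}


assert shapes_from_env(
    {"CM_ML_MODEL_INPUT_SHAPES": '"input": (BATCH_SIZE, 3, 224, 224)'}, 8
) == {"input": (8, 3, 224, 224)}
```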
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/_cm.yaml new file mode 100644 index 000000000..c843c94e7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/_cm.yaml @@ -0,0 +1,120 @@ +alias: get-tvm-model +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML models +default_env: + CM_ML_MODEL_MAX_BATCH_SIZE: '1' + CM_TUNE_TVM_MODEL: 'no' + CM_TVM_FRONTEND_FRAMEWORK: onnx + CM_TVM_USE_VM: 'yes' +deps: +- names: + - python + - python3 + tags: get,python3 +- names: + - tvm + tags: get,tvm +- tags: get,generic-python-lib,_decorator +- tags: get,generic-python-lib,_psutil +- tags: get,generic-python-lib,_scipy +- tags: get,generic-python-lib,_attrs +new_env_keys: +- CM_ML_MODEL_* +- CM_TUNE_TVM_* +- CM_TVM_* +prehook_deps: +- names: + - original-model + tags: get,ml-model,raw + update_tags_from_env: + - CM_ML_MODEL + update_tags_from_env_with_prefix: + _: + - CM_TVM_FRONTEND_FRAMEWORK +tags: +- get +- ml-model-tvm +- tvm-model +uid: c1b7b656b6224307 +variations: + batch_size.#: + env: + CM_ML_MODEL_MAX_BATCH_SIZE: '#' + group: batchsize + fp32: + add_deps_recursive: + original-model: + tags: _fp32 + default: true + group: precision + graph_executor: + env: + CM_TVM_USE_VM: 'no' + group: runtime + int8: + add_deps_recursive: + original-model: + tags: _int8 + group: precision + model.#: + env: + CM_ML_MODEL: '#' + group: model + onnx: + default: true + deps: + - names: + - onnx + tags: get,generic-python-lib,_onnx + env: + CM_TVM_FRONTEND_FRAMEWORK: onnx + group: frontend + pytorch: + deps: + - names: + - pytorch + - torch + tags: get,generic-python-lib,_torch + - tags: get,generic-python-lib,_torchvision + env: + CM_TVM_FRONTEND_FRAMEWORK: pytorch + group: frontend + tensorflow: + deps: + - names: + - tensorflow + tags: get,generic-python-lib,_tensorflow + env: + CM_TVM_FRONTEND_FRAMEWORK: tensorflow + group: frontend + tf: + alias: tensorflow + tflite: + deps: + - names: + - tflite + tags: get,generic-python-lib,_tflite + env: + CM_TVM_FRONTEND_FRAMEWORK: tflite + group: frontend + torch: + alias: pytorch + tune-model: + deps: + - tags: get,generic-python-lib,_xgboost + - tags: get,generic-python-lib,_pandas + - tags: get,generic-python-lib,_tornado + env: + CM_TUNE_TVM_MODEL: 'yes' + uint8: + add_deps_recursive: + original-model: + tags: _uint8 + group: precision + virtual_machine: + default: true + env: + CM_TVM_USE_VM: 'yes' + group: runtime diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/customize.py new file mode 100644 index 000000000..7fc7a54f2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/customize.py @@ -0,0 +1,65 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + work_dir = env.get('CM_TUNE_TVM_MODEL_WORKDIR', '') + + if work_dir != '': + if not 
os.path.exists(work_dir): + raise FileNotFoundError( + f"Error: the specified path \"{work_dir}\" does not exist") + + if not os.path.exists(f"{work_dir}/database_workload.json"): + raise FileNotFoundError( + "Error: the specified work_dir does not contain database_workload.json") + + if not os.path.exists(f"{work_dir}/database_tuning_record.json"): + raise FileNotFoundError( + "Error: the specified work_dir does not contain database_tuning_record.json") + + if env.get('CM_TUNE_TVM_MODEL', '') != '': + print("The \"tune-model\" variation is selected, but a path to an existing \"work_dir\" is also specified. The compiled model will be based on the existing \"work_dir\".") + env["CM_TUNE_TVM_MODEL"] = "no" + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['CM_ML_MODEL_ORIGINAL_FILE_WITH_PATH'] = env['CM_ML_MODEL_FILE_WITH_PATH'] + env['CM_ML_MODEL_FILE'] = 'model-tvm.so' + env['CM_ML_MODEL_PATH'] = os.path.join(os.getcwd()) + env['CM_ML_MODEL_FILE_WITH_PATH'] = os.path.join( + os.getcwd(), env['CM_ML_MODEL_FILE']) + env['CM_ML_MODEL_FRAMEWORK'] = "tvm-" + env['CM_ML_MODEL_FRAMEWORK'] + if 'CM_ML_MODEL_INPUT_SHAPES' in env.keys(): + env['CM_ML_MODEL_INPUT_SHAPES'] = env['CM_ML_MODEL_INPUT_SHAPES'].replace( + "BATCH_SIZE", env['CM_ML_MODEL_MAX_BATCH_SIZE']) + if 'CM_TVM_FRONTEND_FRAMEWORK' in env and env['CM_TVM_FRONTEND_FRAMEWORK'] == 'pytorch': + env['CM_PREPROCESS_PYTORCH'] = 'yes' + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/process.py b/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/process.py new file mode 100644 index 000000000..c7384000d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/process.py @@ -0,0 +1,273 @@ +import os +import tempfile +from typing import Dict, Tuple, Optional, List, Any, Union + +if os.environ.get("CM_TVM_FRONTEND_FRAMEWORK", None) == "pytorch": + import torch + import torchvision + +import tvm +from tvm import relay, meta_schedule +from tvm.driver.tvmc.frontends import load_model + + +def get_shape_dict_from_onnx( + shape: List[int], + model_path: str +) -> Dict[str, List[int]]: + import onnx + onnx_model = onnx.load(model_path) + if len(shape) == 1: + for _input in onnx_model.graph.input: + tensor_type = _input.type.tensor_type + if (tensor_type.HasField("shape")): + for dimension in tensor_type.shape.dim: + if dimension.dim_value != 0: + shape.append(dimension.dim_value) + input_all = [node.name for node in onnx_model.graph.input] + input_initializer = [node.name for node in onnx_model.graph.initializer] + net_feed_input = list(set(input_all) - set(input_initializer)) + return {input_name: shape for input_name in net_feed_input} + + +def get_mod_params( + model_path: str, + model_name: str, + batch_size: int, + frontend: str, + input_shapes_str: Optional[str] = None, + input_layer_name: Optional[str] = None, + num_channels: Optional[int] = None, + image_width: Optional[int] = None, + image_height: Optional[int] = None, + max_seq_length: Optional[int] = None +) -> Tuple[tvm.IRModule, Dict[str, tvm.nd.NDArray]]: + if not input_shapes_str and ( + not image_width or not image_height) and not max_seq_length and frontend != "onnx": + raise RuntimeError( + "Error: none of the environment variables storing the input shape are set!"
+ ) + if input_shapes_str: + shape_dict = eval( + '{' + + input_shapes_str.replace( + 'BATCH_SIZE', + str(batch_size)) + + '}') + else: + shape = [] + if image_width and image_height: + shape = [batch_size, num_channels, image_height, image_width] + elif max_seq_length: + shape = [batch_size, max_seq_length] + if frontend == "onnx": + shape_dict = get_shape_dict_from_onnx( + shape if len(shape) > 0 else [batch_size], model_path) + else: + raise RuntimeError( + "Error: Cannot find proper shapes in environment variables" + ) + print(f"Shape dict {shape_dict}") + if frontend == "pytorch": + torch_model = getattr(torchvision.models, model_name)(weights=None) + torch_model.load_state_dict(torch.load(model_path)) + torch_model.fc = torch.nn.Sequential( + torch_model.fc, + torch.nn.Softmax(dim=1) + ) + torch_model = torch_model.eval() + shape_list = list(shape_dict.items()) + input_data = torch.randn(shape_list[0][1]) + traced_model = torch.jit.trace(torch_model, input_data).eval() + mod, params = tvm.relay.frontend.from_pytorch(traced_model, shape_list) + else: + tvmc_model = load_model(path=model_path, shape_dict=shape_dict) + mod, params = tvm.relay.transform.DynamicToStatic()( + tvmc_model.mod), tvmc_model.params + + input_layer_name_file = os.path.join(os.getcwd(), "input_layer_name") + if not input_layer_name: + input_layer_name = list(shape_dict.keys())[0] + with open(input_layer_name_file, 'w') as file: + file.write(input_layer_name) + + return mod, params + + +def tune_model( + mod: tvm.IRModule, + params: Dict[str, tvm.nd.NDArray], + target: tvm.target.Target, +) -> Tuple[str, meta_schedule.database.Database]: + work_dir = os.path.join(os.getcwd(), "metaschedule_workdir") + if not os.path.exists(work_dir): + os.mkdir(work_dir) + print("Extracting tasks...") + extracted_tasks = meta_schedule.relay_integration.extract_tasks( + mod, target, params + ) + tasks, task_weights = meta_schedule.relay_integration.extracted_tasks_to_tune_contexts( + extracted_tasks, work_dir, strategy="evolutionary" + ) + + print("Begin tuning...") + evaluator_config = meta_schedule.runner.config.EvaluatorConfig( + number=1, + repeat=10, + enable_cpu_cache_flush=True + ) + database = meta_schedule.tune.tune_tasks( + tasks=tasks, + task_weights=task_weights, + work_dir=work_dir, + max_trials_global=10000, + num_trials_per_iter=64, + max_trials_per_task=512, + builder=meta_schedule.builder.LocalBuilder(), + runner=meta_schedule.runner.LocalRunner( + evaluator_config=evaluator_config + ), + ) + + return work_dir, database + + +def compile_model( + mod: tvm.IRModule, + params: Dict[str, tvm.nd.NDArray], + work_dir: str, + target: tvm.target.Target, + opt_level: int, + build_conf: Dict[str, Any], + use_vm: bool, + database: Optional[meta_schedule.database.Database] = None, +) -> Union[tvm.runtime.Module, tvm.runtime.vm.Executable]: + if work_dir != '': + if not database: + database = meta_schedule.database.JSONDatabase( + f"{work_dir}/database_workload.json", + f"{work_dir}/database_tuning_record.json", + allow_missing=False + ) + build_conf["relay.backend.use_meta_schedule"] = True + with tvm.transform.PassContext( + opt_level=opt_level, + config=build_conf + ): + lib = meta_schedule.relay_integration.compile_relay( + database=database, + mod=mod, + target=target, + params=params, + backend="vm" if use_vm else "graph" + ) + else: + with tvm.transform.PassContext( + opt_level=opt_level, + config=build_conf, + ): + if use_vm: + lib = tvm.relay.backend.vm.compile( + mod=mod, + target=target, + params=params + ) + else: + lib =
tvm.relay.build( + mod, + target=target, + params=params + ) + return lib + + +def serialize_vm( + vm_exec: tvm.runtime.vm.Executable +) -> tvm.runtime.Module: + path_consts = os.path.join( + tempfile.mkdtemp( + dir=os.getcwd(), + suffix="-tvm-tmp" + ), + "consts" + ) + code_path = os.path.join(os.getcwd(), "vm_exec_code.ro") + vm_exec.move_late_bound_consts(path_consts, byte_limit=256) + code, lib = vm_exec.save() + with open(code_path, "wb") as file: + file.write(code) + return lib + + +def main() -> None: + model_path = os.environ.get('CM_ML_MODEL_FILE_WITH_PATH', None) + compiled_model = os.path.join(os.getcwd(), 'model-tvm.so') + print('TVM model: ' + model_path) + if model_path.endswith('.so') or model_path.endswith('.dylib'): + compiled_model = model_path + if not os.path.isfile(compiled_model): + print('') + raise RuntimeError( + f"Error: Model file {compiled_model} not found!" + ) + else: + mod, params = get_mod_params( + model_path=os.environ.get('CM_ML_MODEL_FILE_WITH_PATH', None), + model_name=os.environ.get('CM_ML_MODEL', '').strip().lower(), + batch_size=int(os.environ.get('CM_ML_MODEL_MAX_BATCH_SIZE', 1)), + frontend=os.environ.get("CM_TVM_FRONTEND_FRAMEWORK", None), + input_shapes_str=os.environ.get('CM_ML_MODEL_INPUT_SHAPES', None), + input_layer_name=os.environ.get( + 'CM_ML_MODEL_INPUT_LAYER_NAME', None), + num_channels=int( + os.environ.get( + 'CM_ML_MODEL_IMAGE_NUM_CHANNELS', + 3)), + image_width=int(os.environ.get('CM_ML_MODEL_IMAGE_WIDTH', 0)), + image_height=int(os.environ.get('CM_ML_MODEL_IMAGE_HEIGHT', 0)), + max_seq_length=int( + os.environ.get( + 'CM_ML_MODEL_MAX_SEQ_LENGTH', 0)), + ) + opt_level = int(os.environ.get('CM_MLPERF_TVM_OPT_LEVEL', 3)) + target = os.environ.get( + 'CM_MLPERF_TVM_TARGET', + f"llvm -num-cores {os.environ.get('CM_HOST_CPU_TOTAL_CORES', '1')}" + ) + build_conf = {} + target_host = None + tvm_target = tvm.target.Target(target, host=target_host) + tune_model_flag = os.environ.get('CM_TUNE_TVM_MODEL', 'no') == 'yes' + work_dir = '' + database = None + use_vm = os.environ.get('CM_TVM_USE_VM', 'no') == 'yes' + if tune_model_flag: + work_dir, database = tune_model( + mod=mod, + params=params, + target=tvm_target, + ) + lib = compile_model( + mod=mod, + params=params, + work_dir=work_dir if work_dir != '' else os.environ.get( + 'CM_TUNE_TVM_MODEL_WORKDIR', ''), + target=tvm_target, + opt_level=opt_level, + build_conf=build_conf, + use_vm=use_vm, + database=database + ) + if use_vm: + lib = serialize_vm( + vm_exec=lib + ) + + with open(os.path.join(os.getcwd(), "tvm_executor"), "w") as file: + file.write("virtual_machine" if use_vm else "graph_executor") + lib.export_library(compiled_model) + print('TVM compiled model: ' + compiled_model) + + +if __name__ == "__main__": + main() diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/run.sh new file mode 100644 index 000000000..6b18e39df --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-tvm-model/run.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/process.py" + +echo $cmd + +eval $cmd diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tvm/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-tvm/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-tvm/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. 
+ +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tvm/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-tvm/README-extra.md new file mode 100644 index 000000000..ae5cc929e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-tvm/README-extra.md @@ -0,0 +1,5 @@ +```bash +cm run script "get llvm" --version=14.0.0 +cm run script "get tvm _llvm" --version=0.10.0 +cm run script "python app image-classification tvm-onnx" +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tvm/README.md b/cmx4mlops/cmx4mlops/repo/script/get-tvm/README.md new file mode 100644 index 000000000..7120db0db --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-tvm/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/get-tvm](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/get-tvm) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tvm/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-tvm/_cm.yaml new file mode 100644 index 000000000..75f2a174c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-tvm/_cm.yaml @@ -0,0 +1,73 @@ +alias: get-tvm +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML frameworks +default_env: + CM_GIT_CHECKOUT: main + CM_GIT_URL: https://github.com/apache/tvm + CM_TVM_PIP_INSTALL: 'no' +deps: +- tags: cmake,get-cmake + version_min: '3.18' +- tags: detect,cpu +- tags: get,generic-python-lib,_typing_extensions +- tags: get,generic-python-lib,_decorator +- tags: get,generic-python-lib,_scipy +- tags: get,generic-python-lib,_attrs +- tags: get,generic-python-lib,_psutil +extra_cache_tags_from_env: +- env: CM_LLVM_CACHE_TAGS + prefix: llvm- +new_env_keys: +- TVM_HOME +- CM_TVM_* +- +LD_LIBRARY_PATH +- +DYLD_FALLBACK_LIBRARY_PATH +- +PYTHONPATH +tags: +- get +- tvm +- get-tvm +uid: 93c89140e6224f4b +variations: + cuda: + deps: + - tags: get,cuda + env: + CM_TVM_USE_CUDA: 'yes' + llvm: + deps: + - names: + - llvm + tags: get,llvm + version_min: 14.0.0 + env: + CM_TVM_USE_LLVM: 'yes' + group: installation-type + openmp: + env: + CM_TVM_USE_OPENMP: 'yes' + pip-install: + default: true + deps: + - tags: get,generic-python-lib,_apache-tvm + env: + CM_TVM_PIP_INSTALL: 'yes' + group: installation-type +versions: + main: + env: + CM_GIT_CHECKOUT: main + v0.10.0: + env: + CM_GIT_CHECKOUT: v0.10.0 + v0.7.0: + env: + CM_GIT_CHECKOUT: v0.7.0 + v0.8.0: + env: + CM_GIT_CHECKOUT: v0.8.0 + v0.9.0: + env: + CM_GIT_CHECKOUT: v0.9.0 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tvm/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-tvm/customize.py new file mode 100644 index 000000000..b6bdaf5c9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-tvm/customize.py @@ -0,0 +1,61 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + return {'return': 
0} + + +def postprocess(i): + + os_info = i['os_info'] + + env = i['env'] + if env.get('CM_TVM_PIP_INSTALL', '') == "yes": + return {'return': 0} + + tvm_home = env['TVM_HOME'] + +# 20221024: we save and restore env in the main script and can clean env here for determinism +# if '+PYTHONPATH' not in env: env['+PYTHONPATH']=[] + env['+PYTHONPATH'] = [] + + env['+PYTHONPATH'].append(os.path.join(tvm_home, 'python')) + + # Prepare paths + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', + '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + env[key] = [] + + # Include + include_path = os.path.join(tvm_home, 'include') + if os.path.isdir(include_path): + if os_info['platform'] != 'windows': + env['+C_INCLUDE_PATH'].append(include_path) + env['+CPLUS_INCLUDE_PATH'].append(include_path) + + env['CM_TVM_PATH_INCLUDE'] = include_path + + # Lib + lib_path = os.path.join(tvm_home, 'build') + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + env['CM_TVM_PATH_LIB'] = lib_path + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-tvm/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-tvm/run.sh new file mode 100644 index 000000000..e7c492058 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-tvm/run.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +CUR_DIR=$PWD + +if [ "${CM_TVM_PIP_INSTALL}" != "no" ]; then + exit 0; +fi + +echo "******************************************************" +echo "Path for TVM: ${CUR_DIR}" +echo "" + +if [ ! -d "tvm" ]; then + echo "git clone --recursive -b ${CM_GIT_CHECKOUT} ${CM_GIT_URL} tvm" + git clone --recursive -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} tvm + test $? -eq 0 || exit 1 +fi + +cd tvm +if [ "${CM_GIT_SHA}" != "" ]; then + echo "git checkout ${CM_GIT_SHA}" + git checkout ${CM_GIT_SHA} + test $? -eq 0 || exit 1 +fi + + +if [ ! -d "${CUR_DIR}/tvm/build" ]; then + echo "******************************************************" + echo "Configuring TVM ..." + echo "" + + mkdir -p "${CUR_DIR}/tvm/build" + + cp cmake/config.cmake ${CUR_DIR}/tvm/build + + cd ${CUR_DIR}/tvm/build + + if [[ ${CM_TVM_USE_LLVM} == "yes" ]]; then + if [[ -z "${CM_LLVM_INSTALLED_PATH}" ]]; then + llvm_version=$(echo "${CM_LLVM_CLANG_VERSION}" | cut -d. -f1) + sed -i.bak "s|set(USE_LLVM OFF)|set(USE_LLVM llvm-config-$llvm_version)|" config.cmake + else + sed -i.bak "s|set(USE_LLVM OFF)|set(USE_LLVM ${CM_LLVM_INSTALLED_PATH}/llvm-config)|" config.cmake + fi + fi + + if [[ ${CM_TVM_USE_OPENMP} == "yes" ]]; then + sed -i.bak 's/set(USE_OPENMP none)/set(USE_OPENMP gnu)/' config.cmake + fi + + if [[ ${CM_TVM_USE_CUDA} == "yes" ]]; then + sed -i.bak 's/set(USE_CUDA OFF)/set(USE_CUDA ON)/' config.cmake + echo 'set(USE_CUDA ON)' >> config.cmake + fi + + cmake .. + test $? -eq 0 || exit 1 +fi + +CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}} +CM_MAKE_CORES=${CM_MAKE_CORES:-2} + +echo "******************************************************" +echo "Building TVM using ${CM_MAKE_CORES} cores ..." +echo "" + +cd ${CUR_DIR}/tvm/build + +make -j${CM_MAKE_CORES} +test $? -eq 0 || exit 1 + +INSTALL_DIR=$PWD + +cd ../../ + +echo "TVM_HOME=$PWD/tvm" > tmp-run-env.out +echo "CM_TVM_INSTALLED_PATH=$PWD/tvm" >> tmp-run-env.out + +echo "******************************************************" +echo "TVM was built and installed to ${INSTALL_DIR} ..."
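As a usage sketch, the two installation types map to the variations declared in `_cm.yaml` above, and the version strings follow its `versions:` list (the exact commands below are illustrative):

```bash
# Default: install TVM as a pip package (the "pip-install" variation):
cm run script --tags=get,tvm,_pip-install

# Build TVM from source against LLVM instead:
cm run script --tags=get,tvm,_llvm --version=v0.10.0

# Source build with CUDA and OpenMP enabled:
cm run script --tags=get,tvm,_llvm,_cuda,_openmp
```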
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/README.md b/cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/README.md new file mode 100644 index 000000000..5a27b3a0b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-xilinx-sdk](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-xilinx-sdk) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/_cm.yaml new file mode 100644 index 000000000..745020214 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/_cm.yaml @@ -0,0 +1,27 @@ +alias: get-xilinx-sdk +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_version: '2019.1' +deps: [] +input_description: {} +input_mapping: + input: CM_XILINX_SDK_FILE_PATH +new_env_keys: +- CM_XILINX_* +- +PATH +new_state_keys: [] +post_deps: [] +posthook_deps: [] +tags: +- get +- xilinx +- sdk +uid: 76d4d1bd09df4490 +variations: {} +versions: + '2019.1': + env: + CM_DOWNLOAD_CHECKSUM: 7ccb3840d36c305a7cb34b314db7d7f2 + CM_DOWNLOAD_URL: https://www.xilinx.com/member/forms/download/xef.html?filename=Xilinx_SDK_2019.1_0524_1430_Lin64.bin diff --git a/cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/customize.py new file mode 100644 index 000000000..b6650af14 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/customize.py @@ -0,0 +1,45 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + file_path = env.get("CM_XILINX_SDK_BIN_PATH") + if not file_path or not os.path.exists(file_path): + return {'return': 1, 'error': 'CM_XILINX_SDK_BIN_PATH is not set or does not exist'} + + bin_folder_path = os.path.dirname(file_path) + if '+PATH' in env: + env['+PATH'].append(bin_folder_path) + else: + env['+PATH'] = [bin_folder_path] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/run.sh new file mode 100644 index 000000000..3a584c10c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-xilinx-sdk/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +#CM Script location:
${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zendnn/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-zendnn/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zendnn/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zendnn/README.md b/cmx4mlops/cmx4mlops/repo/script/get-zendnn/README.md new file mode 100644 index 000000000..16669f1aa --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zendnn/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-zendnn](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/get-zendnn) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zendnn/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-zendnn/_cm.yaml new file mode 100644 index 000000000..865850559 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zendnn/_cm.yaml @@ -0,0 +1,27 @@ +alias: get-zendnn +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +deps: +- names: + - aocl + tags: get,amd,aocl +- tags: get,lib,blis,_amd +- tags: detect,cpu +- tags: get,git,_repo.https://github.com/amd/ZenDNN.git +input_description: {} +input_mapping: {} +new_env_keys: [] +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- get +- zendnn +- amd +- from.src +uid: d1c6feb0ee684b09 +variations: {} +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zendnn/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-zendnn/customize.py new file mode 100644 index 000000000..e32b8123d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zendnn/customize.py @@ -0,0 +1,40 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + env['ZENDNN_BLIS_PATH'] = env['CM_BLIS_INSTALL_PATH'] + env['ZENDNN_LIBM_PATH'] = env['CM_AOCL_BUILD_PATH'] + + env['ZENDNN_SRC_PATH'] = env['CM_GIT_REPO_CHECKOUT_PATH'] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zendnn/run.bat b/cmx4mlops/cmx4mlops/repo/script/get-zendnn/run.bat new file mode 
100644 index 000000000..648302ca7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zendnn/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zendnn/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-zendnn/run.sh new file mode 100644 index 000000000..58026fcbb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zendnn/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +cd ${ZENDNN_SRC_PATH} + +make clean +test $? -eq 0 || exit $? + +source scripts/zendnn_build.sh gcc +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/README-extra.md new file mode 100644 index 000000000..3c139b607 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/README-extra.md @@ -0,0 +1,19 @@ +# GET-ZEPHYR-SDK +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) installs the [Zephyr-SDK](https://github.com/zephyrproject-rtos/sdk-ng/releases) from a prebuilt binary. + +## Install +```bash +cm run script --tags=get,zephyr-sdk --version=0.13.2 +``` +## Exported Variables +1. [ZEPHYR_SDK_INSTALL_DIR](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-zephyr-sdk/customize.py#L13): Location in CM cache where Zephyr SDK is installed. +2. [ZEPHYR_TOOLCHAIN_VARIANT](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-zephyr-sdk/customize.py#L12) + +## Supported Versions +1. 0.13.1 +2. 0.13.2 +3. 0.15.0 + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/README.md b/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/README.md new file mode 100644 index 000000000..50bb80ae5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/TinyML-automation/get-zephyr-sdk](https://docs.mlcommons.org/cm4mlops/scripts/TinyML-automation/get-zephyr-sdk) for the documentation of this CM script. 
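For reference, the `run.sh` that follows assembles the installer URL from the SDK version and the host OS/architecture detected by CM. A minimal bash sketch of that logic, with illustrative values substituted for the `CM_HOST_*` variables:

```bash
#!/bin/bash
# Sketch: how the Zephyr SDK installer URL is derived (values are illustrative)
version=0.13.2    # from CM_ZEPHYR_SDK_VERSION
os=linux          # from CM_HOST_OS_TYPE (on darwin, CM_HOST_OS_FLAVOR is used)
platform=x86_64   # from CM_HOST_OS_MACHINE (arm64 is mapped to aarch64)
file=zephyr-sdk-${version}-${os}-${platform}-setup.run
echo "https://github.com/zephyrproject-rtos/sdk-ng/releases/download/v${version}/${file}"
```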
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/_cm.yaml new file mode 100644 index 000000000..5a7a97862 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/_cm.yaml @@ -0,0 +1,25 @@ +alias: get-zephyr-sdk +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: TinyML automation +clean_files: [] +default_version: 0.13.2 +deps: +- tags: detect,os +new_env_keys: +- ZEPHYR_* +tags: +- get +- zephyr-sdk +uid: c70ae1a7567f4a7b +versions: + 0.13.1: + env: + CM_ZEPHYR_SDK_VERSION: 0.13.1 + 0.13.2: + env: + CM_ZEPHYR_SDK_VERSION: 0.13.2 + 0.15.0: + env: + CM_ZEPHYR_SDK_VERSION: 0.15.0 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/customize.py new file mode 100644 index 000000000..13559e2a0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/customize.py @@ -0,0 +1,28 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + env = i['env'] + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + env['ZEPHYR_TOOLCHAIN_VARIANT'] = "zephyr" + env['ZEPHYR_SDK_INSTALL_DIR'] = os.path.join( + os.getcwd(), "zephyr-sdk-" + env['CM_ZEPHYR_SDK_VERSION']) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/run.sh new file mode 100644 index 000000000..07c55e078 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zephyr-sdk/run.sh @@ -0,0 +1,21 @@ +#!/bin/bash +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} +version=${CM_ZEPHYR_SDK_VERSION} +os=${CM_HOST_OS_TYPE} +if [ $os == "darwin" ]; then + os=${CM_HOST_OS_FLAVOR} +fi +platform=${CM_HOST_OS_MACHINE} +if [ $platform == "arm64" ]; then + platform=aarch64 +fi + +file=zephyr-sdk-${version}-${os}-${platform}-setup.run +url=https://github.com/zephyrproject-rtos/sdk-ng/releases/download/v${version}/$file +wget -nc "${url}" +if [ "${?}" != "0" ]; then exit 1; fi +chmod +x $file +./$file -- -d $PWD/zephyr-sdk-$version -y + +if [ "${?}" != "0" ]; then exit 1; fi + diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zephyr/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/get-zephyr/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zephyr/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
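Once installed, the `customize.py` above exports `ZEPHYR_SDK_INSTALL_DIR` and `ZEPHYR_TOOLCHAIN_VARIANT` for dependent scripts via the CM cache. Assuming the standard CM CLI, the cached installation can be inspected or removed as follows (commands are illustrative):

```bash
# Show the cache entry created by get-zephyr-sdk:
cm show cache --tags=get,zephyr-sdk

# Force a clean reinstall by dropping the cache entry:
cm rm cache --tags=get,zephyr-sdk -f
```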
diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zephyr/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/get-zephyr/README-extra.md new file mode 100644 index 000000000..34aa511ce --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zephyr/README-extra.md @@ -0,0 +1,8 @@ +# GET-ZEPHYR +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) installs the [Zephyr](https://github.com/zephyrproject-rtos/zephyr) real-time OS, including all needed system and Python dependencies, using its own command-line tool [west](https://docs.zephyrproject.org/latest/develop/west/index.html). +## Exported Variables +1. [CM_ZEPHYR_DIR](https://github.com/octoml/ck/blob/master/cm-mlops/script/get-zephyr/customize.py#L15): Location in the CM cache where Zephyr is installed. + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zephyr/README.md b/cmx4mlops/cmx4mlops/repo/script/get-zephyr/README.md new file mode 100644 index 000000000..ed4f90981 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zephyr/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/TinyML-automation/get-zephyr](https://docs.mlcommons.org/cm4mlops/scripts/TinyML-automation/get-zephyr) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zephyr/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/get-zephyr/_cm.yaml new file mode 100644 index 000000000..55c59aff5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zephyr/_cm.yaml @@ -0,0 +1,26 @@ +alias: get-zephyr +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: TinyML automation +clean_files: [] +default_version: v2.7 +deps: +- names: + - python3 + - python + tags: get,python3 + version_min: 3.7.0 +- tags: get,cmake + version_min: 3.20.0 +- tags: get,generic-python-lib,_west +new_env_keys: +- CM_ZEPHYR_* +tags: +- get +- zephyr +uid: d4105c2cdb044276 +versions: + v2.7: + env: + CM_ZEPHYR_VERSION: v2.7 diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zephyr/customize.py b/cmx4mlops/cmx4mlops/repo/script/get-zephyr/customize.py new file mode 100644 index 000000000..85429b22d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zephyr/customize.py @@ -0,0 +1,29 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + env = i['env'] + if '+PATH' not in env: + env['+PATH'] = [] + env['+PATH'].append("$HOME/.local/bin") + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + env['CM_ZEPHYR_DIR'] = os.path.join(os.getcwd(), "zephyr") + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zephyr/run-ubuntu.sh b/cmx4mlops/cmx4mlops/repo/script/get-zephyr/run-ubuntu.sh new file mode 100644 index 000000000..a8cb216b4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zephyr/run-ubuntu.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +sudo apt-get install -y --no-install-recommends gcc-multilib g++-multilib libsdl2-dev +.
${CM_TMP_CURRENT_SCRIPT_PATH}/run.sh diff --git a/cmx4mlops/cmx4mlops/repo/script/get-zephyr/run.sh b/cmx4mlops/cmx4mlops/repo/script/get-zephyr/run.sh new file mode 100644 index 000000000..7d5b53245 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/get-zephyr/run.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} +CM_PYTHON_BIN_WITH_PATH=${CM_PYTHON_BIN_WITH_PATH:-python3} + +CUR=`pwd` + +if [ "${?}" != "0" ]; then exit 1; fi + +if [ ! -d "zephyr" ]; then + west init --mr ${CM_ZEPHYR_VERSION}-branch $CUR + if [ "${?}" != "0" ]; then exit 1; fi +fi + +cd $CUR/zephyr +west update +if [ "${?}" != "0" ]; then exit 1; fi +west zephyr-export +if [ "${?}" != "0" ]; then exit 1; fi +${CM_PYTHON_BIN_WITH_PATH} -m pip install -r $CUR/zephyr/scripts/requirements.txt +if [ "${?}" != "0" ]; then exit 1; fi + diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/gui/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/README-about.md b/cmx4mlops/cmx4mlops/repo/script/gui/README-about.md new file mode 100644 index 000000000..064b0f257 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/README-about.md @@ -0,0 +1,15 @@ +This CM script provides a unified GUI to run CM scripts using the [Streamlit library](https://streamlit.io). + +If you want to run it in the cloud (Azure, AWS, GCP), you need to open a port and check that you can reach it from outside. + +By default, Streamlit uses port 8501, but you can change it as follows: + +```bash +cm run script "cm gui" --port 80 +``` + +If you have trouble accessing this port, use this simple Python module to check whether the port is open: +```bash +python3 -m http.server 80 +``` + diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/README.md b/cmx4mlops/cmx4mlops/repo/script/gui/README.md new file mode 100644 index 000000000..f0083fa0e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/GUI/gui](https://docs.mlcommons.org/cm4mlops/scripts/GUI/gui) for the documentation of this CM script.
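The `input_mapping:` section of `_cm.yaml` below translates CLI flags into `CM_GUI_*` environment variables, which `customize.py` then turns into Streamlit server options. A hypothetical invocation combining them (flag values are illustrative):

```bash
# Launch the "graph" app for a chosen script on a custom address and port,
# headless (no browser):
cm run script "cm gui _graph" \
      --script="get,tvm" \
      --address=0.0.0.0 \
      --port=8080 \
      --no_browser=yes
```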
diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/gui/_cm.yaml new file mode 100644 index 000000000..efa578d65 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/_cm.yaml @@ -0,0 +1,106 @@ +# Identification of this CM script +alias: gui +uid: 605cac42514a4c69 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "GUI" + +developers: "[Grigori Fursin](https://cKnowledge.org/gfursin)" + +# User-friendly tags to find this CM script +tags: + - cm + - gui + - cm-gui + - script-gui + - cm-script-gui + - streamlit + +# Map script inputs to environment variables +input_mapping: + script: CM_GUI_SCRIPT_TAGS + prefix: CM_GUI_SCRIPT_PREFIX_LINUX + port: CM_GUI_PORT + address: CM_GUI_ADDRESS + title: CM_GUI_TITLE + no_browser: CM_GUI_NO_BROWSER + no_run: CM_GUI_NO_RUN + app: CM_GUI_APP + exp_tags: CM_GUI_GRAPH_EXPERIMENT_TAGS + exp_name: CM_GUI_GRAPH_EXPERIMENT_NAME + exp_max_results: CM_GUI_GRAPH_EXPERIMENT_MAX_RESULTS + exp_uid: CM_GUI_GRAPH_EXPERIMENT_RESULT_UID + exp_title: CM_GUI_GRAPH_EXPERIMENT_TITLE + exp_key_x: CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_X + exp_key_y: CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_Y + exp_key_c: CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_C + exp_key_s: CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_S + +default_env: + CM_GUI_EXTRA_CMD: "" + CM_GUI_SCRIPT_PREFIX_LINUX: "gnome-terminal --" + CM_GUI_APP: app + +# Dependencies on other CM scripts +deps: + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect/install python + - tags: get,python + names: + - python + - python3 + + - tags: get,generic-python-lib,_cmind + - tags: get,generic-python-lib,_streamlit + +# Variations to customize dependencies +variations: + main: + group: + app + env: + CM_GUI_APP: 'app' + + graph: + group: + app + env: + CM_GUI_APP: 'graph' + prehook_deps: + - tags: get,generic-python-lib,_matplotlib + - tags: get,generic-python-lib,_mpld3 + + playground: + group: + app + env: + CM_GUI_APP: 'playground' + prehook_deps: + - tags: get,generic-python-lib,_matplotlib + - tags: get,generic-python-lib,_mpld3 + - tags: get,generic-python-lib,_streamlit_option_menu + - tags: get,generic-python-lib,_numpy + - tags: get,generic-python-lib,_pandas + - tags: get,generic-python-lib,_package.plotly + - tags: get,generic-python-lib,_package.streamlit-aggrid + + chatgpt: + group: + app + env: + CM_GUI_APP: 'chatgpt' + + +input_description: + script: "script tags" + app: "gui app" diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/app.py b/cmx4mlops/cmx4mlops/repo/script/gui/app.py new file mode 100644 index 000000000..c5928994f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/app.py @@ -0,0 +1,73 @@ +# Developer(s): Grigori Fursin + +import streamlit as st +import os +import cmind + +import misc + + +def main(): + + query_params = misc.get_params(st) + + script_path = os.environ.get('CM_GUI_SCRIPT_PATH', '') + script_alias = os.environ.get('CM_GUI_SCRIPT_ALIAS', '') + title = os.environ.get('CM_GUI_TITLE', '') + + # Check if script tags are specified from CMD + script_tags = os.environ.get('CM_GUI_SCRIPT_TAGS', '').strip() + + script_tags_from_url = query_params.get('tags', ['']) + if len(script_tags_from_url) > 0: + x_script_tags_from_url = script_tags_from_url[0].strip() + if x_script_tags_from_url != '': + script_tags = x_script_tags_from_url + + meta = {} + + if script_tags != '': + # Check type of tags + if ' ' in script_tags: + script_tags = 
script_tags.replace(' ', ',') + + print('Searching CM scripts using tags "{}"'.format(script_tags)) + + r = cmind.access({'action': 'find', + 'automation': 'script,5b4e0237da074764', + 'tags': script_tags}) + if r['return'] > 0: + return r + + lst = r['list'] + + if len(lst) == 1: + script = lst[0] + meta = script.meta + script_path = script.path + script_alias = meta['alias'] + + # Read meta + if len(meta) == 0 and script_path != '' and os.path.isdir(script_path): + fn = os.path.join(script_path, '_cm') + r = cmind.utils.load_yaml_and_json(fn) + if r['return'] == 0: + meta = r['meta'] + # script_path is already set above + script_alias = meta['alias'] + + import script + + ii = {'st': st, + 'params': query_params, + 'script_path': script_path, + 'script_alias': script_alias, + 'script_tags': script_tags, + 'script_meta': meta, + 'skip_bottom': False} + + return script.page(ii) + + +if __name__ == "__main__": + main() diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/customize.py b/cmx4mlops/cmx4mlops/repo/script/gui/customize.py new file mode 100644 index 000000000..9b25588a9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/customize.py @@ -0,0 +1,77 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +# Developer(s): Grigori Fursin + +from cmind import utils + +import os +import json
import shutil +import subprocess + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + script_path = i['run_script_input']['path'] + + cm = i['automation'].cmind + + script_tags = env.get('CM_GUI_SCRIPT_TAGS', '') + + if script_tags != '': + # Check type of tags + if ' ' in script_tags: + script_tags = script_tags.replace(' ', ',') + + print('Searching CM scripts using tags "{}"'.format(script_tags)) + + r = cm.access({'action': 'find', + 'automation': 'script', + 'tags': script_tags}) + if r['return'] > 0: + return r + + lst = r['list'] + + if len(lst) == 1: + script = lst[0] + env['CM_GUI_SCRIPT_PATH'] = script.path + env['CM_GUI_SCRIPT_ALIAS'] = script.meta['alias'] + + print('Script found in path {}'.format(script.path)) + + env['CM_GUI_SCRIPT_TAGS'] = script_tags + + # Check other vars and assemble extra CMD + extra_cmd = env.get('CM_GUI_EXTRA_CMD', '') + + port = env.get('CM_GUI_PORT', '') + address = env.get('CM_GUI_ADDRESS', '') + no_browser = env.get('CM_GUI_NO_BROWSER', '') + + if no_browser != '': + extra_cmd += ' --server.headless true' + + if address != '': + extra_cmd += ' --server.address=' + address + + if port != '': + extra_cmd += ' --server.port=' + port + + env['CM_GUI_EXTRA_CMD'] = extra_cmd + + print('Prepared extra CMD for streamlit: {}'.format(extra_cmd)) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/graph.py b/cmx4mlops/cmx4mlops/repo/script/gui/graph.py new file mode 100644 index 000000000..30581dd09 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/graph.py @@ -0,0 +1,827 @@ +# Developer(s): Grigori Fursin + +import cmind +import os
+import misc + +import streamlit.components.v1 as components + +import streamlit as st + +import matplotlib +import matplotlib.pyplot as plt +import matplotlib.colors as mcolors + 
+import numpy as np +import pandas as pd + +import mpld3 +from mpld3 import plugins +from mpld3 import utils + +security = ['os.', 'streamlit.', 'matplotlib.', 'numpy.', 'pandas.', 'mpld3.'] + + +repro_badges = { + 'acm_ctuning_repro_badge_functional': {'img': 'https://cTuning.org/images/artifacts_evaluated_functional_v1_1_small.png'}, + 'acm_ctuning_repro_badge_reproduce': {'img': 'https://cTuning.org/images/results_reproduced_v1_1_small.png'}, + 'acm_ctuning_repro_badge_support_docker': {'img': 'https://cTuning.org/images/docker_logo2_small.png'}, + 'acm_ctuning_repro_badge_cm_interface': {'img': 'https://cTuning.org/images/logo-ck-single-tr4.png'} +} + + +class OpenBrowserOnClick(mpld3.plugins.PluginBase): + + JAVASCRIPT = """ + + mpld3.register_plugin("openbrowseronclick", PointClickableHTMLTooltip); + + PointClickableHTMLTooltip.prototype = Object.create(mpld3.Plugin.prototype); + PointClickableHTMLTooltip.prototype.constructor = PointClickableHTMLTooltip; + PointClickableHTMLTooltip.prototype.requiredProps = ["id"]; + PointClickableHTMLTooltip.prototype.defaultProps = {targets:null}; + + function PointClickableHTMLTooltip(fig, props){ + mpld3.Plugin.call(this, fig, props); + }; + + PointClickableHTMLTooltip.prototype.draw = function(){ + var obj = mpld3.get_element(this.props.id); + var targets = this.props.targets; + obj.elements() + .on("mousedown", function(d, i){ + window.open(targets[i]); + }); + }; + + """ + + def __init__(self, points, targets=None): + self.points = points + self.targets = targets + self.dict_ = {"type": "openbrowseronclick", + "id": mpld3.utils.get_id(points, None), + "targets": targets} + + +def main(): + + params = misc.get_params(st) + + # Set title + st.title('CM experiment visualization') + + return visualize(st, params) + + +def visualize(st, query_params, action=''): + + # Query experiment + result_uid = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_RESULT_UID', '') + q_result_uid = query_params.get('result_uid', ['']) + if len(q_result_uid) > 0: + if q_result_uid[0] != '': + result_uid = q_result_uid[0] + + v_experiment_name = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_NAME', '') + q_experiment_name = query_params.get('name', ['']) + if len(q_experiment_name) > 0: + if q_experiment_name[0] != '': + v_experiment_name = q_experiment_name[0] + + v_experiment_tags = '' + if v_experiment_name == '': + v_experiment_tags = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_TAGS', '') + q_experiment_tags = query_params.get('tags', ['']) + if len(q_experiment_tags) > 0: + if q_experiment_tags[0] != '': + v_experiment_tags = q_experiment_tags[0] + v_experiment_tags = v_experiment_tags.replace(',', ' ') + + # Check default +# if v_experiment_tags == '' and v_experiment_name == '': +# v_experiment_tags = 'mlperf-inference v4.0' + + v_experiment_tags = st.text_input( + 'Select CM experiment tags separated by space:', + value=v_experiment_tags, + key='v_experiment_tags').strip() + v_experiment_tags = v_experiment_tags.replace(',', ' ') + + # Get all experiment names + ii = {'action': 'find', + 'automation': 'experiment,a0a2d123ef064bcb'} + + # If name is given, do not use tags + if v_experiment_name != '': + ii['artifact'] = v_experiment_name + elif v_experiment_tags != '': + ii['tags'] = v_experiment_tags.replace(' ', ',') + + r = cmind.access(ii) + if r['return'] > 0: + return r + + lst_all = r['list'] + + experiments = [''] + + selection = 0 + index = 1 + for l in sorted(lst_all, key=lambda x: ( + ','.join(x.meta.get('tags', [])), + x.meta.get('alias', ''), + x.meta['uid'] + )): + 
+ meta = l.meta + + if v_experiment_name != '' and ( + v_experiment_name == meta['alias'] or v_experiment_name == meta['uid']): + selection = index + + name = ' '.join(meta.get('tags', [])) + if name == '': + name = meta.get('alias', '') + if name == '': + name = meta['uid'] + + experiments.append(name) + + index += 1 + + if len(lst_all) == 1: + selection = 1 + + # Show experiment artifacts + experiment = st.selectbox('Select experiment from {} found:'.format(len(experiments) - 1), + range(len(experiments)), + format_func=lambda x: experiments[x], + index=selection, + key='experiment') + + lst = [lst_all[experiment - 1]] if experiment > 0 else lst_all + + if len(lst) > 8: + st.markdown('Too many experiments - continue pruning ...') + return {'return': 0} + + # Check experiments + results = [] + results_with_password = [] + passwords = [] + results_meta = {} + + for experiment in lst: + path = experiment.path + + for d in os.listdir(path): + path2 = os.path.join(path, d) + if os.path.isdir(path2): + path_to_result = os.path.join(path, d, 'cm-result.json') + + if os.path.isfile(path_to_result): + emeta = experiment.meta + + desc = {'path': path_to_result, + 'experiment_dir': d, + 'experiment_uid': emeta['uid'], + 'experiment_alias': emeta['alias'], + 'experiment_tags': ','.join(emeta.get('tags', []))} + + add = True + if result_uid != '': + add = False + r = cmind.utils.load_json(path_to_result) + if r['return'] == 0: + meta = r['meta'] + + results_meta[path_to_result] = meta + + for m in meta: + if m.get('uid', '') == result_uid: + add = True + break + + if add: + pwd = experiment.meta.get('password_hash', '') + if pwd == '': + results.append(desc) + else: + desc['password_hash'] = pwd + + if pwd not in passwords: + passwords.append(pwd) + + results_with_password.append(desc) + + # Check if password + if len(passwords) > 0: + password = st.text_input( + 'Some results are protected by password. 
Enter password to unlock them:', + value='', + key='v_experiment_pwd').strip() + + if password != '': + import bcrypt + # salt = bcrypt.gensalt() + # TBD: temporal hack to demo password protection for experiments + # salt = bcrypt.gensalt() + password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e' + password_hash2 = bcrypt.hashpw( + password.encode('utf-8'), + password_salt).decode('utf-8') + + for result in results_with_password: + if result['password_hash'] == password_hash2: + results.append(result) + + # How to visualize selection + if len(results) == 0: + st.markdown('No results found!') + return {'return': 0} + + if st.session_state.get('tmp_cm_results', '') == '': + st.session_state['tmp_cm_results'] = len(results) + elif int(st.session_state['tmp_cm_results']) != len(results): + st.session_state['tmp_cm_results'] = len(results) + st.session_state['how'] = 0 + + how = '' + + if result_uid == '': + v_max_results = os.environ.get( + 'CM_GUI_GRAPH_EXPERIMENT_MAX_RESULTS', '') + + if v_max_results != '' and len(results) > int(v_max_results): + st.markdown('Too many results - continue pruning ...') + return {'return': 0} + + v_how = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_HOW', '') + q_how = query_params.get('type', ['']) + if len(q_how) > 0: + if q_how[0] != '': + v_how = q_how[0] + + how_selection = ['', '2d-static', '2d', 'bar'] + how_selection_desc = [ + '', + 'Scatter plot (static)', + 'Scatter plot (interactive, slow - to be improved)', + 'Bar plot (static)'] + + how_index = 0 + if v_how != '' and v_how in how_selection: + how_index = how_selection.index(v_how) + + how2 = st.selectbox('Select how to visualize {} CM experiment set(s):'.format(len(results)), + range(len(how_selection_desc)), + format_func=lambda x: how_selection_desc[x], + index=how_index, + key='how') + + if how2 == '' or how2 == 0: + return {'return': 0} + + how = how_selection[how2] + + how = how.strip() + + # Continue visualizing + all_values = [] + keys = [] + all_data = [] + + derived_metrics_value = query_params.get( + 'derived_metrics', [''])[0].strip() + derived_metrics_value = st.text_input("Optional: add derived metrics in Python. 
Example: result['Accuracy2'] = result['Accuracy']*2", value=derived_metrics_value).strip() + + for x in security: + if x in derived_metrics_value: + derived_metrics_value = '' + break + + error_shown2 = False + for desc in results: + path_to_result = desc['path'] + + if path_to_result in results_meta: + result_meta = results_meta[path_to_result] + else: + r = cmind.utils.load_json_or_yaml(path_to_result) + if r['return'] > 0: + return r + + result_meta = r['meta'] + + for result in result_meta: + # Add extra info + for k in ['experiment_dir', 'experiment_alias', + 'experiment_uid', 'experiment_tags']: + if k in desc: + result[k] = desc[k] + + if derived_metrics_value != '': + try: + exec(derived_metrics_value) + except Exception as e: + if not error_shown2: + st.markdown( + '*Syntax error in derived metrics: {}*'.format(e)) + error_shown2 = True + + all_values.append(result) + + for k in result.keys(): + if k not in keys: + keys.append(k) + + first_keys = [ + 'Organization', + 'Model', + 'Scenario', + 'SystemName', + 'notes', + 'framework', + 'Result', + 'Result_Units', + 'Accuracy'] + sorted_keys = [k for k in first_keys if k in keys] + \ + [k for k in sorted(keys, key=lambda s: s.lower()) + if k not in first_keys] + + filter_value = query_params.get('filter', [''])[0].strip() + if result_uid == '': # and filter_value!='': + filter_value = st.text_input( + "Optional: add result filter in Python. Examples: result['Accuracy']>75 or 'llama2' in result['Model']", + value=filter_value).strip() + + st.markdown('---') + + for x in security: + if x in filter_value: + filter_value = '' + break + + # all_values is a list of dictionaries with all keys + error_shown = False + for result in all_values: + + if filter_value != '': + try: + if not eval(filter_value): + continue + except Exception as e: + if not error_shown: + st.markdown('*Syntax error in filter: {}*'.format(e)) + error_shown = True + + # Check if 1 result UID is selected + if result_uid != '' and result.get('uid', '') != result_uid: + continue + + data = [] + for k in sorted_keys: + data.append(result.get(k)) + + all_data.append(data) + + if result_uid != '': + break + + ################################################### + if len(all_data) == 0: + st.markdown('No results found for your selection.') + return {'return': 0} + + ################################################### + # If experiment found and 1 UID, print a table + if result_uid != '': + st.markdown('---') + st.markdown('# Result summary') + + data = all_data[0] + + result = {} + + j = 0 + for k in sorted_keys: + result[k] = data[j] + j += 1 + + # Check badges + x = '' + + for k in repro_badges: + if result.get(k, False): + img = repro_badges[k]['img'] + + x += '<img src="{}">\n'.format( + img) + + if x != '': + st.write( '<center>\n' + x + '\n</center>
    \n', + unsafe_allow_html=True) + + x = '' + for k in sorted_keys: + x += '* **{}**: {}\n'.format(k, str(result[k])) + + st.markdown(x) + + # Check associated reports + r = cmind.access({'action': 'find', + 'automation': 'report,6462ecdba2054467', + 'tags': 'result-{}'.format(result_uid)}) + if r['return'] > 0: + return r + + lst = r['list'] + + for l in lst: + report_path = l.path + + f1 = os.path.join(report_path, 'README.md') + if os.path.isfile(f1): + report_meta = l.meta + + report_alias = report_meta['alias'] + report_title = report_meta.get('title', '') + + report_name = report_title if report_title != '' else report_alias + + r = cmind.utils.load_txt(f1) + if r['return'] > 0: + return r + + s = r['string'] + + st.markdown('---') + st.markdown('### ' + report_name) + + st.markdown(s) + + # Create self link + st.markdown("""---""") + + experiment_alias_or_uid = result['experiment_uid'] + + end_html = ''' +
    + <center><a href="{}&result_uid={}">Self link</a></center> +
    + '''.format(misc.make_url(experiment_alias_or_uid, action=action, md=False), result_uid) + + st.write(end_html, unsafe_allow_html=True) + + return {'return': 0} + + ################################################### + # Select 2D keys + axis_key_x = '' + axis_key_y = '' + axis_key_c = '' + + if len(keys) > 0: + keys = [''] + sorted_keys + + axis_key_x = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_X', '') + q_axis_key_x = query_params.get('x', ['']) + if len(q_axis_key_x) > 0: + if q_axis_key_x[0] != '': + axis_key_x = q_axis_key_x[0] + i_axis_key_x = 0 + if axis_key_x != '' and axis_key_x in keys: + i_axis_key_x = keys.index(axis_key_x) + if axis_key_x == '' and 'Result' in keys: + i_axis_key_x = keys.index('Result') + axis_key_x = st.selectbox( + 'Select X key', keys, index=i_axis_key_x, key='x') + + axis_key_y = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_Y', '') + q_axis_key_y = query_params.get('y', ['']) + if len(q_axis_key_y) > 0: + if q_axis_key_y[0] != '': + axis_key_y = q_axis_key_y[0] + i_axis_key_y = 0 + if axis_key_y != '' and axis_key_y in keys: + i_axis_key_y = keys.index(axis_key_y) + if axis_key_y == '' and 'Accuracy' in keys: + i_axis_key_y = keys.index('Accuracy') + axis_key_y = st.selectbox( + 'Select Y key', keys, index=i_axis_key_y, key='y') + + axis_key_c = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_C', '') + q_axis_key_c = query_params.get('c', ['']) + if len(q_axis_key_c) > 0: + if q_axis_key_c[0] != '': + axis_key_c = q_axis_key_c[0] + i_axis_key_c = 0 + if axis_key_c != '' and axis_key_c in keys: + i_axis_key_c = keys.index(axis_key_c) + if axis_key_c == '' and 'version' in keys: + i_axis_key_c = keys.index('version') + axis_key_c = st.selectbox( + 'Select Color key', + keys, + index=i_axis_key_c, + key='c') + + axis_key_s = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_S', '') + q_axis_key_s = query_params.get('s', ['']) + if len(q_axis_key_s) > 0: + axis_key_s = q_axis_key_s[0] + i_axis_key_s = 0 + if axis_key_s != '' and axis_key_s in keys: + i_axis_key_s = keys.index(axis_key_s) + axis_key_s = st.selectbox( + 'Select Style key', + keys, + index=i_axis_key_s, + key='s') + + # Select values + values = [] + + if axis_key_x != '' and axis_key_y != '': + for v in all_values: + x = v.get(axis_key_x, None) + y = v.get(axis_key_y, None) + + if x is not None and y is not None: + values.append(v) + + if len(values) > 0: + + # fig, ax = plt.subplots(figsize=(12,6)) + fig, ax = plt.subplots() # figsize=(6,4)) + + ax.set_xlabel(axis_key_x) + ax.set_ylabel(axis_key_y) + + title = os.environ.get('CM_GUI_GRAPH_EXPERIMENT_TITLE', '') + q_title = query_params.get('title', ['']) + if len(q_title) > 0: + if q_title[0] != '': + title = q_title[0] + ax.set_title(title, size=16) + + if how == 'bar': + ax.set_title('Under development ...', size=16) + ax.yaxis.grid(linestyle='dotted') + else: + ax.grid(linestyle='dotted') + # https://matplotlib.org/stable/api/markers_api.html + + unique_color_values = {} +# unique_colors = list(mcolors.CSS4_COLORS.keys()) + unique_colors = list(mcolors.TABLEAU_COLORS.keys()) + i_unique_color_values = 0 + + unique_style_values = {} +# unique_styles = ['o','v','^','<','>','1','2','3','4','8','s','p','P','*','+','D'] + unique_styles = ['circle', 'square', 'diamond', 'cross', 'x', 'triangle', 'pentagon', 'hexagram', + 'star', 'hourglass', 'bowtie', 'asterisk', 'hash'] + i_unique_style_values = 0 + + # If Bar, use Style to separate results + unique_x_values = [] + unique_s_values = [] + + experiment_uids = [] + + # Filter values + 
values2 = [] + + for result in values: + if filter_value != '': + try: + if not eval(filter_value): + continue + except Exception as e: + if not error_shown: + st.markdown('*Syntax error in filter: {}*'.format(e)) + error_shown = True + + values2.append(result) + + if how == 'bar': + x = result.get(axis_key_x, None) + if x is not None and x != '' and x not in unique_x_values: + unique_x_values.append(x) + + s = result.get(axis_key_s, None) + if s is not None and s != '' and s not in unique_s_values: + unique_s_values.append(s) + + ####################################################################### + # Continue visualizing + if how == '2d-static' or how == 'bar': + + xx = [] + yy = [] + cc = [] + ss = [] + io = [] + + t = 0 + for result in values2: + v = result + + t += 1 + + x = v.get(axis_key_x, None) + y = v.get(axis_key_y, None) + + xx.append(x) + yy.append(y) + + color = 'blue' + if axis_key_c != '': + c = v.get(axis_key_c, None) + if c is not None: + if c in unique_color_values: + color = unique_color_values[c] + else: + color = unique_colors[i_unique_color_values] + unique_color_values[c] = color + if i_unique_color_values < ( + len(unique_colors) - 1): + i_unique_color_values += 1 + + cc.append(color) + + style = 'o' + if axis_key_s != '': + s = v.get(axis_key_s, None) + if s is not None: + if s in unique_style_values: + style = unique_style_values[s] + else: + style = unique_styles[i_unique_style_values] + unique_style_values[s] = style + if i_unique_style_values < ( + len(unique_styles) - 1): + i_unique_style_values += 1 + + ss.append(style) + + info = '' + for key in sorted(v.keys(), key=lambda x: x.lower()): + value = v[key] + info += str(key) + ': ' + str(value) + '
    \n' + + io.append(info) + + import plotly.express as px + + dd = { + axis_key_x: xx, + axis_key_y: yy, + axis_key_c: cc, + axis_key_s: ss, + 'info': io} + + # https://docs.streamlit.io/library/api-reference/charts/st.bar_chart + # https://docs.streamlit.io/library/api-reference/charts/st.plotly_chart + # https://plotly.com/python/line-and-scatter/ + + df = pd.DataFrame(dd) + + if how == 'bar': + st.bar_chart(df, x=axis_key_x, y=axis_key_y) + else: + fig = px.scatter( + df, + x=axis_key_x, + y=axis_key_y, + color=axis_key_c, + symbol=axis_key_s, + hover_name='info', + height=1000) + + st.plotly_chart( + fig, + theme="streamlit", + use_container_width=True) + + elif how == '2d': + ################################################################### + # 2D interactive graph - very slow - need to be updated + width = 1 + + t = 0 + for result in values2: + v = result + + t += 1 + + x = v.get(axis_key_x, None) + y = v.get(axis_key_y, None) + + url = v.get('url', '') + if url == '': + url = v.get('git_url', '') + + color = 'blue' + if axis_key_c != '': + c = v.get(axis_key_c, None) + if c is not None: + if c in unique_color_values: + color = unique_color_values[c] + else: + color = unique_colors[i_unique_color_values] + unique_color_values[c] = color + if i_unique_color_values < ( + len(unique_colors) - 1): + i_unique_color_values += 1 + + style = 'o' + if axis_key_s != '': + s = v.get(axis_key_s, None) + if s is not None: + if s in unique_style_values: + style = unique_style_values[s] + else: + style = unique_styles[i_unique_style_values] + unique_style_values[s] = style + if i_unique_style_values < ( + len(unique_styles) - 1): + i_unique_style_values += 1 + + graph = ax.scatter(x, y, color=color, marker=style) + + info = '' + for key in sorted(v.keys(), key=lambda x: x.lower()): + value = v[key] + info += '' + str(key) + ': ' + str(value) + '
    \n' + + info2 = '
    ' + \ + info + '
    ' + + label = [info2] + plugins.connect(fig, plugins.PointHTMLTooltip(graph, label)) + + experiment_uid = v.get('experiment_uid', '') + if experiment_uid != '' and experiment_uid not in experiment_uids: + experiment_uids.append(experiment_uid) + + uid = v.get('uid', '') + if uid != '': + xaction = 'action={}&'.format( + action) if action != '' else '' + url = '?{}name={}&result_uid={}'.format( + xaction, experiment_uid, uid) + + if url != '': + targets = [url] + plugins.connect( + fig, OpenBrowserOnClick( + graph, targets=targets)) + + # Render graph + fig_html = mpld3.fig_to_html(fig) + components.html(fig_html, width=1100, height=500) + + # fig_html = '
    '+fig_html+'
    ' + + # components.html(fig_html, width=1000, height=800) + # st.markdown('---') + + ####################################################################### + # Show all data + df = pd.DataFrame( + all_data, + columns=(k for k in sorted_keys if k != '') + ) + + st.markdown('---') + st.dataframe(df) + + # Check if can create self link + if len(experiment_uids) == 1: + st.markdown("""---""") + + xtype = '&type={}'.format(how) if how != '' else '' + + end_html = ''' +
    + Self link +
+        '''.format(misc.make_url(experiment_uids[0], action=action, md=False), xtype)
+
+        st.write(end_html, unsafe_allow_html=True)
+
+    return {'return': 0}
+
+
+if __name__ == "__main__":
+    r = main()
+
+    if r['return'] > 0:
+
+        st.markdown("""---""")
+        st.markdown('**Error detected by CM:** {}'.format(r['error']))
diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/install/linux.md b/cmx4mlops/cmx4mlops/repo/script/gui/install/linux.md
new file mode 100644
index 000000000..4ee277a7b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/gui/install/linux.md
@@ -0,0 +1,10 @@
+```bash
+sudo apt update && sudo apt upgrade
+sudo apt install python3 python3-pip python3-venv git wget curl
+```
+
+*Note that on Ubuntu 23+ you must set up a Python virtual environment before installing any Python project, including CM:*
+```bash
+python3 -m venv cm
+source cm/bin/activate
+```
diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/install/macos.md b/cmx4mlops/cmx4mlops/repo/script/gui/install/macos.md
new file mode 100644
index 000000000..f7905e340
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/gui/install/macos.md
@@ -0,0 +1,24 @@
+*Note that CM currently does not work with Python installed from the Apple App Store.
+ Please install Python via brew as described below.*
+
+If the `brew` package manager is not installed, install it as follows (see details [here](https://brew.sh/)):
+```bash
+/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
+```
+
+Don't forget to add brew to the PATH environment variable as described at the end of the installation output.
+
+Then install Python, Git, wget and curl, install CM via pip, and test it:
+
+```bash
+brew install python3 git wget curl
+
+python3 -m pip install cmind
+
+cm test core
+```
+
+*Sometimes Python does not add the `cm` and `cmr` binaries to the `PATH` environment variable.
+ You may need to find these files and add their location to the `PATH` variable.
+ We plan to simplify this installation in the future.*
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/install/redhat.md b/cmx4mlops/cmx4mlops/repo/script/gui/install/redhat.md
new file mode 100644
index 000000000..13f1c6a0e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/gui/install/redhat.md
@@ -0,0 +1,7 @@
+*We have successfully tested CM on Red Hat 9 and CentOS 8:*
+
+```bash
+sudo dnf update
+sudo dnf install python3 python3-pip git wget curl
+
+```
diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/install/windows.md b/cmx4mlops/cmx4mlops/repo/script/gui/install/windows.md
new file mode 100644
index 000000000..8e1d50bfc
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/gui/install/windows.md
@@ -0,0 +1,15 @@
+* Configure Windows 10+ to [support long paths](https://learn.microsoft.com/en-us/windows/win32/fileio/maximum-file-path-limitation?tabs=registry#enable-long-paths-in-windows-10-version-1607-and-later) from the command line as admin:
+  ```bash
+  reg add "HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\FileSystem" /v LongPathsEnabled /t REG_DWORD /d 1 /f
+  ```
+* Download and install Git from [git-for-windows.github.io](https://git-for-windows.github.io).
+  * Configure Git to accept long file names: `git config --system core.longpaths true`
+* Download and install Python 3+ from [www.python.org/downloads/windows](https://www.python.org/downloads/windows).
+  * Don't forget to select the option to add Python binaries to the PATH environment variable!
+  * Configure Windows to accept long file names during Python installation!
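*Editor's note:* to verify that the registry change from the first step took effect, one can read the value back, for example with Python's standard `winreg` module (a small read-only sketch; it makes no changes):

```python
# Sketch: read back the LongPathsEnabled value set by the `reg add` command.
import winreg

key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
                     r"SYSTEM\CurrentControlSet\Control\FileSystem")
value, _ = winreg.QueryValueEx(key, "LongPathsEnabled")
winreg.CloseKey(key)

print("Long paths enabled" if value == 1 else "Long paths are NOT enabled")
```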
+
+*Note that we [have reports](https://github.com/mlcommons/ck/issues/844)
+ that CM does not work when Python was first installed from the Microsoft Store.
+ If CM fails to run, you can find a fix [here](https://stackoverflow.com/questions/57485491/python-python3-executes-in-command-prompt-but-does-not-run-correctly)*.
+
+*We plan to provide a self-contained package in the future to simplify CM installation on Windows.*
diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/misc.py b/cmx4mlops/cmx4mlops/repo/script/gui/misc.py
new file mode 100644
index 000000000..feb9122fb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/gui/misc.py
@@ -0,0 +1,231 @@
+# Support functions
+
+##########################################################
+def make_url(name, alias='', action='contributors',
+             key='name', md=True, skip_url_quote=False):
+
+    import urllib
+
+    if alias == '':
+        alias = name
+
+    x = urllib.parse.quote_plus(alias) if not skip_url_quote else alias
+
+    xaction = ''
+    if action != '':
+        xaction = 'action={}'.format(action)
+        if key != '':
+            xaction += '&'
+
+    url = '?{}'.format(xaction)
+
+    if key != '':
+        url += '{}={}'.format(key, x)
+
+    if md:
+        md = '[{}]({})'.format(name, url)
+    else:
+        md = url
+
+    return md
+
+##########################################################
+
+
+def convert_date(date):
+    # date: format YYYYMMDD to YYYY month day
+
+    import calendar
+
+    try:
+        year = date[0:4]
+        month = calendar.month_abbr[int(date[4:6])]
+        day = str(int(date[6:8]))
+    except Exception as e:
+        return {'return': 1, 'error': 'date "{}" is not of format YYYYMMDD: {}'.format(
+            date, format(e))}
+
+    return {'return': 0, 'string': year + ' ' + month + ' ' + day}
+
+##########################################################
+
+
+def get_params(st):
+    compatibility = False
+
+    try:
+        params2 = st.query_params
+        # Convert to old style
+        params = {}
+        for k in params2:
+            v = params2[k]
+            if type(v) != list:
+                params[k] = [v]
+    except BaseException:
+        compatibility = True
+
+    if compatibility:
+        params = st.experimental_get_query_params()
+
+    return params
+
+##########################################################
+
+
+def get_all_deps_tags(i):
+    meta = i['meta']
+    all_deps_tags = i.get('all_deps_tags', [])
+
+    for k in meta:
+        v = meta[k]
+
+        if k == 'tags':
+            if isinstance(v, list):
+                v = ','.join(v)
+
+            if v not in all_deps_tags:
+                all_deps_tags.append(v)
+
+        elif isinstance(v, dict):
+            r = get_all_deps_tags({'meta': v, 'all_deps_tags': all_deps_tags})
+            all_deps_tags = r['all_deps_tags']
+
+        elif isinstance(v, list):
+            for vv in v:
+                if isinstance(vv, dict):
+                    r = get_all_deps_tags(
+                        {'meta': vv, 'all_deps_tags': all_deps_tags})
+                    all_deps_tags = r['all_deps_tags']
+
+    return {'return': 0, 'all_deps_tags': all_deps_tags}
+
+##########################################################
+
+def make_selector(i):
+
+    key = i['key']
+    value = i['desc']
+
+    params = i['params']
+
+    st = i['st']
+    st_inputs = i['st_inputs']
+
+    hide = i.get('hide', False)
+
+    key2 = '@' + key
+
+    value2 = None
+
+    if isinstance(value, dict):
+        desc = value['desc']
+
+        choices = value.get('choices', [])
+        boolean = value.get('boolean', False)
+        default = value.get('default', '')
+        force = value.get('force', None)
+
+        if force is not None:
+            value2 = force
+            if not hide:
+                st.markdown('**{}:** {}'.format(desc, str(force)))
+
+        else:
+            if boolean:
+                v = default
+                x = params.get(key2, None)
+                if x is not None and len(x) > 0 and x[0] is not None:
+                    if x[0].lower() == 'true':
+                        v = True
+                    elif x[0].lower() == 'false':
+                        v = False
+                if hide:
+                    value2 = v
+                else:
+                    value2 = st.checkbox(desc, value=v, key=key2)
+            elif len(choices) > 0:
+                x = params.get(key2, None)
+                if x is not None and len(x) > 0 and x[0] is not None:
+                    x = x[0]
+                    if x in choices:
+                        selected_index = choices.index(x)
+                    else:
+                        selected_index = choices.index(default) if default != '' else 0
+                else:
+                    selected_index = choices.index(default) if default != '' else 0
+                if hide:
+                    value2 = choices[selected_index]
+                else:
+                    value2 = st.selectbox(
+                        desc, choices, index=selected_index, key=key2)
+            else:
+                v = default
+                x = params.get(key2, None)
+                if x is not None and len(x) > 0 and x[0] is not None:
+                    v = x[0]
+                if hide:
+                    value2 = v
+                else:
+                    value2 = st.text_input(desc, value=v, key=key2)
+
+            st_inputs[key2] = value2
+
+    else:
+        desc = value
+        if hide:
+            value2 = desc
+        else:
+            value2 = st.text_input(desc)
+        st_inputs[key2] = value2
+
+    return {'return': 0, 'key2': key2, 'value2': value2}
+
+##########################################################
+
+def make_selection(st, selection, param_key, text, x_uid, force_index=0):
+
+    x_meta = {}
+
+    if len(selection) > 0:
+        selection = sorted(selection, key=lambda v: v['name'])
+
+        if x_uid != '':
+            x_meta = selection[0]
+            st.markdown('**Selected {}:** {}'.format(text, x_meta['name']))
+        else:
+            x_selection = [{'name': ''}]
+            x_selection += selection
+
+            x_id = st.selectbox('Select {}:'.format(text),
+                                range(len(x_selection)),
+                                format_func=lambda x: x_selection[x]['name'],
+                                index=force_index,
+                                key=param_key)
+
+            if x_id > 0:
+                x_meta = x_selection[x_id]
+
+    return {'return': 0, 'meta': x_meta}
+
+##########################################################################
+
+def get_with_complex_key_safe(meta, key):
+    v = get_with_complex_key(meta, key)
+
+    if v is None:
+        v = ''
+
+    return v
+
+##########################################################################
+
+def get_with_complex_key(meta, key):
+
+    j = key.find('.')
+
+    if j < 0:
+        return meta.get(key)
+
+    key0 = key[:j]
+
+    if key0 not in meta:
+        return None
+
+    return get_with_complex_key(meta[key0], key[j + 1:])
diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/playground.py b/cmx4mlops/cmx4mlops/repo/script/gui/playground.py
new file mode 100644
index 000000000..851d3f1bf
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/gui/playground.py
@@ -0,0 +1,208 @@
+# Developer(s): Grigori Fursin
+
+import streamlit as st
+from streamlit.components.v1 import html
+from streamlit_option_menu import option_menu
+
+import os
+import cmind
+import misc
+
+
+def main():
+
+    st.set_page_config(layout="wide",
+                       menu_items={})
+
+    params = misc.get_params(st)
+
+    # Set style
+    # Green: background:#7fcf6f;
+    hide_streamlit_style = """
+
+        """
+
+    st.markdown(hide_streamlit_style, unsafe_allow_html=True)
+
+    # Set title (check extra user HTML to embed before title if needed)
+    extra = os.environ.get('CM_GUI_EXTRA_HTML', '')
+
+    if extra != '':
+        url = ''
+        for p in params:
+            v = str(','.join(params[p]))
+            if url != '':
+                url += ';'
+            url += p + '=' + v
+        extra = extra.replace('{{CM_URL}}', url) + '\n\n'
+
+    st.write('''
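*Editor's note:* `get_with_complex_key` in `misc.py` above resolves dot-separated keys against nested dictionaries by recursing one level per dot, and `get_with_complex_key_safe` maps a missing key to an empty string. A small usage sketch (the sample dictionary is illustrative only):

```python
# Sketch: dotted-key lookup as implemented by misc.get_with_complex_key.
from misc import get_with_complex_key, get_with_complex_key_safe

meta = {'runtime': {'python': {'version': '3.11'}}}

# Recurses on 'runtime', then 'python', then reads 'version'.
assert get_with_complex_key(meta, 'runtime.python.version') == '3.11'

# A missing intermediate key yields None; the *_safe variant returns ''.
assert get_with_complex_key(meta, 'runtime.java.version') is None
assert get_with_complex_key_safe(meta, 'runtime.java.version') == ''
```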
    +

    Collective Knowledge Playground

    +
    + {} +
    +
    + '''.format(extra), + unsafe_allow_html=True + ) + + extra_file = os.environ.get('CM_GUI_EXTRA_HTML_FILE', '') + if extra_file != '': + r = cmind.utils.load_txt(extra_file) + if r['return'] > 0: + return r + + s = '\n\n' + r['string'] + '\n\n' + + st.write(s, unsafe_allow_html=True) + + # Check action and basic menu + action = params.get('action', ['scripts'])[0].lower() + + style_action_scripts = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'scripts' else '' + style_action_howtorun = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'howtorun' else '' + style_action_challenges = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'challenges' else '' + style_action_contributors = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'contributors' else '' + style_action_experiments = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'experiments' else '' + style_action_reproduce = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'reproduce' else '' + style_action_apps = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'apps' else '' + style_action_reports = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'reports' else '' + style_action_beta = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'beta' else '' + style_action_install = 'font-style:italic;font-weight:bold;color:#ffffff' if action == 'install' else '' + + st.write(''' +
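*Editor's note:* the ten `style_action_*` assignments above each apply the same inline CSS to whichever menu action is active. As a design note, the same mapping can be computed in one place; a compact alternative sketch (not the code in this patch):

```python
# Alternative sketch: compute the nav-highlight style per menu action.
ACTIVE_STYLE = 'font-style:italic;font-weight:bold;color:#ffffff'

MENU_ACTIONS = ['scripts', 'howtorun', 'challenges', 'contributors',
                'experiments', 'reproduce', 'apps', 'reports',
                'beta', 'install']

action = 'scripts'  # e.g. parsed from the URL query parameters

# Only the active action gets the highlight style; all others stay empty.
styles = {a: (ACTIVE_STYLE if a == action else '') for a in MENU_ACTIONS}
```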
    + + + + +
    + + + +
    + + + +
    + '''.format( + style_action_scripts, + style_action_howtorun, + style_action_challenges, + style_action_experiments, + style_action_reproduce, + style_action_contributors, + style_action_reports, + style_action_beta, + style_action_apps, + style_action_install + ), + unsafe_allow_html=True + ) + + # Check actions +# st.markdown("""---""") + st.markdown('') + + r = {'return': 0} + + if action == 'challenges': + from playground_challenges import page + r = page(st, params) + elif action == 'howtorun': + from playground_howtorun import page + r = page(st, params) + elif action == 'experiments': + from graph import visualize + r = visualize(st, params, action='experiments') + elif action == 'contributors': + from playground_contributors import page + r = page(st, params) + elif action == 'scripts' or action == 'recipes' or action == 'automation-recipes' or action == 'components': + from playground_scripts import page + r = page(st, params) + elif action == 'reproduce' or action == 'repro' or action == 'reproducibility': + from playground_reproduce import page + r = page(st, params) + elif action == 'apps' or action == 'optimized-apps': + from playground_apps import page + r = page(st, params) + elif action == 'reports': + from playground_reports import page + r = page(st, params) + elif action == 'beta': + from playground_beta import page + r = page(st, params) + elif action == 'install' or action == 'setup': + from playground_install import page + r = page(st, params, {}) + + if r['return'] > 0: + st.markdown( + '**CM error:** {} . Please report [here](https://github.com/mlcommons/ck/issues)'.format(r['error'])) + + end_html = r.get('end_html', '') + + # Finalize all pages + st.markdown("""---""") + + if end_html != '': + st.write(end_html, unsafe_allow_html=True) + + st.write(""" +
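*Editor's note:* the `if`/`elif` chain above routes each `action` (and its aliases) to a page module and imports it lazily, so a page's dependencies are only loaded when that page is requested. An equivalent dispatch-table sketch (an alternative formulation, not the patch's code; aliases omitted for brevity):

```python
# Alternative sketch: action -> page module dispatch with lazy imports.
import importlib

PAGES = {
    'challenges': 'playground_challenges',
    'howtorun': 'playground_howtorun',
    'scripts': 'playground_scripts',
    'reproduce': 'playground_reproduce',
    'apps': 'playground_apps',
    'reports': 'playground_reports',
    'beta': 'playground_beta',
    'contributors': 'playground_contributors',
}


def dispatch(action, st, params):
    module_name = PAGES.get(action)
    if module_name is None:
        return {'return': 0}
    # Lazy import: the page module is loaded only when its action is hit.
    page = importlib.import_module(module_name).page
    return page(st, params)
```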
    + Powered by MLCommons Collective Mind +
    + """, + unsafe_allow_html=True) + + +def make_url(name, alias='', action='contributors', key='name', md=True): + + import urllib + + if alias == '': + alias = name + + url = '?action={}&{}={}'.format( + action, key, urllib.parse.quote_plus(alias)) + + if md: + md = '[{}]({})'.format(name, url) + else: + md = url + + return md + + +def convert_date(date): + # date: format YYYYMMDD to YYYY month day + + import calendar + + try: + year = date[0:4] + month = calendar.month_abbr[int(date[4:6])] + day = str(int(date[6:8])) + except Exception as e: + return {'return': 1, 'error': 'date "{}" is not of format YYYYMMDD: {}'.format( + date, format(e))} + + return {'return': 0, 'string': year + ' ' + month + ' ' + day} + + +if __name__ == "__main__": + main() diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/playground_apps.py b/cmx4mlops/cmx4mlops/repo/script/gui/playground_apps.py new file mode 100644 index 000000000..acaf9f4bf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/playground_apps.py @@ -0,0 +1,38 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import misc + +import streamlit.components.v1 as components + +import streamlit as st + +announcement = 'Under development - please get in touch via [Discord](https://discord.gg/JjWNWXKxwT) for more details ...' + +initialized = False +external_module_path = '' +external_module_meta = {} + + +def main(): + params = misc.get_params(st) + + # Set title + st.title('How to run benchmarks') + + st.markdown(announcement) + + return page(st, params) + + +def page(st, params, action=''): + + global initialized, external_module_path, external_module_meta + + end_html = '' + + st.markdown('----') + st.markdown(announcement) + + return {'return': 0, 'end_html': end_html} diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/playground_beta.py b/cmx4mlops/cmx4mlops/repo/script/gui/playground_beta.py new file mode 100644 index 000000000..bf2b24ec4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/playground_beta.py @@ -0,0 +1,37 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import datetime +import misc + + +def page(st, params): + + current_script_path = os.environ.get('CM_TMP_CURRENT_SCRIPT_PATH', '') + + url_prefix = st.config.get_option('server.baseUrlPath') + '/' + + name = params.get('name', [''])[0].strip() + tags = params.get('tags', [''])[0].lower() + + readme = os.path.join(current_script_path, 'playground_beta_README.md') + + md = '' + + if os.path.isfile(readme): + + r = cmind.utils.load_txt(readme) + if r['return'] > 0: + return r + + md += r['string'] + + md = md.replace('{{URL_PREFIX}}', url_prefix) + +# st.markdown(md) + st.write(md, unsafe_allow_html=True) + + end_html = '' + + return {'return': 0, 'end_html': end_html} diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/playground_beta_README.md b/cmx4mlops/cmx4mlops/repo/script/gui/playground_beta_README.md new file mode 100644 index 000000000..77712f3c4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/playground_beta_README.md @@ -0,0 +1,10 @@ +## Beta features (under development) + +Here are a few on-going projects to extend the Collective Knowledge Playground +being developed by [cKnowledge.org](https://cKnowledge.org) and [cTuning.org](https://cTuning.org) +using [Collective Mind automation recipes (CM scripts)]({{URL_PREFIX}}?action=scripts): + +* [MLPerf results explorer](https://access.cknowledge.org/mlperf-explorer) +* [LLM-based assistant to run MLPerf benchmarks](https://access.cknowledge.org/assistant) + +Feel free to suggest your 
projects and extensions using [GitHub issues](https://github.com/mlcommons/ck/issues)! diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/playground_challenges.py b/cmx4mlops/cmx4mlops/repo/script/gui/playground_challenges.py new file mode 100644 index 000000000..44a882489 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/playground_challenges.py @@ -0,0 +1,496 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import datetime +import misc + + +def page(st, params): + + url_prefix = st.config.get_option('server.baseUrlPath') + '/' + + url_scripts = url_prefix + '?action=scripts' + url_contributors = url_prefix + '?action=contributors' + + name = params.get('name', [''])[0].strip() + tags = params.get('tags', [''])[0].lower() + + ii = {'action': 'find', + 'automation': 'challenge,3d84abd768f34e08'} + + if name != '': + ii['artifact'] = name + if tags != '': + ii['tags'] = tags + + r = cmind.access(ii) + if r['return'] > 0: + return r + + lst = r['list'] + + end_html = '' + + if len(lst) == 0: + st.markdown('Challenges were not found!') + else: + artifact = None + + if len(lst) == 1: + artifact = lst[0] + else: + challenges = [] + + date_now = datetime.datetime.now().isoformat() + date_now2 = int(date_now[0:4] + date_now[5:7] + date_now[8:10]) + + ongoing = [] + + for l in sorted(lst, key=lambda x: ( + -int(x.meta.get('date_open', '0')), + -int(x.meta.get('date_close', '0')), + x.meta.get('title', '') + )): + + row = {} + + meta = l.meta + row['uid'] = meta['uid'] + + name = meta.get('title', meta['alias']) + + row['name'] = name + + if meta.get('hot', False): + row['hot'] = True + + for k in ['date_close_extension', 'points', + 'trophies', 'prize', 'prize_short', 'skip', 'sort']: + if k in meta: + row[k] = meta[k] + + under_preparation = meta.get('under_preparation', False) + row['under_preparation'] = under_preparation + + date_open = meta.get('date_open', '') + date_close = meta.get('date_close', '') + + s_date_open = '' + if date_open != '': + r = misc.convert_date(date_open) + s_date_open = r['string'] if r['return'] == 0 else '' + + row['orig_date_open'] = date_open + row['date_open'] = s_date_open + + s_date_close = '' + if date_close != '': + r = misc.convert_date(date_close) + s_date_close = r['string'] if r['return'] == 0 else '' + + row['orig_date_close'] = date_close + row['date_close'] = s_date_close + + diff1 = 0 + diff2 = 0 + + if date_open != '': + diff1 = int(date_open) - int(date_now2) + + if date_close != '': + diff2 = int(date_close) - int(date_now2) + + prefix = '' + if under_preparation: + prefix = 'Under preparation: ' + else: + if date_open != '' and diff1 > 0: + prefix = 'Opens on {}: '.format(s_date_open) + else: + if date_close != '': + if diff2 < 0: + prefix = 'Finished on {}: '.format( + s_date_close) + else: + prefix = 'Open and finishes on {}: '.format( + s_date_close) + else: + prefix = 'Open: '.format(s_date_close) + + # Check if open challenge even if under preparation + if date_open and (date_close == '' or ( + diff1 <= 0 and diff2 > 0)): + ongoing.append(row) + else: + challenges.append( + {'prefix': prefix, 'name': name, 'uid': l.meta['uid']}) + + # Show ongoing if open + if len(ongoing) > 0: + + # Check hot + hot = [] + ongoing_without_hot = [] + + for row in ongoing: + if row.get('hot', False): + hot.append(row) + else: + ongoing_without_hot.append(row) + + # Some info + x = ''' + + + Collaborative benchmarking and optimization of AI applications and systems + (latency, throughput, power consumption, accuracy, costs ...) 
+ is organized by MLCommons, + cKnowledge + and the cTuning foundation + and powered by Collective Mind automation recipes. + We deeply thank all our participants and contributors! + + +
    +
    + '''.format(url_scripts, url_contributors) + st.write(x, unsafe_allow_html=True) + + # Check if hot + if len(hot) > 0: + st.markdown('#### Hot challenges') + + md_tmp = '' + + for row in sorted(hot, key=lambda row: (int(row.get('orig_date_close', 9999999999)), + row.get('sort', 0), + row.get( + 'name', ''), + row.get( + 'under_preparation', False) + )): + x = row['name'] + x = x[0].upper() + x[1:] + + url = url_prefix + \ + '?action=challenges&name={}'.format(row['uid']) + + date_close = row.get('date_close', '').strip() + y = ' (Closing date: **{}**)'.format( + date_close) if date_close != '' else '' + + md_tmp += '* [{}]({}){}\n'.format(x, url, y) + + st.markdown(md_tmp) + + st.markdown('#### On-going challenges') + + # Continue all + ind = 1 + + data = [] + + for row in sorted(ongoing_without_hot, key=lambda row: (int(row.get('orig_date_close', 9999999999)), + row.get( + 'sort', 0), + row.get( + 'name', ''), + row.get( + 'under_preparation', False) + )): + if row.get('skip', False): + continue + + xrow = [] + + md = '' + up = row.get('under_preparation', False) + + x = row['name'] + y = '' + if up: + x = x[0].lower() + x[1:] + y = 'Under preparation: ' + + url = url_prefix + \ + '?action=challenges&name={}'.format(row['uid']) +# md += '###### {}) {}[{}]({})\n'.format(str(ind), y, x, url) + + x = ''' +
    + + {}{} + +
    + '''.format(y, url, x).replace('\n', '') +# st.write(x, unsafe_allow_html = True) + + xrow.append(x) + + # Assemble info + x = '' + + date_close = row.get('date_close', '') + y = '' + if date_close != '' and date_close is not None: + x += '   Closing date: **{}**\n'.format( + date_close) + y = date_close.replace(' ', ' ') + + xrow.append(y) + + y = '' + if row.get('date_close_extension', False): + y = 'until done' + + xrow.append(y) + +# points = row.get('points',0) +# y = '' +# if points>0: +# x += '   Points: **{}**\n'.format(str(points)) +# y = str(points) +# +# xrow.append(y) + + awards = '' + + trophies = row.get('trophies', False) + if trophies: + x += '   Trophy: **Yes**\n' + awards += '🏆' + + +# prize = row.get('prize_short','') +# if prize!='': +# x += '   Prizes from [MLCommons organizations](https://mlcommons.org), [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https:/cKnowledge.org): **{}**\n'.format(prize) +# if awards!='': awards+=' , ' +# awards += prize +# +# xrow.append(awards) + + if x != '': + md += '     ' + x + +# st.markdown(md) + + data.append(xrow) + ind += 1 + + import pandas as pd + import numpy as np + + df = pd.DataFrame(data, + columns=['Challenge', 'Closing date', 'Extension']) + + df.index += 1 + +# st.table(df) + st.write( + df.to_html( + escape=False, + justify='left'), + unsafe_allow_html=True) + + # Show selector for all +# challenge = st.selectbox('View past benchmarking, optimization, reproducibility and replicability challenges:', +# range(len(challenges)), +# format_func=lambda x: challenges[x], +# index=0, key='challenge') +# +# if challenge>0: +# artifact = artifacts[challenge] + + # Process 1 challenge + if artifact is None: + # st.markdown('#### Past or future challenges:') + + st.markdown('#### Future or past challenges') + + for c in challenges: + + prefix = c['prefix'] + name = c['name'] + uid = c['uid'] + + url = url_prefix + '?action=challenges&name={}'.format(uid) + + x = ''' +
    + {}) {}{} +
    + '''.format(str(ind), prefix, url, name) + + st.write(x, unsafe_allow_html=True) + + ind += 1 + + else: + meta = artifact.meta + + name = meta.get('title', meta['alias']) + uid = meta['uid'] + + st.write(''' +
    +

    Challenge: {}

    +
    + '''.format(name), + unsafe_allow_html=True + ) + + end_html = '
    Self link
    '.format( + misc.make_url(meta['uid'], action='challenges', md=False)) + + # Check basic password + password_hash = meta.get('password_hash', '') + view = True + if password_hash != '': + view = False + + password = st.text_input( + "Enter password", type="password", key="password") + + if password != '': + import bcrypt + # TBD: temporal hack to demo password protection for + # experiments + password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e' + password_hash2 = bcrypt.hashpw( + password.encode('utf-8'), password_salt) + + if password_hash.encode('utf-8') == password_hash2: + view = True + else: + st.markdown('**Warning:** wrong password') + + if not view: + return {'return': 0, 'end_html': end_html} + + z = '' + date_open = meta.get('date_open', '') + if date_open != '': + # Format YYYYMMDD + r = misc.convert_date(date_open) + if r['return'] > 0: + return r + z += '* **Open date:** {}\n'.format(r['string']) + + date_close = meta.get('date_close', '') + if date_close != '': + # Format YYYYMMDD + r = misc.convert_date(date_close) + if r['return'] > 0: + return r + z += '* **Closing date:** {}\n'.format(r['string']) + + if meta.get('trophies', False): + z += '* **MLCommons Collective Knowledge Contributor award:** Yes\n' + + prize_short = meta.get('prize_short', '') + if prize_short != '': + z += '* **Prizes:** {}\n'.format(prize_short) + +# prize = meta.get('prize','') +# if prize!='': +# z+='* **Student prizes:** {}\n'.format(prize) + + urls = meta.get('urls', []) + url = meta.get('url', '') + + if url != '': + urls.append(url) + + if len(urls) > 0: + x = '* **External link:** ' + md = '' + if len(urls) > 1: + md = '* **External links:**\n' + x = ' * ' + + for u in urls: + md += x + '[{}]({})\n'.format(u, u) + z += md + '\n' + + # Check if has linked experiments + experiments = meta.get('experiments', []) + + if len(experiments) > 0: + md = '* **Shared experiments:**\n' + + for e in experiments: + tags = e.get('tags', '') + name = e.get('name', '') + + if tags != '': + md += ' * ' + \ + misc.make_url( + tags, action='experiments', key='tags') + '\n' + elif name != '': + md += ' * ' + \ + misc.make_url(name, action='experiments') + '\n' + + z += md + '\n' + + st.markdown(z) + + # Check if has text + path = artifact.path + + for f in ['README.md', 'info.html']: + f1 = os.path.join(path, f) + if os.path.isfile(f1): + r = cmind.utils.load_txt(f1) + if r['return'] > 0: + return r + + s = r['string'] + + st.markdown('---') + + if f.endswith('.html'): + y = s.split('\n') + ss = '' + for x in y: + ss += x.strip() + '\n' + + st.write(ss, unsafe_allow_html=True) + else: + st.markdown(s) + + break + + # Check associated reports + r = cmind.access({'action': 'find', + 'automation': 'report,6462ecdba2054467', + 'tags': 'challenge-{}'.format(uid)}) + if r['return'] > 0: + return r + + lst = r['list'] + + for l in lst: + report_path = l.path + + f1 = os.path.join(report_path, 'README.md') + if os.path.isfile(f1): + report_meta = l.meta + + report_alias = report_meta['alias'] + report_title = report_meta.get('title', '') + + report_name = report_title if report_title != '' else report_alias + + r = cmind.utils.load_txt(f1) + if r['return'] > 0: + return r + + s = r['string'] + + st.markdown('---') + st.markdown('### ' + report_name) + + st.markdown(s, unsafe_allow_html=True) + + return {'return': 0, 'end_html': end_html} diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/playground_challenges_with_prizes.py b/cmx4mlops/cmx4mlops/repo/script/gui/playground_challenges_with_prizes.py new file mode 100644 
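*Editor's note:* the password gate above re-hashes the entered password with a fixed salt and compares the result to the stored hash; the code itself marks this as a temporary hack. For reference, the conventional `bcrypt` pattern generates a per-password salt with `gensalt()` and verifies with `checkpw()` (a sketch, not the patch's code):

```python
# Sketch: standard bcrypt usage with a per-password salt.
import bcrypt

# hashpw() embeds the random salt inside the resulting hash.
stored_hash = bcrypt.hashpw(b'correct horse battery staple', bcrypt.gensalt())

# checkpw() extracts the salt from stored_hash and compares safely.
assert bcrypt.checkpw(b'correct horse battery staple', stored_hash)
assert not bcrypt.checkpw(b'wrong password', stored_hash)
```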
index 000000000..3cc681cd7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/playground_challenges_with_prizes.py @@ -0,0 +1,449 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import datetime +import misc + + +def page(st, params): + + url_prefix = st.config.get_option('server.baseUrlPath') + '/' + + name = params.get('name', [''])[0].strip() + tags = params.get('tags', [''])[0].lower() + + ii = {'action': 'find', + 'automation': 'challenge,3d84abd768f34e08'} + + if name != '': + ii['artifact'] = name + if tags != '': + ii['tags'] = tags + + r = cmind.access(ii) + if r['return'] > 0: + return r + + lst = r['list'] + + end_html = '' + + if len(lst) == 0: + st.markdown('Challenges were not found!') + else: + artifact = None + + if len(lst) == 1: + artifact = lst[0] + else: + challenges = [] + + date_now = datetime.datetime.now().isoformat() + date_now2 = int(date_now[0:4] + date_now[5:7] + date_now[8:10]) + + ongoing = [] + + for l in sorted(lst, key=lambda x: ( + -int(x.meta.get('date_open', '0')), + -int(x.meta.get('date_close', '0')), + x.meta.get('title', '') + )): + + row = {} + + meta = l.meta + row['uid'] = meta['uid'] + + name = meta.get('title', meta['alias']) + + row['name'] = name + + for k in ['date_close_extension', 'points', + 'trophies', 'prize', 'prize_short', 'skip', 'sort']: + if k in meta: + row[k] = meta[k] + + under_preparation = meta.get('under_preparation', False) + row['under_preparation'] = under_preparation + + date_open = meta.get('date_open', '') + date_close = meta.get('date_close', '') + + s_date_open = '' + if date_open != '': + r = misc.convert_date(date_open) + s_date_open = r['string'] if r['return'] == 0 else '' + + row['orig_date_open'] = date_open + row['date_open'] = s_date_open + + s_date_close = '' + if date_close != '': + r = misc.convert_date(date_close) + s_date_close = r['string'] if r['return'] == 0 else '' + + row['orig_date_close'] = date_close + row['date_close'] = s_date_close + + diff1 = 0 + diff2 = 0 + + if date_open != '': + diff1 = int(date_open) - int(date_now2) + + if date_close != '': + diff2 = int(date_close) - int(date_now2) + + prefix = '' + if under_preparation: + prefix = 'Under preparation: ' + else: + if date_open != '' and diff1 > 0: + prefix = 'Opens on {}: '.format(s_date_open) + else: + if date_close != '': + if diff2 < 0: + prefix = 'Finished on {}: '.format( + s_date_close) + else: + prefix = 'Open and finishes on {}: '.format( + s_date_close) + else: + prefix = 'Open: '.format(s_date_close) + + # Check if open challenge even if under preparation + if date_open and (date_close == '' or ( + diff1 <= 0 and diff2 > 0)): + ongoing.append(row) + else: + challenges.append( + {'prefix': prefix, 'name': name, 'uid': l.meta['uid']}) + + # Show ongoing if open + if len(ongoing) > 0: + ind = 1 + + x = ''' +
    +

    Ongoing reproducibility and optimization challenges

    + +
    + ''' + st.write(x, unsafe_allow_html=True) + + data = [] + + for row in sorted(ongoing, key=lambda row: (int(row.get('orig_date_close', 9999999999)), + row.get('sort', 0), + row.get( + 'name', ''), + row.get( + 'under_preparation', False) + )): + if row.get('skip', False): + continue + + xrow = [] + + md = '' + up = row.get('under_preparation', False) + + x = row['name'] + y = '' + if up: + x = x[0].lower() + x[1:] + y = 'Under preparation: ' + + url = url_prefix + \ + '?action=challenges&name={}'.format(row['uid']) +# md += '###### {}) {}[{}]({})\n'.format(str(ind), y, x, url) + + x = ''' +
    + + {}{} + +
    + '''.format(y, url, x).replace('\n', '') +# st.write(x, unsafe_allow_html = True) + + xrow.append(x) + + # Assemble info + x = '' + + date_close = row.get('date_close', '') + y = '' + if date_close != '' and date_close is not None: + x += '   Closing date: **{}**\n'.format( + date_close) + y = date_close.replace(' ', ' ') + + xrow.append(y) + + y = '' + if row.get('date_close_extension', False): + y = 'until done' + + xrow.append(y) + +# points = row.get('points',0) +# y = '' +# if points>0: +# x += '   Points: **{}**\n'.format(str(points)) +# y = str(points) +# +# xrow.append(y) + + awards = '' + + trophies = row.get('trophies', False) + if trophies: + x += '   Trophy: **Yes**\n' + awards += '🏆' + + prize = row.get('prize_short', '') + if prize != '': + x += '   Prizes from [MLCommons organizations](https://mlcommons.org), [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https:/cKnowledge.org): **{}**\n'.format( + prize) + if awards != '': + awards += ' , ' + awards += prize + + xrow.append(awards) + + if x != '': + md += '     ' + x + +# st.markdown(md) + + data.append(xrow) + ind += 1 + + import pandas as pd + import numpy as np + + df = pd.DataFrame(data, + columns=['Challenge', 'Closing date', 'Extension', 'Contributor award and prizes from MLCommons organizations, cTuning foundation and cKnowledge.org']) + + df.index += 1 + +# st.table(df) + st.write( + df.to_html( + escape=False, + justify='left'), + unsafe_allow_html=True) + + # Show selector for all +# challenge = st.selectbox('View past benchmarking, optimization, reproducibility and replicability challenges:', +# range(len(challenges)), +# format_func=lambda x: challenges[x], +# index=0, key='challenge') +# +# if challenge>0: +# artifact = artifacts[challenge] + + # Process 1 challenge + if artifact is None: + # st.markdown('#### Past or future challenges:') + + x = ''' +
    +

    Future or past challenges

    +
    + ''' + st.write(x, unsafe_allow_html=True) + + for c in challenges: + + prefix = c['prefix'] + name = c['name'] + uid = c['uid'] + + url = url_prefix + '?action=challenges&name={}'.format(uid) + + x = ''' +
    + {}) {}{} +
    + '''.format(str(ind), prefix, url, name) + + st.write(x, unsafe_allow_html=True) + + ind += 1 + + else: + meta = artifact.meta + + name = meta.get('title', meta['alias']) + uid = meta['uid'] + + st.write(''' +
    +

    Challenge: {}

    +
    + '''.format(name), + unsafe_allow_html=True + ) + + end_html = '
    Self link
    '.format( + misc.make_url(meta['uid'], action='challenges', md=False)) + + # Check basic password + password_hash = meta.get('password_hash', '') + view = True + if password_hash != '': + view = False + + password = st.text_input( + "Enter password", type="password", key="password") + + if password != '': + import bcrypt + # TBD: temporal hack to demo password protection for + # experiments + password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e' + password_hash2 = bcrypt.hashpw( + password.encode('utf-8'), password_salt) + + if password_hash.encode('utf-8') == password_hash2: + view = True + else: + st.markdown('**Warning:** wrong password') + + if not view: + return {'return': 0, 'end_html': end_html} + + z = '' + date_open = meta.get('date_open', '') + if date_open != '': + # Format YYYYMMDD + r = misc.convert_date(date_open) + if r['return'] > 0: + return r + z += '* **Open date:** {}\n'.format(r['string']) + + date_close = meta.get('date_close', '') + if date_close != '': + # Format YYYYMMDD + r = misc.convert_date(date_close) + if r['return'] > 0: + return r + z += '* **Closing date:** {}\n'.format(r['string']) + + if meta.get('trophies', False): + z += '* **MLCommons Collective Knowledge Contributor award:** Yes\n' + + prize_short = meta.get('prize_short', '') + if prize_short != '': + z += '* **Prizes:** {}\n'.format(prize_short) + +# prize = meta.get('prize','') +# if prize!='': +# z+='* **Student prizes:** {}\n'.format(prize) + + urls = meta.get('urls', []) + url = meta.get('url', '') + + if url != '': + urls.append(url) + + if len(urls) > 0: + x = '* **External link:** ' + md = '' + if len(urls) > 1: + md = '* **External links:**\n' + x = ' * ' + + for u in urls: + md += x + '[{}]({})\n'.format(u, u) + z += md + '\n' + + # Check if has linked experiments + experiments = meta.get('experiments', []) + + if len(experiments) > 0: + md = '* **Shared experiments:**\n' + + for e in experiments: + tags = e.get('tags', '') + name = e.get('name', '') + + if tags != '': + md += ' * ' + \ + misc.make_url( + tags, action='experiments', key='tags') + elif name != '': + md += ' * ' + \ + misc.make_url(name, action='experiments') + + z += md + '\n' + + st.markdown(z) + + # Check if has text + path = artifact.path + + for f in ['README.md', 'info.html']: + f1 = os.path.join(path, f) + if os.path.isfile(f1): + r = cmind.utils.load_txt(f1) + if r['return'] > 0: + return r + + s = r['string'] + + st.markdown('---') + + if f.endswith('.html'): + y = s.split('\n') + ss = '' + for x in y: + ss += x.strip() + '\n' + + st.write(ss, unsafe_allow_html=True) + else: + st.markdown(s) + + break + + # Check associated reports + r = cmind.access({'action': 'find', + 'automation': 'report,6462ecdba2054467', + 'tags': 'challenge-{}'.format(uid)}) + if r['return'] > 0: + return r + + lst = r['list'] + + for l in lst: + report_path = l.path + + f1 = os.path.join(report_path, 'README.md') + if os.path.isfile(f1): + report_meta = l.meta + + report_alias = report_meta['alias'] + report_title = report_meta.get('title', '') + + report_name = report_title if report_title != '' else report_alias + + r = cmind.utils.load_txt(f1) + if r['return'] > 0: + return r + + s = r['string'] + + st.markdown('---') + st.markdown('### ' + report_name) + + st.markdown(s, unsafe_allow_html=True) + + return {'return': 0, 'end_html': end_html} diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/playground_contributors.py b/cmx4mlops/cmx4mlops/repo/script/gui/playground_contributors.py new file mode 100644 index 000000000..3c9a9a121 --- 
/dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/playground_contributors.py @@ -0,0 +1,368 @@ +# Developer(s): Grigori Fursin + +import cmind +import misc +import os + + +def page(st, params): + + url_prefix = st.config.get_option('server.baseUrlPath') + '/' + + name = params.get('name', [''])[0].lower() + + list_all = False + + if name != '': + r = cmind.access({'action': 'load', + 'automation': 'contributor,68eae17b590d4f8f', + 'artifact': name}) + if r['return'] > 0 and r['return'] != 16: + return r + + end_html = '' + + if r['return'] == 0: + meta = r['meta'] + + path = r['path'] + + name = meta.get('name', meta.get('organization', '')) + if name != '': + st.markdown("#### " + name) + + x = '' + for t in meta.get('trophies', []): + url = t.get('url', '') + if url != '': + x += '🏆 '.format(url) + + if x != '': + st.write('

    ' + x + '

    ', unsafe_allow_html=True) + + end_html = ''' +
    + Self link +
    + '''.format(misc.make_url(meta['uid'], action='contributors', md=False)) + + org = meta.get('organization', '') + if org != '': + st.markdown("* **Organization:** " + org) + + urls = meta.get('urls', []) + + url = meta.get('url', '') + if url != '': + urls.append(url) + + if len(urls) > 0: + x = '* **Web page:** ' + md = '' + if len(urls) > 1: + md = '* **Web pages:**\n' + x = ' * ' + + for u in urls: + md += x + '[{}]({})\n'.format(u, u) + + st.markdown(md) + + ongoing = meta.get('ongoing', []) + + x = str(calculate_points(meta)) + y1 = '' + y2 = '' + if len(ongoing) > 0: + y1 = '*' + y2 = ' (ongoing)*' + st.markdown("* **Points: {}{}{}**".format(y1, x, y2)) +# st.write('

    '+x+'

    ', unsafe_allow_html = True) + + if len(ongoing) > 0: + x = "* **Ongoing challenges:**\n" + + for t in ongoing: + if t != '': + x += " - {}\n".format(misc.make_url(t, + action='challenges', key='tags')) + + st.markdown(x) + + challenges = meta.get('challenges', []) + if len(challenges) > 0: + md = "* **Contributions:**\n" + + for c in sorted(challenges): + md += " * {}\n".format(misc.make_url(c, + action='challenges', key='tags')) + + st.markdown(md) + + # Check if README + md = '' + + readme = os.path.join(path, 'README.md') + if os.path.isfile(readme): + + r = cmind.utils.load_txt(readme) + if r['return'] > 0: + return r + + md += r['string'] + + st.markdown('---') + st.markdown(md) + + else: + st.markdown( + '**Warning:** Contributor "{}" not found!'.format(name)) + + return {'return': 0, 'end_html': end_html} + + return page_list(st, params) + + +def page_list(st, params): + import pandas as pd + import numpy as np + + # Read all contributors + r = cmind.access({'action': 'find', + 'automation': 'contributor,68eae17b590d4f8f'}) + if r['return'] > 0: + return r + + lst = r['list'] + + # Prepare the latest contributors + all_data = [] + keys = [ + ('name', 'Name', 400, 'leftAligned'), + ('points', 'Points', 80, 'rightAligned'), + # ('ongoing_number', 'Ongoing challenges', 80, 'rightAligned'), + ('trophies', 'Trophies', 80, 'rightAligned') + ] + + url_prefix = st.config.get_option('server.baseUrlPath') + '/' + + md_people = '' + md_org = '' +# for l in sorted(lst, key=lambda x: (-int(x.meta.get('last_participation_date','0')), +# for l in sorted(lst, key=lambda x: x.meta.get('name', +# x.meta.get('organization','')).lower()): + + for l in lst: + + row = {} + + m = l.meta + + # Skip from stats + if m.get('skip', False): + continue + + lpd = m.get('last_participation_date', '') + trophies = m.get('trophies', []) + ongoing = m.get('ongoing', []) + +# if lpd=='-' or (lpd!='' and int(lpd)<2023) : +# continue +# +# if len(ongoing)==0 and len(trophies)==0: +# continue + +# if lpd!='': + if True: + uid = m['uid'] + alias = m['alias'] + name = m.get('name', '') + org = m.get('organization', '') + + row['name_to_print'] = name if name != '' else org + + # Registration in the CK challenges gives 1 point + y1 = '' + y2 = '' + if len(ongoing) > 0: + y1 = '*' + y2 = ' (ongoing)*' + + row['points'] = calculate_points(m) + + row['ongoing_number'] = len(ongoing) + x = '' + for t in ongoing: + if t != '': + url = url_prefix + '?action=challenges&tags={}'.format(t) + x += '{}
    '.format( + url, t.replace('-', ' ').replace(',', ' ')) + + row['ongoing'] = x + + name2 = '' + + if name != '': + url = misc.make_url(name, alias=uid, md=False) + md_people += '* ' + misc.make_url(name, alias=uid) + '\n' + + if org != '': + name2 = ' ({})'.format(org) + + elif org != '': + url = misc.make_url(org, alias=alias, md=False) + md_org += '* ' + misc.make_url(org, alias=alias) + '\n' + name = org + + row['name'] = '{}{}'.format( + url_prefix + url, name, name2) + + row['trophies_number'] = len(trophies) + x = '' + for t in trophies: + url = t.get('url', '') + if url != '': + x += '🏆 '.format( + url) + + row['trophies'] = x + + all_data.append(row) + + # Visualize table + pd_keys = [v[0] for v in keys] + pd_key_names = [v[1] for v in keys] + pd_all_data = [] + for row in sorted(all_data, key=lambda row: (row.get('ongoing_number', 0) <= 0, + -row.get('points', 0), + -row.get('trophies_number', 0), + name_to_sort(row))): + pd_row = [] + for k in pd_keys: + pd_row.append(row.get(k)) + pd_all_data.append(pd_row) + + df = pd.DataFrame(pd_all_data, columns=pd_key_names) + + df.index += 1 + + x = ''' +
    + + + Check on-going challenges + and register here + to be added to this leaderboard. + + +
    +
    + '''.format(url_prefix) + + st.write(x, unsafe_allow_html=True) + + st.write( + '
    ' + + df.to_html( + escape=False, + justify='left') + + '
    ', + unsafe_allow_html=True) + + +# from st_aggrid import AgGrid, GridOptionsBuilder, GridUpdateMode +# from st_aggrid.shared import JsCode +# +# gb = GridOptionsBuilder.from_dataframe(df, editable=False) +# +# for k in keys: +# gb.configure_column( +# k[1], +# headerName=k[1], +# width=k[2], +# type=k[3], +# cellRenderer=JsCode(""" +# class UrlCellRenderer { +# init(params) { +# this.eGui = document.createElement('a'); +# this.eGui.innerHTML = params.value; +# } +# getGui() { +# return this.eGui; +# } +# } +# """) +# ) +# +# AgGrid(df, +# gridOptions=gb.build(), +# updateMode=GridUpdateMode.VALUE_CHANGED, +# enable_enterprise_modules=False, +# allow_unsafe_jscode=True) + +# st.write(grid) #, unsafe_allow_html = True) + +# st.dataframe(df) +# st.write(df.to_html(escape = False), unsafe_allow_html = True) + + +# if md_people!='': +# st.markdown("### The latest contributors (individuals)") +# st.markdown('Huge thanks to all our contributors for supporing this community project:') +# st.markdown(md_people) + + +# if md_org!='': +# st.markdown("### The latest contributors (organizations)") +# st.markdown(md_org) + + # Prepare list of all contributors + +# md = '' +# for l in sorted(lst, key=lambda x: x.meta.get('name',x.meta.get('organization','')).lower()): +# md += prepare_name(l.meta) +# +# if md!='': +# st.markdown("### All contributors (individuals and orgs)") +# st.markdown(md) + + return {'return': 0} + + +def name_to_sort(meta): + name = meta.get('name_to_print', '') + + xname = name.split(' ') + + sname = xname[-1].lower() + + return sname + + +def calculate_points(meta): + + points = 1 + + xpoints = meta.get('points', []) + for x in xpoints: + points += int(x.get('point', 0)) + + # Automatic challenges + points += len(meta.get('challenges', [])) + points += len(meta.get('ongoing', [])) + + return points + + +def prepare_name(meta): + alias = meta['alias'] + name = meta.get('name', '') + org = meta.get('organization', '') + + md = '' + if name != '': + md = '* ' + misc.make_url(name, alias=alias) + '\n' + elif org != '': + md = '* *' + misc.make_url(org, alias=alias) + '*\n' + + return md diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/playground_howtorun.py b/cmx4mlops/cmx4mlops/repo/script/gui/playground_howtorun.py new file mode 100644 index 000000000..e88f99c9f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/playground_howtorun.py @@ -0,0 +1,329 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import misc + +import streamlit.components.v1 as components + +import streamlit as st + +announcement = 'Under development - please get in touch via [Discord](https://discord.gg/JjWNWXKxwT) for more details ...' + + +def main(): + params = misc.get_params(st) + + # Set title + st.title('How to run benchmarks') + + st.markdown(announcement) + + return page(st, params) + + +def page(st, params, action=''): + + end_html = '' + + # Announcement +# st.markdown('----') + + url_script = misc.make_url('', key='', action='scripts', md=False) + + # Some info + x = ''' + + + This interface will help you generate a command line or Python API + to run modular benchmarks composed from + automation recipes (CM scripts). + Note that this is a collaborative engineering effort + to make sure that they work across all possible versions and configurations of models, data sets, software and hardware + - please report encountered issues and provide feedback + here + and get in touch via Discord! + + +
    +
    + '''.format(url_script) + + st.write(x, unsafe_allow_html=True) + +# st.markdown(announcement) + + ########################################################################## + # Select target hardware + compute_uid = '' + x = params.get('compute_uid', ['']) + if len(x) > 0 and x[0] != '': + compute_uid = x[0].strip() + + ii = {'action': 'load_cfg', + 'automation': 'utils', + 'tags': 'benchmark,compute', + 'skip_files': False} + + if compute_uid != '': + ii['prune'] = {'uid': compute_uid} + + r = cmind.access(ii) + if r['return'] > 0: + return r + + r = misc.make_selection( + st, + r['selection'], + 'compute', + 'target hardware', + compute_uid) + if r['return'] > 0: + return r + + compute_meta = r['meta'] +# st.markdown(compute_meta) + + ########################################################################## + # Select benchmark + bench_uid = '' + x = params.get('bench_uid', ['']) + if len(x) > 0 and x[0] != '': + bench_uid = x[0].strip() + + ii = {'action': 'load_cfg', + 'automation': 'utils', + 'tags': 'benchmark,list', + 'skip_files': False} + + if bench_uid != '': + ii['prune'] = {'uid': bench_uid} + + r = cmind.access(ii) + if r['return'] > 0: + return r + + # Prune by supported compute + selection = r['selection'] + pruned_selection = [] + + if len(compute_meta) == 0 or compute_meta.get('tags', '') == '': + pruned_selection = selection + else: + xtags = set(compute_meta['tags'].split(',')) + +# st.markdown(str(xtags)) + + for s in selection: + add = True + + supported_compute = s.get('supported_compute', []) + if len(supported_compute) > 0: + add = False + + for c in supported_compute: + cc = set(c.split(',')) + if cc.issubset(xtags): + add = True + break + + if add: + pruned_selection.append(s) + + # Make default selection of MLPerf inference + force_bench_index = 0 + if bench_uid == '': + j = 0 + for q in sorted(pruned_selection, key=lambda v: v['name']): + j += 1 + if q['uid'] == '39877bb63fb54725': + force_bench_index = j + + r = misc.make_selection( + st, + pruned_selection, + 'benchmark', + 'benchmark', + bench_uid, + force_index=force_bench_index) + if r['return'] > 0: + return r + + bench_meta = r['meta'] +# st.markdown(bench_meta) + + if len(bench_meta) > 0: + ####################################################################### + # Check common CM interface + + # st.markdown('---') + + urls = bench_meta.get('urls', []) + + script_path = '' + script_name = bench_meta.get('script_name', '') + script_meta = {} + script_obj = None + script_url = '' + if script_name != '': + ii = {'action': 'find', + 'automation': 'script', + 'artifact': script_name} + r = cmind.access(ii) + if r['return'] > 0: + return r + + lst = r['list'] + + if len(lst) > 0: + + script_obj = lst[0] + + script_meta = script_obj.meta + script_path = script_obj.path + script_repo_meta = script_obj.repo_meta + + script_alias = script_meta['alias'] + + repo_meta = script_obj.repo_meta + + url = repo_meta.get('url', '') + if url == '' and repo_meta.get('git', False): + url = 'https://github.com/' + \ + repo_meta['alias'].replace('@', '/') + + if url != '': + # Recreate GitHub path + if not url.endswith('/'): + url = url + '/' + + url += 'tree/master/' + + if repo_meta.get('prefix', '') != '': + url += repo_meta['prefix'] + + if not url.endswith('/'): + url = url + '/' + + url += 'script/' + script_alias + + script_url = url + + if not bench_meta.get('skip_extra_urls', False): + url_script = misc.make_url( + script_name, key='name', action='scripts', md=False) + url_script += '&gui=true' + + 
urls.append({'name': 'Universal CM GUI to run this benchmark', + 'url': url_script}) + + # Check if extra README + script_path_readme_extra = os.path.join( + script_path, 'README-extra.md') + + if os.path.isfile(script_path_readme_extra): + # Check README.extra.md + url_readme_extra = url + '/README-extra.md' + + urls.append({'name': 'Notes about how to run this benchmark from the command line', + 'url': url_readme_extra}) + + # Check URLS + if len(urls) > 0: + x = '\n' + for u in urls: + name = u['name'] + url = u['url'] + + x += '* [{}]({})\n'.format(name, url) + x += '\n' + + st.markdown(x) + + ####################################################################### + # Check if has customization + extra = {} + skip = False + + script_tags = script_meta.get('tags_help', '') + if script_tags == '': + script_tags = ','.join(script_meta.get('tags', [])) + + if script_obj is not None: + ii = {'st': st, + 'params': params, + 'meta': script_obj.meta, + 'misc_module': misc, + 'compute_meta': compute_meta, + 'bench_meta': bench_meta, + 'script_path': script_path, + 'script_tags': script_tags, + 'script_url': script_url} + + import sys + import importlib + + full_module_path = os.path.join(script_obj.path, 'customize.py') + + tmp_module = None + try: + found_automation_spec = importlib.util.spec_from_file_location( + 'customize', full_module_path) + if found_automation_spec is not None: + tmp_module = importlib.util.module_from_spec( + found_automation_spec) + found_automation_spec.loader.exec_module(tmp_module) +# tmp_module=importlib.import_module('customize') + except Exception as e: + st.markdown('WARNING: {}'.format(e)) + pass + + if tmp_module is not None: + if hasattr(tmp_module, 'gui'): + try: + func = getattr(tmp_module, 'gui') + except Exception as e: + return {'return': 1, 'error': format(e)} + + r = func(ii) + if r['return'] > 0: + return r + + extra = r.get('extra', {}) + skip = r.get('skip', False) + + ####################################################################### + # Show official GUI + if script_path != '' and not skip: + import script + + ii = {'st': st, + 'params': params, + 'script_path': script_path, + 'script_alias': script_alias, + 'script_tags': script_tags, + 'script_meta': script_meta, + 'script_repo_meta': script_repo_meta, + 'skip_bottom': True, + 'extra': extra} + + rr = script.page(ii) + if rr['return'] > 0: + return rr + + end_html += '\n' + rr.get('end_html', '') + + ####################################################################### + self_url = misc.make_url( + bench_meta['uid'], + action='howtorun', + key='bench_uid', + md=False) + + if len(compute_meta) > 0: + self_url += '&compute_uid=' + compute_meta['uid'] + + end_html = '
<center><small><a href="{}">Self link</a></small></center>
    '.format( + self_url) + + return {'return': 0, 'end_html': end_html} diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/playground_install.py b/cmx4mlops/cmx4mlops/repo/script/gui/playground_install.py new file mode 100644 index 000000000..a0fb3a861 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/playground_install.py @@ -0,0 +1,147 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import datetime +import misc + + +def page(st, params, extra): + + end_html = '' + + url_prefix = st.config.get_option('server.baseUrlPath') + '/' + + if not extra.get('skip_header', False): + st.markdown('---') + st.markdown( + '**Install [MLCommons Collective Mind automation framework](https://github.com/mlcommons/ck):**') + + md = '' + + ################################################################### + # Select OS + choices = [('Ubuntu, Debian and similar Linux', 'linux'), + ('Red Hat and CentOS', 'redhat'), + ('MacOS', 'macos'), + ('Windows', 'windows')] + + host_os_selection = 0 + + if extra.get('run_on_windows', False): + host_os_selection = 3 + + host_os = st.selectbox('Select your host OS:', + range(len(choices)), + format_func=lambda x: choices[x][0], + index=host_os_selection, + key='install_select_host_os') + + host_os_index = choices[host_os][1] + + cur_script_file = __file__ + cur_script_path = os.path.dirname(cur_script_file) + + notes = os.path.join(cur_script_path, 'install', host_os_index + '.md') + + if os.path.isfile(notes): + r = cmind.utils.load_txt(notes) + if r['return'] > 0: + return r + s = r['string'] + if s != '': + show = st.toggle('Show system dependencies?', value=True) + if show: + md += s + + need_user = '' + python = 'python3' + if host_os_index == 'redhat': + need_user = ' --user' + elif host_os_index == 'windows': + python = 'python' + + ################################################################### + # Select repository + + choices = [ + ('Development GitHub version: mlcommons@cm4mlops', 'dev'), + ('Stable GitHub version: mlcommons@cm4mlops', 'main'), + ('Stable ZIP archive from Zenodo: 20240306', 'zenodo'), + ('Stable ZIP archive from GitHub: 20240416', 'zip-github') + ] + + repo = st.selectbox('Select repository with [automation recipes (CM scripts)](https://access.cknowledge.org/playground/?action=scripts):', + range(len(choices)), + format_func=lambda x: choices[x][0], + index=0, + key='select_repo') + + repo_index = choices[repo][1] + + # Add stable repo from Zenodo + if repo_index == 'dev': + cm_repo = 'mlcommons@cm4mlops --checkout=dev' + elif repo_index == 'zenodo': + cm_repo = '--url=https://zenodo.org/records/10787459/files/cm-mlops-repo-20240306.zip' + elif repo_index == 'zip-github': + cm_repo = '--url=https://github.com/mlcommons/cm4mlops/archive/refs/tags/r20240416.zip --skip-zip-parent-dir' + else: + cm_repo = 'mlcommons@cm4mlops' + + x = '{} -m pip install cmind -U {}\n\n'.format(python, need_user) + x += 'cm test core \n\n' + x += 'cm pull repo {}\n\n'.format(cm_repo) + + clean_cm_cache = st.toggle( + 'Clean CM cache', + value=True, + key='install_clean_cm_cache') + + cm_clean_cache = 'cm rm cache -f\n\n' if clean_cm_cache else '' + + x += cm_clean_cache + + python_venv_name = params.get('@adr.python.name', '') + python_ver_min = params.get('@adr.python.version_min', '') + python_ver = params.get('@adr.python.version', '') + + if python_venv_name == '': + use_python_venv = st.toggle( + 'Use Python Virtual Environment for CM scripts?', value=False) + if use_python_venv: + python_venv_name = st.text_input( + 'Enter some CM python 
venv name for your project:', + value="mlperf-v4.0") + + if python_ver_min == '': + python_ver_min = st.text_input( + '[Optional] Specify min version such as 3.8:') + + y = '' + if python_venv_name != '': # or python_ver!='' or python_ver_min!='': + y = 'cm run script "get sys-utils-cm"\n' + + if python_venv_name != '': + y += 'cm run script "install python-venv" --name=' + \ + str(python_venv_name) + else: + y += 'cm run script "get python"' + + if python_ver != '': + y += ' --version=' + str(python_ver) + + if python_ver_min != '': + y += ' --version_min=' + str(python_ver_min) + + if y != '': + x += y + + md += '```bash\n{}\n```\n'.format(x) + + st.markdown('---') + st.markdown(md) + st.markdown( + '*Check [more CM installation notes at GitHub](https://github.com/mlcommons/ck/blob/master/docs/installation.md)*.') + + return {'return': 0, 'end_html': end_html} diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/playground_reports.py b/cmx4mlops/cmx4mlops/repo/script/gui/playground_reports.py new file mode 100644 index 000000000..78cc7b2c4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/playground_reports.py @@ -0,0 +1,144 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import datetime +import misc + + +def page(st, params): + + url_prefix = st.config.get_option('server.baseUrlPath') + '/' + + name = params.get('name', [''])[0].strip() + tags = params.get('tags', [''])[0].lower() + + ii = {'action': 'find', + 'automation': 'report,6462ecdba2054467'} + + if name != '': + ii['artifact'] = name + if tags != '': + ii['tags'] = tags + + r = cmind.access(ii) + if r['return'] > 0: + return r + + lst = r['list'] + + end_html = '' + + ########################################################################## + if len(lst) == 0: + st.markdown('Reports were not found!') + + ########################################################################## + elif len(lst) == 1: + l = lst[0] + + meta = l.meta + + uid = meta['uid'] + + title = meta.get('title', meta['alias']) + + path = l.path + + x = ''' +
+           <center>
+            <h1>Community report</h1>
+           </center>
+
+           <center>
+            <h2>{}</h2>
+           </center>
    + '''.format(title) + + st.write(x, unsafe_allow_html=True) + + end_html = '
<center><small><a href="{}">Self link</a></small></center>
    '.format( + misc.make_url(meta['uid'], action='reports', md=False)) + + # Check basic password + password_hash = meta.get('password_hash', '') + view = True + if password_hash != '': + view = False + + password = st.text_input( + "Enter password", + type="password", + key="password") + + if password != '': + import bcrypt + # TBD: temporal hack to demo password protection for + # experiments + password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e' + password_hash2 = bcrypt.hashpw( + password.encode('utf-8'), password_salt) + + if password_hash.encode('utf-8') == password_hash2: + view = True + else: + st.markdown('**Warning:** wrong password') + + if not view: + return {'return': 0, 'end_html': end_html} + + # Check if has text + for f in ['README.md']: + f1 = os.path.join(path, f) + if os.path.isfile(f1): + r = cmind.utils.load_txt(f1) + if r['return'] > 0: + return r + + s = r['string'] + + st.markdown('---') + + if f.endswith('.html'): + y = s.split('\n') + ss = '' + for x in y: + ss += x.strip() + '\n' + + st.write(ss, unsafe_allow_html=True) + else: + st.markdown(s) + + break + + ########################################################################## + else: + reports = [] + + md = '' + + for l in sorted(lst, key=lambda x: x.meta.get( + 'date', ''), reverse=True): + + meta = l.meta + + if meta.get('private', False): + continue + + uid = meta['uid'] + + title = meta.get('title', meta['alias']) + + url = meta.get('redirect', '') + if url == '': + url = url_prefix + '?action=reports&name={}'.format(uid) + + md += '* [' + title + '](' + url + ')\n' + + x = ''' +
+           <center>
+            <h1>Community reports</h1>
+           </center>
+
    + ''' + st.write(x, unsafe_allow_html=True) + + st.markdown(md) + + return {'return': 0, 'end_html': end_html} diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/playground_reproduce.py b/cmx4mlops/cmx4mlops/repo/script/gui/playground_reproduce.py new file mode 100644 index 000000000..9e82a686e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/playground_reproduce.py @@ -0,0 +1,460 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import misc + +import streamlit.components.v1 as components + +import streamlit as st + +import json + +announcement = 'Under development - please get in touch via [Discord](https://discord.gg/JjWNWXKxwT) for more details ...' + +badges = { + 'functional': {'url': 'https://cTuning.org/images/artifacts_evaluated_functional_v1_1_small.png'}, + 'reproduced': {'url': 'https://cTuning.org/images/results_reproduced_v1_1_small.png'}, + 'support_docker': {'url': 'https://cTuning.org/images/docker_logo2_small.png'}, + 'support_cm': {'url': 'https://cTuning.org/images/logo-ck-single-tr4.png'} +} + + +def main(): + params = misc.get_params(st) + + # Set title + st.title('Reproducibility studies') + + st.markdown(announcement) + + return page(st, params) + + +def page(st, params, action=''): + + end_html = '' + + +# st.markdown('----') + + self_url = misc.make_url('', key='', action='reproduce', md=False) + url_benchmarks = misc.make_url('', key='', action='howtorun', md=False) + url_challenges = misc.make_url('', key='', action='challenges', md=False) + + # Some info + x = ''' + + + [Under development] This is a new project to reproduce modular benchmarks + across different models, data sets, software and hardware + via open challenges + based on the ACM/cTuning reproducibility methodology and badges + and automatically compose + High-Performance and Cost-Efficient AI Systems with MLCommons' Collective Mind and MLPerf. + Note that this is a collaborative engineering effort + - please report encountered issues and provide feedback + here + and get in touch via Discord! + + +
    +
    + '''.format(url_benchmarks, url_challenges) + + st.write(x, unsafe_allow_html=True) + + return {'return': 0} + + +# st.markdown(announcement) + + # Check if test is selected + test_uid = '' + x = params.get('test_uid', ['']) + if len(x) > 0 and x[0] != '': + test_uid = x[0].strip() + + ########################################################################## + # Select target hardware + compute_uid = '' + compute_meta = {} + compute_selection = [] + + if test_uid == '': + x = params.get('compute_uid', ['']) + if len(x) > 0 and x[0] != '': + compute_uid = x[0].strip() + + ii = {'action': 'load_cfg', + 'automation': 'utils', + 'tags': 'benchmark,compute', + 'skip_files': False} + + if compute_uid != '': + ii['prune'] = {'uid': compute_uid} + + r = cmind.access(ii) + if r['return'] > 0: + return r + compute_selection = r['selection'] + + if test_uid == '': + r = misc.make_selection( + st, + r['selection'], + 'compute', + 'target hardware', + compute_uid) + if r['return'] > 0: + return r + compute_meta = r['meta'] + compute_uid = compute_meta.get('uid', '') + + ########################################################################## + # Select benchmark + bench_meta = {} + + bench_name = '' + x = params.get('bench_name', ['']) + if len(x) > 0 and x[0] != '': + bench_name = x[0].strip() + + if test_uid == '': + ii = {'action': 'load_cfg', + 'automation': 'utils', + 'tags': 'benchmark,run', + 'skip_files': True} + + if bench_name != '': + ii['artifact'] = bench_name + + r = cmind.access(ii) + if r['return'] > 0: + return r + + # Prune by supported compute + selection = r['selection'] + pruned_selection = [] + + if compute_uid == '': + pruned_selection = selection + else: + for s in selection: + add = True + + if compute_uid in s.get('supported_compute', []): + pruned_selection.append(s) + + r = misc.make_selection( + st, + pruned_selection, + 'benchmark', + 'benchmark', + bench_name) + if r['return'] > 0: + return r + + bench_meta = r['meta'] + + ########################################################################## + # Select tests + if test_uid == '' and compute_uid == '' and len(bench_meta) == 0: + st.markdown('*Please prune search by device and/or benchmark ...*') + + else: + ii = {'action': 'load_cfg', + 'automation': 'utils', + 'tags': 'benchmark,run', + 'key': 'run-', + 'key_end': ['-meta.json', '-meta.yaml'], + 'skip_files': False} + + if len(bench_meta) > 0 or bench_name != '': + if len(bench_meta) > 0: + ii['artifact'] = bench_meta['uid'] + else: + ii['artifact'] = bench_name + elif compute_uid != '': + ii['prune'] = {'meta_key': 'supported_compute', + 'meta_key_uid': compute_uid} + + if compute_uid != '': + if 'prune' not in ii: + ii['prune'] = {} + ii['prune']['key'] = 'compute_uid' + ii['prune']['key_uid'] = compute_uid + + if test_uid != '': + if 'prune' not in ii: + ii['prune'] = {} + ii['prune']['uid'] = test_uid + + r = cmind.access(ii) + if r['return'] > 0: + return r + + # Prune by supported compute + selection = r['selection'] + + if len(selection) == 0: + st.markdown('*WARNING: No tests found!*') + else: + if len(selection) == 1: + ############################################################### + # Show individual test + s = selection[0] + + full_path = s['full_path'] + test_uid = s['uid'] + + st.markdown('---') + st.markdown('**Test {}**'.format(test_uid)) + + # Check badges + x = '' + + for b in badges: + if s.get(b, False) or b == 'support_cm': + x += '\n'.format( + badges[b]['url']) + + if x != '': + st.write(x, unsafe_allow_html=True) + + # Check benchmark + 
bench_uid = s.get('bench_uid', '') + if bench_uid != '': + url_bench = url_benchmarks + '&bench_uid=' + bench_uid + st.markdown( + '[Link to benchmark GUI]({})'.format(url_bench)) + + # Check notes + test_md = full_path[:-10] + '.md' + if os.path.isfile(test_md): + + r = cmind.utils.load_txt(test_md) + if r['return'] > 0: + return r + + x = r['string'] + + if x != '': + st.markdown('**Notes:**') + st.markdown(x) + + inp = {} + input_file = full_path[:-10] + '-input' + r = cmind.utils.load_yaml_and_json(input_file) + if r['return'] == 0: + inp = r['meta'] + + out = {} + output_file = full_path[:-10] + '-output' + r = cmind.utils.load_yaml_and_json(output_file) + if r['return'] == 0: + out = r['meta'] + + cmd = inp.get('cmd', []) + if len(cmd) > 0: + xcmd = ' \\\n '.join(cmd) + + st.markdown(""" + **CM command line:** + ```bash + cm run script {} + ``` + """.format(xcmd)) + + st.markdown(""" +**CM input dictionary:** +```json +{} +``` + """.format(json.dumps(inp, indent=2))) + + st.markdown(""" +**CM input dictionary:** +```json +{} +``` + """.format(json.dumps(out, indent=2))) + + st.markdown(""" + +**Test meta:** +```json +{} +``` + """.format(json.dumps(s, indent=2))) + + else: + ############################################################### + # Show tables + import pandas as pd + import numpy as np + + html = '' + + all_data = [] + + # TBD: should be taken from a given benchmark + dimensions = [] + + if len(bench_meta) > 0: + dimensions = bench_meta.get('view_dimensions', []) + + dimension_values = {} + dimension_keys = [] + + if len(dimensions) == 0: + keys = [('test', 'CM test', 400, 'leftAligned')] + else: + keys = [('test', 'CM test', 50, 'leftAligned')] + + for k in dimensions: + key = k[0] + + keys.append((k[0], k[1], 100, 'leftAligned')) + + dimension_values[key] = [] + dimension_keys.append(key) + + # If dimensions, sort by dimensions + for d in list(reversed(dimension_keys)): + selection = sorted( + selection, key=lambda x: misc.get_with_complex_key_safe( + selection, d)) + + keys += [ + ('functional', 'Functional', 80, ''), + ( + 'reproduced', + 'Reproduced', + 80, + ''), + ('support_docker', 'Support Docker', 80, ''), + ( + 'support_cm', + 'Has unified CM interface', + 80, + ''), + ('notes', 'Notes', 200, 'lefAligned'), + ] + + j = 0 + + for s in selection: + + row = {} + + full_path = s['full_path'] + test_uid = s['uid'] + + uid = s['uid'] + + url_test = misc.make_url( + uid, key='test_uid', action='reproduce', md=False) + + bench_meta = s['main_meta'] + + inp = {} + input_file = full_path[:-10] + '-input' + r = cmind.utils.load_yaml_and_json(input_file) + if r['return'] == 0: + inp = r['meta'] + + out = {} + output_file = full_path[:-10] + '-output' + r = cmind.utils.load_yaml_and_json(output_file) + if r['return'] == 0: + out = r['meta'] + + row_meta = {'dict': s, + 'input': inp, + 'output': out} + + if len(dimensions) == 0: + row['test'] = '{}'.format( + url_test, uid) + else: + row['test'] = 'View'.format( + url_test) + for k in dimensions: + kk = k[0] + + v = misc.get_with_complex_key_safe(row_meta, kk) + + if len(k) > 2 and k[2] == 'tick': + if v is not None and v != '': + v = '✅' + + row[kk] = str(v) + + # Check ACM/IEEE functional badge + url = '' + + x = '' + if s.get('functional', False): + x = '
<a href="{}"><img src="{}"></a>'.format(
+                            url, badges['functional']['url'])
+                    row['functional'] = x
+
+                    # Check ACM/IEEE reproduced badge
+                    x = ''
+                    if s.get('reproduced', False):
+                        x = '<a href="{}"><img src="{}"></a>'.format(
+                            url, badges['reproduced']['url'])
+                    row['reproduced'] = x
+
+                    # Check Docker
+                    x = ''
+                    if s.get('support_docker', False):
+                        x = '<a href="{}"><img src="{}"></a>'.format(
+                            url, badges['support_docker']['url'])
+                    row['support_docker'] = x
+
+                    x = ''
+                    bench_uid = s.get('bench_uid', '')
+                    if bench_uid != '':
+                        url_bench = url_benchmarks + '&bench_uid=' + bench_uid
+                        x = '<a href="{}"><img src="{}"></a>
    '.format( + url_bench, badges['support_cm']['url']) + row['support_cm'] = x + + # Check misc notes + row['notes'] = '' + s.get('notes', '') + '' + + # Finish row + all_data.append(row) + + # Visualize table + pd_keys = [v[0] for v in keys] + pd_key_names = [v[1] for v in keys] + + pd_all_data = [] + for row in sorted( + all_data, key=lambda row: (row.get('x1', 0))): + pd_row = [] + for k in pd_keys: + pd_row.append(row.get(k)) + pd_all_data.append(pd_row) + + df = pd.DataFrame(pd_all_data, columns=pd_key_names) + + df.index += 1 + + html = df.to_html(escape=False, justify='left') + st.write(html, unsafe_allow_html=True) + + if bench_name != '': + self_url += '&bench_name=' + bench_name + if test_uid != '': + self_url += '&test_uid=' + test_uid + elif compute_uid != '': + self_url += '&compute_uid=' + compute_uid + + end_html = '
<center><small><a href="{}">Self link</a></small></center>
    '.format( + self_url) + + return {'return': 0, 'end_html': end_html} diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/playground_scripts.py b/cmx4mlops/cmx4mlops/repo/script/gui/playground_scripts.py new file mode 100644 index 000000000..71a7becd2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/playground_scripts.py @@ -0,0 +1,344 @@ +# Developer(s): Grigori Fursin + +import cmind +import os +import datetime +import misc + + +def page(st, params): + + url_prefix = st.config.get_option('server.baseUrlPath') + '/' + url_prefix_script = url_prefix + '?action=scripts' + + script_name = '' + x = params.get('name', ['']) + if len(x) > 0 and x[0] != '': + script_name = x[0].strip() + + script_tags = '' + if script_name == '': + x = params.get('tags', ['']) + if len(x) > 0 and x[0] != '': + script_tags = x[0].strip() + + if script_tags == 'modular,app': + x = ''' + + + This is a new project to automatically compose AI applications that can run across diverse models, data sets, software and hardware + - please check our presentation at the MLPerf-Bench workshop @ HPCA'24 + and get in touch via Discord! + + +
    +
    + ''' + + else: + x = ''' + + + Collective Mind is a collection of open-source, portable, extensible and ready-to-use + automation scripts with a human-friendly interface and minimal dependencies to make it easier to compose, benchmark and optimize + complex AI, ML and other applications and systems across diverse and continuously changing models, data sets, software and hardware. + Note that this is a collaborative engineering effort + to make sure that they work across all possible versions and configurations + - please report encountered issues and provide feedback + here + and get in touch via Discord! + + +
    +
    + ''' + + st.write(x, unsafe_allow_html=True) + + script_tags = st.text_input( + 'Search open-source automation recipes by tags:', + value=script_tags, + key='script_tags').strip() + + # Searching automation recipes + + ii = {'action': 'find', + 'automation': 'script,5b4e0237da074764'} + + if script_tags != '': + script_tags = script_tags.replace(' ', ',') + ii['tags'] = script_tags + elif script_name != '': + ii['artifact'] = script_name + + # Check variations for later: + variations = [v for v in script_tags.split(',') if v.startswith('_')] + + r = cmind.access(ii) + if r['return'] > 0: + return r + + lst2 = r['list'] + + lst = [v for v in lst2 if not v.meta.get('private', False)] + + end_html = '' + + if len(lst) == 0: + st.markdown('CM scripts were not found!') + else: + artifact = None + + if len(lst) == 1: + # Individual script + recipe = lst[0] + + meta = recipe.meta + + alias = meta['alias'] + uid = meta['uid'] + + use_gui = False + x = params.get('gui', ['']) + if len(x) > 0 and (x[0].lower() == + 'true' or x[0].lower() == 'yes'): + import script + + script_path = recipe.path + script_alias = alias + +# script_tags = script_tags + if script_tags == '': + script_tags = meta.get('tags_help', '') + if script_tags != '': + script_tags = script_tags.replace(' ', ',') + else: + script_tags = ','.join(meta.get('tags', [])) + + ii = {'st': st, + 'params': params, + 'script_path': script_path, + 'script_alias': script_alias, + 'script_tags': script_tags, + 'script_meta': meta, + 'script_repo_meta': recipe.repo_meta, + 'skip_bottom': True} + + return script.page(ii) + + else: + + st.markdown('### CM script "{}" ({})'.format(alias, uid)) + + repo_meta = recipe.repo_meta + + # Basic run + tags = meta['tags_help'] if meta.get( + 'tags_help', '') != '' else ' '.join( + meta['tags']) + + x1 = misc.make_url( + tags.replace( + ' ', + ','), + key='tags', + action='scripts', + md=False, + skip_url_quote=True) + x2 = misc.make_url(meta['alias'], action='scripts', md=False) + x3 = misc.make_url(meta['uid'], action='scripts', md=False) + end_html = '
<center><small>Self links: <a href="{}">tags</a> or <a href="{}">alias</a> or <a href="{}">UID</a></small></center>
    '.format( + x1, x2, x3) + + extra_repo = '' if repo_meta['alias'] == 'mlcommons@cm4mlops' else '\ncm pull repo ' + \ + repo_meta['alias'] + + xtags = tags + if len(variations) > 0: + if xtags != '': + xtags += ' ' + xtags += ' '.join(variations) + + x = ''' +```bash +pip install cmind -U +cm test core +cm pull repo mlcommons@cm4mlops --checkout=dev{} + +cm run script "{}" +``` + +A few other popular commands: +```bash +cmr "{}" --help +cmr "{}" --shell +cm run script "{}" --shell +cm docker script "{}" +cm gui script "{}" +``` + + '''.format(extra_repo, xtags, xtags, xtags, xtags, xtags, xtags) + + # Check original link + + url = repo_meta.get('url', '') + if url == '' and repo_meta.get('git', False): + url = 'https://github.com/' + \ + repo_meta['alias'].replace('@', '/') + + url_readme = '' + url_readme_extra = '' + url_meta_description = '' + url_customize = '' + + if url != '': + # Recreate GitHub path + if not url.endswith('/'): + url = url + '/' + + url += 'tree/master/' + + if repo_meta.get('prefix', '') != '': + url += repo_meta['prefix'] + + if not url.endswith('/'): + url = url + '/' + + url += 'script/' + alias + + # Check README.md + z = os.path.join(recipe.path, 'README.md') + if os.path.isfile(z): + url_readme = url + '/README.md' + + # Check README.extra.md + z = os.path.join(recipe.path, 'README-extra.md') + if os.path.isfile(z): + url_readme_extra = url + '/README-extra.md' + + # Check customize.py + z = os.path.join(recipe.path, 'customize.py') + if os.path.isfile(z): + url_customize = url + '/customize.py' + + # Check _cm.yaml or _cm.json + for z in ['_cm.yaml', '_cm.json']: + y = os.path.join(recipe.path, z) + if os.path.isfile(y): + url_meta_description = url + '/' + z + + url_gui = url_prefix_script + '&name=' + alias + ',' + uid + '&gui=true' + + z = '* ***Check [open source code (Apache 2.0 license)]({}) at GitHub.***\n'.format( + url) + z += '* ***Check [detailed auto-generated README on GitHub]({}).***\n'.format( + url_readme) + z += '* ***Check [experimental GUI]({}) to run this script.***\n'.format( + url_gui) + z += '---\n' + + st.markdown(z) + + st.markdown( + 'Default run on Linux, Windows, MacOS and any other OS (check [CM installation guide]({}) for more details):\n{}\n'.format( + url_prefix + + '?action=install', + x)) + + st.markdown('*The [Collective Mind concept](https://doi.org/10.5281/zenodo.8105339) is to gradually improve portability and reproducibility of common automation recipes based on user feedback' + ' while keeping the same human-friendly interface. 
If you encounter issues, please report them [here](https://github.com/mlcommons/ck/issues) ' + ' to help this community project!*') + + if url_readme_extra != '': + st.markdown( + '* See [extra README]({}) for this automation recipe at GitHub.'.format(url_readme_extra)) + + if url_meta_description != '': + st.markdown( + '* See [meta description]({}) for this automation recipe at GitHub.'.format(url_meta_description)) + + if url_customize != '': + st.markdown( + '* See [customization python code]({}) for this automation recipe at GitHub.'.format(url_customize)) + + # Check dependencies + r = misc.get_all_deps_tags({'meta': meta, 'st': st}) + if r['return'] > 0: + return r + + all_deps_tags = r['all_deps_tags'] + + if len(all_deps_tags) > 0: + st.markdown('**Dependencies on other CM scripts:**') + + x = '' + for t in sorted(all_deps_tags): + # Test that it's not just extending tags: + if t.startswith('_') or ',' not in t: + continue + + url_deps = url_prefix_script + '&tags=' + t + + x += '* [{}]({})\n'.format(t, url_deps) + + st.markdown(x) + + else: + categories = {} + + for l in sorted(lst, key=lambda x: ( + x.meta.get('alias', '') + )): + + category = l.meta.get('category', '') + if category == '': + category = 'Unsorted' + + if category not in categories: + categories[category] = [] + + categories[category].append(l) + + if len(categories) > 1: + category_selection = [ + ''] + sorted(list(categories.keys()), key=lambda v: v.upper()) + + # Creating compute selector + category_id = st.selectbox('Prune by category:', + range(len(category_selection)), + format_func=lambda x: category_selection[x], + index=0, + key='category') + + if category_id > 0: + category_key = category_selection[category_id] + categories = {category_key: categories[category_key]} + + # Check number of recipes + recipes = 0 + for category in sorted(categories, key=lambda v: v.upper()): + recipes += len(categories[category]) + + x = ''' + + Found {} automation recipes: + + '''.format(str(recipes)) + st.write(x, unsafe_allow_html=True) + + for category in sorted(categories, key=lambda v: v.upper()): + md = '### {}'.format(category) + '\n' + + for recipe in categories[category]: + meta = recipe.meta + + alias = meta['alias'] + uid = meta['uid'] + + url = url_prefix_script + '&name=' + alias + ',' + uid + + md += '* [{}]({})'.format(alias, url) + '\n' + + st.markdown(md) + + return {'return': 0, 'end_html': end_html} diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/run.bat b/cmx4mlops/cmx4mlops/repo/script/gui/run.bat new file mode 100644 index 000000000..0e6029ed7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/run.bat @@ -0,0 +1,2 @@ +streamlit run %CM_TMP_CURRENT_SCRIPT_PATH%\%CM_GUI_APP%.py %CM_GUI_EXTRA_CMD% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/run.sh b/cmx4mlops/cmx4mlops/repo/script/gui/run.sh new file mode 100644 index 000000000..135cfe54f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +streamlit run ${CM_TMP_CURRENT_SCRIPT_PATH}/${CM_GUI_APP}.py ${CM_GUI_EXTRA_CMD} +test $? -eq 0 || exit $? 
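The playground scripts page above resolves automation recipes through the `cmind` Python API (a `find` call on the `script` automation) before rendering them. A minimal standalone sketch of that same lookup, assuming `cmind` is installed and the `mlcommons@cm4mlops` repository is pulled (the tags are illustrative):

```python
import cmind

# Find CM scripts by tags - the same 'find' call the GUI issues above
r = cmind.access({'action': 'find',
                  'automation': 'script,5b4e0237da074764',
                  'tags': 'install,python-venv'})
if r['return'] > 0:
    print(r['error'])
else:
    for artifact in r['list']:
        # Each returned artifact carries its meta dictionary and disk path
        print(artifact.meta['alias'], artifact.path)
```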
diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/script.py b/cmx4mlops/cmx4mlops/repo/script/gui/script.py new file mode 100644 index 000000000..ededecbec --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/script.py @@ -0,0 +1,493 @@ +# Developer(s): Grigori Fursin + +import streamlit as st +import os +import cmind + +import misc + + +def page(i): + + st = i['st'] + params = i['params'] + script_path = i['script_path'] + script_alias = i['script_alias'] + script_tags = i['script_tags'] + skip_bottom = i.get('skip_bottom', False) + + extra = i.get('extra', {}) + + meta = i['script_meta'] + repo_meta = i.get('script_repo_meta', None) + + no_run = os.environ.get('CM_GUI_NO_RUN', '') + + gui_meta = meta.get('gui', {}) + + gui_func = gui_meta.get('use_customize_func', '') + if gui_func != '': + ii = {'streamlit_module': st, + 'meta': meta} + return cmind.utils.call_internal_module(None, os.path.join(script_path, 'dummy'), + 'customize', gui_func, ii) + + st.markdown("""---""") + + if gui_meta.get('title', '') != '': + title = gui_meta['title'] + + # Set title +# st.title('[Collective Mind](https://github.com/mlcommons/ck)') + + url_script = 'https://github.com/mlcommons/ck' + if repo_meta is not None and script_alias != '': + url = repo_meta.get('url', '') + if url == '' and repo_meta.get('git', False): + url = 'https://github.com/' + repo_meta['alias'].replace('@', '/') + + if url != '': + # Recreate GitHub path + if not url.endswith('/'): + url = url + '/' + + url += 'tree/master/' + + if repo_meta.get('prefix', '') != '': + url += repo_meta['prefix'] + + if not url.endswith('/'): + url = url + '/' + + url += 'script/' + script_alias + + url_script = url + + hide = params.get('hide_script_customization', False) + + if script_alias != '': + show_customize = st.toggle( + '**Customize input for the CM script "[{}]({})"**'.format( + script_alias, url_script), value=not hide) + hide = not show_customize + + # Check if found path and there is meta + # TBD (Grigori): need to cache it using @st.cache + variation_groups = {} + default_variations = [] + variation_md = {} + variation_alias = {} + + st_inputs = {} + + st_variations = {} + + if len(meta) > 0: + variations = meta.get('variations', {}) + + default_variation = meta.get('default_variation', '') + + variation_keys = sorted(list(variations.keys())) + + for variation_key in sorted(variation_keys): + variation = variations[variation_key] + + alias = variation.get('alias', '').strip() + + if alias != '': + aliases = variation_alias.get(alias, []) + if variation_key not in aliases: + aliases.append(variation_key) + variation_alias[alias] = aliases + + # Do not continue this loop if alias + continue + + if 'default_gui' in variation: + default = variation['default_gui'] + else: + default = variation.get('default', False) + + if not default: + # Check outdated + if default_variation == variation_key: + default = True + + extra1 = '' + extra2 = '' + if default: + extra1 = '**' + extra2 = '** (default)' + + default_variations.append(variation_key) + + group = variation.get('group', '') + + if variation_key.endswith('_'): + group = '*internal*' + elif group == '': + group = '*no-group*' + + if group not in variation_groups: + variation_groups[group] = [] + + variation_groups[group].append(variation_key) + + # Prepare variation_groups + if len(variations) > 0: + if not hide: + st.markdown( + '**Select variations to update multiple flags and environment variables:**') + + variation_groups_order = meta.get('variation_groups_order', []) + for variation 
in sorted(variation_groups):
+                if variation not in variation_groups_order:
+                    variation_groups_order.append(variation)
+
+            for group_key in variation_groups_order:
+                group_key_cap = group_key.replace('-', ' ').capitalize()
+                if not group_key.startswith('*'):
+                    y = ['']
+
+                    index = 0
+                    selected_index = 0
+                    for variation_key in sorted(variation_groups[group_key]):
+                        index += 1
+                        y.append(variation_key)
+                        if variation_key in default_variations:
+                            selected_index = index
+
+                    key2 = '~~' + group_key
+
+                    x = params.get(key2, None)
+                    if x != None and len(x) > 0 and x[0] != None:
+                        x = x[0]
+                        if x in y:
+                            selected_index = y.index(x) if x in y else 0
+
+                    if hide:
+                        st_variations[key2] = sorted(y)[selected_index]
+                    else:
+                        st_variations[key2] = st.selectbox(
+                            group_key_cap, sorted(y), index=selected_index, key=key2)
+
+                elif group_key == '*no-group*':
+                    for variation_key in sorted(variation_groups[group_key]):
+                        v = False
+                        if variation_key in default_variations:
+                            v = True
+
+                        key2 = '~' + variation_key
+
+                        x = params.get(key2, None)
+                        if x != None and len(x) > 0 and x[0] != None:
+                            if x[0].lower() == 'true':
+                                v = True
+                            elif x[0].lower() == 'false':
+                                v = False
+
+                        if hide:
+                            st_variations[key2] = v
+                        else:
+                            st_variations[key2] = st.checkbox(
+                                variation_key.capitalize(), key=key2, value=v)
+
+        # Prepare inputs
+        input_desc = meta.get('input_description', {})
+
+        if len(input_desc) > 0:
+
+            sort_desc = {}
+            sort_keys = []
+            for k in input_desc:
+                sort = input_desc[k].get('sort', 0)
+                if sort > 0:
+                    sort_desc[k] = sort
+            if len(sort_desc) > 0:
+                sort_keys = sorted(sort_desc, key=lambda k: sort_desc[k])
+
+            other_keys = sorted(
+                [k for k in input_desc if input_desc[k].get('sort', 0) == 0])
+
+            all_keys = [] if len(sort_keys) == 0 else sort_keys
+            all_keys += other_keys
+
+            if not hide:
+                if len(sort_keys) > 0:
+                    st.markdown('**Select main flags:**')
+                else:
+                    st.markdown('**Select all flags:**')
+
+            other_flags = False
+            for key in all_keys:
+                value = input_desc[key]
+
+                if len(sort_keys) > 0 and value.get(
+                        'sort', 0) == 0 and not other_flags:
+                    if not hide:
+                        st.markdown('**Select other flags:**')
+                    other_flags = True
+
+                ii = {'key': key,
+                      'desc': value,
+                      'params': params,
+                      'st': st,
+                      'st_inputs': st_inputs,
+                      'hide': hide}
+
+                r2 = misc.make_selector(ii)
+                if r2['return'] > 0:
+                    return r2
+
+    # Check tags
+    selected_variations = []
+    for k in st_variations:
+        v = st_variations[k]
+
+        if k.startswith('~~'):
+            k2 = k[2:]
+        elif k.startswith('~'):
+            k2 = k[1:]
+
+        if isinstance(v, bool):
+            if v:
+                selected_variations.append('_' + k2)
+        elif v != '':
+            selected_variations.append('_' + v)
+
+    x = script_tags
+    if ' ' in script_tags:
+        if len(selected_variations) > 0:
+            x += ' ' + ' '.join(selected_variations)
+
+        tags = '"{}"'.format(x)
+    else:
+        if len(selected_variations) > 0:
+            x += ',' + ','.join(selected_variations)
+
+        tags = '--tags={}'.format(x)
+
+    # Add extras to inputs
+    add_to_st_inputs = extra.get('add_to_st_inputs', {})
+    if len(add_to_st_inputs) > 0:
+        st_inputs.update(add_to_st_inputs)
+
+    ##########################################################################
+    st.markdown("""---""")
+    st.markdown('**Run this CM script (Linux/MacOS/Windows):**')
+
+    x = ''
+
+    extra_notes_online = extra.get('extra_notes_online', '')
+    if extra_notes_online != '':
+        x += ' [ ' + extra_notes_online + ' ] '
+
+    extra_faq_online = extra.get('extra_faq_online', '')
+    if extra_faq_online != '':
+        x += ' [ ' + extra_faq_online + ' ] '
+
+    if x != '':
+        st.markdown('*' + x.strip() + '*')
+
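For clarity, here is a small self-contained sketch of the tag-assembly logic just above, with hypothetical tag and variation values; it shows how the selected variations (prefixed with `_`) are merged into the final tags expression:

```python
# Hypothetical inputs mirroring the logic above
script_tags = 'run,mlperf,inference'
selected_variations = ['_cuda', '_resnet50']

x = script_tags
if ' ' in script_tags:
    # Space-separated tags are kept quoted as one argument
    x += ' ' + ' '.join(selected_variations)
    tags = '"{}"'.format(x)
else:
    # Comma-separated tags are passed via --tags
    x += ',' + ','.join(selected_variations)
    tags = '--tags={}'.format(x)

print(tags)  # --tags=run,mlperf,inference,_cuda,_resnet50
```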
+    host_os_windows = False if os.name != 'nt' else True
+    host_os_use_windows = st.toggle('Run on Windows?', value=host_os_windows)
+    if host_os_use_windows:
+        var1 = '^'
+        host_os_flag = 'windows'
+#        st.markdown('*Check how to install [a few
+#        dependencies](https://github.com/mlcommons/ck/blob/master/docs/installation.md#windows)
+#        on Windows.*')
+    else:
+        var1 = '\\'
+        host_os_flag = 'linux'
+
+    show_cm_install = st.toggle(
+        'Install MLCommons Collective Mind',
+        value=False)
+
+    if show_cm_install:
+
+        import playground_install
+        extra = {'skip_header': True,
+                 'run_on_windows': host_os_use_windows}
+        r = playground_install.page(st, params, extra)
+        if r['return'] > 0:
+            return r
+
+        st.markdown('---')
+
+    ##########################################################################
+    shell = st.toggle('Open shell after executing CM script?', value=False)
+    if shell:
+        st_inputs['~shell'] = True
+
+    ##########################################################################
+    flags_dict = {}
+    flags = ''
+
+    for key in st_inputs:
+        value = st_inputs[key]
+        key2 = key[1:]
+
+        if value != '' and (type(value) != bool or value == True):
+            flags += ' ' + var1 + '\n   --' + key2
+
+            z = True
+            if not isinstance(value, bool):
+                x = str(value)
+                z = x
+
+                if ' ' in x or ':' in x or '/' in x or '\\' in x:
+                    x = '"' + x + '"'
+                flags += '=' + x
+
+            flags_dict[key2] = z
+
+    ##########################################################################
+    run_via_docker = False
+    if not extra.get('skip_script_docker_func', False) and len(
+            meta.get('docker', {})) > 0:
+        run_via_docker = st.toggle(
+            'Use Docker',
+            key='run_via_docker',
+            value=False)
+
+        if run_via_docker:
+            st.markdown(
+                "*WARNING: CM automatically generates containers for a given script - it's a beta functionality - feel free to [test and provide feedback](https://discord.gg/JjWNWXKxwT)!*")
+
+    action = 'docker' if run_via_docker else 'run'
+    cli = 'cm {} script {} {}\n'.format(action, tags, flags)
+
+    ##########################################################################
+    use_experiment_from_extra = extra.get('use_experiment', False)
+
+    use_experiment = st.toggle(
+        'Use CM experiment for reproducibility',
+        key='use_cm_experiment',
+        value=use_experiment_from_extra)
+
+    extra_cm_prefix = ''
+    if use_experiment:
+        cli = 'cm run experiment --tags={} -- {}\n   '.format(
+            "repro," + script_tags, var1) + cli
+
+    ##########################################################################
+
+    extra_setup = extra.get('extra_setup', '').strip()
+    if len(extra_setup) > 2:
+        show_extra_setup_notes = st.toggle(
+            'Show extra setup notes?', value=True)
+
+        if show_extra_setup_notes:
+            #            st.markdown('---')
+            st.markdown(extra_setup)
+#            st.markdown('---')
+
+    show_python_api = st.toggle('Run via Python API', value=False)
+
+    # Python API
+    if show_python_api:
+
+        final_script_tags = script_tags
+        if len(selected_variations) > 0:
+            for sv in selected_variations:
+                final_script_tags += ' ' + sv
+        final_script_tags = final_script_tags.replace(' ', ',')
+
+        if use_experiment:
+            dd = {
+                'action': 'run',
+                'automation': 'experiment,a0a2d123ef064bcb',
+                'tags': script_tags,
+                'out': 'con'
+            }
+
+            unparsed_cmd = ['cm',
+                            'run',
+                            'script,5b4e0237da074764',
+                            '--tags=' + final_script_tags]
+
+            for flag in flags_dict:
+                value = flags_dict[flag]
+                unparsed_cmd.append('--' + flag + '=' + str(value))
+
+            dd['unparsed_cmd'] = unparsed_cmd
+
+        else:
+            dd = {
+                'action': action,
+                'automation': 'script,5b4e0237da074764',
+            }
+
+            dd['tags'] = final_script_tags
+
+        dd['out']
= 'con' + + dd.update(flags_dict) + + import json + dd_json = json.dumps(dd, indent=2) + dd_json = dd_json.replace( + ': true', + ': True').replace( + ': false', + ': False') + + y = 'import cmind\n' + y += 'r = cmind.access(' + dd_json + ')\n' + y += 'if r[\'return\']>0: print (r[\'error\'])\n' + + x = ''' + ```python + {} + '''.format(y) + + # st.write(x.replace('\n','
    \n'), unsafe_allow_html=True) + + st.markdown(x) + + ########################################################################## + show_cli = st.toggle('Run from the command line', value=True) + + if show_cli: + # Add explicit button "Run" + cli = st.text_area('', cli, height=600) + + if no_run == '' and st.button("Run in the new terminal"): + cli = cli + var1 + '--pause\n' + + cli = cli.replace(var1, ' ').replace('\n', ' ') + + if os.name == 'nt': + cmd2 = 'start cmd /c {}'.format(cli) + else: + cli2 = cli.replace('"', '\\"') + + prefix = os.environ.get('CM_GUI_SCRIPT_PREFIX_LINUX', '') + if prefix != '': + prefix += ' ' + + cmd2 = prefix + 'bash -c "{}"'.format(cli2) + + print('Running command:') + print('') + print(' {}'.format(cmd2)) + print('') + + os.system(cmd2) + + # Some info + x = ''' + +
+       We would like to thank all Collective Mind users and contributors
+       for supporting this collaborative engineering effort -
+       please don't hesitate to report issues or suggest features at
+       <a href="https://github.com/mlcommons/ck/issues">CM GitHub</a>!
+
    + ''' + st.write(x, unsafe_allow_html=True) + + return {'return': 0} + + +if __name__ == "__main__": + main() diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/tests/README.md b/cmx4mlops/cmx4mlops/repo/script/gui/tests/README.md new file mode 100644 index 000000000..ac40c80cf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/tests/README.md @@ -0,0 +1,3 @@ +http://localhost:8501/?action=scripts&name=run-mlperf-inference-app,4a5d5b13fd7e4ac8&gui=true&@implementation=nvidia-original&@device=cuda +http://localhost:8501/?action=scripts&name=run-mlperf-inference-app,4a5d5b13fd7e4ac8&gui=true&@implementation=nvidia-original&@device=cuda&@submitter=xyz&clean=false +http://localhost:8501/?action=scripts&name=run-mlperf-inference-app,4a5d5b13fd7e4ac8&gui=true&@implementation=nvidia-original&@device=cuda&@submitter=xyz&clean=false&~~submission-generation-style=full&~dashboard=false diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/tests/generate_password.py b/cmx4mlops/cmx4mlops/repo/script/gui/tests/generate_password.py new file mode 100644 index 000000000..a29f7cd63 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/tests/generate_password.py @@ -0,0 +1,13 @@ +import bcrypt + +# salt = bcrypt.gensalt() +# TBD: temporal hack to demo password protection for experiments +# salt = bcrypt.gensalt() + +pwd = input('Password: ') +pwd = pwd.strip() + +password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e' +password_hash2 = bcrypt.hashpw(pwd.encode('utf-8'), password_salt) + +print('"password_hash":"{}"'.format(password_hash2.decode('utf-8'))) diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/tests/test.cmd b/cmx4mlops/cmx4mlops/repo/script/gui/tests/test.cmd new file mode 100644 index 000000000..e25099a37 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/tests/test.cmd @@ -0,0 +1 @@ +cm run script --tags=gui --script="app generic mlperf inference" --prefix="gnome-terminal --" \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/tests/test2.cmd b/cmx4mlops/cmx4mlops/repo/script/gui/tests/test2.cmd new file mode 100644 index 000000000..20f417aa9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/tests/test2.cmd @@ -0,0 +1 @@ +cm run script --tags=gui --script="app generic mlperf inference" --prefix=" " \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/tests/test3.cmd b/cmx4mlops/cmx4mlops/repo/script/gui/tests/test3.cmd new file mode 100644 index 000000000..60a3d6a29 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/tests/test3.cmd @@ -0,0 +1 @@ +cm run script --tags=gui --script="run mlperf inference generate-run-cmds" --prefix="gnome-terminal --" \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/tests/test4.cmd b/cmx4mlops/cmx4mlops/repo/script/gui/tests/test4.cmd new file mode 100644 index 000000000..2cd19e914 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/tests/test4.cmd @@ -0,0 +1 @@ +cm run script --tags=gui,_graph diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/tests/test4a.cmd b/cmx4mlops/cmx4mlops/repo/script/gui/tests/test4a.cmd new file mode 100644 index 000000000..86c64b3cc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/tests/test4a.cmd @@ -0,0 +1,2 @@ +cm run script --tags=gui,_graph --exp_tags=test + diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/tests/test4b.cmd b/cmx4mlops/cmx4mlops/repo/script/gui/tests/test4b.cmd new file mode 100644 index 000000000..9897defc2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/tests/test4b.cmd @@ -0,0 +1,2 @@ +cm run script 
--tags=gui,_graph --exp_name=mlperf-inference--all--datacenter--closed--image-classification--server + diff --git a/cmx4mlops/cmx4mlops/repo/script/gui/tests/test5.cmd b/cmx4mlops/cmx4mlops/repo/script/gui/tests/test5.cmd new file mode 100644 index 000000000..ea5942d1b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/gui/tests/test5.cmd @@ -0,0 +1 @@ +cm run script "gui _playground" diff --git a/cmx4mlops/cmx4mlops/repo/script/import-experiment-to-sqlite/README.md b/cmx4mlops/cmx4mlops/repo/script/import-experiment-to-sqlite/README.md new file mode 100644 index 000000000..cf987d9cb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/import-experiment-to-sqlite/README.md @@ -0,0 +1,155 @@ +
+* [About](#about)
+* [Summary](#summary)
+* [Reuse this script in your project](#reuse-this-script-in-your-project)
+  * [ Install CM automation language](#install-cm-automation-language)
+  * [ Check CM script flags](#check-cm-script-flags)
+  * [ Run this script from command line](#run-this-script-from-command-line)
+  * [ Run this script from Python](#run-this-script-from-python)
+  * [ Run this script via GUI](#run-this-script-via-gui)
+  * [ Run this script via Docker (beta)](#run-this-script-via-docker-(beta))
+* [Customization](#customization)
+  * [ Script flags mapped to environment](#script-flags-mapped-to-environment)
+  * [ Default environment](#default-environment)
+* [Script workflow, dependencies and native scripts](#script-workflow-dependencies-and-native-scripts)
+* [Script output](#script-output)
+* [New environment keys (filter)](#new-environment-keys-(filter))
+* [New environment keys auto-detected from customize](#new-environment-keys-auto-detected-from-customize)
+* [Maintainers](#maintainers)
+
    + +*Note that this README is automatically generated - don't edit!* + +### About + +#### Summary + +* Category: *DevOps automation.* +* CM GitHub repository: *[mlcommons@cm4mlops](https://github.com/mlcommons/cm4mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/cm4mlops/tree/main/script/import-experiment-to-sqlite)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* CM "database" tags to find this script: *import,experiment2sqlite* +* Output cached? *False* +___ +### Reuse this script in your project + +#### Install CM automation language + +* [Installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) +* [CM intro](https://doi.org/10.5281/zenodo.8105339) + +#### Pull CM repository with this automation + +```cm pull repo mlcommons@cm4mlops --checkout=dev``` + + +#### Run this script from command line + +1. `cm run script --tags=import,experiment2sqlite [--input_flags]` + +2. `cmr "import experiment2sqlite" [--input_flags]` + +* `input_flags` can be seen [here](#script-flags-mapped-to-environment) + +#### Run this script from Python + +
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'import,experiment2sqlite',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
    + + +#### Run this script via GUI + +```cmr "cm gui" --script="import,experiment2sqlite"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=import,experiment2sqlite) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "import experiment2sqlite" [--input_flags]` + +___ +### Customization + + +#### Script flags mapped to environment +
+* `--db_name=value` → `CM_SQLITE_DB_NAME=value`
+* `--exp_name=value` → `CM_SQLITE_EXP_NAME=value`
+* `--exp_tags=value` → `CM_SQLITE_EXP_TAGS=value`
+
+**Above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({..., "db_name":...})
+```
+
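Putting the mapping together, a minimal sketch of a complete Python call; the flag values here are hypothetical, and each flag is translated into the corresponding `CM_SQLITE_*` environment variable from the table above:

```python
import cmind

r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'import,experiment2sqlite',
                  'db_name': 'cm_experiments.db',   # -> CM_SQLITE_DB_NAME
                  'exp_tags': 'mlperf-inference',   # -> CM_SQLITE_EXP_TAGS
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```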
    + +#### Default environment + +
+These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
+
    + +___ +### Script workflow, dependencies and native scripts + +
+ 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/import-experiment-to-sqlite/_cm.yaml)***
+    * get,python3
+      * CM names: `--adr.['python', 'python3']...`
+      - CM script: [get-python3](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3)
+    * set,sqlite-dir
+      - CM script: [set-sqlite-dir](https://github.com/mlcommons/cm4mlops/tree/main/script/set-sqlite-dir)
+ 1. Run "preprocess" function from customize.py
+ 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/import-experiment-to-sqlite/_cm.yaml)
+ 1. ***Run native script if exists***
+    * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/import-experiment-to-sqlite/run.bat)
+    * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/import-experiment-to-sqlite/run.sh)
+ 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/import-experiment-to-sqlite/_cm.yaml)
+ 1. Run "postprocess" function from customize.py
+ 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/import-experiment-to-sqlite/_cm.yaml)
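The "preprocess" and "postprocess" steps listed above are plain functions in the script's `customize.py`. A minimal sketch of that structure, following the convention used by other CM scripts in this repository (the validation logic itself is illustrative):

```python
def preprocess(i):
    # 'env' carries CM_* variables, e.g. CM_SQLITE_DB_NAME set via --db_name
    env = i['env']

    if env.get('CM_SQLITE_DB_NAME', '') == '':
        return {'return': 1, 'error': 'CM_SQLITE_DB_NAME is not set'}

    return {'return': 0}


def postprocess(i):
    # Called after the native run.sh / run.bat finishes
    return {'return': 0}
```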
+
+___
+### Script output
+`cmr "import experiment2sqlite" [--input_flags] -j`
+#### New environment keys (filter)
+
+#### New environment keys auto-detected from customize
+
+___
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/README-extra.md
new file mode 100644
index 000000000..968c63d2d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/README-extra.md
@@ -0,0 +1,82 @@
+# About
+
+This portable script converts raw results from the [MLPerf™ Inference benchmark](https://github.com/mlcommons/inference)
+to the [MLCommons CM format](https://github.com/mlcommons/ck) for the [Collective Knowledge Playground](https://x.cKnowledge.org).
+
+The goal is to make it easier for the community to analyze MLPerf inference results,
+add derived metrics such as performance/Watt and constraints,
+and link reproducibility reports as shown in these examples:
+* [Power efficiency to compare Qualcomm, Nvidia and Sima.ai devices](https://cKnowledge.org/mlcommons-mlperf-inference-gui-derived-metrics-and-conditions)
+* [Reproducibility report for Nvidia Orin](https://access.cknowledge.org/playground/?action=experiments&name=mlperf-inference--v3.0--edge--closed--image-classification--offline&result_uid=3751b230c800434a)
+
+Aggregated results are available in [this MLCommons repository](https://github.com/mlcommons/cm4mlperf-results).
+
+You can see these results at the [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-inference,all).
+
+## Usage
+
+We have tested this portable CM script on Ubuntu and Windows.
+
+Install the [MLCommons CM framework](https://github.com/mlcommons/ck/blob/master/docs/installation.md).
+
+Pull the MLCommons CK repository with automation recipes for interoperable MLOps:
+```bash
+cm pull repo mlcommons@cm4mlops --checkout=dev
+```
+
+Pull already imported results (v2.0, v2.1, v3.0, v3.1) from this [mlcommons@cm4mlperf-results repo](https://github.com/mlcommons/cm4mlperf-results):
+
+```bash
+cm pull repo mlcommons@cm4mlperf-results --checkout=dev
+```
+
+Install the repository with raw MLPerf inference benchmark results for {NEW VERSION}:
+```bash
+cmr "get git repo _repo.https://github.com/mlcommons/inference_results_v{NEW VERSION}" --extra_cache_tags=mlperf-inference-results,version-{NEW VERSION} --time --space
+```
+
+Use the following CM command if you want to analyze private MLPerf results under submission
+(you need to be a submitter or collaborate with cTuning.org and cKnowledge.org to get access to such a repository):
+
+```bash
+cm run script "get git repo _repo.https://github.com/mlcommons/submissions_inference_v4.0" --env.CM_GIT_CHECKOUT=main --extra_cache_tags=mlperf-inference-results,version-4.0-private --time --space
+```
+
+Convert all raw MLPerf results into CM experiment entries. It can take 5..15 minutes to run the submission checker
+on raw MLPerf results before converting them to the fast CM format (skip `target_repo` if you want
+to record results to the `local` CM repository):
+
+```bash
+cm run script "import mlperf inference to-experiment" --target_repo=mlcommons@cm4mlperf-results --time --space
+```
+
+or for a specific submitter:
+
+```bash
+cm run script "import mlperf inference to-experiment" --submitter=CTuning
+```
+
+If you have already generated `summary.csv` in your current directory, you can skip the submission checker as follows:
+
+```bash
+cm run script "import mlperf inference to-experiment _skip_checker"
+```
+
+Visualize the results on your local machine via the CK playground GUI:
+
+```bash
+cm run script "gui _playground"
+```
+
+These results are also available in the [public CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-inference,all).
+
+## Further analysis of results
+
+Please check this [README](https://github.com/mlcommons/cm4mlperf-results#how-to-update-this-repository-with-new-results).
+
+# Contact us
+
+This project is maintained by the [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce).
+Join our [Discord server](https://discord.gg/JjWNWXKxwT) to ask questions, provide your feedback and participate in further developments.
diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/README.md b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/README.md
new file mode 100644
index 000000000..18e9240ad
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/import-mlperf-inference-to-experiment](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/import-mlperf-inference-to-experiment) for the documentation of this CM script.
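Following the Python API pattern shown for other CM scripts in this repository, the import above can presumably also be driven from Python; a sketch with illustrative inputs (tags taken from this script's _cm.yaml, `target_repo` optional):

```python
import cmind

r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'import,mlperf,inference,to-experiment',
                  'target_repo': 'mlcommons@cm4mlperf-results',  # optional
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```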
diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/_cm.yaml new file mode 100644 index 000000000..04f9067c0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/_cm.yaml @@ -0,0 +1,38 @@ +# Identification of this CM script +alias: import-mlperf-inference-to-experiment +uid: 72099fa962ea499c + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "MLPerf benchmark support" + +developers: "[Grigori Fursin](https://cKnowledge.org/gfursin)" + +# User-friendly tags to find this CM script +tags: + - import + - mlperf + - inference + - mlperf-inference + - experiment + - 2experiment + - to-experiment + +input_mapping: + target_repo: CM_IMPORT_MLPERF_INFERENCE_TARGET_REPO + submitter: CM_MLPERF_SUBMITTER + +# Dependencies on other CM scripts +deps: + + # Detect host OS features + - tags: detect,os + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + +variations: + skip_checker: + env: + CM_SKIP_SUBMISSION_CHECKER: yes diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/customize.py b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/customize.py new file mode 100644 index 000000000..05008635c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-inference-to-experiment/customize.py @@ -0,0 +1,365 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +import cmind as cm +from cmind import utils + +import os +import subprocess +import csv +import json +import copy + + +file_summary = 'summary.csv' +file_summary_json = 'mlperf-inference-summary-{}.json' +file_result = 'cm-result.json' + +model2task = { + "resnet": "image-classification", + "retinanet": "object-detection", + "ssd-small": "object-detection", + "ssd-large": "object-detection", + "rnnt": "speech-recognition", + "bert-99": "language-processing", + "bert-99.9": "language-processing", + "gptj-99": "language-processing", + "gptj-99.9": "language-processing", + "llama2-70b-99": "language-processing", + "llama2-70b-99.9": "language-processing", + "dlrm-99": "recommendation", + "dlrm-v2-99": "recommendation", + "dlrm-99.9": "recommendation", + "dlrm-v2-99.9": "recommendation", + "3d-unet-99": "image-segmentation", + "3d-unet-99.9": "image-segmentation", + "stable-diffusion-xl": "text-to-image" +} + + +def preprocess(i): + + env = i['env'] + + cur_dir = os.getcwd() + + # Query cache for results dirs + r = cm.access({'action': 'find', + 'automation': 'cache,541d6f712a6b464e', + 'tags': 'get,repo,mlperf-inference-results'}) + if r['return'] > 0: + return r + + lst = r['list'] + + for c in lst: + path = os.path.join(c.path, 'repo') + + if os.path.isdir(path): + + meta = c.meta + + tags = meta['tags'] + + version = '' + for t in tags: + if t.startswith('version-'): + version = 'v' + t[8:] + break + + skip_submission_checker = env.get( + 'CM_SKIP_SUBMISSION_CHECKER', '') in [ + 'yes', 'True'] + + print('') + print('Processing results in path: {}'.format(path)) + print('Version: 
{}'.format(version)) + print('') + + if skip_submission_checker: + if not os.path.isfile(file_summary): + return {'return': 1, + 'error': '{} not found'.format(file_summary)} + else: + if os.path.isfile(file_summary): + os.remove(file_summary) + + print('* Running submission checker ...') + + xenv = {} + + submitter = env.get('CM_MLPERF_SUBMITTER', '') + if submitter != '': + xenv['CM_MLPERF_SUBMITTER'] = submitter + + ii = {'action': 'run', + 'automation': 'script', + 'tags': 'run,mlperf,inference,submission,checker', + 'extra_args': ' --skip-extra-files-in-root-check', + 'submission_dir': path} + + if len(xenv) > 0: + ii['env'] = xenv + + if version != '': + print( + ' Version detected from cache tags: {}'.format(version)) + ii['version'] = version + + r = cm.access(ii) + # Ignore if script fails for now (when some results are wrong) + if r['return'] > 0 and r['return'] != 2: + return r + + if r['return'] > 0: + print('') + print( + 'WARNING: script returned non-zero value - possible issue - please check!') + print('') + input('Press Enter to continue') + print('') + + r = convert_summary_csv_to_experiment(path, version, env) + if r['return'] > 0: + return r + + return {'return': 0} + + +def convert_summary_csv_to_experiment(path, version, env): + print('* Processing MLPerf repo in cache path: {}'.format(path)) + + cur_dir = os.getcwd() + + # Get Git URL + os.chdir(path) + + burl = subprocess.check_output( + ['git', 'config', '--get', 'remote.origin.url']) + url = burl.decode('UTF-8').strip() + + print(' Git URL: {}'.format(url)) + + os.chdir(cur_dir) + + if os.path.isfile(file_summary): + summary = [] + + with open(file_summary, encoding='utf-8') as fcsv: + csv_reader = csv.DictReader(fcsv) + + for rows in csv_reader: + result = {} + + keys = rows.keys() + + for k in keys: + v = rows[k] + + if v == 'False': + v = False + elif v == 'True': + v = True + else: + try: + v = float(v) + + if v == int(v): + v = int(v) + except ValueError: + pass + + result[k] = v + + # Add extra tags + if url != '': + result['git_url'] = url + + location = result.get('Location', '') + if location != '': + result['url'] = url + '/tree/master/' + location + + accuracy = result.get('Accuracy', 0.0) +# +# print (accuracy, type(accuracy)) + if accuracy is not None and accuracy != 'None' and accuracy > 0: + result['Accuracy_div_100'] = float( + '{:.5f}'.format(result['Accuracy'] / 100)) + + # Add ratios + + # Append to summary + summary.append(result) + + r = utils.save_json(file_summary_json.format(version), summary) + if r['return'] > 0: + return r + + # Create virtual experiment entries + experiment = {} + + for result in summary: + + # Create name + mlperfmodel = result['MlperfModel'] + task = model2task[mlperfmodel] + + system_type = result['SystemType'] + + division = result['Division'] + has_power = result.get('has_power', False) + + if division == 'network': + xdivision = 'closed-network' + else: + xdivision = division.lower() + if has_power: + xdivision += '-power' + + # If datacenter,edge - remove ,edge to be consistent with + # https://mlcommons.org/en/inference-datacenter-21/ + j = system_type.find(',') + if j >= 0: + system_type = system_type[:j] + + scenario = result['Scenario'].lower() + + name = 'mlperf-inference--{}--' + system_type + \ + '--' + xdivision + '--' + task + '--' + scenario + + name_all = name.format('all') + name_ver = name.format(version) + + for name in [name_all, name_ver]: + if name not in experiment: + experiment[name] = [] + experiment[name].append(result) + + # Checking 
experiment entries and writing them to the target CM repository
+    env_target_repo = env.get(
+        'CM_IMPORT_MLPERF_INFERENCE_TARGET_REPO', '').strip()
+    target_repo = '' if env_target_repo == '' else env_target_repo + ':'
+
+    print('')
+    for name in experiment:
+        print('  Preparing experiment artifact "{}"'.format(name))
+
+        tags = name.split('--')
+        if 'mlperf' not in tags:
+            tags.insert(0, 'mlperf')
+
+        # Checking if experiment already exists
+        r = cm.access({'action': 'find',
+                       'automation': 'experiment,a0a2d123ef064bcb',
+                       'artifact': target_repo + name})
+        if r['return'] > 0:
+            return r
+
+        lst = r['list']
+
+        if len(lst) == 0:
+            r = cm.access({'action': 'add',
+                           'automation': 'experiment,a0a2d123ef064bcb',
+                           'artifact': target_repo + name,
+                           'tags': tags})
+            if r['return'] > 0:
+                return r
+
+            path = r['path']
+        else:
+            path = lst[0].path
+
+        results = experiment[name]
+
+        # Check if a date/time sub-directory already exists
+        dirs = os.listdir(path)
+
+        path2 = ''
+        for d in dirs:
+            dd = os.path.join(path, d)
+            if os.path.isdir(dd):
+                path2 = dd
+                break
+
+        if path2 == '':
+
+            r = utils.get_current_date_time({})
+            if r['return'] > 0:
+                return r
+
+            date_time = r['iso_datetime'].replace(
+                ':', '-').replace('T', '.')
+
+            path2 = os.path.join(path, date_time)
+
+            os.makedirs(path2)
+
+        # Check if cm-result.json already exists and merge its entries
+        fresult = os.path.join(path2, file_result)
+
+        if os.path.isfile(fresult):
+            r = utils.load_json(fresult)
+            if r['return'] > 0:
+                return r
+
+            existing_results = r['meta']
+
+            # Need to check which ones to add
+            for result in existing_results:
+                found = False
+
+                # New results
+                for result2 in results:
+                    matched = True
+
+                    # Need to iterate over keys in the new results since
+                    # old results can have more keys (derived values, etc)
+                    for k in result2:
+                        if k != 'uid':
+                            if k not in result or result2[k] != result[k]:
+                                matched = False
+                                break
+
+                    if matched:
+                        found = True
+                        break
+
+                if not found:
+                    results.append(result)
+
+        # Check extra keys
+        for result in results:
+            # Generate UID
+            if 'uid' not in result:
+                r = utils.gen_uid()
+                if r['return'] > 0:
+                    return r
+
+                result['uid'] = r['uid']
+
+            # Get Result and Units together
+            if 'Result' in result and 'Units' in result:
+                result['Result_Units'] = result['Units']
+
+            # Temporary hack for Power to separate power from the graph
+            units = result.get('Units', '')
+            if units == 'Watts' or 'joules' in units:
+                if 'Result_Power' not in result:
+                    result['Result_Power'] = result['Result']
+                    result['Result'] = None
+
+        # Write results
+        r = utils.save_json(fresult, results)
+        if r['return'] > 0:
+            return r
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
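The merge step in `customize.py` above treats a previously stored result as already covered when some newly imported result matches it on every key except `uid`; stored results may carry extra derived keys, which are ignored. Below is a standalone sketch of that rule for illustration (the function name and sample values are hypothetical, not part of the script):

```python
# Illustrative sketch of the deduplication rule used when merging freshly
# imported results into an existing cm-result.json entry.
def covered_by(new_result, old_result):
    # True if the old result agrees with the new one on all of the new
    # result's keys except 'uid'; old results may have extra derived keys.
    return all(k == 'uid' or (k in old_result and old_result[k] == v)
               for k, v in new_result.items())

old = {'Result': 1234.5, 'Units': 'Queries/s', 'Accuracy_div_100': 0.7512}
new = {'Result': 1234.5, 'Units': 'Queries/s'}
print(covered_by(new, old))  # True -> the old entry is not re-appended
```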
diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/README-extra.md
new file mode 100644
index 000000000..6d3e51d2a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/README-extra.md
@@ -0,0 +1,68 @@
+# About
+
+This portable script converts raw results from the [TinyMLPerf™ benchmark](https://github.com/mlcommons/tiny)
+to the [MLCommons CM format](https://github.com/mlcommons/ck) for the [Collective Knowledge Playground](https://x.cKnowledge.org).
+
+The goal is to make it easier for the community to analyze MLPerf Tiny results,
+add derived metrics such as performance/Watt and constraints,
+and link reproducibility reports as shown in these examples:
+* [Power efficiency to compare Qualcomm, Nvidia and Sima.ai devices](https://cKnowledge.org/mlcommons-mlperf-inference-gui-derived-metrics-and-conditions)
+* [Reproducibility report for Nvidia Orin](https://access.cknowledge.org/playground/?action=experiments&name=mlperf-inference--v3.0--edge--closed--image-classification--offline&result_uid=3751b230c800434a)
+
+Aggregated results are available in [this MLCommons repository](https://github.com/mlcommons/cm4mlperf-results).
+
+You can see the aggregated results at the [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-tiny,all).
+
+## Usage
+
+We have tested this portable CM script on Ubuntu and Windows.
+
+Install the [MLCommons CM framework](https://github.com/mlcommons/ck/blob/master/docs/installation.md).
+
+Pull the MLCommons CK repository with automation recipes for interoperable MLOps:
+```bash
+cm pull repo mlcommons@cm4mlops --checkout=dev
+```
+
+Install the repositories with raw MLPerf Tiny benchmark results:
+```bash
+cm run script "get git repo _repo.https://github.com/mlcommons/tiny_results_v0.7" --extra_cache_tags=mlperf-tiny-results,version-0.7
+cm run script "get git repo _repo.https://github.com/mlcommons/tiny_results_v1.0" --extra_cache_tags=mlperf-tiny-results,version-1.0
+cm run script "get git repo _repo.https://github.com/mlcommons/tiny_results_v1.1" --extra_cache_tags=mlperf-tiny-results,version-1.1
+```
+
+You can also add private results to compare submissions locally before they become public:
+```bash
+cm run script "get git repo _repo.https://github.com/mlcommons/submissions_tiny_v1.1" --extra_cache_tags=mlperf-tiny-results,version-1.1-private
+```
+
+You can use a specific checkout/branch as follows:
+```bash
+cm run script "get git repo _repo.https://github.com/mlcommons/submissions_tiny_v1.1" \
+     --extra_cache_tags=mlperf-tiny-results,version-1.1-private,generate_final_report \
+     --depth="" \
+     --branch=generate_final_report
+```
+
+Convert raw MLPerf results into CM experiment entries:
+```bash
+cm run script "import mlperf tiny to-experiment"
+```
+
+Visualize the results on your local machine via the CK playground GUI:
+```bash
+cm run script "gui _graph" --exp_tags=mlperf-tiny
+```
+
+You can then select the results you want to visualize and compare,
+add derived metrics and set constraints as shown in the following example:
+
+![](assets/cm-visualization-and-customization-of-tinymlperf-results2.png)
+
+These results are also available in the [public CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-tiny,all).
+
+# Contact us
+
+This project is maintained by the [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce).
+Join our [Discord server](https://discord.gg/JjWNWXKxwT) to ask questions, share feedback, and participate in further developments.
diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/README.md b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/README.md
new file mode 100644
index 000000000..f7b8b1ff4
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/import-mlperf-tiny-to-experiment](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/import-mlperf-tiny-to-experiment) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/_cm.yaml
new file mode 100644
index 000000000..f6c36f795
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/_cm.yaml
@@ -0,0 +1,33 @@
+# Identification of this CM script
+alias: import-mlperf-tiny-to-experiment
+uid: 83e3efd7611f469b
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: "MLPerf benchmark support"
+
+developers: "[Grigori Fursin](https://cKnowledge.org/gfursin)"
+
+# User-friendly tags to find this CM script
+tags:
+  - import
+  - mlperf
+  - tiny
+  - mlperf-tiny
+  - experiment
+  - 2experiment
+  - to-experiment
+
+input_mapping:
+  target_repo: CM_IMPORT_TINYMLPERF_TARGET_REPO
+
+# Dependencies on other CM scripts
+deps:
+
+  # Detect host OS features
+  - tags: detect,os
+
+  # Install system dependencies on a given host
+  - tags: get,sys-utils-cm
diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/assets/cm-visualization-and-customization-of-tinymlperf-results2.png b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/assets/cm-visualization-and-customization-of-tinymlperf-results2.png
new file mode 100644
index 0000000000000000000000000000000000000000..8b5be23fb5045f61d914d9ff92f648a3a7c0acb9
GIT binary patch
literal 118877
(binary PNG data omitted)
zHi#P$i{rI9JR;|MY%2S1+t8zAu<~}MqN@8vM-fe{QMQ)}P7FNcF>Lt&@qiHH*7Jc@ zotMoAA0=PaLtH=u9pa?q#0$NhNHeaqzFd~lxt^klpD4BRC%&$ z%dnzv*N0`2xk3mGEis!=J; zKRoQq-gps4ODE?u86QqWfY0&c2Yb(aA%cyZ-+SHg(#9t=#Xm=pE!`Ho-vHUm?348B zM4g=lw;MBGH{BwJ>(DV4F)-N9yKmTusAPAA6L0`ke4&b1Cv%}KciCHuHV#(j)}El7 z%j{S9vxD7HN5f10g4L3IFnF(uCN~MFq{7xs2Q0*-3i;P~F`q{}JC%z%6$k-gbn!R; z?9g))i(m7}In}N<0zV20P78KxrgC(lZ$priC()H5nNtX8)Q z$blmlRV4_`Dsf%Q+>@LjCFg=)2?Wvu7`>V%Jkz|}gjBYdd0HIan+18=Rbw-oJZClI zv5+L;&>ewXj_>h9)IHI6_*7~ZVR&2iftZvxlxc5s;=+3f56r6HH!iwkX-DE%^{|U5 zb@PRUoP@DWn!mdta5D>?R9a5mRca&l>ARc*)}GCR!znxO{yMbw_f_g^jBdGs<{bx^ znLNicEt5;HxP&J3?6L=|gdZJ&hw?5J3JoD6Fn@@q=JT8-Ty@r&wXMSX2y)easjOB2 z8H^ukO^1VO=v3Gh3tKj?v0{D9m*%=wrBc8~e^~_V64e0h$&Bys3_x)zw{vrZsZfef z{qM;h=iJeB72pUJ%pU+xC6Wz`_MCJ^gu!X5L-^}ymVF8a|N4_#$!0FvK2lTu`Ary4 zMQgJ<;JBnfhN$GTj*vaIOF-TMH{ziWZSUHDYX%5(0A+|~it@v0OIs{Xnw9uY;c5!5Phy0!-o#!CR z-tx^Ib#J?wh9r^2!U;M$B@iXgoBK#K59+HFR-@LHzl?KJx%Eu3%_J%3F4Pj)8wrQD zG&sCv>_xeEholw6#PtRnWg~AX*DyDH_qVF1n8c7(rowK{BKIL3AmV%TlWbF?-IgN1 z{-U+Oa<3tm5&rYwG_(wFS?%W;80*?v8oFsynxA@9yPg$Zk9fGTQ8OhS!iePxAuI$E zFY9wXK1FcmRa~scFAf|e2&T)spDnexcofz`-!iiGm};5+?n|c(yVgMAu7@m~nHreG zPXKtItum9MG^J)gZQh^K<{!X40>r9UR>ik4oQK0}ydhJ=R+0}#GnwE-q^x#R$lJig zH+zyO^qaUdrBn{G{aS%D@WWWtTKshFp1eNp4o=r0iFS^iqVj}YM<~RxzyEWUS)QDZ zS>7sQjfxH|V14$F2N(c=>r*;KU+U?yDElPRe<=PW>w4~X(Z*?9`C3#ROAB>CH?x{S zGex4N_&a*k+@^%?ML+O1|I+?MN)ch$_ENJisqkk_2 z9xR!v;6$*=on;F6D%WgUz<^`r1tz%tG-Oay@`X~H5c?AV(1F+u3-CX!Q6)5Uk!drq ziT-8D8fMV7y7{`YC-a`RV{bGhi&e7KQR%{};J*EoqBx!8Y8-6L@22#1!6S`%7Bqq0 zUEif`1mPdLzC zLyNVFizeJE7Wd0`yi= z-uv|dx-A`qzZh&xQj5vxr5v@;r6FPV6w~HXUO!=J`lo#oT-`D_QP0S8$tzCsaq4nu zkUd*I0{|d3zhlPB8V|@sNvj#06>->Xwpug+F{Ib5W)QMa98z#U3Gk@lYRxU}?@pEc z;d@ebQ`r6h67j#Dap_z3y2xWA2N*7SQi~LR=js!i{2t=?6PzR$MyQY2r2Q<9Vumjydxib1C?xMn+*`a1{e3P$ z<{i<=-RfO&c3?e*;SiP=PhI9uL(HPuNyz@h2o~yb2f&jxRYCkE)Q3;&J8K!JB~`)a z%fyVuPtn2Y^u%-f{*3V6pVbA#?Vf`R9OnO;NtJP9zcb_D1=Z6u#DF{U`_Zc*D2SsbdEyeEAnrVj^TOv$qVL2zu* zac=O9rE1_|G`n&2teg$&I5WxFpao+vxtYpKe4j;;_RDDmw0A+d3A?HJv|sjP?(D^- z2&q*eFAmA-Bp~4lsMPqe8{VvrIQ_)V!|1cD z3W3DrZ{`GrjPDPOEFqqx0RNt*#U`a)w*w9rW;crBd_9@mQzvJe!z8&UgowT(A(!Cv z4d;lFoZr8B&aX_Op15p3&oz8b>7E1gI9tRvazn-oNOGsz{ z4{h|U1h1l%YjB z;ZDaPN09wSQASBu=9d;L`1pV9sn?utl(Xfau&#*17cTWknrDi$-mAFib|bMp{oM;7 z>f{#YK$=5frhAB#0(TEUfv@z+)y9$wK2F#IyV!+4YotRG`wEOpuG0|CeN2~fC#7ro zxMlS~mQ7yIBdmSA7xT9XQZ~9OBf*rnR#Of%g5R!i+vAe1GhT+3Z}r-~tZbc; z;`iAKkTkVC-5kl%VyHcBXVlZblZ%ktq;tYl9!j3)RsTVaSe;jQ><{@QAY5v|T1hD- zl8u|W1}*>*@;K73=Q@M>M~2u7mOfN z4!)Pa(B{5lvdO)HIm-QmsxBejJcPSo2U4?xZ4%Zr^EYV3TV#vFlDiW!O$m1b;`Tu% z4n7HL)pDz@QvILO9h&liI5k@^WBE|LE`_4)+6kbG_ zY?&D;Pld)YV6>!LQ*!h?4kzegICBw>AB8rYlt#ubG`zlT`#aJiej3h10O(u(Qfhp* zaDS>FNq@U`bIw1*WD#nSMPEs=wqe?VJs_7wYuL*)RVfU0P0$s1ZzBc=I5%f+xukWa z&KE8A*fc>nD=R({xj8fUOO>a*zg_J=k<{)C+_}?R$tAfi_|Rqjz?+jhNQvXb#RiyA z(R4iYPaMJ4){yf}MinYqGn}&3kt^TM*JXJ-d%aA6w8JQ;*29^n}E|LM5ys%_K9}7 zpM**%KU-)1!PWwCbOSHQ5tq2teLd{O!s8(T@8MgU8!CO)@WzX_Yr0J~ozKrinVD}; zN7XFQTY^krUvd3yzkVI7Znn#vaL<>U)T)28h_PYN6V76KU4-4(o9y?KoB5&O)#82n z6hD3F#cb_$mAH%Hrodq4B*dY@%BQ>YB;3qKpn*PqF>plGz&Yf`f%d0Fk-x`7007?@ zbH%vyDC9#2Pq3mz@R8Ak50YQsZXB_FtiQt^dXCe9qCQ&Lw=jb8^3N5=-{6!(V^`>( zy}K%`@ER#-C9!vM`o}6>^VWP(nxoD|=)@?e8F(Cgn|Pmxhdy6Lb;RgXbE{{0%{KZE zAys8C`-wN%z%*~YERH?e3~(?KSHQ8|ra~z+L~OqfeHlnBhU3iGQw3nJckeA+5bQ1F z2YJpOM*_N@>qm4r)kj9ot&LdKf!!7pZt}0F3~GpCbR|-Au^+aLlts5O?CM{Q zWNj;^AsF?DOT{ymY(_ZsW(2xQ&=WGq%+2T>tEY-5of`Lk=3r{y+>*{+$79}_c8ifD z`SgPp+A&Qgwwlu0&`g|%Zij>IZl%SXIK<NJcif>x6+DU={Be<3QYi{ z$7$Y5_HtModB4J;pVww0OHsAK&tfT?+U*5SnfP|CmT7*iq(Q5dbDnwctP^0u@&@a| zp@Jh&fY-4m9->a7hUdV35u 
zFec(oAjiS0HxQejA+Fg9k;)dwvApiFmAs%Ou^&_{VO!9=)tDh#_TKo`(h8duUk~PJo_4PJW_Jy9FOGG9e0Ium+|@qM<1~0r2W*7 zbjE8Zt}lvU%fLuLu7!54_Bj~$r#~)JUt+Pv`W(7Ye7%qb+$MK(`B;DIh|g0d*?o2e zmCsJ8nYsiw$7x}&sj;pCoOb!<%$uWUR3zk;j+0$Z>ITSfIwVN8W-Lt1pLi6zeu1MK zWDG`Zh&s@%-a#fnvR=YoV=yD&V=m&eM$bSbGSDydEe!?Y@p{nWr#>M#AkPX(&4zDt za~nNgp}h0%oZ)p(hz|hI!uAFns;H`Y+IyBw&RWhJ%5~ZyPD1KvK!Lzv32nj3>0~_B zRKM(5&n5o8R?O4Ny2$-bP4!eLfQUBs&O(C^L1zjfR0H-x>ifKHkJ1m%-`-xnR*Sad z;B7fnGCt2in9cO-sUufyhuEBn6x#8ev5cDbr2QMHX{w$Y>aUpzObJ*%xnrK_o~h&B zWo{lC|*2m*z`>mSNPQjZQ$WdDX@|B zwLTnq|Etg}YfP~}aX8Bn&u@iGJGQiwG~$a5ocwgfH`D-zRFSOqsx76W@28=J*?7KQ zmW-I_{}j?4U8?tYRX27#r_v{$QmFf6(1H?$UpdKkTtO1Wd_>u5@20LR8y0ADhrVbS zKL?~(A#gCAO-M%-{BeuvN&Fe^ZV8rEMovA+-_{o-cgoBv5JWl|?%qnPwCqZPd8d0X z<&z`Y2eP~Pd6FS_yBjw|RG)fe#~zxtWC?7U_9-Llq)nsgk zk1qU-8QI$P_ioR%>%(!^>{VefAyJ6Osbr`kc^KIUBFwNii+)yhms;1*Nxp=-Rax3Ahn)O;tb(#nw$3igPe}TyhAco0+474Xh zg5F&vpKB$HyD02g;d_wmGqIn}^SKg2Rm#7-G!2`KL|>UvJEe>BDP2gT0_<)s_$#2+-`$cG%gR)D%(1>odC3j{K&w^q%|ov>O9?{+KP{wfogt{p1M&*zQZ zG(W;_c-E;hZUj~q63R4(b~=k+LH>vsv10}~sD~UhO8PDRIwyVE9r?a!p8+3Lzk_%Y z^bMoBmn{p7Fp`(l22+ss4Q1>_tme-=pp{*IKefm)gMT4;L zVz=r;i+!%U=iRFCIR-`&Ael{wKnsUQ%$|Tbl(8+cBTU~;E02yg`nusJ-IziFSSp2F z&!N3w(|q@BxfgDY0Q9GdtaIq3<&ueuiT_<%oN3*XY}dg?uRNI2-q}eF7^K%_{^to; zGO3kGt<3w3MHknn6jx@2bibaPDe|L7oj`j;Zlf5x5iwS|_(@|Ho6K_4#oyL4@6aAe zpC3{02G`(s)gy>z!r;*ybfWxi&sk!MyLzgZ7Aoc9rU}WTX*Bw^a2P-um7o3Bl7_Sq zzx=uPVB#>Is*%SP{JOoK6P+{#9r$s^r)#4FqAzO#TC>0aSmo9R)3XU7!3nj}`y&gd zW__J5$&Wg@z(=r|T5*67G+Tnm_MW$LAfc#fPUDtu6mZ2HnoZ&|y?tFKethS~o1nvr zb3emNk+nq7qCjHC4HrMF7eRcLiL5nf86R!(y+(C!*5~6CJW~FXah$I9^VyRcK}k(oI24+Z?L@lnaTa($okTM<~RVFfY>n^Pia<`%1|^zO`E05vrtXT zc6YLCe&s%j&;(Vw$bUlq`A`q`l7*B>x9}{H=^`Gba}!V!K(-2FJ6vm{W(f{}2D2-A zpe|fA2wp=|URi;Pq4zUO^sbRB<2hW(!9ov#Ps8aYm3Fpi zO4^k_yTyumRuHMQM4Ra5VjUY~;0>$^?UrdsUl7n!wB_SGYa<;AKGO#S{Io89 z-|AWF`(nB803Y`0kO{j>2Xdk`#b@%;DLbgrtx3SAD-7}}mh8HdBjnY}A}w9G66dc= z41OX^HBvL{r?v;FOnt1eZi>fw7IxUo+KF)I4I{t378N!VCBJdoZ(eUBYfopu%3d7- zqU+IyJpQ%$TUSDt9>zI$ztLddpD!k0>)0>?G@4{+w*Y+G#|HUVEwM-xTWp4%=SRmD zbeEWlsms(qQm;PDMZVWU+QW~CfH-4F{xm{ltbH1!E4d%`^^ae|S^fovkJJsM2L#(g z8OGjS!E*61xaNpb{C;Xbj^qeCxateqFGXEaH@?^+6|}7N1W(;yK0UsDRp1v+)YVMA zx#3_-vC&LQaU3)N${L#M8~scxnkTP*&Gs1%7wlP+o5Vb=h01KEpqWW!5uuR%@i-C! 
zP4dZOE}%&d_#I?wfuVw?&=9l;biMKkJNWW z?;iaa84uM1?MrEdz0KKul&4{|1gwf8_SNL6kQS^$c&9s(^PZW9f6GNt#yO)w)SD<$ zwQ&E9sp7A%pDxBZT-8{8z3(wLDIX*3h;(w=3^aCwFB$b)*qu)w%+b7&)>Qa zCpsmhT9i1P^Azv@h)q>d^&Vl0MtiQk#yruH`{dTdi$l*Dg%Rui#QG#8j?<+J7lCZg zWibZ^fn2+l4GEyD2ryj~EckAIud|hi>)O(O$pD2bhv{kJnKo_iak%!hd-xXSSV%Y- zld&RlnTv6!q0#E8FG+$u){q+MeK`TGi^ z1m1>+EUklcf@b0oheMDZA#O3LV%Gs-r_aOg4YJ3;GMpPva|s*2)IP@%XOx*RS?Igh z7gdWgfPi+u-++2lVfC1hr*gg@_Usw-ET`sMiML#`e!|zkhx-*!!i_|yD_DJ zZ-!ONpd`ANRke*(HT)b*Xx30Xo(B2*F78uDKXy_9i)QR%dB12EjbB0hNIMPR{szlN zCw;jDzj8TIg)$!4c9-Got+^v(ZJur|t9qT{KM|I8KBq!pJC6p4f4GQ|s7ak$$MHbn zrKhL2!wlK{=^VIWm!(AIvdOoo<2lhNk?h%s0qy=OI}<|hW+R_^1D0o$Ht`X!e^?t6 z+^NE>A-&yl#A6Q&iKyB`8`6x6z`IW7xPCi17nShjYlTvyuLSUo`=0uUdFUtF^BSI~ z+@$omYqrTpsd)}FRY8u!BGjK4+?}&Fnz%5?X%gD881uCGM~;1d?VO>F3evD7MNi*S zgz0&Ula9in*#3+@-KA&BwV=)xUTMb`-!j^leMc?`A-*!Lf#SHYL)`KFNT_luFE`uXuRU)%W8G_0g;Ism^TznEa2fBB;xmU zS*(m;iEAE647Q!p{YTJ0{L*P={g}s0BJyEVZztIP{r)1$cV;9-;i4+_^Hq)(VJ4p# z?{FAw-(f1iU-+4HHf1I#i z(xk{qcdtsXGLS(hLUMnfj7yF1^#V4fI}~Ck8T$+k6XPFns56$+ovg) zHhZ6rS7>5*iQ|3vZ6rdK;8q9`*Pm>()h;H7@bJc3rH?I(4M5Pay$sHPqu(^q75#r@>bO1`WD9kqV^nC;m`}Rp;;b+p!S8 zS8g8&8{KT&X=tbB`ENiIz#3FJww#Gs1kBn|1vG&8VielrC3H05mwV&TTRJU;nr#I< zU@ZRyZsQjsB`#?Q0=&>L;`B$l4G=Uk>-C|u%-GNvoRFr@Fg44b$n3IfyDp8Jv9Lk= z8N7#^XOZ!U%>qCW%%ol5v&*VztGf0~Qg*3m?X|M^rQKU6$x0LgT6=Ci^mqh&lgqq{ z#fnKoBvCyd1fO5r9YK3!bv6!&(^qcIq{6o2ch<&0c5Iou@Qhd&pC5S08#EP|A^wdr zjI}xOnq6h$TgECx@?rEvr&x{ubd9-TaYtEt{u*X06brtkOwXRQOA2|KIhoK!yWL{3 zf!HBSYeC`y0%Ttr2VyD1D-d<0tlsS(QSG@rfPva?qHVQP6!?14p_I1a*pFID07mlo zR(@sS6=gx>(lvP7Bkf~m&_v_1`8kt%eat_AB`cbHUN97@xAJ~oM-PLw=)5aHnW;nf zZ7Gio`f%xPMx;K{uV&6`)Hy~;At=fPzUfsn5HiDcrH55+;5o!5+ef@{sB{UuxUzR)F@?AU!_)TMD1UkxrnTLSN*sO zhOHsV4C1yNJ@KVLGoRPqJJUe1VcdxZEPgQ0msqyhEzErDbXM;VJ%1*YieH-s;!K#P zZ5l%~uTkz@S-vaz9h2@c$0OR**=pd_>)j+6 zvI^|0wg&)E1{IpcEb~bSdZf$bFrB@-+uyvjYL4}siA@10=!{dvkgLSUMzb;l)$RNb z+8X`TkvAvq`Wl|(D9|$RqgUz%t8yK(&`b4qHLK1Anj4hJ>w3{`rk#^PrU)>!vz5Hft_PW+9nT1KQ;$62$+GSIP z7xcdOJ@xi+(0&Mduon0UIPj1mbT-!#u+jX)^O3Kw{8MWhHl**2BqY#8f}YK^ zDBhf(wX!~GR1bjs*zB@;84`w}8l&>D#$KQ0RJ+qr9p}vdl`bNDg!Q-uE3Z}1TFLA? 
z#?bS>EJ?%oJ&~zV*3+?`-}1Qwon@Or2l2-5qZOSiGxx1ByWJFrSd9hzwALnzX%s)% zBdjvDDEuLb)9dUESEFY|%LM@F-}`!HmRt9Q5i(9lT-^5flE|ul>OH8 zfM0Sv3@(Hm<0KBulG^rkP1_w z-I4@7!GDiY- zf4xY3zmdk+zmd`5{LcN|t)N7rnk9hrI77ec)a$lnGl|1xfoze=S;1F#Zq>|rkSi(J zr;#3DXd?a)Rf%?fwCsXI_|!G7-^5IR^*JK($Swlwdc?TsqqO(ji?slotueay>&2x( z3w`Ac)U&MMax%3vawHYTlFaAv-u0AE`{MffZ-sHxC3xDLOQn`yb3?gsf#xZi0*>zP zHYMz#SPn)#4LgyG++U<)(%tPY9sO0j1_=jcH1***_TC{x?$u8KM1OtXP0qzBdpz+a zVgeVq33}5lG!Zl0K0Ogq2KO-P)oL8(#Sr)z{SV_;`^)>Y&sq;I0!!)CUVQI3qE0Sa z=uqH4-@TZB`1%u6wQA&#&-NkO(JTKb|D#a**d7Q6`~S13`>_@MzZ8i7^RWL`vDqLG z{mAv4TYF7l$*GrLv$Ay9WpM^vQ>EVSXxU2|-Snw6<01Rwys9$MT~nw2@~qY^$C~NY zxl*mRtkOm$^W-f50O%>EqZ0*U<;sQve$m#c@)R?&i2`{e{0<1Gc%$lM z>TQh@O8Os7($C%r;U7|1#xrMe;zz?T_g_s%wAKjqTqh%)!Xm$T{gK5vN&l>(tWwO+ zPo{**dcpsmX_&TmULp_=p{Cch0+Ij&=w&2lCy=0_nyMJ7q_FdIiuV`lBrfZDJ&YCu zybP&*&aXeE2Db~S@BV2HL;8MK|Bft)RzgWRB-sYxFSV$yQl5>AN}7P|pU0@DpzhLK z8mHGJYDTZlrm|aUN5cnP$d5^e(7Svz{XPOFL5nH-wl)=$=5c1jQY*d%T3h7@yik2i z+jUEmP7JnyB(PSxmZQvEO>vtD14mN3`WHN~MTA1;2_}d4o}H_(Ngx}gyykAy4GU^O z3A3t0vA_6tcFKVqL3bhqi~@h|6uh?A1%a>Z#T1K4kEzSXD)1=oKFwA$i;4u|=CSf% zC8Us1FK)?kEhNeZj5^A(1QHoQ96jC61X`M#9nJ}LvazZ5!Q7TOq|GA7a{O6kTV7sM zp^OVQ`m>G8X`UoFYo|lZ$4tz@!HCF209e84cI@n@j<2Uuiw{5^b+k>R#6r_IcWsoQ01~v@(4qFj_4UOvj9t_Dc0kFp4C6Vl`{5>3+7D4 z#_|g$jDS|)A)(>aUOpI$jQ6(}= zXmi?CAY(T&@G+=r>8@MH&SZZ+7&(oe&&oU;f&c!Cp(?0sfvlmrt2pi^ZY&$is)~We z>Le&Hxa-m43oigvW!v*jb?5tbQ^)Rep^LNZ>38!=+w;PQhRKlI0=bkeqG})*V1Zz5 zE~n1iqoQotH=or&+%a*z+<5Zz5YEJ;+6S4FJhryt5^+?6i9uYi{oVZ2psYgR-OUpJ zxNh&Atk8Bc6(cz1q(zr-CCwd?4W`*lgo+wwtNI=moqQnsGKABwxI*h;xw;z1_c`uD zRw-4!!Ok}oW~j2qUQA&Rjj_^js@ENnUgIDf@^rF&_z->IMgV@ z{lBUmlw88%zTatpMY^^)f-7S&U@rS8^J4S3l+_D~d}txsl7VVoL9c~a@n;tU8|4{K zvUntXvYBa#e9A)ycIp#o0P6IUV=9@I0#VubPl9vPs?f4JEo6k&c98?VCbTZf``?YM zL~<1-yF3!IkD|1m%3wgRu8^7x3+4~kQI2Tj5CHpN6r5X9d{yuF-yu$U>uetLC^oB4 z{_B}$<89NSY%Eu&<@_33Ae*jvh|cKXw#32p_L5f#EBYq!nbc>hX@ak(xh>XG1-)i7 zf@c_56EarG9}Yb~_$!($9r<_8fh_g3kFsH83J#vuQ+V3?qQ$zZ)*2k#4zLUv4eBV3 z5^-@2DDqiUEV8ScHMxFjtYD^#CmMt2Ma7Ft<)Ii}X4J%*>;TtS z43m+_#Q>)t2en%1SW8;k?uv4s0N*svzgg?Pb!a&mK@Sxl>f-)fQ1$KDE>S$%4419oq@| zQ5_{kO;Ze3CcbYCRmYO)!9JME3zG!i0|ou=hyv}kTCRa!Rx`=2SBl%(#q9>_6v;uyM1)Szw%If8Y8Ho~m@RHgMoNVy}5o6sp8zAzC4~na*O6Hfl z!xtB;4W0Jn)zKK8HqVWUE_ZVVKIO{0O@1kZ-9VFa)a}9j&6bbHQ6Opxeyvc4=q=5^ zxX@t9k!nUpeqF?LwXXb*y=ynA*Ge*_t>e8)X>tN`sk9Bp0-D~6(m7Z3mX2^2U=7lF zMD#~=6h#j%ggic0Vh)RFJ*MOSF}A=*!ABvlyZilHdAbncKBupYic4`pSXQgaQRi)H z)j*g&l@Up6n!;r5+sl;Ia`jY#vq}#A`C%!ocC+oDes>gDh!1jjEIpF>pCyAhNAS`q z$>b}5a6!M%T9YSB@D&+T1nQx=Yw`WK_VHdd1xi`IG?E zPyU?T)WVX0V2KcWkTW+6!NOnPkc~sw0mmW45Jq=##oaxqIAnK{RJBZwCnhH2)?8mH zX%LNkTrg2FtL2?bh=#et<^C`=+%le)LPUTlOq8u`(gOem8|OezZCTqpkQu6IJsGX9 zSVglMV>DheJ)W4;kO>MhS~EL6G_+w!NT`1j`%BmrMwC>gDvw}9lO`+`0ZLBmO$jpL zYz6ER)qb7rPVnyqd^nrm(xb|M~?~#R;gW7CYBzR zh204R9gaciOoTupNIE(0lLWF~ORnwxqDQXI0gpl~xKS+*eyE_i!|I?+V?S#W&#NMU z^@-NTC57+@6YGpLG=s&yXVatIZ6JiJSC<5&Wi<|jx2BvP&yUEa3_(+N+5k(YKh{uP zHC9^^nK(iwXWp@?UW1F2po>l3t}rmq;)F?D!2v(33Z9IzcJ>Rr)Ys?5U1BbIMf2J#D#TjU!%Q~?i6Ix+XuPH z1irjQ&4A>Csx-Y{h9RFMAQs>@(M)xO^0&7VZG z4>g|^Cxwi+h2VG?1&?3jdE$YfsQfz$TX+(c>g^?K*_4daIz8vQl+C83;p=hQDO?nKr%p|>A;9b7C!%FWk?Z3>F_sxkdFnN4E-KXPLUgn?`uKwf zI1Q)IMN{#!PzTG*p2{=8P(bwISP+biqmyr)+bE>-&Z#6{brH^m~YWOEk=QxXSNaElK znI-bTp1Z4nX~o*T!Tk64;eVxzoo!Z?NkirS#)~K8sjJGL?LCwl#}S)p^=&HBfXXn1XpS zdRuH<+g3hPWzme@vdvqs(BWGp5je`oV%?eTB_T;rVhQAP8c)_Q>rAG3$9Y>^aDa`H zvvXsga!@t9%p8SXPH!J6a~6AZ)~RVqVmn?E>ax4(uy4YQ4$8c$rZz}PLlezV27~3z zK0)QZY*es+VRMsjFqU0*<9tIzFf=?Q@3M};-hgOl_trUU%ppA2jH=V#@*;9}dVY8J zN0gZ**#z3(tN~oZxGoMTIdp6+4y0)Pg-qgcF&{r2xw|_&&eHs40!V@B#3Q>#;q9TF 
zXeyct$djdDu@4*yl*&RW43jm@a9}S3;LqUR$$d~5%Tl|O(Y|5=jrb5wcYrLY0$`d z0fAUdY#Ei7W!7%m3V)U{vul)&Er{EBx0tlZHnY+rV{KunOr-jj;h|_2Fd1p+(U&bg zm25GZRRG{^Vx*Jga@%<6!NkSHno$FHfd?dkq_8PhefA%uDR{v%FR!VlDOjY4#78bJ zUoOLXe|;pFAz?($chh2dDojUcMM)|mNcvW)Db`s?ms*&vm8Bj7MFT>eP?aqu-T8t~ z2Ij_qnW0Bn=s`n7nx;q@mGLi>z1%z?eZMfVad&>G{Y8#&XFAJBOshXzC0kI|gYcDLHlaTr>% zwsuw&i75b0U5x%5fC;UCr{J9n* zDYiiYiqbaLw8>tMA9{MpR}38sDK;X3FhO8w!N; zhFlWe}*_gZEd^mvxlxMPd-0RrWLwO8{qPq~M{n2&}BeNpKT*~1`W>3qENO5&%+Cr&JRRRzaun1EbCkGm}5F~%0QzNj=zq7`K|H*1L?+M^OtM#Mil z+--#r{tR*%nS^_c(=Q~6yWpKxr?I}Ja^WyYzb)9-UQegzJ|4gg)VI@WQ^Ug|qNmWR zCQQ(iccoiq=`)~MB;HW-(wBho7tCN~Ar%J@N5EoHVIY)7acU^`dCx`__PayaS!&yh z6j$VT@HzCXbMfHZlF&7s&Ijm_3*Hn*II$FXByF~|gK@z*Nk_>b{i6DeESY3_R2+<~ zPv{z+j=ua&-NRmjoRY%LWI+-WJaxJR_T< zq%=cC#9*lDJy1R#)cKW*slBohbGnj>zGYG0);4kwN_*aj=@wj!?*Pg*3;YTuS}Hf2 z)xjwhnS8y@UrpKP4183KZV!l$OEt7(5b7B%;0+OZ(NfoMdUy-cA+wB`GWa3)4^nDm$D zLu;JWN4Vcs|G@k4;Cf0!NrXy*bg#g$7h{YfpdoxdrQrhmR~O)DgaFW@Vcm}AhH_Gb3^d` zOR5}({1@SJUz550FB9^AvMm2rZvPj{^1oFL``i7$9QI!d|8u$RzfI%(&%^$2=hd$J zKRA2KusE7^U6=^LB>{p32oT)eA%Oq^0t9ym5ZqmdNrJn(1$RgoT!Xv2yAC?I&S~tGnyDt!j=Ek76e#oYXxZI*|QwPY=pNWcB;&Zh3<5xn_)r z4;Ni4@fL6afvfrZm;FDs@$ZNIBg^{tjsJ((>i>Nc{;}u(e6HFm-k<{K3qCi;@d%@HY=s2%%mc5 zkw=RucmkDokH23Ej54IsTbc)#tG3J*pLqn$(IOxyDINFJk!9wu9$pzN#b30`hefw& z88Zd-sfZBDOv_vYLU07xl)L%mz*^p zD!0fBi%v;y^yEN|$2;Mt`(#2jtvx4Z=GR__i8)w;wd{*zUt^d&&S$h%@1zjE=KsuS zlw={G{V^MKG3k;0amsd2kJujHcw;^VKHB7WATvkSXkz$I0I|`RWg!x-n)rwxm9){Y z4GN$C)13o;E`#)R;YH}Uj{9>FH|@bgB}2WnTLUn^?~+qiwA%=qv+>*;TR_)pKknU8 z%Lm^Or9xzO6It4GALqK$rPrDIbX0wt;;X?|#O=AtXReq6IxMK=Lxb3-d*)U2WlmSK z#h-ycVd%|A))&0hcXsOGRqKi_YrN0C(?so0L)=h!d@v!+eLXqK(W=ey)SZ=f&N`$7&W{ zLlB!CaP7(P6(n8w-GS#h_C;7ILrj^cSA!#b4v$B;UU-2F;dlO?{Ov*pB^4spl{NUR zF}m?UjZN1=$-<>IT`tBF%-A-xgCarIe>ZpR?U61obTZrA$C1uwcbo1)=i;R>i=;eO zDm!;$1Fp_qA8zWZC9|0wd;93?xhH5H#~(BO^0O~2)kfN>JT@&6qj}>D>i`?&a9Xv& z28t>oZZce0;xU-MaBtu^GPk6Y``dP<$OTg-ect7KcyY@HyTsjXC~NeDjO6GJJSQ{g zb#93%pI7bn+e2Dp{s6x9{ZGHF6F9ys+d1y`2v>am(*QbSNQFq8bphY9jYdgtR)+*dU?2?a03eGrSUTO zXBlck2k&rB4xz>Lr|?r^j+M7y@R-p>5iP>koj&WJiwjyFGh_D#k)anPJ$MqOnxuz^ zW90Fg=GRCCz|*lyyqnfwNaK)sgh_&k@Vz*v7Uf>}b#P2l(hU;r5wY7iPFh-t;HTRa zq%tqJ*{7MjPY_=s$mMCub5wnC(!%Gm*xZMRc&&|r+gka8@qdLqML-_7-hic+!J!Q5 zye7v?c3v2It}@|Ti%303>%{>$CW~IM*zlp`*Yt8Wk0O;8g~Iuty(8r+;^9QA|Ty|}gge9lHu$?Mvrqii?`P%hN zWYJ5QW!iZ<(-M-t>mFb}EquN3BrEE2t1rokfl{cSSurKC{<60EUNQ2dT~g^9tB!25 z#Qkd6;eLUdOo8xG8z1U?*>2LOw}duV7D)FIH02`MS)c|N))=dWpOr2H zpk@K(qh(1f8=zx83^iMw*Qu%XyW_3*>3MEUx<;Y7HP1yZsJmX)gG;BI_kv{^2*sjf zHid4yoW+h;o@m+3RIV?DhfoQ{WZXt$uv`>_Jlj?b?)+u$h10S~xzKX`j%8z`@6CcI zTjBdlBFYYYC-I5js+t{gtO-wxtG>ZXz^+r3KRQ2^0L6WZ701~q84rBkf1mzGC|hVdD*&)#7ZCPfk3Uq{1VIw!UatE+dXSnf^m z58YnnZ7p%#ELVYXISCS^CgT^G_K3{A{y?(8{SKzy| zXlc~^(%xuF#G-nmmk0=c*G}HJ+jv~48k3Ts;)usU0&=l z#gDq)bC6zc_@Gwpm(t>U&&D*Bky5mqbCq3#trBf+CFiPgHpV$E_jwreNF|qxgvmUw zh^9+Mv2nLfl|P^P-b7NvIOC_v`kcc$jK#vk)@DjqeGm8-9S6{MzeSRITQNbkog>f+Vr zQ(_^ou!SfQn_?_Wr14NQGzJ=kaV{Zf4SszGKw^>hOBQ`Y$#LKkOdHgVX5 zU}ztlNB;PfgpZG>>~7i9Vq;|rF5+=PI;FgD`RKM!|1Cd$E>(m}im*AowcO)(3GrXw zHbgxd$r4n|&3eBZAB2nbAy+tuVws97<3qplXpboDaey>0_a_+d z>rviIo`Q5Ke6UAD1&l8zLf?JAj&FXP6|P(Bv{r4(!qarO8uJ|PX_AtW)iZ9&$&*bi zwD0?P>GXNuRtRLBs7yW>bhy~H*j8kj6@SO6uv;R`ZCtWz{>579&c&=@?v>-#mEEIj zSK!7#@oU$`gC|L*@efIL$bVj7^pKI(niIuS_$16BF9CxVKJz=NFBBq9}wqcYQ2-|FMS&x2Txiy_~N ztmvufwAb77`LZI?IjiseMZSxjPl$~H7R3B=lPaSf5+iat*fjsO4_f-Xt!avny%-Kc3(<0q@Br( z#ZDFKCRG?We{{FaTPoQnqxxcTfIm3!@~hV=Fp1+Zw_EF!j{|Dbtel;1SUpu8W=d}tXr3jYXUK1yxv!WaWLwE3J9w{ z)&&M^88^N8a_g)?OtQ3g2$R4>{Q2Nd9-rIQ6MQ(QAm__|u+4X;Jd1&3ev1@5aYji2 
zG~S$?Dbv;!KDP)6%{mWCE!_~W%zUbAs75d8>gxVV>jrikY__m)?gGsJ#5dk+9WTXv(SQg^5LwfkR#chlA}%SQg^TNwH_!z3W@jrrc8noweeAlw^Skkh;PkSOzxgqDLs$2S$yTRZuPCLqGiHfRC_Tdzz5WrnJE5A+k*^ zT}EqzQP@y2fZ`%F&TIEw@`hpiq*@MTHjCUR!t0v=ck@KcKEd}%^IL)7;>CeW${0zx zSLCF6Ve6d1ikguQw-*j((DRg$7oybjW$o8pi+8$g&0X_;{;Rn{j7(BaW>@fMS2nlh zQ)LBx+B{J^hyE-ngOt_ul~Y4LgTm7_g>61bh4fb8BV`!nzv<9`fyv7tAxTSxKX}Ah z%sonYfhpHN2Yk?PM*6$g`CNYwDRC5wC|)sEXjp>Dk|YJmnvd#i*CjGUZ=N?y7CGyq z7T;lB{L3pOk`LhuUIJA4r%l!5mNGKIv-GkUV!Y#bn~No-g%m;(7tn!Bb?3B4&T04X zUd={>mcoN57QH>gV^|1FbD-_c#Q5Ccb>m$WFe+S);0bS>(Yt;7ZcO0h+@EXMz7W{8 z!)0A}QzVJ`W}|>ml)$Ma@-_^-)Dr>5+W_zK*L~l=G*=OGJU#-> z6_-4WE^_kO?c6bwzAr2^4o^Fn;;=f*5UdQtPNf&oDxGw2l_OnOZ>vl6XQ%(`ttP|c zfPV94vR_r@YLYD(np@`9SjJ-_!-3s(;qdwxgs0+@iDba~To4~;e0DZaNGvSR>iKBb zIe>jU2-MwU;^qO*<}Yc#x{qh^R>>`e!(@T~p*XOhCvqkpR$cYxPmy3`bt@mf6FlLF zJT|CID<}F5;U)P+-?-y1JnanEJSq4IORg=AM${!9S{zoMO5omSOr{BfUnozw%ir|i zfWQe`prQ$@LUbTW&pEZhP*S`S-m24?Ltw68?`44YZ+ z@7S(J($1svtQiS8b6Ey;5Wa3jN$Ke5Y)xj{Sj;StT}S3My>{LWY7MXE@qk?S7;hUv04E zJrk38z~22(d?^o1&lpurM5|7g42=``>m*F{2Y2WF9?mA?Gy?f$Ev|4*e8DDwZlVBw<)#;y1NyX`(c5IG?d!!R~tQ`G-XA2f~E6coZEwKlk zNIACLFErOmnnNUk@-ehc1J^ttTKxKv@ev32^56-t12v*p# zGE1>{*Xs__nb@fR-k^yNs@4o<{)mc+{5+mxT|zbHFol<|BrYIDl&8~&AJxY*Zc<8r69cI8G-}WfCqXDaVKQJ|JKOz~&er4w#E~nHERIeIzv=4TVjfGOkHz*@!5$*2XQ+j$ZprK%AgooJ06qVb>k08Ph@cnm2g9a8mb`Hbk35EF2(TKaYr-r z05Lc@gvX0;I!kXkOj#o(&&k7r4(Z0r_ ze4!v(mD_2`Rbf9M(lpP~clou;yoz{M>&tC#bgfk6Ik-5SVYTdPXC0GeBF@{5$WP%F zW_Xd;&D%Q0i+ZZ@Z$~jN{ID~b=nn4g6@aPTUOcPOXq>8PYyzHqLY^+0A66T0UWg*= zYj~8+^EH#1czrC@m#k1@@MjXBlYMc9mMgw39tTA$@@%JOK(3%ibJ!8t@=v`C_bCaYI=-Fyw@+l z!H_#QR|bo5^OvmxfOKWCPT2YhU;aGkFP71e1mD>$){8J}LI%@gqur0~kGIl9h+l5w zt1lc0#?V;^K_@3t#!?nbZZO%P9AD}f;;k~wX;Z|I0c@o+$L@pHOnj4>TihsXDieZh z-Nn{q>(W#|#>yQ$eOfuGTUywpIqUe^Z(0Kqmh-JEur8DU5-hcFrWgnYL%JyM#Vu0#1;LyhYR0|Cct1aQz+!|y`25(GKFFm zTxFe4ekPEn-q$)PH58vo^3{e~9C!-c{8Ak}c#f`j)Ul^%ASIvUC;0M|{ zeqJoW3Jz+Sj)x*G|E?NdQ&}ag)aID|TOSsZ)j`ynUw)CT=j5_#$3J|}SdYcQ{$&(B z@ihO9f#?|W)g9WQF^BEf&2cg{g4Es9Yb)~RexWoO|z|}S7H=(%Dp$zmS;Uvur)_; z8=AI)NC(&T`wk`yBPi*YK-VrIxim6Q^E7^kO^h_q1uc6EP$x z8$jFV3qN{GM3*wrwP@(PUs`)U_MR};@L6Id5A{mJ(HJ;tp{Z1fhFL_cSns#WBZMQ| z#PNvOKu#hXKr&pYO>f8sCOZJRsc}$;kdxSK*GM$1D#wP zr~xh!0>bjIFAiI(?;n`CBl&*<5dW<#-$tAw{t}gstXW-=1^{Fu1r7P1!fSenuS($b z?Y*bwJC5)SScM?-SD<1G1*Tf%?B_Za-WskJ%Ww_?4(ihrC3_AQx3YdAex;-)ny)gI z=uA^Jj~mGw;K^0=F#lIZfQG|a6pU#VI2~2=#n#{nf?!eF8CiX4L1BZHaLU+z8V}j4 zcq5Z+uz%J}-^K}uJq1{ef19m3XJ~167nkT}VVCbQxgM)h%xSWqVf{nx@5i<3QDM7K zp9X4Lw%2qbP)|{ua5?pY&a^AG}(6B?Fk^5jvL|0CZm|4Z; z{t6-zSGB)`JrLq?lI^fv<*OSa?qB?T%@g#0fs$!^>#_G;%lkCAM54%9g9|SNAsfzw znvu^f61E|mpY)yj_PNRCn_NhMJ1(|4pa*~$tviio^79!qV^QRhINVXCHHR@JN{K#h zX03)Hd#US@{RbX4dCQ8v1Sw>Jqj(5H0>_J8mV2C?;ZzP0YPGuAxx!x;OH%^Y9&L_N4K7P@T6J^`oT)Ft>T$K? z5w~?$Q;G&_taGW^MzMfgwk|Pvh3Uai;dwpi{^U>`j64?cMx6 z*F-g;CMk=i%St7y)WLBy3MRgA&}_-@i@N2x4Ji9{Lo?4w4d-ILJPokgCjv>O>Bx6g zCCg&HW01!ngs}fhc9&%nmytD>!TQ_LA(3tig5-{?E>q!xYrV>*RA0LDPi3#0g~j>A zW0Ow&qio#!Q-ue6Q-0PeE%@VY;HnVCIk_@)cho^t4hLPphAFXp`dAJx&+9ZZGo|Fy zwD+gU{+R`^738y*F4Yg%49AfS@V9-FXJ>bgjcCXK5zK*#Ga0(N!gr3x z&jPBpzny#Z7$DkMPO#;IA?@Pv^_;^dcGoyKE^~5bv91HbVwMuHM*r6^d)EaohugZ% zf`+@~1T{U-*|F8Jl%QcQZ(JjpV=7usuhx@)QrvOD!TqwgeQPh7KhV0JHLJ>LUnnzs zm5eQsSY^K`&%y-5;*TKBx^{nE2lso+21{Y9|7U_6BEeCq~kAD6Hax? 
z(i#VNN!!XAfo%Q{ciNxBy-shaqR}5V&M)d%cxzd!Ikhrcr*XbhTCaR;BB|$A{?o~p z)b-2OE(lyn&_#`|<9W}+?l~Gl*Fc)n{OVfP0iNj|mV9u(n~@6bKm|^H)%Au+x1lhd zI6B85Kq9hEB*%5zgS3s#NYn9DxK>j#-#R5NcfCx@2O#MV=Us0K0QF3vFB~_YAgIj& z_PWc!pj-uAjDa1?NZGj%fXu^ALXDqu-LX~^(*r|nJd-t9!c*yOPtb*Us0{{am>ZFQ ztux&lK*lO3bgezV%@vd2_r}7Zwsohkws_*x>j(REr;n^QZ&duDP18EyEBVFjmuu6w zCi5w*7p)Oe(O1^VWg+ehBbh22vc*W4Ro#JF6Q^tBxE{ravt`uEr6@_XwkIL?`<*4b zNx2(@=hJS-bK0>Co`{bRYXeo4@-Ij37`M&{Xq~ zTRtLZeSn=L5IDMjY#-IoMyo&qo>g%j16rW?F7tW+MZ6#VCoNm>{xf$@zUG}y#cO~m zYgJad(DkkdK7%ACnhm4^n{S^17~ySnU0q$00KaGeVP6GKk;}aYzynL<%8Dj9K9XPp z@5Nh62aYj(#{Q2V`Ns^s@*h<3uaangbn1VVp#KAp{?A(Vf9?36r1pOy+&cUTXY_Ui zBI=p${6!+Dr+BrZV>7V~?JdF-b{%5zV@77US8Cg(%H$G}d(ime&zkb+>huciHH(jy z`LK*r=~>>b1FRenp6BERzmEf~4yj{PBzNd)5U*4v`_`nz#2*jqCULRA-qe?ues~&$ z0z9h*-X<_W3|2C=?4(Lk4)7MblKqyg@+R=|JL@k^c!JWAt?MvY5qCDvCFFt4T4=v( z^IGg)1X;FIxLEg{B(2sp!$(IzTZOhQPkx`g>JKphcen1X93vTxp2pO=Km|>aY2&Elk0uq`V6Qjl?mm+en$oK=NAOi+MZlXmX z|3M4ejwinz77VG|Ta6=Bctrb4bc6$t2?nWshU4YheINMwyU*F`BNT;mg9d;YPL!9j zh+l4?=5$s1x@;2kej?GTh2!dRhT+j7MUvTw-l;}vN|`a|Or%`q8{fDHVhsi1Fa-eR zN92ypGGZu^*)yR|(Xv+sr}zNxa7sSNzOwXKm$GMwhaD=>wgeJwSBpI}+zD-aV(pOm z233QA5d2*?CpDG1qd&V=K3&e~?ZEo_IP)XMsns_VG05|wM75}mOA1M0LgQU!pl{?_- zd2F2o+*jBX%qHET#k5O&cJ`V~&-r|S#F`t-G9wOg<6+e-sP3Sc#C(-M zC#)dKO(@*cxZB?WeSgvwNI6w0Dc8g0eyv$B3gD)UU9nP)6%ej}o7zhO>HwqKC8i8T ze4DQclQ*)ad+S8{*3(b3?KAet683 zZW1W~(UNgz;NzQmGNGYb7iVG7-Pt<@&vBR-JKd0VEyoxyFhZdC4&$DSpXH!C#a?3z z5_>X3W8*RF>k&Ijds>}tP>la8X~a(-XzyC#l5hm`gGS->S}?~4kdDe!F}0i(lTKIi zjX_5HOzd78J@eTgZbT-#@Q$Z$9qs+&&HGveF~kz0Vuqp$H?3o($Ee5>X~wa3mZI=3 zbfUZ}^x23Ht7v|wyDd@wYHJ7jJK{LELSm^fMX#_?^&n*_{2vfCNSlR$s_mv}-dWr) zViDvYJql|+t=W9^BiK-=y)H%n5p=M>F2?C}ifpU#W-XHodaJ=#fX{*tKDdJDfAjhl z`lvIeGR+BUE@$q$*1(F-;2^p;UxGYReG-N*&+A0%A0g~reCDgd^|5EBt)p$x&AX49 zu?lig{Ah({z4CBcvv)ndNFoNW9&RCJ7q7jpx<)$ZJh znD;QobLS~Org%VY=r9O2mA5WK;tCf6w=9@p_?vIHHnDphKv9x`rLlZ4qiO7Bxa?glMnhn&9gf) zei4N*)_L0!A{$pJ#Q8rdGykf(fV;QG(8+y5PF=nGtQ+0_C^$L$JlRMl0bR-3GI4cf zmFM;oR(=ZULfXZ&sAoc-)qnbz`H}lLZq*B57(xPM?+hd~p?LRmHlzVF_c+h_Kc2rD zm5sD4er2dx+D+}l8p;#n<&o`G1ADYwiw-as@;!294nN*z;~kO1&uJHoqV5Cg&3vsn zYHgN{g~fIw58BEoB``x$3uy~joxA<6@3iV&Se09PdR&v+uj(v#AC?z{&3<&<2L~qe zCd;+_NC=Z0qs7vCzuXaC^B`$0ET08S+M704K}TV(3b&z&C-J$#-p)*@W;Qq#g7ETiI14bT*cLV0@jP`1!)VR>H)uhm3aEj zpUE@kO6GwN<|~+-#Pcvml8MLehkVCsB7HNC!@)MGm9ggAS{{j13ZX zyP>Hx?wZL*%Y-948J4Mvw>C@HNEJllVYj_pEA;R%6tCPu}2#LQ_m`Q-HA}EAD}4p5U*x2c?i9S{Pjfca~shQ@@cl)$Lm)0 zT8(1u`5RG&u-$v+MI{oeAfpcVgjVsYO1w{giwPK(E2EDT zz)o--O?2h+EW6~1BM5P#OAL^+gwsJ~_?KEK2oD|=U)itJ<$bnA>WkYP;kA&ston=s zfQe7`-62J^20^V<8FL9?YU1RC!(}{ZLkW+T-7t(Dh|9$L{9cNC)E*-)Hi=wdYH@|i zXxN(gdNDC-ZEkc{>(x*;f0V+JgSU7)G%du5S0{F?AeQE*uM&YWSL6K3ov_Mf(NYKS ztz)5wXKW%CDn~~t8#sv8Y02iEt6KsNw5_`~Jc-(>nixOX4KWBeiW$pN1cT;0dB095 zySDrQ$~w9b$5OHle2Ada;zabum`%dYRW5p{2bG#f-a^#8kc-DF7*NmVSmX$ghg>?( z4h#?3>Yb;wZ_F~$$Jo64k=fAG|DH$5S~^NInS5i!g;{$~cXPbeHyaIY*z@)LHq2FO z`w41nrmo2J93|IXB8{34Du_`Q$fY{cxfa}B{b6-A5b)g2NgMl!0i%}4{dVwq-Z>!@ zTrJFr7Y{Mu);Uk)v74@`UGa*?Fq8c7kTrSiu@rH;68T__{WsZdZ@lOtX1`b^9aRMi zgkQeT)UR*EcFO2i@;^!-3f=T`%*t#0w45w#aFt>gH_yG{NW3YW3Vt)Mf{mRl5Ah30 z@?7zZlvr{X#~iu6xb;=ddy4Qj{)?iTn!oDTYpq9Rmf_>HHbo{pP1aw;LGGe8r2{2B zp~d;z-*=vqg;loOJ^Q*<*@ZrgcxHc|Zg1BY$=!VGw4Mx?JtOo60RHJi$OA#h82I@A zjGz1qCjNt{U;7B&f^YJh`{o<_evkmi@76cO1k#Bs#7J$a9^T5^PfkoMl#Z}DIWZ~) zWe%SJfcv!%cH=!DwH+X$aR6wZX4Y_W_mrBNTBt5DIU!-075I7r^#sVNOX3_Cf1H`g z^+4+3&aU}k(`S7@pp^)w3s9~c_1u^yrEqrvvVJEgr&Q9n01t6(N=#Br^-vEnX#-B7 zgZ!UZ$bW+QzacOBfwAQk9v~xKeNW6}s-$q; z^83hz*nj3=$;%H+1y|9X1K(u$7d~;z=a|X)P_*Qunl_;o|#aGUz6$p1(?7ZxFt(v8I2q#;$=C 
zCp<9<;vcM+(vUCW4qWVT2gEI4|FdZKdQWRtV@X#`plm?{^Jm^Ub=t9@in6%FI)1h< z-W-d!j}g9>9-zE0|Mr&`Ts%6iZeK^3p2rf9TK7fG=|l>LLMf|3s;=8&A@Q2d+r)>_ z)nxK1GqH$0a0`ZrOXTE{f3bp>F^|?z{yeD;dEW^eRRA8y{PZC{^?&2G{(wG(1O}qC zjb|VNPwZ@+M?4!Fn_DJOD2+88B&T-tP646WRd>hyaQBd%act1%L2IrrEiK)Y+t=~S z;CGS)ZtjtGVG-p=9i3|Y{(kjbdCmlA{Vuw<@B@o=$c+hbK>cM{*T;dSWsSaZ)=187qOdf3!_T3-}hFLD9@syTul=k7vurkvMIgboytn18!jme^`@DP4<|sq4mH}ny&Gt47cpAfdv%BP}n>A{U zy+>Z#lu3tw*M}0aoB1&gj$QNBAs>#Iz69xh!4XX;PZ@81HkTqo-h5R--$N^JEo6-s zm&7cKQOQ^3rFD)0AUu}SUwh}LLA@~J5DhC|sh!XA?^(K*VbB`L_nJ>4I9 z2&yg9y@=;t7#jUUrit7?_rUx-Z2zK!5iALG()8I-fz6^Vg6oYD4JKRgf;3qWRfiX=n@@GgAfu=`W=k+etdL_Z3 znM3x z;}ykHfUt(uT2G2}u2-f0JuibpuX#XfWm9>fUqsrS-^I>ZW`{S(UhqTxbI`X)k^&dm zSO1hm;wWjcMyxAxceXjL>UQ4ShYFEfooI|_4vPDhu%P}bR1*Uwji{(z=*1<`qmaET zJc|i6Nv=YC3JjTvfZhxPGyu++--P{J3v`V2Qpn`9aFrQmVR~74ox%HqET6pt(m%dD z`V-kQZsJF}WUAW{32xd+M5c9Mt2WRolZM?A)sCe6!I7dBUqYUKf{2Q=Oxfi;dI4`X zoJE9`skSSis8rFd&HkFGfK!MoQ*ReY5r(KnmXoi}Cxim3!INOO;!fG>)aiT1+&3GL z?7~{DoKeRf$xRfDtVzw%1y(?)wnO8&T601WzpJn5rzgi-V+k zP3*Qh{bcONl!S@8%mo-r*{k&+di3|FXY=hf-$yJsl$@^@0_2&8u}dh7l;#H#>1qi_ z$KQ^%PvZ{6sFuurO2;C)2vFF=(nv5n*&sDLxL7H>*jHeJa|e=8%f;vVh4AErVjY^v z1bH-rmEC{p*Gmv-#9mk=&G*`^TeNDdmF1=}=|9|Y+nvi9j$17<(;VmSs6(Sf;#(D@ zs1xfK50_RW>d!@!vlA6ww~s^hg_zzB9{NI5PSRC&0}T+p64V#065uz71x+{93|q@_ zefKL4ZVQF~u3Zp{BG+b7gjsNeXG?_KTdf^^jXbX z3rIG|YUz}`s@luulp0@ib=5wbLmK^~p%&A{sN+4Ssud;y3E=RYfp1Mlg!^H^hrSjBRwi*a{3=v^_iF%D~ zkX2&uKz5u&t4Ip1b|Q|Ue(&yL%D97I^HEy=eXHg zEg&1ngjLb>b^c^IYH|u!$3DXd#b@PXx?=sgWgttB)Loz9NcS~mYfN70O;x|m2L6TD zkv@PY&Md*s9{ahVTBG{sjk|$W#2K-jee9mJ3O)O={K*dMqiM#Ho(HD03j1-A!F zA@HA38_x5)A7mFXl`@w+Yq)%pW2-_+xd&IXW1B4!A2r1($@d~{2H*6JhDEp1hu2aj zFJub_B861orn@q_5{&x;>eo^EJi{Hp`d));Tn(_>&U+9I* zYz#mpA^n;ECZUAeNZ3ipC27~(^euGJ2mo)w93EZKJtgD)hZ~i5IH435w@)0A^6xQ$ z-ypP>?pK5&XT_Q56&@~Dnoqp!x0DDaNjVt{W5g5V3@Tm7< z+(q_)M=$#4dgZ;*5-1wvt;~QH%Y93CUNwR5fLbJ=37P;0kMm1_+$9a}S^f1vNmG!C z=QcMXwQiKWe@{0pDQu8MuP;?O9lMUTz<93kN(R0vOl*OhGmXaG3J7B*dQGcG9twb3 z9+3Iu0Fq8SX}(XwQ=-Ni^&H%)uS z04!T6tb%)aTLgq(Un5Apf2YhTmj6iUR&RzR@pLMaZ}3&nFsj{YkWnEI&eS!!;nJZ) zTQA4rQIhUm`WwRnPq=fI2FsFzBr-W5>WvDQJ7*18KEoVNxn4ufQc4w0Qvkt-D0+=+ z#(v!T?B_lBx?X*XrXhN`7w2yuz_Ul`?2yHkG#PA6d#cBCssm-c<&ksfQksREq% zbn;1-2xpooXA^}gh$@I)x#Xe(AJzCUDqgJ$zR;E?xN`J81saXPLujS8KFu=E8Etv> zIrH;iIfNgIL{R?UV`0W207H=*QeUdnll@eBgonJz*hkx_>vr-t=_p0===s|j-hSt5 z(oL?#I0~PL8|A?A)Jl3+*Fjj|W1b;dF5%;oRI(qKp+gD2o+Ag|Y$#iv#`2{6_j)Tz zgI3c01`>^T#%?~<$!{IsFKy*u#=C7uj&OcctML42d&;O4bh^#cu$9OqrE&TLlgzFS zC)RA_Y=4j2!oT?ARr9V^tVz}q9`gLwz2NKqrLYrJ_&fu4Q!+u` ztvM?V9soDLgOVd%hwCLRKoqOi@-TQkUVB#oc{eb7jYN8WzHuj~$gok;Zra*b-J~`8 zi|I1nkum3^yRrh+Jn6(-P5B4=V4=-mo74Xum;8D)OOK?kcwlA^%73K*vZ$4 z8v-K3|G!COf7FC!8^1GF1`8+m5;+^`#6RL4dO{t4Z+rF}+NN?I?AHlA5Re%EZ6+DI zP8M`zp|ySe8>IF`Y^F|A=pl>$`$LUl3}0|gbDyK}!l2E>Q_5+eZJmjUiR>RuI)T0i zX6yVm9G+QZcu+xppuKxY*8NY8{brZ^KWB%{^#h){$AcYiTcj=DxAyoFh*#Hl%)5N+ z(wU7Ae7Yfc?cYgp%hkH;v9*}=11yl`y zDXT4Kp(3=*Q;BKTs4Sk6pPN5~pgWNoI^kq8fSTfSs|waEhA|#cvn9PMwL{!z`TMLi z;Xuk_g)f%)e1x-ttn3=4yX^}IeFtqJSy2HfI>+PY4$gefY(z4K1rOE+iEiHIp89|A zKe$YJe5s``gjD}~$E=5N@xq9q-nAfag~J*M=Fk)XYf6szRvj@&iE)8mFPY%PucsZ z{o7E)Pa=1kWkwz2a0!P@9!LR-6eh#Jt7q7(y8tP6Kg=Sz*!a)ea(vHd)(f0`IMWTb z-8c^scU4w@#tG>(O20BDtB_P`H2EermmxPs8W(p_7E2HRpq-!fwj2Mcgh@fdwQz4? 
zyG}ab*#n*9E|FT@@$>6HJob!qTtKd_J}6{2cZ2Qn4b*+Yh*j$iv&7>=xq4j9ca}^S zfH#QSu^;kBDez3MH1N$m6P~o$m=tn>q*})*neMsxC5_uCq&KhacaLSe4?Xrj_j~(a z)9pTk$kd8i_s9Vw#em?63+h5to^i-4S<1@%2G^!;gBr9lEOBHbqh`~#W{d*MY)-L0 zE^8i*QiBjuVPIzBu=!CNSLxd&aXF@5_tuIjJ~ifeHID3^ibT5Es3L=I%KRr21Lf6p z!pXqY%ka3}<&52Gtn5BuK@Deeh0~E*g=DW7>5f25@KgUiHRTQKn{5-ob5<<4ys5pj zD70C!S&lZ^C!q>VcW$O8?t8%aVNcrg=>Wp~RKWnHe2mn%_af3Uv*|J3R_Qde ztcbgPm)80gS$Lc&3c1gG3|^94un1${JdrtwdKJ>PA7kl0v|#C9l^u~rlsIROpB{cblL9ax|n&iR!rMUc^CnFtV&md1v@aybm7|&@v^s&xzEc{;lbAI7;8^~Lbw_}Ut{t7wW zBm6XBpgK1{PHPWgaa_aCd-Wy^9NuTbtZEL-;A7}&CVBlcm-hS<4OZGU5N-haPhh$M z3#YmmelrQuj!kL!PJX?1lOtWtHzgT+b*WPv_3O-^|>Z_nWWkr;17?sk6`S-RG>m)+1{R z8=?fje=`Icdp+trfu9?f=_$oESW}I{vGsUTOizrQHMLa63yHy3syU~=ODvjW@Df`_ zNEETJF?T@7@JSOU*oibb*{<5f;9F7PZZPq86RaK44@sfJd%cB3hDC9@3fo=6v83#l zhn;f8E=${Kbq`g|Yw|>CkRva??2|Ho7VI$Y!igh1c-@f{2Sps_e2q8Gwla70^M|7h zhI)3@0@h1jK8W=wo+HCh=r0HP5}@3#4Jc+4g|rgMSE(U;fq;h;$X7RGcM~Nh#htiz*|Feh#)JufS}PHqgL;SH+7w3bA=-s!L`JQ zyL6P&hTGY$ zu8LEU=r6v>kE}y;1;%YDcrQLEPUJ?vNgf_Wy5;T(D&H<>c+_f_7&b0$VWOe0JkFCp41kdqHmk~S44OhTt5;#P zw&?mH2@MwO^E&U?&#mER3((s%!R@3S)&mppcxA`|V()jMLBs5{^wDPkx3kRQ1{{V;|$$l7hXNMNRKUCl8U(^a^q@_+WFQAclzRMlhPfdFOywW z+#KVa{Z-Bu6?#Vr-22r(;E=0l|{E@0G`mCZM#udkz%}Z%Go7T z*;q$SsYsxD*<5XRD^*i`hdnPSMHuZTg)`xLXl*SngHV(38+$9Y@X1n?E}965HMFt^ ziX|xgS|k1)hpY1^^@fo#)2r zes2SSQhU#d2)jX4A@J*Af(|(yh4aHxhsR0x5_32_&g@iP#P$8nS&_LI^ks(Vj= z=9fYZHd7eJB8HEAIZ;Y}Ev?=k4j~LbanW4lyb~1qPzQ;kl4veki!|+$Cbj4%b-gEm zQ1|Z~;LXF5+3HDIa`=NQ=i7C%6gr)Qk2s^br1Twa?vQod0;0`oz=;A{i7thFzw@hEU+_I6gA-7Z5d^D7 zaFIQ`rw5sN+2;nUJQhLfG1cEWto+<626UO&!S9FIHFDN?w`?8B{dTKj+iP`Veu#C* zXCf7f>2TG=l~ouibWI~AJM<>68`pRHDJvX)St9IN-CLB?wV|%A>6C{~ffB}XLUjc_ zq*@%jFgCUXc`fqguhQ;2k((H~e?X_;%W;W0h#1}%WwJmNIt=Yq+;hQ#^as;GJlzX+ zB6G;Jxx}Wl4{|UB?41wzC2(urufAwxr5RH|SyHWgl-MnLBGUyEw*@gs00rtuu7bqn zNI?BZi7x<66)Y25^is5SKZ)Rt=q=ueH=wR1dBmU#xNEeNLk@>*v5!CeIfb`F@7qRGdIdHWtj zp|;Y&fVQo)>{g<*hTnaMeuTN((^+@dA8F%%j@jT`EZhMZrLtljPBm$O-eVfm_ z;XcTlHnh=yQh>%1I@0(ZaPck9e5aKDT38WB!AG|&&LU{}NUms@ zs$AeY8%2M^)|SCo-|#*8-BDe9k+0ipaJjI;xnqV?k_91C<Zf{Xsm-sd zktf4`*4@hq-LRFjg8`Yqa-{oGVzo5DSeF{5Z2r>hO+A?K-BhC{;gT9DyL8VB$0ICA z2w=6=vzd(om0smD5*)GcV(Nq^Bt&vF8IBY}8zMF8R=lSrAk30dJ=e>|@=ZHUr56S{ zcHtA}b!S3DQqY3Qtum8NJfSNwDb=p-vYwXsniU_4mA zoT~kgLiBczp%7$8XkdUe`?A-JDm-0uGxF?}rcGm6TZSpEvpa{p6kxkVr$r?uMnvN7n9&5V|Md#Hr%^C^`<*SCk{J|k$@Rl zBE_)Tl6Z3ZI4vYVgOI$HM%v9)*6)zZncnAv9&m~#pYnpCrOa24Hrgv}PZ(k|UE>)n zadM_*ogwpJ7H({rxN_3BTu-v*HB`8psrjg=T2E4~q{kv-yftnvfOPKKKSJNh)g4UP5 z;SOotG7ATn3lsgpw*zjKqSD=S1FW|*D{P#+l+>=VHWI#uDTYUgTsVJW$WmJdLrZV( z6i4^ow_ck;srKSofiqv5d3>52BbrV?c(9*|RXkI_P>MAfny0v-{p{9~)^0oMaaPvy za}I8y`8PYyYj>ZJu=i&zf9U5oy_od4cR&5HoH0DOXr!o0d#VTK9=%@erFkUXD=p7g zMzS-j!;Fid{qAo=oOao#+{!Fc{qf{TI)SuMRxac^%uj7^^m? 
zYJSqca2#0Ut3P(1bDTV5nCP_m(8{Rk^hWc@Lyqr&T|YOi${R-q(lE8AH|uypz;L9D zy`PKIo}82#3%=Zl5BCX#gaxS0UJJgax>>>I4$5nObRY7%cp6j5Q&HD#q(X^y0rRHv z`j%fjn)ZF?m+?Y3SU@js-d2#8N{|mu=)%xI8h6KcNg_Uz(e~qJTf5Dos#F$8?ekX$ zxjA?nxp`|YeouiLiVO=2EG|8MR9)uW(kZu-*n5he%C#OM872R|7y?%UKDFlTq{`N~ z1~a?!?u_Cv=cYL}E&ul#ow;J~aZ9$SUoWdT!7^5+v_&PUTG(XB)h=Ul8+?m{FY>NG zf@+*cgB$!?_-0quN&}sk(p+5H?(FSAXYzo%3s-dviP89&rdMUpBE|z2aN~}_5z_$+ zNFAT@^+^R)u~7!Mli>rYTeJDhl1Bp~!{bycTm7)RDZV%Z797@At2#)LCg6E5l6U9k zxvu)^2-4kRD*J(;3#0eJX)jHgtU4%x>>a?!Ij9DS54XFzonRLdgI)LTA2D&e|0szP zxiSo8{gHF6$(h+tfQM&5#0Z~IGIB&orLh7HaER4?xOXD zd0lrN>*xhlncnqxqc_TAO&g5~-jKVY@!?4@jdo)QJV=M`OdB~X4vBo76g~heR&69< zi?m?Pu+HFPnA_;ZCN}lmN$+TW()JU2N-`b%WmQlbcQHy zkNK*Py>PY73ah3>_AuHJe?QNc|1OdmA~9)8f`r|@}A_Xb~$h_7$1 zS(MLcPbl0llIlCQR_nFk2V{Qcg!5su62Y7s_V$5^K&GIsqLqHlP&~#EY3uGfZ+a_a zIKrD3pTeiuF;8N*(uw>BtA=bNrG{Hk{!Zu2!qPnKO1K$}R1DF5jt@Q6PuVIIx|&_x zedXMmo(D_u_?gPpIz9&#l#Ru_VOf}X_JDsg%L_Df+nW)5YZ>^{74g7~wO?mG%sX_02X_tZM( z8x|}+I7~Vahr#4l8d#Uw$C*=7q|rjM9evrBUX0L`E;@zgS=#l4aL2msZQH$E@)yhl z?^9SNoC3*24Q^*yJjLs!@NjCG8CY&_@pKzpUOP2Q;qhGd$+35zDpsoq@i&{iDup^D1)G_Of2y13FC~pw6+iKC z9gUNYOCjRae|sjBWg5a;<6z#HT{zrQr&{7YpZ}dm*6B!#NFuDf@Z@?2ZzYG^K=f|3 zF3W+-D*4cf`RE{n+4~;9VO`NoMJtFCvv6}PDWsaO{!>)f=9HjqzWo`a=Xm|vwAi?* zTj>6*-NaErV1ki^^T7@FfgJCufPsGJQb=-aqA4R}_u_#~!AX!e>>*bC&E7@(tAbcE zl?HEyH0b5r?oTw8F>6rMqrUF$)MxDUa&5v+@Cyd<0`ZlJRgz|dtA4u-q`<1m!kz4P zFZU}@SU?Xs21doa5QTEV6U^Jnf&K?esrFukW>2|{jg8OhKZx2Zf6)9mGDjcwJ-r}e zkh7v#m=o?Ir0hUPH}Szq>c~lqLW_@)e*)Q1l3@Cj#mh zC1w3BXGK-7sNdg&2_SNPtK_8S+1pZF+^?u;Q4^~ehK8p6)YX{F$*K0#srF>H`38`y zDn|yE#d3hlt7@uhG5|R*(7A&`(%2+uyi1$|TwT_^ev;P9l~lr%$U7C$3k*QY%57t! zr>Ut*FBwAApA$(H^$hsuvCM?5l<}GRHpVK7m^*0sDYDS+Qh^vuw70k-0EhFt8b!_d zuk*Hc0T$z?Fd^U}D1>?gH_JPQzD5zI3F0(g>7vVuI&s7Y_u&y;nh~dLo#$>YNyTXL zngq#NEDQe9+))Q|=b{*TN6KntL#Hr4h%W`NRE}!l*o~b;@dRbxc$-4N#t!qED#JBl z;j-L^T--GAi^7<~&%b14TQKxMq@Ys^<@D3!m7#ZF3GHnlMes3bF*+(U@I47ALS* z*gC3mp=KNBZ1^{k)0|d_(Cz0p#V{qZ%E@ij{j&E&(q*bB-&EvK>e_bG?+jwK1zb}NM(+I_!HwhFGYAUZqkIhP#? 
zy+qLSTq~q0&p_hS9)&`?zA9PAGP{dkvGP@S4bMz-ElDR|>r6S=>&+>r1(tGpO4nux z>C~S|U$F}(tjsUtpOG||lLmMT$x5S*M;%r1HN{!vrKS&u8%f<)xwK*U6W$9_AT0PIDB$tId(EVzK8}B2ncQB;1 zq7))!rnT}0cr$e4!fQM;F3skONgJrx+vad^`Io>#y2NW*LZ51Yii?q4?&7_TfMq$l z!bMEYl|xry*4rW>JCpH3fmp3gG8m2TBej_YpC+&?mV7z$Fm9|m@+F@ z3PI} zrC>~#tIAM?0gIZoTcYFBGH4$2t&2w|%&AzM0!q7)=GkR3w2flEpt z%$>Py)fE1eu&DQ74VqWVXBlo`KA;)8yj(#JCV`y0)_S-anrQ?Hk$y0vd@qgDSd6Xb$@my8nA~ZQxbISENY3QtWbWgpm zVWXoUgUp^H3UZ8rxxe;`VNi0`Q-8A~0G8r&9bigxJSfTco(;Dj^Ickb)vsw5#Y#oJ z-5VwMu_6g_y;>%AcGiWfxXt(>#|MNUUF3+zWp00e$8Crc12bcz3T-v`aY~XvlitOc zo~m}a^hwTE;;iLVNXW@4G0>K!WiHlOImflz>9LF zCig-z%0Q?L!a+kg%GE;R%xfuaFmZ$Zicffz8#3Awc#p^PxcX{`4$`7^7JP0$o2m`I zXx5&vo23BCw3$}7!oIe~Zr56F!GXs86L474!?GV!5NMbajIvJUMI=z5i>Az)gM&oy zjZA2)L(H2%gOB(fO!P-eziJ+j8nQ>RpL)~{U*)N7SWhGr)tZ!yVmmS4OAD|?I?V)Q zmrJnkdl4;v27;gs@AH{Qo<8nNm-f1(`aCuV@4qI z!LMI7jm$!rCOu5V$xtB0=C!RN%Umlr==PD}NmACN(aW9uSNeR2;bXCaJ>6^6fOr{u z7A5J0P|C}VR6Eq75S(8(zc#6oV_PsMwQo?)py!G+iJmM?=f@LB)B{n(ps;y^Yzpk-st?`{2Jsxp*Hs^1}?8Hz3InWoMY3lXxy7Opt)nMec0fAh0|B*kce&Wd9i(SNc^zc3IfO388w${$`ko z{K=yA6N-Zcy?L3ctIf_pvBH3;djhLYh;G;z6DBQ7blgu7!t*mB;OYfR0k%zDJ;$(z zAs-um7^$4O5DPr=kKvX&2ODao^11A09@_SguldkNjE zF&nsom%e_dN8x=w%@9vV{-aKY+?k>uW&+8T7Y=dd72X1?Yzaq(3xb*xwR!YVO0(mi zsG07w@{~j5+y=!Oq_fVJ@P%-@eBGj*8zxNcPjW$|{3AeByuJNh%u(5JAp$1W`xd^x z1|7foRobs-IVljdrwN+iB%Ftg zbZfz%;9TYT`s*fj745-_+zIZby0UhNGLvY#6(I#)IP;0wOsw;Y7x(nbsFw-Z?Qy|6 zSFnYRBuJJnZS$T9F|g$gA>gja7>{>sVHMH~Af)UF$1SzNW>6Ik_KAb_Tof0<3uWS^ z7dj1Q))Omtx&!-^m&U}k`BI%V>L(hZNBc-*lcnT5*x+0JX=C-6^t`{b4ek4ox~R;c z2c0!m9;Xg1IX2$O>KA0>`Q3iPDHkWb-yfJ9ow3S8p_GsB+BBh|-dr7|pJ1M}Ht$}V zO>#2KAu(S=Goc{;nKe8`e>k5Osh)ZdtO-R3@((Zea{URnRpqvwc3Ko zkSo?%7M!+9*Lvdw8Z*bbcIPlg;WIexO{MhoYyo${eY9jdyDkt%@LJ|Yowt$%)m(kg z4vWR7`@xId_MpZ|<}+~a8f9`8K~2cC!s%_-{L2!EgQg-a>Av(agH5Gg^o zU;`K4$rNID>zW}2)G|AFiX*l^Y@_bbs{gF9T&m499n?Nqa95Fr|dWo6?^ zNdrDis-f69iAofN7ZX=}B?0=}Qk=qs^V zAJB9*w18AP-b~Dq6c347u}Z0tY|oz8A)Vs=4Vl`swd{#lbR0>}>-|&S1kjB5m+7gNytg{wnz-ht zli7d9cafmOzv5)=k#IoHNNenPA|uuvw99FoB9_nc`8V%>@xWjQN6E^bij zCMzk6F-I~T6j#jRhE-E%-Hs4l23m}pnti&O*Rurj8=vr}5>BSX#TQKmhCo(@PXQ`5 zGiLB$c^$3kxH~-KejGn&#F=t#u;6t7YDjamw`b(_+*gzKCdpk6$S~BTW(=AHs{Cx} z89;o>w7QjQKMdgLv1*}Yf`-q#l4Yeg^5W#5YMwxZXFri2nu}Ygn)GfQ03pdnH$)Y7 z`gM>>(yuzFuLbW*OX?GURFv+g4Nc~FGI}U`BqT7!lc$kz(=E!E6@HwUvxEB{o+qHi zaCtx2QLx%dDOdNo2%DKRc>~Nw5YKEymx$LnNZj;=2wMtIx%AE=!BihCcEl)9=#HVz z84L1L9O_<_Jw1+lBKw3ev(x&;!XuwGetJ*DkN6{5i&)$JnH#q!_^Ch1a0QWm4}>#* z%(t?i&?uF#`R3V#pF3@haKNY%9JuuwGHrG*9ZWmHLDS}%-eQH*{^Z3qMar#`HOT_E zpnHOoN)U37^w8AUR%e|^Xn$jW|5$N9W|gR*wIg>#T8!Nc6{DN%^+?Jq6zp_8`V(mN zy*S!DZ9Lw%-ZWrE>MPmguDqHoYtG#mYnb+j%H)g`==T?vvO_?-Ez_Pkbi;K~>y+{9 z7xAgo+_suqozvMq9QnR?0+`>{Xl?GQ17ta(7I?srvj=natW9s@J+2vrnjjzaoNo}g zZFA{seGqx|icEOtSyz#=#rW6ryAMo10MBW8 z3S9f4d_0@8TSt}hIL$3i*ZF{>i#kWo#Z|(?b2qdXNKdZ*9_pLKz;{CP!mW%6b@dcN0j7cR6i;Ma~Th^3PS;6d%Lh2m>ql1YPTFwBZ^q zn@a?p%9tosY{0JDW;dy3GgSDhb`Q$ga4MuNArDpwJLRpSQdBwoa9YwT1R7^1FMfia z>Q=q6`{4E%#I(Kp9TS?!-B4`NGP}vLAa@&i=H}7XQQEL0p?9FkkSVI2G_IN`v%v4Z zm6oYgkyqFm;v=A;~>X zi=AQT;|kWk>0>A=6B!+^6z*?GQHY8)$GA<0=?!WMxpp9m1Lxz`_ks!6>javzCmf|K z<1U1eh(WNi><9e`#D+jtB%&`dG4eWvJ#qC5CmM95@203y>ZY8z+;Vl*c%~0e=Uik2 zgiI~k@uD9tMeMK}=Cx$gokY{PXy|x?h^DEqpL%{)lx(yp9r8WeKvJ!=`~3*VWcogN zV16x3q3E(-uUr2zBr?2@N@;(%Sb%@%i`D+2Z0X4lL|d6v{wd}xKfhiSmo*|gWTUWU ziq&9#`R0T_b9dbPRh?=Q;YS(m%RRvhjF*!`lTEjJYq|BP8nfiA`0jz_2^t);D!X@^#Eq?ld8B0Cg;`G|=2o09PA2%s0}YO1OxbQLqReXE6ffizaMUen?!#|>5e@DQ6D<@FyXcPeTzkpEx>2lPi z|L>Q7256H7Ei(RC(UUu9wzy#eBPq{-$$J{0u);CGo-TVI1?2s|5B&S%pxo>KZ}UQ} z#$Q$hHN?+q@99GM-`l*b z8x__mK?F8K)U!PoyyQoM*RNrEmd+3l%?}bZ@H=St!K5TQ{QTSm?QNY0Inyg{KyH;A 
zb+w#(nLj3_(vkdLCnmqN;R73qt|Kz$psu@r>Z6ObsL_X<0-J>9jZs_}R!K0tmg#0v z0oPpzL{^qP*&!Z^0vWQH9j&6vGz*@*O%>oCJoMCP5$XOavQuMcrF$Z#HkeJDa+dIBbZGkBSKTjz zBI+RMdl`CWM#g6%*R2e`ksp;MRf{~%3JJF+2x_tHV-g&<%z5)XLmx}#^VA2>NFH|l zWKF`)-S7<()k`^XpRU5&!$hkO^VUG8@M$iBmzN^R!9@HNOSWe5IR8V-?e;gdSd5EB zT958Uv#jl_pIZ%0kB%P_3(FjpDp%sP+e?A2!VYUeYVQ1-$af;>-5C{h2d8SvnptY$ zt??~8AiGnU#gFn2R*V6dTJ4~?a+UUm!F}JUr0tcDLR_ifXn_(0=o>|0^*pu{^c0_h zu-cr~#=VOXT-5Sm;*=HckJaT_$g1wMl`XE+f#916imTy*J@5n^rh^z}O^41Af#l>fKovQqt zLHm5g9Z#w)nk%yti%a}GUD?;qbJ}FRnSzWBAT0_GijH!9@4tr8l=ThvaX3GwLz-8#y=%PKfR zcW7ukis(uq@zo!vtw@BaPm%H+{hdk#LKa0|{f%>(osZ~xh{gf^O*9-`NNzlbf4#^a z@)}qXi2v~rZLXVgUHIKU(+AmXXrECL=Tip|EB>$X0>vGqZ#5Lb4Vh!8f;#CXuNCCc z(CB@OQcAK!1#xQumS9$sJ|VDwfMn0giqPpE+Q$rlQ?RU^nL>9By1ZVtMN0IWfF;=_ zuH4|y(9mq*q48G6zG5bz0wd=Q!^ul*r;?mVZx~YzHriWGE-o%0gyOy*S$3P;~6O5iXfKx zi3JlOCJ^jww0^&q2?YQ+P0;9m+jU%`8TCHo9@;O72S+C<^~c1B?B(MEuy17eQ-3wt z6NQ49!X`<9%avyDug=kHJShlgu`R3Cu{;v;OQBG^K!`)Zi)J&VBY|f+A8mvhKmSnkl zN853yquj=Rd}|h1t1s{+1GPx1q&Me`4y}J(rA!>S_C%$n+(+YxpUjh+ExX8=30g@d z<~=!4y9uUT2NS;%ji#>|5w<@aCIhcy9=<`=gZtLfhefY=_|Y4>7r*WEZt)VVG5-r{ zBtVCSX7*~~dIMC-Y{k&JygkBoJsp8r*ZuG|n3w&KNx0kJ4|XY%?=ONvAa0oSx6yL| z@&FSn^$3O|@{X={u}VdzXV#u1en2FkTckExuF|gMx>uHw<;h-R(6%&ut!$VMIw&c<2rZ3Ji zihbt04?KfLV9M-DvuOWT62bOYlpUeI{PxtHxT4+FGbdYv-ya6y;6t4fCq6|WoIAU$ zCy5wECbWlpRj+kFo`3QuV=5yNbnBuD=1qznlxHb)owBE;?T$R!@Gy1(f@-j(+~syv zc9{@pQyD+xSh@DPLIL+OS>t4B9hYq=?iMz3)_xt1mP7xC<-Gg`!jn6J>@S7L_oWEa zbs;9M7NNJC9tEtn>*t|(-P?0LqlZ`1M~cTHxtr{ay+_^C8F!>w2A%>T^v_JK@2+7* zR3eBoPSS>{X;&IFABT$P(yl8tI||OM=PEHT${21 zQ%y~Y{EwM&NL*;BQ>U7=4~|nUTSC7d&_Cuax;|4(|N8TUkPsc1R|D|!UR}0G=TtV) z-3KCDK#7crDJ|HcysDyLD1Y2f<^izmi%URH*Z&{0kNhPrS5UMyJ z!=Y|20=(PWgTLP{wG!n5Xy)YpDB;27r$JhA3FaX#oh!YbXqZ#>fk=n~aJGZt3<>w7M*=ZG z$8+s!kCmP4Md~aLp`!VAlS*2Kw^YlVbBD|J>_BU1;^L0xiVL+jjgztwK;5V?!Q<0N zR)}jPP4PuOG%_u<98j%mnc1LygA(_%g;9X>;y3(5Lj$FMD)$>EIr7JL>X5ZiQgX7> zwR3q~V9lQ}7B_4LS|803wEU*O5?-wAO*nS^AEg?>u08bU+|gEm_0?qm`KOY;pPGJs zU4}^;(DPXz&w*pen>ve&$23FFzO;JAq|!bHTf5C9e0y6{bPw3Q&6fYW9HJeBZ7S(H zPN2gZmFloy+Abq(`iR2q0Uw5b$=)+ojjJ!X*WBX!Wc5=`UA7myrPLVMH)II&4-yI4 zMHM@-`l63w7zLt9(v)F*GkctcXcZFx9kzD|6{FH7E= zyqqy85<0jnR3VUTl&#}ti)(VSeKsHC2}$qD1Zx;f3C~b7Fp%^^Mgsf zI1=HVi(*p-B4V_k$Kw$C*~ql>vR@6eH9$NN*J(VJ$&=f|!-n`*ORD3S0U3eO&)k&e zL&B>5lFL^bv)%6Gd@pYcx;{P2jUtl0=v8CUVTGTv9w%MMjbTLGxE|_})9rukD>Y7e z7-H4rI+zR33*IeTB%TFc7T*6~(}0;-q(`O90_2$SWN8W;tA<3i#9b2ok(%SDaKk!^ z1?vtrv_<*0+~VTh8$o_)kLBiPN#sa4t)ajN6X2l|G_Xbln}gl&i7aakCUg4$9nRhu z0AV8dKjrrSRPoH&)(Ky!8!*LC-8j8OL*t&Bo$fW_I++Fb_Q)z=FIP5tCNecUI|2+C z9Ys~pLC*=UcFtE5n(*v!2iov}K%>nG_pE^6VvH4OL|-{I-Mh_dU3zQUPp#TB+?G1? 
z2rZpRE1`cuPSt>^q`US6d%d!<(oH`79-6IZRL^!o`EMzJl9WRsphEv&tG9FZ2UQw% zbW#OGoD$%GK%kZk!!n^`5!`Z*8#GYcW)Zg_Li;@XY0_3q8YPI^!G#sCB&s>*wJhRo zZ~ppgV+#1onon|km&XnXH`!kE`C3Et2a2o3VZND9rLkE-_ zKi0{1CYllT))$Ls%!0z3%Zc(>GyY}r=S{43vm?}Wc|NwssV~n4r%<$m`tdaXWQRw* z?aV8_N6ozK>Nl;Pu(L&BrG>pR9Fdo!>-U+S9YA?z%Q!Ky(dtP-lU6*QLaj4F&}f*b zsQ0Zyo8_RaJU=dv_}Z3z%P4J{JA}(9*}p$q=h=yq#n7hw*`o%6oBEwwO?smTGxc?} zIZjDcoUCK84^2DSnQO8hFhzj5wCy3FV@TeUX*`C>ZTnv3LB3M<(aHqru0$ZLg*5=8 zYV}OewCtL>I*Rq`Dmhk1;Jf=mlHJ=YH(_DlHx}5t&!GfvwV7#m(5&Hu9nltL_a9(d z*UxiKDc45Fr5;1VFBN=?N7E~ygH)uwF;pu{(E}GynwB?HqRdgeXHQOd=(%rzfg)CNq?~uy>k6wEV2M=ZA`gX#*3tbZGlP{OzD zXTzXq_Qm-vKX7v}(`sN8gKy;Wgvld~3-hMxwC=;s^1XB)qSb%5#1|>nB!#0F%-R0= zZLxDWd0zN80o4w;%p=AFSk@>*$uAY^&nNv$n8?gFTuw&ts&EkW50te^_<(o!Uy*nW zibQ~B&~Gqe#LrZ`eh-gJ=zfSc#*oz1=xjcFnn9cD(wuQN{^qC= zrA~FtF^voS9NpmR@6aR3cU@%qO??t6Ej*ROE*ZJ>!5;ApcKs**t(BC|{sfTi-Kc`X z$?SKs-=y)_kGz)Y<{r5E^@)l9wLpd-joT$AhKEQ*lc(vn7|`FdbfM*C2I2fmkQ3!&`&hWF66L5pu4DYNZGA5?8^wvf{DoeLm7uIYo$aLu>; z$c9*4q@jo3hi~qwP)K9xu;Jq~2vC|dmvQxDW2?A@gbg&CaU|}!VWn|5JBY5nG5+S? zb;xp)Wjr8Qz0H1lk)8!c0+=WZ@0HJ`odj?eMi0cF<=Un-2;zkY&%?HbjZA%)8J4{Fm3D|G+DTwJ^(Cz7AGfS14K*TpnuU+ znVo;Ct*D5|X0yOS`E5s1UeZ zL=7mUiY8GVFVAu+*(hRSIBiU9Y)ohK+q7Aga--QYMszvD>u;2Ype%Kn5hAp|2-7*s6MHc@IA z$g|Q2KzgL27cB$miH5%eN~{s*ZNyj<4G|#ZdNWD2B@~vBJYgs%0?;R+!#Rn^N6y3v z6L7g(hnh=WgfmQ#;zn`Nv$aoVV2X5;#+9$T036SWpN8M~qAQUhiLMT|-OKm(ixiNj-Vk?*gK zo&fq;tkFhhuMv}3Yj&bYO1Z0WRA&S{vpy8Oy=OJN2jZ_)RxE;lFu4spqLhH9vuc-A zFSWm)&$;e!cJHzrmJ~s+vQ~;7+UdEN0}dTZw0zSi;MbpGkV?+SN6-9Di8ah>Vox34 z+4Vd$c&tAgefE5f-ys%MG^y<%R_D5p0dHiIk?N5NJapROGw6XlW8xPtmhKJ2a4MME zt~1u&y}C*oy;{W6js;Bmoi8Y{mX#YNQt%?JA-R7q$$$?axTG^MaR=r+ghI=DXp(%VoX&JZ1 z;1K+}T37m8^L9{avZVa~$mUTh@bZO)DD~g5W=$QwZ=Vn@Oeo|&dp@R@d+FE^+Xrm2 z!^nnK$PeF?W=}KqJdLaBI3ystcH!7OAVXM)ZM9x}>I#5U#`)IseP{#_q@Qatn75hk z3VpU&0xp~JvKXUB?VTBAgKjOm(}BS0v}V2keTK*kQ|TrfsI1Nl&LDk(X?L18^!3ow zrixj}{3kyJ04n9h?YfQ1+Zk#zKDY?lYHGnZ9lDQoqWlJk;vDsE{vp)Dhk8pWnHD}p zaHGRsf?UcB1KH_6%AAXi@7Mh2Oi?WqsY}@{)ByP%B++}qD=hXc2FeB(C`f;cuYn-! 
zU!r$ImhExuX4yA)g|!u1wL^>wr{)3tgKk2OQ4ytQK?lvJ?yLEF+#WaJ3J9Dc<&;Gy zF*nY~=$us+;0b@sJrdZcH9dWvCQ@-13cLJ z<}hMjxwbX&jkgu_ed~)|TADd%3iVW1oY9 z*0E~Y{N~Zn=4{8c6%ClIrrU?R)a@Thg||ftZ&E^vt40@G*pS~ z-L;ei4!*X~x3OW6JV}V6(YS+VCh~tp-51O7`%&BS@)=*$d!;)7j4mt`Cwp55T1b{3 zIg&P`NBgGt4(X{=_)UE`+e{AgF512!0kigNdiv5+gCwpJa`#_tNj7G+>b^LkSkBi+ zy_jqUv4|)`cZq>vZTeeb)QX|X<48annCTD1?VN)#6@N?svvM@kz7dXk5L`NxH$NS%VtDxH%f&nz0NfSop18_lI7w@|}heBR^3aVzAP7tHnmi5Q; znmU|kTikM4&W!;XR`{pb2DdQu7fp(utWPL9jIFjQo^9^X@mIdG7?uwkkQto1nioGv z(@km804iYg4EZUI^IZZuje;!M9RjH3t$||K!ojkHBQrgR>YzHxXVH3EFC&Y(Y{Il- z!=~GEB8G(<^f#?qahlmxC*TI7Vn)^C<(a!Y1p9sBF|5(lE!v*W;9qc|BdU&YnRBwiiYJopWU-8Hmx-HgS@fAKX|-oesPBJ2(thX~g#=(N9U zTJ3uXqn2x-;E<&rVO`Sz4$yo(obF#W4h1*yu@20vr`}A{4<)DQ$js>8zxz^g+=bID z|2r{_%iW^x$-X*`Vv%7Z zMOKi>sNX}|XLk<+@F7T$N*MO|M($ zX7%;L z#ibX-cXRDHF$OgWxJ!sz0GXH7HG-6{FQmc-V8|R$AEsosJv+KT`=w1?DP?Rk!EmJ^ zO6OnyDGrpe`|MOoxyPU4UM{ozxvpuX&nAu=v<7bo6MF|jv-Po*Dtit{Z1(s?_AHb( z_WM<^;qg_jhy1N!Uz4^bna_$e?0knjA|hi@pf-yMwf0Got|cTj;vO0YR#VfI>M(RJ z(_5dG6VfuxrGH&Zhk+kb1GpibwmX{I^kU0?vS$8tvvNsPgAT{AZ%J-IY5I==QD~-G zdKJ#ZOzL#x*-L^mv3O?aQ0d;vy_Vpa^TB#%?tRP`(7Ro+mNG+__*>?tce`EajdB~E z)XebO4`=*Tk^3s_BPA@Bq^tDoJ0qj^alrr#2 zc%8#Zqo+*z{ZKEX@Op#3BDqJot=5<)H|1@nV2 zAwg}K*%~zx7IY$HzQ|W8rHS#-Y_>kSl0D(B*;1Xuz4Wt3k10s}yKRdy9 zfRPNwwgdAjyyy5rqwuc*B*OGI8>d-n|7wtvEUZ!omcrr-28PU)}I#9TvJoK)j2VzV?vhcifMSvv9qXNi+etvDBtItm0m%_W8_8!&4 zZi+jcehaMiFM^k>_x>~s{oBeA6@2~@fNrXH_f1w5B`ej7BRlkF0bWqCerB4FZ;V}k z=av$GLa|5L$a>w(2Gmy$RLf5QIfF@+3Qd*y@LYz%zvTeefw5xd52tEwymdeYtDkJ7 z_keUO1o(lal#J7l8){AK?8gK6;Ubwh6*>03eVGOQl?y!qdPa>DgbV5?pg+kAWDzy^749?8OVB&qDz0Rjmy?72rM> zu?N@1KNwT^$ZBY4AZr=l;PcZZj($S>-0Fovz|3K@aNYKZym{bc0KW*QeRwG}ZEYDv zm{^x&JnEff{ck)@0Id3FG0tRZT=^VC_nQi$rp^IU)vSkAFqqZh_;XOfdthm7b$~e> zss!P*FAUa_B~9K==J?yBij;=vZ(j>icfpdw+4h^Z(cXpLwykV6NHonfE>JF~;*ulaGiIF)>x9 z6D1icM(!6=b1|mrRu88+A`zgzKuf|cvkZ-!t+L%BW>bIqy3~B83U~u63^~008Mbxe@7_aXh8}A+{ zU(g5>@I13d@zav_oo#APz4J*BWVKUGDhW!_n<&u>2?=?^8I~>MSuq#0v9WP8JjfHn zO^LbtZi!^3Aw3?Xx&9B#F;H_AOKt7dbLeeK<)#LeGUK7#gbf4kJY}h6WvWv-@sqt- zn2|a3*@gpEB#Nnux3J%l_^UNiki2>DP(0#9kdBnpNAOddC7eHe$YYIH8amC@*Aad& zVUWngY54yAdk6&bIN`di<}T6t>DfoQNQJ7#kJvMe=Nk+UUi--Pkt|<(BpZ&ABN%qO zM3-{m&*2@3eRy9VId-*o)Xi@*77SPS^EHO0@A45{*pi$DMb*FWS}d*$$R{j)LHo^4 zWWRG1^noOkB_^1LZFjd%JZ6}Bvz?APt0OJlL@2WzvB3Fu^u#8^Gaev@3xU25l=KTk}}*wiPg~5QB3c3W&urB zK~N!G&qMFGulBcDj9pmmQ`z5&wjPy?D%31-Gw>lNR|d zY@VG^A_W;rJaB*y@0YO<(OzVg*_yEFH(>qTp*g&>xxxgec}JG-O+%W6OeRRyNGYNs z4&R*n8g#;y9_1)cHALtNyYOW@-zy&v$bq^J&P@0kdTm`laivLqjOlyPB#YXv;~EC=;kUFVtr|O z_3G8bhYv4Kb_?r^D=Qs(V%UH&RZ(1)Yw`6Wy9!NAI)g1(j7Eauy=cKGqv~%6ryyx5 zRd)LpBi^bYA?){y6O%nEWpJLE^FnhWxTvBg=jC9ckMZ#pxZ%08J(}TT^Difu%hZqf z&(~Au=e13OQoQl+-F^iYIqgptOcf25di%C`NB!=-d*t2lwkz}r9WSov#>#Wu!0`|y z*i*9Y!bx{%G<_#zm#{3@qtF)F&ML!FP`<4*KS3VqoboOx%L#q5GpgCb2pFc z5o_0|Ztz%?BrH+UmpvR`6tX71G&`5T@_F-`%H32yi^XagAf<88z!yPbp`s)uft+TbxCS|H7M2@?y z=4{Y4=?KLO?}t1NXdzjtg`YzkMiy{TImlivFLk2Ip!j_s2^sP`2Z>N-It6c!Hz?3y zgA|N)9lapT_=d0kie6RwGIIQ<4dwP0CxPlkdpALL& z@XE453dstM!bf|yNw(^1_{PJR%q#Eg$RN>mbY^BoQ99wmY?)&_S1})Ud|Yn*`-MYQ zqaFXYy9FTs4ic?5lhvDz5U)xD)|qR_+Qx>8((Us2FRMh}`C{K=QayfLGq&u5`vf?N z5Qi}5I#y^PEn_c@{jBoGbfiH@< z=D6r#Ppzk$m#+>C0uq4OxB%%A)QrSJd>pIEL)-0rfKzMLSlPheolj7@Jj#t`zqXs` z-aTmO*RDafzD-6)aBTrQ8;TElt?RTsv!kg)hp+B^n-p)Vhs93$0jkWk6~?~Q%yU0q zmn)$@mp40Squl6=9XgiqTWP-bc@h!}P}zL-+bD-ZTJ3wd%@o1a&5M@Tl=F-LUwZdDW_qP&I`8M-aB>gh8 zveP)F6)=~FQVZ^s&E@&pBh-dVI7ZvvF@z)915p|dL}~ZtLw7faU^$qkbEl|CRKa<* zydSs2ySdC5Xy>O&3|^O0vEGb%s4U?A?c58G3uw%@b@)D$e)D;E+oScdV#Dtz%GAZ|L;9w zl#ud1V`#avC`)xq`^lng!0SI&Wn0a7#&8b-A^)-XM8G$(blL2%2Ex#;6@s>6E<+$h 
zT0`P`CUn`go2&@HUOs3=v1kaaKw;n^iWyN7`!U@V0Rh<4Q`C+XYxc;rVPm#fqyQnZ z&HmhI-1K`oB(ht(NQNgkRo_;q7rzr_m~Fhh7v044luYbDtErmaqP!YYBWI z)9%0LQHg=#E1rDUgRQc48bck}jEAC6x~T{WZ6k0c0dK%_2JfF&0-qKe47nW1My_L;MfZhR+ z{?U<6>@EyClTh5@6y)MsGs6PWTC)2C6&+1ikmM-=IVbw@nVm%HnX72netE#7b)gW!ID+QBozG_Db@+1q4l49@&Vn zpw8s6yMffHtMg`AFS0LeS2C44{Aqa!YWRG!I&Cggo7|SL(j-*Q6DJ$3srbVCA z%BE(HJnwjCci|pJm>R#9bvVcBq!_!6vnYFcKd=W?Q2mi*t7$HHeT(NRoO!+0@2VnL zIky`aIt0JjYQajY{gEX5xzaJ6No7kCv)GbSe0PNn@YaHcFh?*9K&2wrttZ(qn$(V0 zi@VSEE%xej0y1{xrW~xKNkf{On~S(Q1_lPu{66_o*%IP*wkvD8<9Y|xh+>+7JF)Tb z@CXTeR+Czqo2Q9bTR-{SOw5%!!6FZV_+aAAV(HTE?`a;_G(>IH_t+ZyX>@63`rKgJ zK1r{8jYMui{6n?aT#i_j7EH5g5Tuj++Isfn#2npNS_(6qeLRaL<|M(ahUxmmCZyMJ zv?F+9xeW@>zfNEcRQI1tB4KE$2cFF6;!vG4XPypF<|*cX3`and4x_CI%o82ns*??1~|TfsgNOFlWkv{e&Gh#XmbjsgALRGmqbRR$d z?(|sS5kAYa_1XG;;31)q`l1!M4Raq#-D`IB(5lY}6>m>7tArCPbv!|G1tP_&;wrys zuf5b4-cJM^Oc0>YS4e@p%Yvn*u22yRY6yvrWBb-%CkcrCtN;d@uf%mm?28^%R$A48 zeL$OqS20>*N}RiVs*hE67@LgDOnp&RsFmujqiXX4btLD&WaRARqTXzMpxs~S+WV8e zyHF4)qfNj#w;-Srqy80e=iP7Gn+}1isH;P}DSIrK{gQ7|>E$v$cmXy|)kWDaECXUJ z^xMb0-jAvk*KvYOZfYSzrTffC*94q0K`$g#q~UF@2b29{i1(g`r1P~MhrOA@+guzz z6hNq!94<0;Wu%iT7<(aNjg)=(9{r2tI+;A(9i~(~Jqv=8mFeUCUmn$S$lvNv+)X9#*O5 zGsqhewwUwX;ntLm@QW8Ri~Uvw!f>j6oveMZR3er>2rv#UEi1hlkJyalhpE33XiOdx zh-VJnyL-3D@dF+Xj>A|I6X=7O&||mOQ&iep4tE#IgHb-)KWA)UkV?^y-#vG#79%61 zU+c$_Nth<9%zBIvjO9X3nL7Am!kK3Be*OOJDWjKSfqM2ks`0pYR2&%XdCAh!mul}e8nm@87upX2i{^4{+>Ga_Z*B6h@1EEkEEHt4S*I~MqT049*Ub&IadGw^Qp*Y%hQAd zAs&EIOPuV8^duy&7$R?)!F^NCpug@BfFyx;pJp`zQ0{M>LJCpuUHulOny(M?4Dr=v zjwZkCoHs2wBUcn63m1$2ttD)4o3hu_0_d%!wH0WD9UUDs;r>9Yk&@B}^?%UO`D2nt!lyUsL=OOEhTRlqy*``*K}(HGqk1Mt=~n9T zAzdt`LPGMPjIL9`OPswH%OBz16jQs+=e5}P>9{yex}s0g)=~wo%@!gKBTfbYkfEdq zzu`FURfDcepu@h>$V38K9+XT79(=`J>81*sH5@FN`LXyR)x69`9;P>h32HPf-GU(n zEuY-Xp5VzI5?##xY zD~ltW!Unx2^c0@3>oefcTiLhD0UWSFckrmn;Su4r81CV$kb4*EzfdBk7lp1d*2Qj0 z6e-4`jxAQ+N0dD$`og`d;nP#$Gr}LuL>bCfUy}Ynj+ghJSW#C~-!jAkNDibOXi{A5 zCj$!G_Xvyy#++w?R4*b^1Jnw74mQ~M$H;!cO5&s*%($Kn^s%K+qj3);+kdv|4>ZVZ(`P`AWZ{I2s3IBj509g&)*_oLZbRHLd zB?QPlGu8GLXX>ou?SORRxQg55m(5mx^&&y$gvZ}m{JY{mFv)4S2b<`v{0<_3bCX?r z!uf(9v`=;7Vl<7fGz(A)vJmq|Qq?j${dMB850YFJ6*gSQFAA94L%$uROURg2nV2Fv zK;A5g?rXgql7FIXy*IlfP=j$Mq$R)$3@>Nd+D&uv9#jWXT_#zs^y`4H_4(SQFCu+D zAbtKZ-p}_j?!P4l*soH;m0Ayp`rp!F&GyQ3KIHUEwH%=hR=dWA_V~Y}eq9}7r0 zN#P5z!0^4lC5D@9R9S|MQdy00^SLRns>PWeRg5m&vX5K@KCd4lnEVmgeig%~H5{Th zm9!ziwFBMpD!PC)b~~a3FrgZBzx^u3wH z;SIid@Z^b}WnGw9dSqnemoG?OYs%d549}i<^q>!Y|NfHwS6Tdb(iIcGnRG1MFdhjj zE91m^m0PcfdB1L0H7!tweCv4ykfeX*pnkg3RYnn}3t^cL#hm>72|cb`V}*&zbowCn zXgE>s>8ZmXMfX1-0eNW0_~UV^a54S*W*R0YnQZwIu2=xNm?Epbetr8Nyn*vO^_!`C zK4`;WiE?Ui`Fc!F4qsx|&o&1z96!qO+^9!(^{D5|hmN!r@QBnL z=CUTg{r&y-EDQjEsGkr5ZgPKuI6{o~gh{u)m!j42-qrGxTr54ic$SOn+SBRETXl{S{13b*JM+FAL5Npp zU(x~-$!hoy3U_NRtbLuG&(rY!;9ye4Co*!?w9Y}3VZ=%jgQl*O{97B>u6XFn!{^nW zmBY1PF&8*uZ`g-P&_fTxI3|35X=0YvYmlmau}QP|b%H{CoEy3vK-w5~n+dODjw&4- zjB1EVWe`t5_SZTo8_9OeJ^P!Yi*+?|{iB`! 
zS&I8NPY1A|r9IGmUyR+x4HS=NQ_>Tap35U8Wgg=`CJ;Dd>KmOy`U%a7Az@~t@GINx zOo1JhGM6oji?N=muX0^w6@I@{Qik25FCwrpKKDsDTuda?ez-2%Y%&*9P;idL`2%UP z7cwCMEOI5}gZq^nq5HMuF{e=$s5&+Z(7B2rIsmx~aK5I{sp{Rjt(BFQRZ}2c`1ZW& z>gjp%__5H#q`SZaD!D{(A$5H?LB#Lw1Sf=6gvA(WemQQH2cBL$kQ!4CO2MgtOuvp> zEd`@8WHoi|yzMIe3Wl;kZCmPOhLuKRRK= z*-$!3U15qDz6WqFRs`53yiMPypU#hR|KQl5H?45A=sY~mf7qg2Z`0y>8He+DWdn7z zEnPnQV75E1+}vCU3TschRl;?+UtiF3Zk;g)S&^o^Z*j)lsw5;&u60ro%!h5^YqYtE z_p23>k{0OnEj-`i#od_uSm?&4-Zib^a+=!4RiUE7Sj*o#26N=dXFhTHVEPoGfm}&w zb$81Z{y^mb)W9xrXm!~cBk$sXA7AX&6VKH`y|fNffzDgT@JM$q=ZAgWIP6x?6pgZd zq@@e0vA(VcTGw5578DUZ*Qb~BZ$mUmnP)f;&(5-|r6~%X8g$wsH)dLeoc!oA+XY5I z^WSfUi;V!7B2WbEu)o;Y*y!u)3veMlu^bhaOXI~lddw4>Be?)=|L)y8fGN0wUu;>> zF)(O?^(FQ7SMLx{tc!TQ)Se|S zZCZtENUARo&4?wU-qvt=3sp07Cj`Y!I+|JU47L*S!tZtR(=RQz zZruVPc;--3I#pqjSXgM` z;84l-`S>T2z>4_Z#N&VdL)tl$*`vK>5_G!YaSIy%Q&AzpR;dUnP2T5b2@giD&8 zl6m)^=#4w68Tn4xy5_`Er)y0^Ytl&k^}NfrMk`;rVqKKZxGbjwYQ5GZO!8G^3 zsHqovqw3td+)ynG$w3E=fH~Ki`HrY(l)3H`1NOt8KX(()PCUf}AHM4z6mW}i=JNM# zKY_Nr_#?Cx`Shfl3ML=c){oVlG##1~jL?+#xXrzH8-iqfzH3E&Lm!(m=pofMo*TQ( zR&BL+96smaxKck8Qm(#O_gu+U07`zK9@XC@ADnI9epJx1ul3YUhZxH^KQSF zC!ft4hwyS(}l8K0-W%+XMdW9nqW@Lfx^)q)mS_IvY;5bdQvgMG| z(@kMwv5^p_zQ@iquVLrnzd2b=Sf)x(69y6#`WGo??!Q!>B zlyCa}D7FGiBux$FoG(4=&s$>=l#QqW&e=n{YbZ7eM7X*>W|Ia=9qo zYj1N8ye=!Mm3;hrnhx|`a9t8AcqJ36`6cF5Od&AqpB5-MVIji($Vg*`j^1Dyo-T)?~RVt9D!Z7V!?F^@#ASf*=dhr+T@INe zZ&|znqtaL7yi7*`ygMeK1>owhXW%9H8N&$1KbR!u=zt!NK-p~FWAQw|3lJ_w9t)0+ zifUg?A}1qz!DC2u3#i9m>aZJu*qhIQlulH^peC+MGhMHprZp4%FO0=%P;%wX6xGl$ zvSD+>vVj)CV6ZJWp%;%LfNk|*pM4|f)ms17yi{DRUByld9)WgF7PL&cvLVsOda~^m zO5Kt!eH3j}|xWh|lK zfIYI41g|!lLJawZY@iIGd&;0**$+kToDg@=Z!PizVejMwYT?X@LfkY(PS3TDU0I4v zuiDlSrTq`lLe38}c02Fa^9@TUK`Ni8o9=-G;RL&IK>*{+$N<&Cxb_S6S|^3VvFhq- z+VIYlloTxULl{gpcjS@x$0bf|Y;1_SrmCtcz)!1A7VEU}^6?R$wu^9}g_zk_ny0EW z1pE7+Z)sf{|4M_4;6DDXq0C^-!U}+~cUhV&GYS3njY2xX^oBnuPD%OLR_EyKEO53Q z%I9=gw0gtyZ@P&NG@>~;h|1L8y}=0rkKD+$dt{PS%4xuGCQWso4KJ@15%8JFU>9Q{ z+*Rpyz1~KdJ{UskI?>fNNZSqkd8z$6_{^PGe{cs7K@O`S)zxaF=XA76KnbA_p7<7K zxF7m8LTKQ{XW zCqPBS_6d6h=38Q*EaBSLry9P0$Xk<|rFA>6=UzQp(JECE+k}p~SSYu}chCLkFF(x~ zm9Wvz-+&iBMdl^qtCarY0J2!3tHl#+O!;Q|H;60eLL!QE zq5%ErCG6{1ddsb*jIrKFpaotLi?|o&MJOPx(%)Iu{wOui)ZHu;l;|FP(5O$n=*6SE z1<6d}D40<_D){=xAWY$&+8T=1F*ci3yRMX14U;(T$3D0ID2C@QN5a_EJIkl#6-Yyh z*eRT9HGyZ`$EU=RW(k1v^N0$=#${5ONDm%pTlOzoJ8Ek`i~S`s#{qsmfaLa)TmY~& zK#t#hZUk&7Lkrck>Sx4h4S(SCL;e0k!v39O{ulN2Mw_?Fb_PUT3&5oH`eErB8p7W@ z`YG-HtO!N{#r${4{X5?O2WtF(c`AP?&ZOVERseFjflw}Y1i%74uR=R?Blc*$z#XeO zbf|CBW&aAFhTX@{cz9xaTBO6pFafO{2!WK8x$-pZ1}t1$(LJxe1P1E;tr-B+hW~_3 zU`2(!tSnmW1-tF0yoH4YI{>gk*y(Fa*nd0In}kuipEsUd3ZkzV}BvvZDSay#Km8 z^LQXR%Rqpf{aujwg(ms|@Bs{{XAxN(HglIw4`@ZbQJxP}P&NPJXn;ng@JDrgV>I|Z z^FJwI;<)z{!~8>uNIY4+smKum&lXtz$P^Tko3{a;n=|M}U$2BmwXy>P)|DPj={c)+ z{mbdf{#2kPO?4mqeprG3-WQiCQPS5ZJ>$yk zID$mPQzx;@PBADhl!5;lI^z%waKw}RkL3UI0{lmOabsYu^GQV4%4ZZ}tViCOnwqlN zhk$?Xh(q7NV0Rh@3f}Ld7M&5>U5UU^N zD)>%heLTdT1r-&SmpdC2uKLK9;#HvtI~w2h<-yUlEPgB1#nG11yLaE`*PR?3Uc_Po z9davycKi12LlYh>3=9kcf^I-@Z)*dx6HBEI)h%ZlfPiohKZ#cYZdCi6u7ue_~T{_>6if<`mqJ z2{?3i=edSH_A#P3YN&E87X${{u9W{GDgVN|I+1giE9*R)O@gZ^yQY;FZIH-RUtgD9 zH$ZMdK(dJ3@6nR{e)C|IGP@jLr~ckpZyt?*;xp~3MgEA9fYN62C9#JPWM%dK7!zzV zR=~i(VA`7j@Oa1c&;Ie)%Ci5iWujtXwXP=Rt3zay-xYDi0uT()8-X(ZFTHU-V1npH zH4MOSOG``9d}lfb&BJYNpC>?hf6#)J@_-WwD0Y6BWjb`3+3&Ce`sD;;v6zGepn?HL zpAR2MW4$82e*OM8K$;-J0006uHgIdWDwbwKPB5?4Xs`cUPv!53@4IXb zQD-o)Yy&WWJdGZx|8Id4LL>fLO^bjBej5uW=?z}0+^e*mP-Qlf#XhwH*f;fd*ki91 
zCbqM|3OHxb(xnK)xR&7uy)hKs+0(Ykr9KAn_zMNu!Gt+HMxFVGu8AFTRQR_!{kSCu=G6dO$x5I9`xLvaoLmOd(9wi7g?c6+q!x=R4~Uwu(0Zr{r;i{EmvH{TuIno&A-i@nx#n)s<&3S9NJ+j)C6lw|lUG$@_M9NEOkmtUL%8}G3?WC70fb$8Q#T#BrnVXgAJQ4Gt z4vh3L#ouw?Id+dEOvmS0@0LD1*FO}9W2zweouI6Cwa%k&r}rhoU3d%SLkw2Xq|P!v zsO1Grt&;l5>9pt}Ud?l_7Oy)(wg9==o~ZN$DMw}&+9{+vC>!|gxO8*v9U0nN35o7_ zKCybYoXWw$@U8AyHz_T%qRqaM^^LW$-N_GB$=_1L&S-;OzXS#8@8J{pQL(bJa&yP@ zzy1K4G4=A z=6lq#>vw$LC!xJxWrZxA+04h%uHd>OF+QnYJK;iAV7Qdfr642}Ug3Xex^tM_Wl#SS za9-KKbfBaS)Mq_BYK-v0kh$)Qgp)}|lp7yFWfd`s233LcK+S<-yY@FVNK$?`xsGE~ zAO-yj0gx^M=cTD;I3V);WfDk}H5yD8VHh>-qBd(HGa?PS%t`I4d?j> z-!S6&`hw6nPGi~U0ALW2^kzmz7oMP6(FCknS7*bF8ooxbMLYSwNPKzX08&~mlbac*k)WAQSTO}?HX+Kp`NdjS0)4Ua2g?fg~1pOMy zcAsAyCo7Eg3qj(Z2@@*$cHtubV7s^^OcwnlgyQ{>r#|7m(#tyQ$gW##y#h@Y@fPv z7U@SdES@wPtH2GehzfPEi)_0xR5{PRhy{`}0IC4-`@%%7B4cfYvr5HoyySt!ppxm8 zxbj?oL4=@5k%hfkO<2C_*(YG#N=umR(2iiPmE2jYQ{QRPNKneYD|n+7@W26jiBs+D zRnTHzZcv2%9@7Tx=9sVn<-mpl3lp~D&Q!7MVi@+F>3d>LwAPr^Bb!7fiF23NH%g&GUN+3j^^f{cU|m6UFa49i-a!iNuiCN z>HBD>`rMdv@Z=|mT23c8kmQ*U)YV6v=RxOtUnfl24a`oROAC{rZ9mLy{wb@Pn&?Yg zc-)|B;LF&adnWk1o%s@r(6~z}oBE($p=GjpV}&t6NsjA_u?G7pX4EH+5k77X)x>HA z_)p+PY}L#&p_m)E{rjIINnPWF3rIafJ-{|AEsXrL0ri}4xy}7Ys3G6OKcQJ~iVWEw z0M_?P490-A^aMXB>a{M1xBwOs7pcE{i5kA0o)gBBEVz!c-oQK(2bBwPNUJ=Bb}8UY zW`ttyQm^BdR`s{BvMiIMml5NchG$hboi# zlIRt69MjYmE;#+}a7>PDV1jSWv<4Zp*Q{D6corArg|>85fZZQ8WqadsK7Op#(Lp7y z_^E-=xw(Y2yZdi@)xWVP0cAM$-(@%etHP_)wu)9mYRZuBfc*esH;FG2f%VG+!S57Z z`L;NR>ocmK11FkqZ1X>Kga2HJ*AB#_Rn&eO1He~}agHzqF$5x8<<7dUgFXSR>$at~ zV1fN+bh8plvr}n-stcc2O+oMbiDrWqsx-Hln&6U&{I!HvL&4Cx_x(jWIHh)+gvW?C zlI-U0Uq+*y$EnXOHl(EEJ;O?gm<&{~O9y&56BkJ&EREI^cSUI>MjO}may*+3<)5#%v$!R&8 zA?>8RD(?u<9t=e-AU&DA)7Dvlfir2>*L-I+Us>PL4HB&C%;-Q8b9?@L*%wZzB9C#@x;kML+~yNG?* z?&@M0he$0iPNbitg}ADBaB-5B$ZK>+>9BX$7bNv{0?b=Ociuf&E)bH#N8|)F8tSp) zbMtqx2Td2TcUBp3cDj1SU+?eY^Pe#RqZ74x?mPMwH$@uYD97};g(d`8Vm|ozyfq?+ zRypU>&mLYqt8;T5fU~v6AMl!}N&s%()|+UDxzR;Y8yCh2^jwN|1wDBHdc$S&N?bvy zH{tl3jrQt<&L9or+AutI$fhN3{&+V1f&R;IMqU1JRr7+=h3qCRQ-QFm1@svAcSfwc8q72= z9r_q~c=sC(7^PJs>Udj~J|*A1IlzdmGP>GE^^mgT6;$*LDWY{5g|G^9MGKT(dKbmT zg{BqwH6!)r={f=lAcjEIxkQkF)`FnwWm2ycQ6u=U%Px$|euD|L(_#&<;p|$oPtl3+ zz{_7J)MJS#)nHp}O8ZxHj7AdaV~i2+yAc3FZvK5l%Js6lyU4@DIsX~X`8_HWpk7<7 zVcoifj4~FHYnWuxd^##us(I+x@m6=b}~$o`AN5$Kt`)U`7mZdOr&5pBAkj zKCK^)*&o0DTQmP#3;kD{``_H>zunybH3!oFfpoUsaX*Ei3GwFjC9}1HRzFa{gQaeL z77^f%c%c5}G6No%A6fHP>ip^V{%_9wpRWnD<-hjv46STAQj$VWQZjHrw~-T|V27=& z?64j$Rg9Hf-Q3(nCTzY;sJ-lqqAgYBP*P&Cq6mfT1l5n4vZIfG zY|w7jzAYCZ5;}1DK!AQ{>7GA4RePr_*GpMK2%rmAR`5hcMTypWE(@9Pji1CJL}me3 z56ZQL=|@0Fn44?jaLI7pySctT?z0BG8yJBO{lPs7mc%~oeT!~nWMmwBU>?4`&0}Dt ze-26C1ueQy-vCr0z;Xj$_ebHwC2xIRSZaxjbghz5nyS%aR!ro#Eg19|xMLVjlRuhf zpq|xIqOC?%AML)7nf)dfAwr2MzvQ{S?>4MwsIZ+(d#pQJ%J@jdpm&B!_F?=?znT<(luzU)5+FC-|nuVIhK0m#%N|W2d?b}N<1;ws?}`hJ2C-~W=kLuUc6(V!}p*P9cfMRXL|{wn9UDUp$qjN>kqdK7y%rgiSoF zmHcN#7p^+G`o#@(R~nw{0m9PQv$5AHJ_0PZuun2x*1FkaY^@PjEqEHMBOkG9jz<%C z?T!`ST6yoI;=jEyd!NBG8nl-y*`es%@tSnzHBB4C-mw*?U3IDh6&AtziMlblCuBP= z+tPg}$M(;q<_9L~4`D{NWN9G8fwN&-D1|ZP}^53 zD}{#MWi31C+a86m!v`B1bc_BBmv%Iwt~6#R1+MiQ_n>^Yc~A$SJz6-`hbQb zG981?x~4Ndwqz0wZvyHP>3EvFssy*wOpS{#x$3X0FZQCM0N)xVC8Zm>)ykquVS>yR z5i+tAL&FSQm|*AaY-_QqVN@d zteLrvBmpbSzH&lm#+Ar3`>oLi8B3eKQGB*V;9zG%?K-zNpVQQroPAD{gLrEQI@wt^ zBR-!*>LR2&b?wj=8${kss8#{mNBQhTm%d3iu0(b zF7>CHM0S4La&~(=0XeahNl$X-a0k8Z?V~I>et1loGWX4$`Rr(8UFhN@og2@>Yz7uO zF;agiV%G`ft0&@2$-z3_gh~sY!@h+r6Y`u8)TpODL3$!_c7?Bz=~f3t7P7JKV-NRo z$Ci$aAZQu+V!(ZR0^NqvSz6`{Qi&15y?Emv6>3h`II3+994?Bze`BweTnw}9WE8k6 zxpoT4kbC(Rq%dHBkMhKSlboz78YJbkDJInSvZv?crC28aOs)HDxr&0Ui@M!wM%X%P 
ze>S90gDt0K*d*iDt430<3U^UTQqoj@cst2P?+j(HIt;eUUX^9&a8b?+Dh z_VLzqpf85^=Jm257H?TA6 z4H-Bu>f;48kX$dr$257$K?ehJksm}3U$=ckgl&=b*1B!^J?)!>k*0H=P6Q!W6zFSK zt#GapyK_u;&N(xKw@xzwt~fAdgRy>dm5TRxtKbUV;P3={O{*95^x=ANOSzg7OL~2W zLCvKS$54Kv+vS;W>Q<<$t1p#?opo#J2BC4d$ruqGe!uwQaLG(Q0~;Psr44je_+Y9> zOeYe#F-DD}{L;a)yL3&^RUKwNjWHZ%F;hjmoJ|c%JgBzL?^k&fSa^R^9Bl3cujiPv zTQ+8q%1&^#%J_^9FQLzV$%)G>lpU z5;!YHP5XtzIB=e-uQKk-r{3dAng@2tp%nv$*k!8FNZLaO89mOD0!M!|F3^!*z*fBeVNO?p3jK zPnN4Fm30-+iBD|I*(YUwNz&=p(WDs3u34pzjtRIAHu#Ku)G9(|ot0Asf4n(M-z?LP zDyo{FWv@)Rc-QkP9z(dow>?voQ9wG=L z!a=?eY=1%);?SN{hfFau(xvo7WTyw#Dq!)>VW-NUy4ZL!k#7HT9*uaSh1kcMFD zYdD_}!#5A9)(?So;h%Y*(OI8owZRzUPR-#cT#wjLZMX2kTuG*Y;zTd5f5x+2cw0V4 zHujTLt+OrmG`yUhrCR?os*TY5qla5a!968fNg*LVrzeNjwujKkveXXO?o&ep(a-(! zNn)sUE+xt#Rc!PcYVHU^Rm=vVkuE(Yh@@c-$*MhF3;s#2pCfGRJd)~ew;NpB} zfwioJ&v~7|$HO_zb&DTZ^3CuI3l2(3l*D4ei;P4r6BU)63Fo4UYVa4OcacF=ojDP1 z+*!+FL28jw&3?{@>tTt7q$%PCy;VNQj}La+{o({adWnUPnZVBjeIKAb5GLWfy?fE2 z=GElU_ME_*;2}Zk{gd7ABkI-PlynMY!#3N~=tsGNfJLo`TgE;=-z2&k&lkt04NRl$ zq&)zJOw#r0JLm*U5r-2B?3wtFmYEcN&U)=QP^M-`sU#}3j&tO2-54t@^}#A%(NN%9dJ&Gz|AY&M|Ngyaf`&^MV@H><-{Y8WaC$0bz@ZJQ?N=M5AQ)zMz>eAk3cWy*SD?3(+6WO)y?g{(!}>R z(JlvHyI-K#A|1cs+jHK1Deb5^;4vU0v??B5A77iiHObaLGLqgM6IW5Zsaw7{ArQFc zYh0S6bxeQ-d3#xHV`pc_jDufal#-i&Y*PzokXqgzOcp6LSWKOab2mH%CoiOXb9_sm zGWnqClqas_z7rz8Nm9@W`j!M0V%1&_n?9Iuwu67Lu_R{ESFQ!!(|{zy`k`8MU3?5z zb;Fj+l>$N7i3)d3-rn|l;_Oa0Dym8Q?nyupSqe=&1>DfMAoQzmdk44yOlN#b{`G1nALPStJmGMaL{O)j40yTZfjj7m62t_MXRT*KCx5wAqjgpJENRDZ zsl`>>OtBo}8s6GGhbPtWr(Rd12{VXK7!q#P@dMDlOavV_hk$T;^ZH(Iy10!+8?8UH zncb*|uOGY7w*sOB!WOpAQrOKkoH4#O-1($RkksY#9ld~l%Uw;^MEjEtZgM#j6VlK8 zxa+4y2?*gWV<3x%0iq|GWZ>`WaaPIN5?MNs3Bg6XH9eusk!nZC$aV4Ic?)5OSXZ#d zN0-fD_l+a*qbsF#xJayTBY^(7$gumq791tGkY?7ozK*E~m|tY83>G(@+`zuVAfNYr z9KObool!GCWI2#kGFm9!Rx+sOB^t1?xbRZmYVmu^+ub?_tBgw)VSj(bYe5y!6Prhb zi<{q3W%kQ*KlY-^B|IWYf#FY7oP1&#!Gp$-6Zh#cEinK}9 z=cpkjYueJAT$Z|@+!0IBizd{BWj<&12N^J|#s;P^e7J>r@6`5SFNk20F>9aWQ2%}5 z-c@bm1+5fed#!aUXm2M7b$*O~={0ok>nj4l$uI8areSuKlFir&c{icgyVb{OEhK-@ zv`WavT3KDVn?1zE*5pOV1-uE0u$C87w9iQNQ?-A)E;G_y0?PTvY$9FR$yGJHd(isq zIyb?!QJwS3Dwg3Xlmdx}$s<4zDMwcZ)I#fY&ctus?mkvyJKFOBX^Sc=sO0aFuAj zB8_b@_hva}b}o(aR#vsLXqnV>)&tI$$Fj$>&Xi=juI_hH^UIOXI~?3mP^7J){3p`~ z2_Pke&631;m#}&L4WnmDu?cExx+!-NNJEt6l$IRrxRf`=>Wnvx{KRr<+8lrz+N_0GG#Z2OI_)hnqw3>o>O z<>Hc<^1#68747)1TVvtjxXjJ&eUmwP2o&ohG@%fgGVH5EAgtp#o=f1ZP#w{ZlPK9|9QcK|f;_NMh>gd{S?FNEt zffR(189 z-95(~*L{y!iP-8etPg?O40bGk=S1EE2~ibRPWyBP3Ic}|7|3jSB-k8Rq6|2{>b|Dd zU@iKqrp8pRY@l8f>-j-Mz|z-+~H?IgWbd% zvxULrs(E&^1B#y~gzOp$xO&(fs#`r(8Q8tTE0%xC|P zna+{1;O6eNmPP^=mX)uJanUfVO%`Rsa=O(-BD{5jK{MXUy7LC9+A8|cYvT+;-!~fPQRkSA7TA4Y;lhlqoB1pXj#&Yu?lT@?;9^q zHY6>B?&vz1H?58>5jL2*=H5pRm#~V^AwvZnMP+w*QbsA|Wor-9h362XWTzS4OY(>y zm^@65A(MUIRbE=Ep`!AtFBgDEaK!hOCl&aTyRnzuk9VgQl16j8qFYf~n)>J0@e%>` z%@tx_PO1BobKpeC_NAv-?!}A4*E4$|2iV)JZin?F;jam4>ga3XZ#idA6dX1_9$}-K z0Pxa-?3jJbx-QhpU3mTdn=D__R4;9nFdxsDM+dFi)4JT}&k6?Y$%DUa${6V#sPszN zy^Y){g*xy*B(CyGj7t}@6gRnq4<|Snyb^o&ZY_P=sqo(8_Qp$8(Xs&zJy9Oa2`9@; zuo;09EtN{Y&H3{m)>edRGwg9|XEH_`EO`BXJg{3QOZ@%yAS72<#l?j^QT9*!f<)>fG-;o&$f}f{Ub-?37Q#EhyObkT zZ1wx~aC=pc_XzI>1Jb67Ujz60;VojNX&x#6j^m{xz7y-OV$D;c_z=dzuF^<`fzZ1} ztk2*h1v~rFn8!b3K%}B`wOI>m0Vt~`1dvm_W%txd)BLJ{If`?y&iL3Ngy}{uK&(WH zF?A?-J8IOP|Ly2i?-Ku*Nd^zns*fnBS zSez`ZcfuvY{JbQmpu_!+;O8{Ej7XrwZ*gT#4x>%}GB|2)D!g76{Z-`y1n z8UIfxBmX@aOpITx7FBX(bQ0gP%?kzF(^Xaidm$N=6Cq?C*&3y^v=x{`=?C?l3Kf+M z!i=4={R8q!3YBtu+1CtpQ`)mj;-{VKSpEaS>l%`!=||f4-^jYF2Kl{qAt9!zeMb(v zLJN`4ca2juRYdJPJGMsVv_Rlh#mtWGle82L@HTWAbOi!`)_mJ&-4y@1 z*ThQf%#mldlpIAhtK%XiQ$L%99)6K37 
zVf&aAPSJ)J`t!9!`k(gOsFa+@cPVe#Ux@VuyzW#rV+)dL75%Y3q#72MbV)aPJnq$I zWF?g+>~O3*nSy2v8cY$VIZP= zPf(LRnsE=K5SHrf^)}_lE%9Zuv`Zm5Z`idyAqHf0XY05%EF0u!_SU@YL45Hy&^w_| z5E&;r^=b8`X@q;&+-q*cOSo=wHoAM=)*TM<`BZn{ z*G6yfzut&4h7KL%m@0YBS}SE!BuA(4dMtaW@uNrawXlm0`cG1D1KJI~s}1eT<^c3_ zTU46`(K^IwF0MBnE?X7_pG)j2>OZ_JEv^B-k{`@Ye-bs{ib+)sE_071<%#aCbz*jSu{B+sQC$?3r53#`(R%;8y{Kbw^I_X%6_mcsy^ zsFp-)u4hm2oJQ#LEI~iN20FU;J1b@Jn)CV({+--onflVOzw&J{-vYFN0`WT8y$4S^ z$&#Xp5CBfZPt58SMaQ%2qh>Ljg{Do%Sz~rOPhzG^n!?n8G_sIq=j>LibtSz&|BZ&L zvT=7hxs-79?~k2kPuea3jjV36K6#DGVZ{Ah-7N0lF*@GnAsngv8UX@;D$#64l+L== zg8+T)qs7IA95h!56%BZ*qtw)V= z+fqYLO8lonttuA1jX|>?{nHavHsi}z;@G>lrdc&;owzs2p%e#)k7yjTale@ zfC8pfIx6$}Bj(`Y?2&#E3W^q%w_0P-MJX>&&_M&g@`jUk5?|Vv^)j=)nQA6{5*UHs z^oGyRTANDr)Z10#?#D@{gso?5wOg_F4J53f=JN9DGL%@d?cGf*cjik!D*PkC40#GZ z9^OT?>uPp&s%ec;EbMeBf@xxPIa|$w{&n<-xJiytXUaPJSYo-c=l_=L@1Ph0_|m)~lJSA9X9C%zxC-FGLh*h3T@0o`)8A=s%o zzMUTn9A4P40I4n7v)}lht%rTq>*I2(&)i>}cBRiqKc2a&+48R?tgNkdh#OBq5~YPY z3<}ogzorjmLC<&WX`|S@@Mj626mA0G9V%}!c{-KX4L30d%l2438CTZWSy9TI;aK-h z!UwJ@0iitU^)>B763M-WD}ML(goLl`G3(syplX)pu#uBuipZF8HI+E{z;AyC>NHW2 z8B-TNMnkD7F3j#`=2^b=AOvd+&pWJL9*>szl6Dck=f2#_)`M2E(M6v%|0Oyi?ECP1 zBnZ_~^H8De(eikFP8=*q%;Q|wF-2`~O&vFI5d>SgP2E^3{sFkq)#EwoYiom+$&0Y- zobTJ#ta=vevTa#s5O!ey;#OF~ZP;_flZfhQJUWzclcLs{)I_i=6F-{N$+m`Bw|HcMJwv9CI34vU5XuB!E@Q0ys zZh#SOT3(s1ouqg0glPLqQC!{$3bx(ea$g}G>n?KHaZWZ4(AbivnEC0EQgrgwC}!+! zu(LB93lKo=NHNl285}iW8*Oeqw53inA(|wLHM5k*zvAI5Y`8k>x>PfJt0ss~v}{|s zotRO!ETCKGzCNik?Q9OH_wx)e?}W|cj0h+pvGe=79yGZ8<~*CrnDTael&Y40%FJ`< zcDPFjZMU-Jwhj662|RW8>LcCWZo)_f@H*ZkFWyIwFT3unWwCzZCdqDp)Mh(qJN*u_ zJomhh?<01(kITmRq{?wt#RPVWkN9y4#YTP_T=-(^VI5ccSJRLX2-TI9qtknJ)1>Qj zm-pIhYwn2aj2*+t3~@$^$Nt)07jVU21Z7<;)Z(j*>V^NKLFs%TCfYZlvm`tz`A0O zYzs}Os!w+k5@2f-Yg)8o4SDWnTwEsJr=n)hl`!PK$vO|fa_qGt>?(#29 zb@zk+E`2_(iAyOXAUGNcaA+GKR3fN&dPy4fksFm8qm0+GmcxLG z+82Z{=Mh64c^fwFid*XNAi&q)zM9GbedO<>EtkW(kGC5_ur!8jsor>v`a6tyT0WshuqA~8~ozc!kRMXn}h7B`13TETXpuUtMGjd~}phh6?v zw@==yXj_w*w(}2|GVlE9lR0fJc`UaZqYwG~{Ex=LNq*T3TgY1W24kd`-?>1%c$zy$ zfAumi0H7o7h^V((pH;iOeT{@S>~Xnm<|=rOz^~E$U~VUJSMRQU;)38W1(b;OR=W|C z_>~PZ8zD_OGG?d2G5R}L8LTM(`xS)lO`^FME zEYi?hqE(K81j=KsQ{ILbd<98{4{TqbKcZr{c*btcP5Z!1+Xlb;WVHBn{J7Q!eA~$B z38rF)A9eC}W<2Pl+-Pv-6}W6T$8Edq8^1W-x0kXg#1=X0$kaK@isN?&i<8STybd5E zL-;Mlw~n!<7Kit%_mXkay_hU$ssz>UV;G!D%9KcKcpb{dia`_ zho7%8Rct87lI0sz{hmGKZ{NlA_l$_I{$`(v|CcNUYueD71gH`@iIR zNqOw^%ZsqRtWdU6p=zt6Mw(5z?INj^LpF7BE>+|2A4)kxgKgJ?PG}+5ugt=&wH`0i zx*E2a^>0F2!BTT0V^pcycqMl^c^9j#kcO}|Qm1B^d|;KlpO7^)_TUv% z;9XGoaSu$$4uCCv4um6R;x9pMxa#EZSdK)#-i!<<6eNl0~F67DdTTDdz>$q4V@ z!Uz85t?VOSmM%sKd{C3flJ?|O+=}lPqs9KofDMG#82+U&Z+LI6j(wor?D{8&0xMkn z3ti$>);6~mwuR>J*#ta?0%V>j*2!w#UN?DEPNcAl0KGT#vYc1+G@GGp$=>8S=b2eZ zH*WwibWu=O*O#c(&MXz7aDCC7*)tc23dl;<`jxEJ1@p5k?}m`P;BUczZK+IjPeIDS z%#Hrw0Ho8sE(2~ZH(AeQ^}!TvsRrR`fLVL&L2)fqoHe8phyv#h3(R_%6qQ0n$q zpPyLozup$!uPP88THS~$L?^`+j95?}7G+Qm7JioWzMt1P6sY5QwQjCUYEKmcR@%YF z57OvJBH|aYwV!%O*4lcKr_*>Ww9UPDzL%_g63TzFriTPU>V9@u8v^0-pUQZ2WnOIydAp8`I*rU=|G zvo-*v9}t2loU^N~PK^lxgDAKWtEdVWZoVSDut1t#YkpF2?CIZ$c5Y} z97Hs}E@Gb0a$Z+`_^fvW0b$;LOOI@P;39O?{C*|MG zLI|8?vg3~Qk50w*DnDK3og3}iwn`(JsHv!`f+CMa7ejGWq1 z0bM`MZRwUoGM8}KL3DyE1Ewo^i$XwNcEbsPmwtDV`^vJeAt(9W@#N$9tj2LsnM#Xi zE^0>7=5`11e#v!`$-BJVC_O`+uTtFumui$G{=&WsZ{=a)fiq3t0$0-+vATNX9}l7m za^-&B#AePKc#lwND!xS3@pnw&q0o{cEH$O_7D7t3NWAS63?QM&(SDbOzJ3OKuYn3- zyH)iwvRm1GMcC3p>!^skkkp^Qx9fMr?L|4c2ACe^W8Jf#2J88BuY~Q;cAx@N8kYm) zL8p{Lubqd z!QOq{U!oJr)Q@NN#<6Ja`0q0I!XQEGSwbp+Cjf6Aam*3y<+{M zDR7)^Vl28zZ;GL0FbwQ)2N%B#VeFS=RQMHot>nf zw*;*{AS=nMPAV6Knrk!oX-HU`$UUZUF2AdxTN*0Jo&#IXUah$cmGdyk_6@0{0wW

    %hVFfT-N^ftIj=J&`_U6VBCNQP8mGeZfDDv_hkUDonZb=5+dF|~Yaa`{F*I6}g z>{lLLdm>Cm_&2)4oSx3x>FL3T?S-le=CNlU4QC<{sQO|dehH>HbFF1?KjuZq?DA)B z(a>b(hx7AvWrz$Q1!TlU)lUpMH@{MRc9>{hw}cT{I>qFq-pz{=rzQy(snJqI@Kbf%gapAXxjO1u@ApRTQ;HdEH!G24DUnb&@@S1Qv>ic>6 z<&o2>6%TjK`*!;DvblhlvffI#g80sLV`O1}A=w%frpDVI*^6OswKRFSz`d3@cuTaU zg_cFg5vi>@KjdTDmeWN-#jx-pJ_BN5z>0}7(~~KSmV5pUcbG zWsep!sY^j+rgYZQLgj<>vs_8-<{i%AEnV2sHJyb)a(O^19W1Ce8I;QHaJTkd*va0I z)=vk6DO-U15ggT0<3Rt`cD1a;4q1;uzBb;52_Ab^%I>HnKwGjygqOF^y#Uxwm;}o3 zajeVbxo?VG>bsa)IEgSqQY8D)apC0q`90L4k!NqUS`3DaQ?+=bgdKB2G*|b!)9m;x zlGL~_+iV!*$J8D)SnUE&)P;NdLa1tvPH+2_Srs2aVe|eYHF`%oRtX)cmbt}fcO<($ z8#{-DqJ)Yhf=mDpc2lp_P+EK^3IDDz?FBfIax{<0s3d3iUcUSHq!LAm5k0%?PQ8)CS<@rt4%NeVc!V6Lt?&8&E>rHenYIt(_< zX86s`eJ`j+%NA_8QIPyV*2c<)Sa4Ns+&umUpM|%We$$S_dKt4(c^`6iAIITJyRT_I*T|#PT-Q=& z*CJgnUR{QCS%~dd|gCR;=MFkZwON#{Udzz@p5X!iZwJ3aszcsDCNxfpD zlUL9oU}Y9IqJTp(Fxif2lN z{fyJ$rBm{^Y4y>ViR=c9x7>n%;KvQ zzHneuYB4V_Az;B006~y0(svA8q6?PpnvFb(j9*Lr(^3o!G*!nPyx*C>fIxAd9dJW6 zA~|=5ou?ids6X$b`$JPxJ#?dqM#<|bh_`HvsKP*YC}j#Qi~d**L!G%VYrWD=t3L>E zhDd|&<8@Wj+>;VEyli3Yw1-A_d0(VP@Hw^5HKbLhDrAJ6ZfNQm7<1c#PT5Ktv19Qg zv;?p88p>#;w4!l!M0jcguyrJR#`VsN;d{CFXrKVwfgc$*oncKg^K3~qML+-R zehDqC2xTMo2_KdjSC>=5`226jumIyIy5MV`(z^%N)<#h2jfX8R!(t6>@7zHjh?@Vq zBD6AYgs{@PjJRQ`X0JIA zo!I8d!u=xJG->Cr^qf?sLzE}1bss<>=sk(|Aa>)mwv!T*QI2%%lEs7rX3Y8x(R?OV z;}W!(+;*(m$6eRcLctN-!TSE*Q|=1`J!<2Pky)1ZycSAF5F)SNcKib zhe}l@CMi_*$KIA4UK^SFVyuRgLakl&?H77q!k?z<66w(wn@|(Cl8k&(n!VnjKGdBq z|E?JDd@|zSlML3Eq5V2UPcP)cA+0&;xu$IiSkVpJ&M6l)&flvODjZc5E*mg%d?3qa zda@8K9Y3kCzx-I+6s4xGB9$DkrTGgQ&`>ZyJ`UmwFL;tf8@Bl!@>zHaUdXtgNx??5 z)@3fyo{Y}Fafgu78VNYKJ)d9ri$5_QSXSatpa|NVe8w3uFP(C`b)^97Rb|&hlHvhr zws6UoQ_C-IkvAm(OlF1#LR2DZ8&stb@!iij%-^Tm(^#OWS)_8_{EjUCxk(X89XSIX z9Dw6C>g{#H(BrPs5-Mye+_XPtZ&u?&Vw5J+v+7UzcYvM7(R4;b^%7)(C&HfEuVQjqf}2tea1WJyA}#*xxSltoP=N6a9$s+Pc}@` z%&&`sgH@K^%Jl4wm5KoT2-18(LZYjakGz>l2x)5NNWw<88ZjTXpqK4z7p3jt_?Sbk zwL+Tp_A>NJPy>i0YBh4t4>DE|vsvG9lY;{1@e-%M+Alc036Fj;4prY8=O?WBYcogU3#NS?RQ3|tN37d1+N2OPU=`utF(Civ{*yBEp2SfP}F5y2hlz(vv|Cc?xfI5DkdH7ZN z$4-XuPKU+1=}$7+ZIIo*+nFwg4X*WO-`BnEZ)0H@N8JX?&?LLu_jUS(o{AafmPPCQ z;Rw{%K~bP|$Poc9z)nZUnXi|f@?g?S{sSo)`^4|KO~mcXEL|8usf!~}Ak`Yy?XiB~_d&VU{{&TN zfOJQHp|f|^`XRJ*n3y8@m3kN}g6!LyV~v6OBipB=gRy8+-VLi0J-Uo53g-RP#7xzp z?Mi+{{pg@a_9pQB;&NvH5|Wb}6T0nyUEkj(yd zJUl0FquL@{*RKz&waX8jlw<&~&}a%jYBRDLa-ZMBWIf3v@IEw64*D*`ux8iTSKl*m zeZt%&nJU3$t-dSQ$WA^KIr-V^KHiYnQ}T-UXv~4V<0>}(%4818^JX$ z(=va+an)q5heb7=lbm_5=&S=&hI>4@2lNL1`aL=rIL&~J)r<+k}Tm&`DT3g zw}++ot#ia1p{@S8iL3z(Z-*3(YO*f2*SOcvlp)tKE~l^lU*YUm$_sLD%Qz=hYc}_X z(^|^>Y%JZ9aRXvry(#HmWekhMTNv1*<>u!SK+h>t7({i4EBv&A*!q?L!Q2>KTp?cH_yk3KNTA&F{DUO<%M@wR3jE|sc3LzI_D8V zKJ2NORu_E}o(9LO0T&BQh3m2xSwo3@wvc9>-I|#B1QB_hc0)Wsle=%{bb-UEd15lbvK~kj=Yac75nZn?_eoO3XZh>~vF9=(16&Hz)ijt7 z1j2O9%Po_ld=tXx<3k-)U@&f%Ia?ORVmRXAj@W-gOKBuKOvft(1HEM z4VH91e*E6b%KD!Fyh0)TfN(@KKKS&Dp9Dc*P<#?gOWmkWIrHlkYEa)VGH2Vg=AkRP zDK-w^SDcn6Y2pkg2R-YoI=)yCLQZ)AllbT#?#(|KgfW6dw1uMz9H{o%512`*iNtDW zw@iZPH%d@sjm=z_CZ{iUhP7``r#(1!p~x@DJ|o#Atz_G>Rt*vPv;B=c9P)D@oobhR zO=`xo%T}FpR*Cem{QEt|^t;L{ubVcT)s{s5`!g!6wi4*Ty$m_lSs#zbx3<%M5=$L} z7O*y3hebegc7we$*RRayQRuf(Lif|P%{<*#n#v{*lGPUl4NSISerg57-!(KR4-bpS zV=JB`n9zWkkRlhp@l9U>?RY-ws61Z>Uuz4NI92B{orLI6K18=c(6QO@Pyjwc7`fWNujNs zX6kv|EI_3+gw&x!4Pew(>H8m@SUvuO4QQs~*~kjfv*VA;5jX1J1}$WptCq`zU;Citj{ZgH2p>dQ)+^Eid)uhzo_VQT2BzJKg(j#eop zZyK)+#9N-2sTC_Szu@}pUObbab?_Hq8fM2J3>2U*LHI3U$B%mmKvy~H!{x<~Lri{j z>`pQUf|$q!aVh3U(pefDp9HoX=_quXuh- z)S%&QO*m6hiTAOe1j>l_7UDyX4OG-{Px~^;Fu;A7HQD)Lr1+X^l$@rGOr?mE%3QOa z;+%84QIT*HT@@QcOvs}%)Nac9lvH`W_cWG%%s&--dqo2Z52svAqk#dc8WQ>l98VuQ 
zW^G?c3TLwxJxUUg`S@T?+WT2{t**T1KEtuf(edj_7kIr2Q)Mlt_o^iLHr7IFZe!>C zvly9tGq7`rwVeETy~N~QyQ7Bxm{KV{?ISzO4vo}h+<~>q=+i1(vB4He20{VWBO`}6 z-$US^y?>+-X;FTKM5;f9kJ1xm{QSs+U7+`%Wp?&fua;X`zSy_hZi8}hWLCI)2lr5A zc>KmeZoaJhb}-|M&-?9-?+tRX(;w6{t6u68B7z>TKTRf5E8>wqvQHMIH+%BvSOpx& zUuAWcIP6{%3^T8VW?%K@XnlGoB9R=MsPXQL$<1z081G3yW@0y>R6oaNo)lahvb+Jc zG$cDz6Tav1+qQ!5tvnzsBe-3+SkCqNsgS*+nJ^OyfGj_r zHSzttlsO1cg(p!~_J!Dv6rnjpXpAkDZ zjYnBH0#^V?E7~0+dW{p-V&BCd#<7w4^A{hpTj^x9$__LbjUu;`cHFt7z(KjqNZ;na z>Y{QrYrd56Ed-Y-M0g)Lw?4P5iC|lQJb#<5fpdHElx7l3DZepV8N zw?s&Wt6j85b@viSPo+I+YeiYPzISdcvQZv6OptpVb|(~mpDr42^#b_devk4sqX+7 zxWz0Um&bHg&Ay(Z?$qGBvw5mboR=p~dRO&BA{bil zyBE*>lMG*d$>=xlJSp=k2CClJ=mvW>q%y_NtK3z0K6V%qPCb2(de9yqql95)?q~m1 zBndVEqf@WZetg_V@E&|E;(fG2StN@E+>&TM*e};vnYh>I7WM>TcIuY;bvgA}mqNiV z2GHQ33rGu=}6@=ZDVvWuS@n09(|SbfPMYY#Fb7~CFC^+ zciiDNY*{6eGhC-3v*^V#2mtsV{^_qhu!9)sm_F!hDDwwa+JuXCDIfR)u5iEB3>Dcy z?R2D>R2uo=c6!BsUu9;KpKBq}l7ku1$DXJGCvR5v@3@Y3t~TXotIn@uHhSzvPQRxF zHtde!G=&iRlYI#PR%Ng--kJ@@2ky&3Cl4NNQ;8 zo*yPoigr@bI%IOgd0c>PZEU`<;Jxh!YbNa!n_;;tPJYozt4?tAAO$uqJ)whsDJCE0 z9=}y0AvNaV&KR6yQ}TWUa)x~!q0Z_@r8*Eu6i%UZ(~hpMkoby~v7=b^FvR?H&QV5O z8EuvFu+CP`#wSMlCPv_{p+5ca_A*YIZMd8HFW25E`5!g#FPHm23H5&`4Q@Y>yvdF| zJ;4Qu1oDPLmj7-Cp5G5GJt?XG3zJ^wHO>@WbwH0zK&I4by(g+3GV`i62Js9#^s}9P6}K>Of%Kf@Ad0 zOc0kKJ#UL<-@v4{4x6sHBZl9EnVwzCj_pycR4nb|(hIy-bBY?mFCy$TWH@pn9irMO z1hgT~S$WkLn=={+)61vj16Ck>z2CgAGMe*v>edeK1~Kxu zL53<&NI}l?f}+sWx!!+n`=@TdKvOxZXF)W> z9J-*qoxigf>ge>?%_uaag2VTkI6X(ezAd>8t8{!%$xUVM)+cs~uHrJ^fD#_JxZ&U^ z3c;Evg0-iD_D7}B7pLdx<;%sm@uKenA!t)^zBn9OSr6BW#qTvb)A)}DXOV+vL78fF z{6tybeGN637ffG;!@L|XcNXPzBu?AqMdfbu>poy(oBwk>5^IU7CcSx^>>(Vlsfa(+ zdlfTh@Otd3fCmGcE*>Dwam}cugojK0u&t`#Wu|yF@Q;%i5M}5rt8a9#O+B)k=%EL^ za0sp#s;Qo>1I{yY$Zv4>;v5Y9}sBItx6*+cXcaDJzeL}QaV|W z0?dKBT=d_UgS{X&p7a}-P7{}I3hszNEY|l?nmS44vBbkwR)X053 zZra-BhwieNykF;m0RoqWBWR>MD76CskAOGxtD)1% zmju?oZUOrZ41T$+cIDZaKmjaC+q%u=D#tu_C(#of9}UaqXly=-lya|<7N;R=I6<$(Gw@MoTWP4p<9Vrhxes0KIVs_5lM>z7<5En4r4pM54)t1Hq_Jr`^tVd=HH{h7I) zWET_8ll=jw8wz0EpNK^k{s?A&9pqH9P;~SxGkdPEISd{6ZLJypMFK?M=pZ5d@IAwO z(^}Gi`6SZuu?N2P&LH&pUIq4ca(qY%rS>hfaK~~N?b7k+Vdx`|x_Z`Q2c`?%oE_ip zerMWcLz={lzTr%PO%BuQ-|fCNA)ddJep?k9i=1tW<*OKJlSjw7A*@wW7Z>h6Sj2ts zK;W-XDICo`i0@}$kXo8rxl6XQQ9bGK63;LL4&t1v{?oM4{40FCbuPy zVPLpn2coiau1H*`3G~v?UYoxD%D?Y+gOAV?cPK3(5hCXF^WEq9I~YKw$wJz2KcO1~ zX0EAqyVrY>lQZI*pHzUUZOgiyLEKIP>GLg)QtVVg9@^#nw~>|ES#6D<(y^CUj!3P^ z4?^)Mx5SE6ABZwCzJ@$5;M2t)mRW>{j{;HF=w*eR^i$jCArgaDX%+?bQph1DW8%;N z)Cr@)`FhjS@FqK_fU32>R}}+IG?Q;RQ zx^LjcC}3iaPT9n@<)?Up4SC1>d3gA8T4UvsfaBOtRv5$ca~z5a!{bixS;;q}b4KBd zh*AM<^vjiYpZV>Q-z1g`V|p!(FYls4U3{lC?@Nv1pl(s*ZJ8}ioMjQywTMYn3LG3N zZqUhH^d6@Y?x1NYDVImKA+EbEgS^7wkM)+rTYaABOpwBYz~UVUpVc}i10kG6c>nT^l0AbREX@PikOP81pC$FijS z`?ZS^Wh5J4M^;hpbC@(Kn@&;Ml)~*v%-g0VSr`h5_67p8DSDzbG+=0p9I~47A8^($ zEk@_+)B7dQm7~OU__`sh(W`WBzmihv8UAIrcWKW`dix)j>%L2=91Um-Xs5VWogXv(eYZal z>NVO?cXRvh5;=0<03z!a`)C0qdYK7Gc|CvvkV9{aXFUWHn3!qzS+jaaTz01!#%buY z-`C%*D=xqOVU#~$W`1zGefk%pEJH~@PE%qp95eg>qm-4hw;PQIt6ivT#VThb9{cZz zQrc`@zDXdDWiK1KyZN}jZ#_;|hI`S#=cvIlEg1h@lNZN*>?A5dA42!plW|wZ(_Hxb z9gLBJl$Jyi(!03!b_ocQ@sIhLmnU`=w63kPMo~^7^*F&B`tJzkw-+iBY)ETG0hcv> zqrr7)%ZRbhyupJ=4Tfs%@Z|&3vgNkkPCBuZ&MEt}FQk=%?nkyI!icDeD=gihv1mGz zMLZUupcC)h|Ly|uZrEPNMz+tVAp&lB5f=UxJWc;!EI?+7O=1SodjlconT54GKPq(= zce#JI^g#D6%iecmkR3C_Oay%Ppfsn;=j{&w4Xrh^*6t&$tC|8F(8$Go;XPRNn9sU! 
z%dpoYl~iCUE^guN)A7P2XYaQdygW$;VEW}Rgs8;*trLq)WCF!l;6tV9QQQk)(!OKW zLJ--Qn{k1>E6wm}$*Mr*TNjETMjy8y70I+mq2tol{_69%920sm?9Po?IyQgH|uHv&N`?2~OKSm5J*gUHBhjg^2>-#=i z{VI59iAlA}`*c5cBrSQ{Z$7<$wzO~8dHuY(dRrse5$_za zjf>sAg_9>V2{EaH|HWQ3d#+ftY;O?<o z$ZPaxMDSk4nr4A5NgP^2{`;nneKrvP18;yi4-5E6lCG zOy$~02CaXe!rMmwHzyFfE_0Bm_9VPR! zr-_?eR9V^3`ZRt1ceG+U#p+2KTb#v(C7P*HfOL;bz{haQ$`%c1wjK<2^Ce6V!iH^@ zw4gHkXPs`YX35DLp3{8gg+Jbs%>8`My(YBsK$fF9-P*4eLY48x}* zrjOR}-0*{bdTxb}gQ*hEM^8yFHzntuLShs8_&AxbW!D@7qT9Ric$Ce|((-DkysCOG z8sd@$V4H~L582{FsSEg}%Iro3SQ_uQcp7d?#!{f8|r*=}^VHB**$NNewj(^~VN#ntz-2A)y>w%YfWUF~-KfZUP_S@rna-`5gC0e{=J3HOQH-_Lu+LCPiXE>8ui{A&3=waUgZStLL4 zv{VUU-Ps5yTl#TZHe`a94^qpsacAZB+87VchBvpm%+>6k-QBYulL|Yaq)eSUbLPXu z#?S6uukkqDWZXw>s}#_d>O0h+?%I+jS0Y@3w2T@|27P^e8fDhM(%8R;>RL-==i^#8 zd1#yG>L9z*7;E!Sa03muCNC~9mHC>6)F+(lw$J?W+DUtv_u9JKei^J+Q}dNXzkIqH zziK`-s{Dz9H#$)Y3E^>zwLWlYrsFe&GcmN0R za^8aN!16Q45!2~~G?4GLK}sh5J5ne|x~Z$?mKjB4Usrb}P9+)q9_96<%n#)1T%5CC z01;aSQGa%Y=dHa>>Sve6Gso*Z9Hz&|n?n+8!#TT-@`lGs=>J99TL!n)v{8cBoW#rw zi4!|!X6D$jotT+q2AP=?$4oIZGsn!#95XYc%nYOVgRR}2`7t$9)jw2St=lc>O0DPg z0RV$N;l=r8LS3m*H z=%CJ!j9m9Vqg-)ui+IBd>^0ycpkccoIO7@E`>jqreW9}NmD})EfC}|%-ybsrkWe#Y@_YM)ep{%(k7?bSgGkBu>^n54cW>zRIDEUwTTX~+52U) z(6Y0gHb1o3w_8;+sI)5-HttUL{MToW1u7t4uC?SUU2);z7j`+M4#WJ<6S~G7=@ZS?)tg}2f8;x?R!6556$?(WWBVXL#2dB z+ZgXXAy<{LPx-{}xuiy(6yxM4Ds9=Z1;QA%U%ZM#+pX(uZ>g>hQ(2gqJ@JS%zjs?( zo3L?k@H(j$bDp#b&Mq}x4ltk^YqDmykEJ$W-|^@@t(E@%b8y>^Rmb=$dL_x*yeU(2 zTq=!*Q+#zr7$bTbSPRSl_N(qgYeoJ~92M1tRYi1PX7?_98CD$FBok1!isH;qf!%T0 zH;S9PRe0sHi`_>o=5Sv9yuSr)(UieQ3?(Ko9eLe+ROM@2mO-4r@fwl(CxFVgCS+kF zZ4C~{0vX%0tgZOk-w#v>I;eW{OvCp~)mcBHb}+vvoCG|6McH{vfd~*hj3S~FAplhm zgODatUbnvq3p@;KHaeE{a1^H}y}6Z8$t?vd!0h;xkD7p!{5P~fbgHD^ZQ|8{SAm%8 zV#wTB329JwbSHR^S-5ZI)XHMgOe_2$+KPgsEcMdK)jlc7Z)pr{$-ey=&|h8K-wppV z=YMLx16C?scCCX)T{+Bq9}}kpp1chU97G?zv8ZP ze7B@#g@fE9tPmynF}GPnv?*ok+{X?B^Qdcv4u9`)17T$BbVQKv(!<0gVTCk--;Fh$ z()3}JrKY-~**-6cMGt2Q_nx;KoTbWh2pe3O4%( z*0J*9)lDYsj@Y}>=9#$OjxDEOJ)ISJN}_#O&M?Y{5h{7#oTbEmw=4+J-&+dYd$kpM z(`2XURvNQzx{)GsXcmWs*$D$+{F3-3L$m}_++>Sthi>xZR1r?G_tL(V!y(8Rz7to| zG!ie;_Y{h$z~ZRH0uR*P+m46oN`y-;Za$Y3`-;1u)6*3)FtkNMF^caRsg{#(qJb{h z4=XRDQkj1VfRWhgk+A>|^B>y4?-_BV-^=#SqbpNVuHv{SB-8|!G92+H%eTlU`ufd> zt6XRMupJ}4G1jC)0iB!uJaV-xlLSMf@Z%-L#bIzjw)*(5obtH4!-XAi2|D6_YEmu} zq3vE*%CZSixzaxeBW%($d;(E&hz14nfVrDi-vXK!lorJA`RTC=Jr`_Z=0i#6WhLEI zmh?=^Gj_K^yj(YSw}J#hsR7^AbdCNkKU@dLMEH&LZ+-l2ZaQ6#qNXB>s`7R!$TXZ{ z(~5ei8q<)s@}M4VOG#0a>o1a?d5cDJEY|dBV);`rK|qJmUpHH?GK!6RrI`uNmc;kv zBduT6!qiBXKBK=xK98@w75cM`hYWPM?%xi1FSQCL;f|OHvZuTSj{UK>uG^)kpqU!U=S`STaG;KLl;d+!1~!GWW|PKQg|%9p{|*n|w1g9onwtX4cN%l38t zpIt3eb2h|f59efkw!_lh4Ae8bt-5vg-5jsO&8j5HZufmgq`}2Czd3G0V1%81-DhF$ zqR0%#*9Hda2r~GSH;w*Mvr0{q%kUUsq{ILE1vz70z;NtTY}eaIPP4&VG13$+j9|~v zerCGr4TLa|oTI`vdbjs(#pfeqnUdL9rX>nzeQlPJguo_p)odQDj4SrMqpII zJ$YKT|2u;q>0m%Mjx*CX{$8BAzl5h5#Z9M5%e=q;5T>I^Zc|ATQlV*85(GyvBViiU zT-ut-|jC(#i28tb*#cT$ad&sdNQ~T-kx1HGi^va0w#DI>*rHdP0n~R zNu-bY5#PRQc&@_*DX6*^$ZMBdC@Vk3Y0qnnv5~D@=CG>M6Xg}wq|T^%_ic&L zC5;V_k4uz7dVWnhjUx&|B_Lov&)N8l!PdR$WHCKeT$Rt+PAnxS4G&M=f>nq~Ef5N4`l`}yk z=V&!z!8xI}FI}0+-RPl5YzWRdcc9AT8*S(K*=^OZTF|q1u|yaI*YUC798-`hg!l^& zi%l}habU2p256wcyg8+r+Bo*1*hJ!O)_bN<`szz(amG+ox3|F`b)4Rk06 z=TteeAHLXjskyf27nH0;TlsUt(#~5j9rL2MPZ|}@@93Bq2@R}MF854y=Pc!gUejEYDt^Yj!>5fS+zQU_BMZ_B19TfgO z1`Z&^1@p*0UlzPDiGmv7a~-F5SWsl-i2KZby#s1EXM#)ihu-LaVwZaNd&qjljUXwJ zvMjg!BUAWE@y(4lNy-wd&laM9#KA_B=#R*!F6@iae$C281cvLGc^y=jR!`V2Y^zjL z;^Hqab1AUU_+X4TdIVGxyleRQZD^q=sQ)yh^RI=&UpbW%Xu|)*V|BpkJBFiTVPDEh zTKGu&F#0L<1)nd-{H$SxSvi>HO+}_*6qX&S_`^CoJE!1rzrH$^S^uHCe>}}9V((|b 
zA*+fX3kWpnfX{;r{qCEkLHPdMP?4j{AD9_pd~#Bjh~hzYgNDY3_U`V!OoDI%np+>z zf`ZeoZQX$NgelJ0e*OzD?d85!mm|=7*m%iPtNqQYs=r&iu)gKl)0&1J9>G2?43CyD zhkIFkqI`V3cN;Ojw6tp(OYtF?c7X7KRbeI;$w+{m(`}{+t%$ZyLw-cS^it6GwSxB) zMfIkSAWX93ab9G+vX+*qk+I)MnMHLn!U}W3IwlRlJo&VjFH>(w`~Ax!9@J63&{@hT z47Yft$2w!(TI+uP;#+{8VQXL(wXu<95S1kXZr$ow3)X2T;Z1|;XYRZ!@+1 zhnriEy(KTGsNq;xo1eX(F&3AW5D*Ze(1Jub)Qy;-k>&MSz;y;+s0eX$)0dCH`qqgH zoFtTbNP+PN9)$5Dj1Vf`#du>h@&+Cj`sg>}lm*m|-K0javl0wl_nAh!&f@mX0<^l`fd3qL@cf4Ixql|7cFQY;4)1Gm$$#svJ_bRB9=O<>vDUV^Rt+f!7FBA>f~Pf(^BNx ziD1a&Kuk&gWXc+Z=Rm+?l$R308!%%C@zfaN@@bLQBQB@E@gY-JUj1C2dDFj~6q4Q3 z=Q3D{Y7zfn0axgwASs&lE2uF*9n!P@Sg%6GFo}bxAMM4cv%!`oXq;>2yvmnNCan_o zJsB%zIFHjj7p3Xpm2_TXLS(!v#!l3}d>C)w&AJDtxZth#b=_IF-h9$gUGflSVDY=F z^4^(IdXi6V4E^QCNJATstZ89GHu-{%g3S5g7pEm3sjvX&a%f-?Sw1g zowCdSotKYqK?oJEXk*CVQi7w5o|lJ#CuZcyz7Bd8&AEv*F*|$HdsY7QH)_gRVcv?< zFN%C`ujzpxWwboZtzoCfi!((h<*vtW^*w9os7Vd@)v&KR(Z!1D)8;|7BvyNvSB(DVFY;&qBJ{;g?g zPv8nE1EFl^sT126H7#~_=-Q!x{9GE6@?)aWa2U}vXQuX_F^(s`G-nO5xKqY)7$rIfeT zTqbe#fUv^Is+`IX!EEH^JDm9_ z{zxXi0E$*LceUS{dl&4rT_iZ`cKpVT5#{rNMw9H{!4gS! z{2~v-=f1ep)HRUqGC9}c?2v(htIWdO(1)4r6(1!Ji!B^R09edF;QWDLq^Pi+9^-O) z>cqxgCtZ@CRkTmh^LX|kkZ!}%zW3mf>VtuMMmudirf7`WWm{4?$UOIkDi*}7U zOUO|JSydopcl+cL!L8Rrr)@k6D*2wLi?FAXpJIxZpXgPamU5e|ue8<&OUrijIn(DyNyH+&R zYaiTnB~>n8%G@0_^6RUJvw58RJ5|p5B0cpl%Il-YfA_$$fbVhc6OR@;HPtWLdQ<8d zR+#nclNcuup55Ch^zkzHi*-!l;e%E(Bz|&#L0;n`hCrCiD%V}BRr2SX>#+mP0vkj) z-?iri;tC;Iw@1$#^_4Y{oxe1be0VIz#O_t2+mx&-y~Y}-#;h**Bk-|xl$}iQIHJE- z=ka9^JG#pVE^f?Zu8q%reIR*8YwSUst?7}>>ok8Q>Ls#J@4i?=*)yzdW1wQ@{mw#O zkLqAh;#}t~I?`I3=3miQP~B{HqE^3S+yFZPrbgJGIgRwYfrxI#BEj=HER zUY2S2Bu(+jP@C3x;>L9fqsilg;PGd^B)}@9VvX3r!7qC9X`?$10RD7YBwv-@GI(h* z747b)ZNHrrYblXW!w{KSjqD))d9mN(|`9U$QfWP7|B z)%-IsA&Cye*4B!qKduR7U?u$iWX4Mh^vy>u*IF-a(>OC$Q=y)lD4x$Q&m#h|sVnj) zwp+O@NhqvEe!?Agr!R9+ydZ2iyjQ0l|7P&;5C>LOy0eJ`S<>i6Pfw9^XqY5Yq5%xh z;2+mtY+#Mp?tpjVZA-y8&vfa<=ey3&Wy)KN-0%0QDWTiyZc@9NLX1rDfv-=U>T{8{ z^M0M~41UFw zI_m&5?X!jq9c9y8iVo@9^YcnQ$Gq{uVXyrxXkl-TW=XY(gofS!az@KBGaIWmqpTLq z`-L44lm~f8Je1R!b3yblwu=u2dHUSW2}}Xs;@IDW)Yjr_&(+n;pGNI>$GjqFm|8hW zF1=kHF_T$dWrChSnpF8e`MI+bf96+xSJ7D#;wA{aHn7h(WR;A6J-Qe4OkNzb#{uR? 
zR$M*jx<>HHs%Krl&tD;Hsjhx4bxCYL>`NL=wF z*IG?!CeG^y3;*i?$v9nqXojwIc^-6Ge;V!+HHsBs;W6(CXQ?=LY6UzDU3;f<&2(}j z^&oX=yfkN%5?C9^mP+p~7Ez5=)-0;D*L=`^4}xHJ`4%`y`K@#E_RY#2XKxv*nqIZn z?Mb+(sOgxfK2CPck|C+TXUVEU9HW%4*?n+!dYdK4_t6;Vv*jVQ+S^=>abc*OAvS)T zD*|&x>X>i*VmGn0rj`39aa>~YZm>jp;&V*~t7)BMVwJ;dr`Tof?F+=@8%!^un-gwQ zOeq*eYMddE1j#U^1J-_=ag%kjMjngQx6rz6K!ki8p>Nj&@@`GRJL5)xyCg5K^n0|{ z03yKI+M1`M{@bP_P*E8>f!LtFYc%dM=gin}moSSIOr)2A?!p z6=_rUh)MY_eNpw<`#0ZqHmfJe)A|Nx`iS;ECOS@Mu^A1qi3)+Ram67s?46;28_@Bc;3M<^ z+)xCMnzIB|X^iP`wX)2gt+@MJ>Q)o0>#Sq@3lalJLgn_jF*{dTR3sTmpPT6ttbq^I zaBzmmgq}Zfj(YrNG#)$pB##hv_ECO}cu6>}*& znr0fy-L+ozIh+np6G0*Qp|{p$k>A+TPdLuWnhWGmg=xPr*2V z0pRKmald&~r3#h!J)^k$j7o$Y*)#EavgUR_#tohmx?Rz=y(0nC$8@ntYfs&NUMnm0 zuf!DJ9?wVafUw0!Oo?TFt$FR99DU3AJ!sUlG}ztLr9G|Nogi(!ggsYE6j_b_j2dc{qM!15JYMXki+hR1JodjV zrk5W#Ik%2+&8lOxW5A$hjXChJZiRoW1oTg;SdV%dYoXXCpcP?-)h}Y;yx4oW!5Ofp z4gFC6?F(>YqUZ{kmCzYS;$o6q6w@biy1&+~c*0v##ne8WX%a&5fugb~hTHS)>JPe5 z)==$q3`5x4%bV+h=s_JFocfqR+&k~H*IZ{O#~yx3%8>%Q8N0i%zG~x!pgg({lscS;bl|L<*ugUEMGS$Qa_2=@sVoVFa(Dz+ zy}9(ScA5TF^&w*Zr@s5&FUI~|MgFHo`%e`a^nRpcEpAkwxw*;vxqui^j5GkzZ~Ny5 z3<2gO6ui*q4C{EYoNuTX)dW_RM;Yzn&g*?)o%*)i$4m+O%+m&D!Z`-N6N5w$Qbu9* zp9@lU{_ZRJJ<$i~9uqaN7*=Y-!)~a}wHsng8w3gGDMmp@auYrTQ&sNc!LJhr~ z$CcqAGzE72bT^g{em|;QN|s`dQMdL;5Bt)v@s)tLQQ-V!DX1{_FdiLpX{d-i8KWoF zTOXa+>JXi5DcY?_qsJpJr=eg5^<7st64eweGZmuZ;iS)c_G9!Xd~3{61_)c1_&ie4 zGBRWgzPSFnc?g-S$XQjXcX}PAF#Xu)-yUr9$;;~gYOHVIkFw^sEPstuQOX=_;-$gQ z@ho4!=PVP08ogdnU+pIU%_kf#g@8IV)E$4!ji@C5I04j(xdUoX`-|A!$du>{AKkdc z;&<({UaZ(`9tCXXxuv;xg?A{l*g)c2g{ap*0VmhE6_l$V_u{7(002Qm*V%`J3Pw54 zUv|l!5GbCaNb=*|U1v(2#Ze zo}GdER$Amqu8o3J;bC^i@Rf%JSx+c+n_Gxou$h9+xcaDT|A|V~bf!FS_~jyWSjJVr zEkMiQQp)D0$@_d=e!Q-NgqDN$%-dE*T3X|;-_gy~3YH91PP-+x#=-t6MD!DHFz;jB9m9BwUwK-%$vRCCvTP>4o|!+H4S z5_)1a5acard~rO=C*bkni@R^6-gi;xG|m=J7tpjNZ^&F~_2Llzxbt=7=yj{-ZjGKh zM6N9S`qL-l)C{EC=X3be_ody<4QiKMIwKxx&Raf#5Xm?=VI{E2?EyYMKU&zn*F{*s z6RGK`vrU=xgR}}rNt4rN>E7-3p!Ca1+vJSX_3NUbrq;;?PpAz-ey(Jjy2lj@P;%&X z>mQ2hiK#a~v6dZ^p_^QH{9WF_eEp#2nXJ#Fxz(5N-lQwD6S^8<^_8D#O5t~9neWY>IC?8s zQ<{~`bQCrW+*C;GB4TEY{Q(Nf8z)hN)}-*Xwh#iqQ8gdmN#??7TPk0rFll~1Lixe^ z{QA=ZjLjsjR9F><;$H5VJ+?hoj-i8v6%|ym1GUDft>vUR<52oy*W~vJjjhm6*IRL9 zDYeNihMFey-21~yV59c;@6$cObDa)ZEv2)&8A)#*5-_e~jynN!hqL^4U%w^Sp!A-l0(_ zO4~$p=O?}qT6Ue8vP*;q==Hcmhba4cXE?w+jMWOffG=44YG36h{r&;?tC4<{-a`jh zwpN(?(lAIz!Vc5%)YqBDH~#o|jDKuyHN!(Z{)?o^ULp*sJ6?gu>$y0Y`^SPVXXjX} z(KQaP>Pc2;7BysLHi5!7g$_9oJQN!8o1ltW`y-Iq$}i9woRIbq)Nj>UNjf&24;#pX zzsK|Z>ACYK3OyK=9(vb@yD2k@NW5fuzA{qMf&Cv1Oa(ffJ;3!xoyesLDc@{4p#;=p z;M;rf-YWICQ!{MU8Lf@o2c;4m?DYaGzLFf~dUWeB5lq%oZhN0W{oYWBF)M39wAX@?iPIY ztz0n*_w5L|6(CP5eD`pMcXqBM0R&G!^_7{`MF_Tj}7>(y`ERQ zX<^(x*AmZ?0BKNOnL6o^aE@ip3O69TrdAWv6-p1eN} zM%XtYG9?Jkw|p^`)Mr(<#tiQSGRWaQp?Ok1@OSyt9B9pP-WzjB8B66-U55kgP{lax zU;*2Wx8fhr`U8rO+20Qz6#vDzr>bl6>qGb*qi10Zmg)civ%}9)z4&mx5y7MH63)Gb zXosYcp$Ppu*CfR;<1#APcGM50@z`YOJjg`fY;?^HF$|SJU@A>GKty&$o0aZm#k87^ z^ZH^VbQTReppMlf=R~YLqiGNS=}h9d!7fKge(%sh{ysszZu-H9A*(2PUe(+7bpzXH zWz=)+iS2MoCFPQ~*5Qa;GQ)-3+szNxy1ckbRDi~OFb>%?Y~)K==-1(XeIf7I?DuTH zX?V5&tmzf4bF$A|w>z-q6v6`H;^N$$PluQD-O4E5jGYLkq6n$uvQZ0|xk{bc8U54frOJU$)f{1%&!+ zC2Gzp)s(8=6VgbqcA;m3oSPG2e3PTPe=ax!nH`0ikVjBm8=F7WD)h5FYd!h$lre=u zQ=%^yk$}mQxHWLsXRqjWSfVQ3Ep}OOg~!%s!@-0|wHTq!Yv4TF5^N8Ev54sKM~@Q{ zRq_yO-FgCBd$e~fkO2GX$yHDUX`cV<5H43*+v0ohbv$X_jpfb3*Gt5jdh^31m&k++ zaoCQ53C?`pp_Kydgv*aHYYua^nJ!O_ohuf@2&4<`W%#}3P^{rz)9e2DK=J2 zc+goT4Nh+9zgXPwn+)dTViaz#MMw%9QPu`d)`dWwTSa z!^kWjT%t4MyoZ5!Pf4{$ea(4Eg+}$Hb?)ZcEv%x#e6VlbpFh1H)c@31J8E51clRP9 z0bmgFiuNxU90EwXS}dH|t25i~gJx4SyB)wcwrKmk=~M57d1o(D6v|D#y|bg^z6G66 
zm}7W&NU?qD;JfvYEImK?sTHGM+*hv%O(F161j5;Dk5;$|*jAKNwm(Da$I6D?yuH$o zcEc`iIz4esz}(zlV=6%#lcDGz4%Tq08!Ht=g_pwXv%(M+)i-&iK%ydqX<{ zjT~fC=W>m9gXT--c4`)QU2hB1TlkzP*{-)|o=)yYTYtJnon{u}h)q*Y1zy*0lUU&7 zjapXzT$N64&d+t_^oMLVO7t+FOrOrq%qr3dcib*e9yl!y7)T~&F^>2l!JLxeGp)89 z3EK1&Ki3QL2a11{k>%u=-j7N8oNr{A7rNDzJeq9Ur=vU}{#Y3o?BOc#gFc#c!{>Sy zS^l=@K;N5EC<^xLN~E=B_4Yv4XrthG33n~E+Q*N0) z&xt=j>i(V39#mDq*^r%LX=xFMJ<)54@LNdKkh3a2^A)U;&bMP&X`=49wNeS{Px)!X zF!4oNrl%**xx%SZeX_WCv{3`qhzj^lWbgZl#kV)bFy&h#$xVy92}BvWHeH!0ZHj9C zZRG;xjLHvbY(?_g+q)tMt^CsaDL>vICb*O+WaazIUZI9P;RsgnP&Xs;f z*dZDj%MSEs^OBm-anJC#?kcz61PPal8Vc}n8Wp|N1!~8lsuI-?7a3j5Dzver6OeXu zXiYTQp&B@4zs*^mD2kyfSZCaob@FOiPzjS+Mr#T=-SR2b9n(lwBFSG?d2EY81AM91 z|G0+5M{8?(@`ppMyKYWf2S3k1TnrgyiHoqksYy(}4S$vPJ|<@~Er$cb2&|JaV%`B) zHy2kj#>l9cnx>aaHQDHt(yAcm=#U?azZ3F)jOBe41po)FrHF1)A zmrJtqSRIO!*pi}sXB5Y&$K3{gO`Q%WzSTUU3ur^pJ@&qV;M4VtUh?vkdj)5O;6eOS z(ji8P3led_N+@;S`Km`m++`i!FZ@XxcOEXhUZ#0B%b%@oOOGU8*v*}fGzy&8Bgy_M z_VG=M^Ux*mB!j-ssL3L{oZ>_opbgci(A}mUG3-tv9OcU~hj|j3)q1z*e{QoD2B})S z+chx)KHmm4I_m{vmD4bXFKfy2m-!D;%iK+BxE`Osbz z?Srm%8Ab`in7fiV&yk&C{2O5y07V}Fu1MoYWVElDxs8)Ko$iO8iEgS{vqf36g{8;c zvl}Z*(jnO@kA0~4-h>)ngTuNaLHR!5^Wrpcz~t{m+8S+tbl_+DLmQJ9(s!T7m&zO9udc?lIV~gqws8`-qWkIQNsQzXP64%gSt?T~G$ny+&S@Yh z*!#^*g^k{M+3y?P(@if&8yJPDFDh4Z>!Fy#&c(>D`iAin(=ijv#!-AD61_u@AfkNgPiA09n&y=ox|e}s~G=A+V0kfLIz}36>7?R zQgGzdaOv?>HOhaPoBf_gMa_t8+3b}lz85I&iP5 z&+G0Y5EW3T%D$5LYW-E$!UX7dOBH*$s(Wg0kF_c3e0Olw60P!7bX4fd3@go7xywcV z^xW)3h8x<>$}4?jE(^y=N9E(P{0dFmq11^?%ob+^N}HN!fkdsPv)_jvq2`tpS_*4| zrq{dXyTt+^d-nTB5bIs8T7~WVvokwq_VlX1n(?zJ**>QWf3;-SV>DVl8OgICwlRy! z=~61X+@o*Cd%=Yj%HJ12nMD+3)@S`!cB4)zjwFy+f4Y*_q|a{odEf{`IYv12e@WaATlw_>`>BK0-14vi(>3tJ{q&y zeo)L2bmS%cq`{-&B*DY;Rf?Ly&dM0ud@=f(am&&BR6{?wNiuoaH!dMzww|Qq^O%}O zs@j~S%9QdBBH+zp2$uNiyB+(M>N+m2yOd-vat?B;p`8D#s<^Jo(Z>)!;xo5-esu(V zd0nO0oXKJYhmu=^U>~`l9ig2GQblQU%NtB79L`zQr)6ZL+HfZ$w>2(>0!Cf7&h73l z+hz&{;yR0W?`O4bZ2)#=Ybo$~{k{hwfbi6fz`H3H5Q0p_&krlz|)@ADX(e-m3? zy&JP41;k>Yxo|-zf^Dbmf$(jg2Fv>>VSUMr&$9nT##d6 z9Q3ERoT+8$PLIt5N)*s%_GyIo%R&?L6R{V;3?fsf#|yn)`+q~Etzeq{GHdfc?!`~hP)cv7@D#;JL`*f{>A&TsMu<{A_L`WB!%88J5~Yf$MN^6&LA@@ zLzCw0t(cD3>FPn-gTV|1az(k$i+jd^94h)X_a90bWNp)P^DN`muTbI!hj;lmBQijF zmuf79cV=LDV><2kkHQIMch9+51(U7RUGO>DFf730hA_Gz0}t3+t}KOt01OP=z1%0I zrguF5AcBEGEJy+^@rvKCE2*Q1Tgd~ozNNKVUSb&=@IPjdYRitg#+MmDpt+*8f;2+J zBi|41^?sv?L2IV_$l$^3)a3@w!c@p9+w0vMawHWKX)sbr+5S(pr#j^N99`XEZ_i1t zwmLqx>LU&1rXjA#`t`rH)7t|}1zh4QJZ*dKt{GMGka-$T^YF5C$K{+O2*QYN8(M4A zup1QHIH3nPPmXU!!4_x=6P+c;(@FH1^>{XdH=v^(B)iV~m}gHSLI8NtHI-jR$@A3Q zcfUn08jP~}35lN63rJA#w$UrsJ8HNy(O<-uw3i5$N{`j{A;P#hyW8TsB^1;2=c;%* zlSE#n5aY-b)b?u0cc*y}(~fSqUT~_&I}=oX$W$ zhmipuK$LWU7SJ>pL5}RV6W7(nP+V?z+Iaju%-6lXaG)YUSUf2@yG-Zp?2a3=?0)^! z+l5HW#U-!WZMKC~P&!bc%M@+5ylnJt%4t+n%xSO*zM}hE@lLN>^5NmoQBmL+*&N2+ zz2pI86>%ALI7fl^kVsQ|_Su|E6bncW!HOUqG_xu+Hw?CZama~y^Z3+BnWqekj%LU? z|MqrqhjrZ_1c?RkFThmhlh*pOjLf77S`5jmG-8QiPYbAIk=QL6cL1Qu}cg< zwT=&o<9J5AOvm$!#!W_Z^v^Lzvms;ioB_?uh$CbqI)lRqP$l z>PJc7fJ_ssV&x5$8HCcs?az#t)S=);7JYs@S|AsxU|(KWznuOJ;JjP)@|NQ(gRx1u zeFl%f;NQU`Uu&3&;e$@0Tsi))=xwS22e7hcC<<4JXBc>JaeMDhN?qPX;^lUBEhIjE z*HC(&pW}6TI3xLP)^UWJ7;eVa;moZE4+9fBAAM?D2@fuG8^){FDssZ-n|5C#9`IcB_uA@-%81n zygrCMCX6iH{WNv1teU#9ONMdzNpIDEZtrExI(8f)Uz+GFi4XiNC-(~HIS23J3I7>R0xh@_V;? 
zk?O|E!Pe1p%=;0R*;q+Dar!iuy;Y12747S2fIg@3gnaMXKHNv=YLoP2w>-D!j|x@^ zYccBeT3YQ@2>%BGX?h6ul_4d}N%IYEij zVPewhk{U-Gahq%6VUB2*p~c1iwM=RQJPq+gF0Hv*={eYH6l9OB8WGP=500iF1McGF zerT+=dehMk+)MoWlFN3mCP+orAM(z%#r!(iSccXO-eOQ$oqck(emufyyu1?)E020qU5CN zaXsDLXVwF)PKAD}e)Sef%{$(%7cuF%h>2A6Avf%k-VdL%8DKR3giE~}5;=>}4F?MV zu#ja8lJrq4${th2sr_p0oFDujvzzU*%Tzh=UiGKEKibb| zu^(s=6I#5u#_AgE^eaz9YP%yd%V zWoiGLsJbwy^x_Qj-}J1eY+)xusdi^I0-l%regikr;fGJw3mj`rY(KX8TxxwhKSTx& z4DWw*5en}3SeXR3e^#Sp86TLiF?+&uc6OZ26t+gp_YuMEN2qCUC7nw|h4py2y63zL z^V+Q<^pXqJ9`Qt;M+ptBj24fQ@~}}~K21FYNk3DVcpwEJMG_|_`q1k^yxjU-dSj_; ztd@LkhCx*Wm^Q3ZheJC!b8x=NA@-XoTt;|rEa!8nq!8U3Zj7ux%btaTdD|?o0QMP| zL-XKtf&DfwNTXySchi(W*#n=UxT2hlOl43JnrR4?{q`myUGrO?&#I@FZ%*r=5}EBA z!u;r7bzBkPcPyR?%b&WxQGMrcc$Si4ad*CkhNx^O*k80bW? ztwBAUcE{}HWxsfx_xN0tCaqbkWP_p`Ax#;2=7r!nH5|gOCh>Oa6Ae%a*U;?N(`hpj z<->Z%#hic`OVzVDmQ)wEX8CvMJ) z4TuK}7dm;sw8WNjqMR`c^J~5anxY{mJFx}<@#?%Zq?C!VP~U9$wt%UUrY(VqgE`0$ z+%IE5Ow217oJ&7TE!W*;HQnkUHMv?3n?i`6P^vGuP)OGakz-{3JYJp}MQAcpVNpGA z1w1r&A$ooNs^Q&Nn?g#N_k+ILSCxeTdBXK3@VcIPV${{h~ z-y{_Q=DWP+m1a?aRlGVN#r<|**EZzEBEc?IXZuPDbLtt=8~e8@8G_iCqLqx!#Wv>Q zM181(ol`>dq0+kQ+(q?_KskCX<~#CMR6Epv#o$WW-g~-)dzSp>P*1mA8ikMx?7{{o z8P1B_VprMlL)_xCTxl=iFv`wBExV(;)OOYU_tZEz+6!290MK;sDk=;?SqqdtoT6Kb zcm!In|IX@2``6MVp2Y3-@^@AeDGI3*{){aa)vjMVy` z7b-vxc8nlxo{=}$>21wZPyUQW@t!I_W+=OEX4yTtp5|Ate#$EIKn#MdpU}6TvBi@gSxi=dE5J(Q2&0pLWhWMmDQyxCTWx%V%6do zhuO;54eov|rmLc!pC=|-BDi^2%Z1pxRHS58j&7ddgA-1jT%N7?`zb38pXq&GGbzTm zOrI|ePf6KW>sZ+9{-G?LiT00M$u;d*3)oe*Of@0Q+PfOeOf?r|=+$?4c418sI`l%1 zw~6t2iFv_r1B9kl4jfsJ?Y+>yBwXxuEcIy$VN-&CdhopIm_1u++{v(srAYu`?{#A_ zL*hUVU4wK})Nt^og^&r3D%ZkgVBe62((&$Ta-%Jo^_P+={e>>&3?&mPPM_7VMq4X} zsOpfZg*RPKJ%SqNUC7$Up7Ib5Vu1r`w3o$R(jIc}Z{cdE(+3RcNSwr)vW;mn(C2_7 z>sd6{ZVxP-0K~0)+34bA6Vvub_vw4DP&*)Zu}iw{rmngU>I_9cHy-%e^<#}oqlUmO zH}}wp^GcE0*NAkeSLr!kbK*}dx|wmx;)@bp-;EW|UB}g_uK{>UEmQl%@vmunW3)Pb z^9^B@_b_ECqh#w=WhEt{k&%$Ox~_l#$+vIMecG2dm#3o-Z*<>^)A6yTzq%$^{}+|& zViquTqj~v}@kP!~R9*stqr*l=IJUe&E@n;}Do=Nmmd$G)n>_FHyos(@s7&kExxMvR zAvkwa0y_O&^NZYQ1jXJm!!OhkA_|&XI>tX31%!;tgH1=nE4B{Y&oEL-9X@ zJ^z1Q)&H8x*VUoNJUKZzq|f~O=f6f={k;^7*E^Gu|3~B%bXFnYbG2JR{>J?hAN1$P z>XZN2=53gc84dBj=KVjGnZ2UUUws!^i>5k6=TWy-G_p6IYgalS`Nbtvy!g*D?mn#} zf=A8LM$MD_m$U6DYjFM~dP7!q^)|>cM7rPd$rCbBihFKB;`HV$tAAgGEMU_qf~K^B zW5B39=CS#=^%*VBj&>>?wL0zY4#}yt#fc;6%)8qIRAQutZf)%`Jd9ALfmu}mx3|8r zf4e8uPVT)}@d{jP<#xW_1<9PyuZHG+!jFX5(7|0{zo>Mb98g*0>$qNYJteE%<=N;2 z55XoyPs};zIZo#UpF@zD5aWWO(01VSRmhhShm;<405H~gTY-(-Gjn}He;u5`F+MhQ z)j&k%fep3i(?=9&C^6ys7NUE4`QgEkiY6Rai2(-iC30Pp8gra{gZg$1&;IMl6n>Ub z*!p%6HuS#{K(Sv0c|^Pj6hJU-;0+a1`rgwVj6EtyT+V{5xV6O=0&mQbMy0;a^4T4Q zasTA{g-NU-uclz0=vMw)BlPRVLk z#&xpK|0wLNgQ^JQy}yKnhyoJQNT(ohKsrQ}?vM^?q>(%aRHRF~LAsA{I3RgwknZm0 z(A{v4ckcVnoq6w_cW3|E+1Y2Gt=Zl0^ZR~3n6V>BS8lTvX)EXbsE{+SBbA&~?`u!N zC+haPx>6YP&PZaLmMB7Q(5X<=j5W2E8epO>^4++x*xGbP$xSnsM4E5ISkdR*Kv|F} z%BLOP`AvTTYIEnC>0#y7Gmtacu@*<%j%$=5{)zcRH+xAjySW~Fj_a(JR=gHTE?O2Z z)A>pn_Fmt9qnR{Arr1gOF6I)U-wUc;Gvm4Av>LuLTBBmeEhO^M|j%1c}H>}3xtv1 zJ`-$`+0^;({>CqQooJnW&Vd^rd6}|a7l~CaXl|bhmk0!fQW>I)HzK~r>a03k&X7<`PEcYU+56{++6K&hl-t@Z`#0C@n0i;pNUFC zD;EX5H?^YNM%&Gyh25bU&WpxV((h51@JUfE8b|gUX&uV8xL&2RgHPYA{cQCz&b75C z$DAARfp8B17RBJC$uwBA=%RA;K_FRlv1p zM%&%+zy%82_Ge6L)_L2wKy$kaX?Y&{2GG^6nObM3 zte>V92ri*D+}G&a_84Jv(E~fn@h~z6L^}=#mFO4GU7n_}pvd{K?lHca#Y*$^YDN{r zD{LbldvjUc9262m?NG;L@ePFF$)vquA31o|m!Tx(S8ZeRTG#e{iIrO;xs@P_f#Y9c zes+644>ol(`RftJ82Vv$Rt^K;nH?Ps+t4p@9sz@CqV#=*Cm-mkfoH&*CXl{>lYm74 z%Y>?A^s^_5TmX<{NZ8^Swd;O4Bl>3T${e@eVjr)rV3;8<>EzrX*w;G=yFbz| z6Udxr3ZcCK0C_0`UC(`8ERS(^iK0c7rJq 
z{k#H>+23`PUXzeDV@P@%q@6v!$PGvhPwW0Jp1hyYgys?T!*llpqK^S^A9%*N7c@dB zYrR5OaG{3_*gMl}9;OkCIYEFb$-||Q!A{ukp-Gj2;xL5_&rADobgEif&x1Md0nxJG zUoTPq1)*ztGHwG(#ImQndLi7ici--18V0kh403ShRN$3CYVt+(ZLEIpm6Q=Ql#Y>I z^l||0k=lLu=0)4A)`HBVvUGd;=HB_m-eK8avytjzn>Xh-AIZ6Jk3i2mlbV6+fN@bU~-TGG9I%k2e z`5Fx!mWf8dQtWlsAd>&esw?o!M`dB#3}PN!U^hfTrXi>7y-{LnJ6uEYjx9uY6=Nq2 z=S^wF87qVn@Nek(z=x09EUBni)q71X%|+X4K2>?g6yby8K@&#ypAo>{ZZCT-b4(HKU74XR zCR@%a5PNVd+ZRGrw4e0!??UM;rkgIh&|g+RACP(Fi$F|)`5dhlQoR;^90SR@=%P6} z`A^t|t7Wp=-YYAnQ1s*yBpWbjfJv0X&} z-_l!?78kOECqTz*VEr7@_wTtPl_B?}KmeB+NIA$_E*CGv43M+*wnc$_*YuN*p)jan zG?WDHsidsbl~7}`_%%#<;BGDRn08L7p3oBL9<1bA5V9Yg+hPNzq@O1A$;Ex@_mE+{MvCer6o~%gdQj=v%NqG>nnsMhlj; zVmhPn7Mo95BPU$SMhvOT886#XKbpbqnD%Pog6Gr4+^peZ!Y5GlIF}~lGg{K3B35+& z>b>AS$_b`CqXk}{!@%wXBLR8DF1J9O<$$plKd0GTf$-?ZNm`se79R>Qd**RhEq-wZ zv)C4e%tcyH!or7I2NC)?#UH!-8*ayvtqc)1U+U6JNXnc3=5E7Sl!5}_YVREyQ(kYG zyV9-weU8;dCDzpqH4u=!K#IglprdL1IH~v=r%GqSQG-PkNdyadAO$d6^L0A7*Nh`+ z_eUzox03jqXHsyQ9c|3meO(YZD1?_3-q)GB7e6|)a4voS7F-yOwAz`n^UO%tow_P~ zaH@6Q?mI=5lS1c&I;9F=TGCl;5wk*Z2nD;V_I{gr3Cj; zUg0KtFPGhNNo249C zzo8nLHMDQz9F7j~P;Sk?cUJhEqnoL8_YIMh;VH@?=;&+Wf186yzqxmno0y@7NUPSn zF2eCsbzH@1f-ymlT$|<_oNivZs1%nv%=!_7;3%g64kLRq9kIKE_3`a_1Tuf(6<=!G z{b{SPviNO@#N$3g61fBBA|$exX8vz6DJmD0>EM**>P6s+iJ5}%ucu|NH zj-sL^6lbFMm2ZCojpVwZjbVP41Dx(#q$b9XQcUNn1Lz@`__r7Li*o%>7DVl0Q16yC z(XZm#*WoG$&l^3sPttbhCVuA1^w?PwW7rH0sSU>(yfUtxfU}z--^gC_l#8?e^y42J zX-JNtk46IyOEMQB7WU@tXeO8ur*Cdo?>DLmdslZG2Gb6lDr+Qsocy%H8j|u@E;>_% zrqEPxJC#CkGKpSK9E_IekXnAPkevDW(_P92B03G?{)SWhG+zE&vS);84mPtG=N?WuAJAukneO&2VviB1?n2 zB>^6cB+@RYkN&`j9+F>L-+UEY^CWsaTfm-f>(oc$N3m^%khjcjj|HayYGev)@V>x3 zDDscpQc@{b#o{|L#;?Jh?#49Ym_UAD#3BVOMl$bpk|O0nv?&Ju7^o`h&ihNuYWO)L zQT)Q}fO{)X;_@hip#5AU50WBT7VdsRzGqI$!|u7bK?0oe&k?cVZ@R^oRKx~AQjz+n;vljZI0-O4)S;~$EQPHs08`X5n|3lXtu(Dl0) z@f{_bXp+ZGWj_kmYkn2^Qi`c84)~1!=}EE})}&D5B;tYFGjw_T-d8uiDxXwy5K~Su z&V8=MHIvQdzMIrsXkO5DLH*^}7<;iAr-KOhos`e8q~TKhp4ho#P7_@1GdO-bET>;$ zOc?+01*uV`pUs*yRv6RDNgO`-OCLqp@R6NaDx1J(pW=195@mFt2A7Z4aZ11g!u)8d z9{$tzQ@7u-_2fCR2km?O$3O$vkn==!<8%!@mB;G-dg{+?2r*sv6YM%Wsgt`@ohT|1 z4r?15hm9adgPg8wV;16==NBxC{m`#7dlE7z+oB4t3s_<-L+sK#XF08rqU1XxlGJ+@|Zlo9a7nM(9&6x zOTz?z^9u^Xbd9eT(Mn3}{uqGlbDDw!1F@b4eP2JcvB91&+PnzM06hs?;YKCNw433E zojrIv9~}%!x<52-Re#pui^2vtEeoER@MhfNfSX#LThe46&0j>U3x^UTUO&gzbP}d~ zdHQ3SL5L9fBn3G!uaC9m?B7cFmADt<`QVzJBF>Fkg(YY^y0tddL%?MD1HgYCj`&TvshF zEd!r1=7aR_FEsY${rh7`UdVicQoWrXBOQa9mRn+9rps&bYa`sTL%- z$&zOxstfd5+r{m9qTUtZ1-@8Hc(NV=3q&y(cwBO!j(tpic%2ysRz@YN{Mr_Pzlv^d z6uG?~l|DWIfbu$o7feeE>OkUmpI4Bugg`xDX+^^2a_Rm`XK=s%`a_26`D#P_5jK_k zi;~tk>{sGD=J``c9$_%YgWK&V@hvgv(I3L)TrJNh%*;=Q!QGyqSwVY_6~Ykuipk4E z)5yR*xuVU;+bGUNa64dUDda{=c0N0z z$|FqXzvzMGRyupQ^v@d!gK>F_s{4n$UMiR2619XhI!E6`A#1 zxZ)nR#(++pnq>19EvvRDMC#ixEc-o%Xx-t?;9xDKNB}k|VjmLUE|rc6)MSpm(ZxXn za8%W9(`;6E>{RA9;q%jRh1&C}B=*wC&$cuPb$FJ0($yZ8Mjr|(I3 zwbCSRh=eQ*6+i0^Kch`eI7$|mXjN;xGjB=H?I%-sc3@l}iw$fiPZj>jK`InHy((RJ z`*uFb(X8`a!}h__RI%z-7lmXLGz+CfmClu&Qqdd_9vSWRCiy&D+!CV#Y%>gIeBcLN zvvw^Bd)ONtyZoCYz|#8nHD|(-!OWRLd8|{dIVEwf5qEd^3c6%lZ(pBYKIdw?MyOJ2 zzKPTx--EOMF8BVJy$J36cjiCXXh5*7%j&?|`B32#)w8wA_94@@s$am!xSbi@P{yQc z!6%TDd7s2X1FcwvdBpuvZI~!0D!Vt~HbXMIHj+w2Rsj!)EWO)oF~1P-+xca){QI@F zC}1Geu+yB})ysT_?VngfBxp=}#;3Eb7C$alP?^Ui`YV)(64*k@UbmCwoy91ZL9>gF zLnk=skwlvC%2GWOGb_K8xq8cueN*FqOFRz zGl%o+pbnbkjHZCyL`C0N`iJ$w+Bt!b$^K ziU#CF;^RK1rVEsf!W)DwbDq;cX4WDigup*(7YW4FrB;-n$e*Kvg7BPEi& zqa#G_oFDT~cij)VHQxQh%@q*ZUIWbpHqC98f9*Q%*mBf+Y^&j8bwhusYP|4R3%xzq zVNr>j{Oe$~;Cf1hs?pRk_lfU{Qn)&R#{co~KuBeXRS%8z8OZZFkZ23y731j#_Z-7y z)7ga~TiqR$Dl(N4IWM6~N^Bn0HP+ORVjg&I^78VSXbC3A1zAft(EXEtgow(8lra(< 
zu0MYnCXi`(`4&@)dH>%HaC7^~)wQjvgiVwq{W=9n(Ii6`2Skhef!*tuUp{)j(SINK zE1Qg1bmi`+orr^D>Mg9Rr%U8{rqRlai`d-z>!6LM+oNX^5(;mK9Dm6MeFQN5EeQy! zR?JC>fEdVhk%-Gt9SUX4douNoqne4vfW~FZ-nRAE{54f*R*$Lu>B|SPpL?}%7PRh( z(ph?}19OoHk??demMKC&5=-(o!e#gOWTD)i#UVp%)S>szykOAE#83#quBEu` zk@jpmTprN0fc=?bVnlbk?3i@5d;3{yRwX!7p!7VGkOVpVKErij8dEE_$($v8G6{r_ zV^|b3{QUiZZbOglMc{+5q4uS`Md**~&9&qPN4sSk;VNZuj)}OYZOYsSpG>vCALvW& z8_Hr&-6{IX;MU~Z8B;JOK~KxVf;`bRnyIsr6AzI!yozxyklg}>^>1F~gQlA|PTTy( zsI&<>({+l`zl&#+W?;?>L8yz+^$c|@dfSx2#XDuTsS>nn3VRIMFo==%48K&A0a*B~ zqj)=z+-LFJaSvU6Y)mWEmZCXe!8v{-u|`>ZQq;bD%p5PGTU9`lm^+nRPjlb~azg{q zmFNmn{3IR+;b}yLUz6Ra&JjIb54->LdD}eX`0Be)PfzjA4yFA!=E1|7{iOS?R;Z6k z9zi%*AhKfn5Wf3GTdKS7HE`IwaM!j0brUe5JurEzPVMTNgu!c%A)#zJK`ZvNCp^<)Qz`EBqgD6;!Y}}RfOZVP0xnk0jL}}CI z*stcCv(;A^`dv%E0RUq{!?i+BC;mU(?zuil-11UM@-R!HD-!HC6Gocc)%?K99Cc-3- z5Sc3niWZ|J-6ZZ2S(6iP@~OI#7}g9x>HwKjaZJK~+pH(x`Nmm_7ghyq6J+Yu^$7>*X z3M?8>G^IW{Rk3VsTWsZWL~)|?cI8Fn$wXti8l$}&Mk*axKB9w@Ctr0*6#l*zvHOf9 z+rx`y?N|ExYdO$9^wK?Wu=`T2$(fy6R(Im-YRwy;-AJSJHu3$5@!He7cW9O z)YLEFXMQKWI3!N2dogjk7E;c+-JDRx zUY(lG6zn<4S8N%1Ifg4YT=mqMk7+s_>{o6uJtZ)9s=9WZ&M@blxGr9ei(9kp1TEi; z&Z_M-f69+lLP!~D#2IpJiVp<*(dE>wo?k4Zl{zHlFU3fE@`2ITRJmzNtx+sQp5O#G zjn25sFQ8wqlH=o*Fh36}7`31`Pttf0PT*~usJ=XBsi>1cDWqBKaMH(!ux@V9dX z=>BCTI~hV89=|C{-H0Uj2lzo=6HB@NsK?38!|j~*XXRSZj~!!FH^<{q@Y@8q(|C$Uy*%^4oS?#r zU!!%qg;;I88E7?iAyhT?!DJP>U49PUmT?}*v*T+etvOF{>3K4CrWb|Vl3F&bU$r|w z_M%e%@1{LQE3h(A5P=?B&7kK$ynDIgxb^E)V% zLpwclDs8~Q*3Q*I?pAAK_%Hp{u!K%|1ygad_iNLLH74?|E&h2?&CGPvs*U=$=TF!` z9F1!3|7F@Bd$Zd`>t}ydNye|)lgRYZN2|$JXq$w*gFx7AckGHQ%XSSo-PgEyhpPsG zceXpU#gF;lXExvKweL0%i6Bhuz)t}n#KyK54^Xs-AVmAV6Xt4Xk@hHTBiWV*kQ}T; z#3q{!jEYuPensfh(xbV#yZ4-Ml$I(f&BQD`W)l}Lf5INp0eWF?ml2=bQQ(5nn9jzS zofE`eQFqA2s++up?*Fv|AYmp4?9;HF^?EWzp@$TpSIsS@W#@V|n82Tzz!@rbR4fB| z5wDzl-NwM(6BsWnj|wdD3_E9GLU=5&uR+J*C7kir>a@UC zkMK1Gm7%%o=cn<-S-jru1X4;zCuAfE1LCl*KRrGI`~s6!Q#@FRV>w7saq|4G&DSSH zXh1Hnl%XN`-< z+g^u#8!1N1kkY`g#{m?zK2P2Uc3cVNff}NRczj?J3JCOHs()7TAgK zm!bbDZ=s0{x|YPOMAx$1UYS);!3DP1>!g|@e+Wg9^iyp{P%C1ZI1>}QpYC$Gqo5XM zFQaVtR;ETU$2G#$bzpa*u{P07|eJ4p3_Jd4DHTi!I<2mG82Wclp|m4Dbo91 zH4gD_I}R|-`>ETn3PQZIxX^-&i{&>alsm7zAx2NnEC$2aDKHAa&sQwbfaK(910^L> zSM&coX;o2WUR_9D(Dl|O^7-eXPIx41g3i zip^8ai32{L&d!3_Emq%jeVLi^;+(#495Md1Lv$hSd3ctC%Asz1?Xb_ z&t#r0m1li2^jPjFMQW>U%kA2jIw?<`EkOjO3!PfH-=Q&FIz9Wh&YY15*O;yuJDd)+ zH2+PPetRa=@LAs{-sHhe^WlFA(E*B!n;%_&ObjiTxC-kq6A%zU1LEa7C|giG*_T4e ziW7aSgt!6ajmV;%$s{yMR^nq%u46Lme+`O}|ByxlDs&a;h>D0Ghjr}E6rTA#{-O!-(kzQyVmo4OfUdYliWLHnR029 GZ~p-h#4;ZM literal 0 HcmV?d00001 diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/customize.py b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/customize.py new file mode 100644 index 000000000..f38079aca --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-tiny-to-experiment/customize.py @@ -0,0 +1,560 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +import cmind as cm +from cmind import utils + +import os +import subprocess +import json + +file_summary_json = 'mlperf-inference-summary.json' +file_result = 'cm-result.json' + +fix_benchmark_names = {'anomaly_detection': 'ad', + 'image_classification': 
'ic', + 'keyword_spotting': 'kws', + 'visual_wake_words': 'vww'} + + +def preprocess(i): + + env = i['env'] + + cur_dir = os.getcwd() + + # Query cache for results dirs + r = cm.access({'action': 'find', + 'automation': 'cache,541d6f712a6b464e', + 'tags': 'get,repo,mlperf-tiny-results'}) + if r['return'] > 0: + return r + + lst = r['list'] + + for c in lst: + path = os.path.join(c.path, 'repo') + + if os.path.isdir(path): + meta = c.meta + + tags = meta['tags'] + + version = '' + for t in tags: + if t.startswith('version-'): + version = 'v' + t[8:] + break + + r = convert_repo_to_experiment(path, version, env) + if r['return'] > 0: + return r + + print('') + + return {'return': 0} + + +def convert_repo_to_experiment(path, version, env): + print('') + print('Processing MLPerf repo from CM cache path: {}'.format(path)) + print('* Version: {}'.format(version)) + + cur_dir = os.getcwd() + + # Get Git URL + os.chdir(path) + + burl = subprocess.check_output( + ['git', 'config', '--get', 'remote.origin.url']) + url = burl.decode('UTF-8').strip() + + print('* Git URL: {}'.format(url)) + + # Create virtual experiment entries + experiments = {} + + for division in ['closed', 'open']: + p1 = os.path.join(path, division) + if os.path.isdir(p1): + print(' * Processing division: {}'.format(division)) + + companies = os.listdir(p1) + + for company in companies: + p2 = os.path.join(p1, company) + if os.path.isdir(p2): + print(' * Processing company: {}'.format(company)) + + presults = os.path.join(p2, 'results') + psystems = os.path.join(p2, 'systems') + pcode = os.path.join(p2, 'code') + + if os.path.isdir(presults) and os.path.isdir( + psystems) and os.path.isdir(pcode): + # Exception for OctoML + presults2 = [presults] + + if company == 'OctoML' and version == 'v1.0': + presults2 = [] + + p3 = os.listdir(presults) + for p3x in p3: + p3y = os.path.join(presults, p3x) + if os.path.isdir(p3y): + presults2.append(p3y) + + for presult in presults2: + systems = os.listdir(presult) + for system in systems: + psystem = os.path.join(presult, system) + if os.path.isdir(psystem): + print( + ' * Processing result for system: {}'.format(system)) + + # Check system file + psystem_desc = os.path.join( + psystems, system + '.json') + psystem_dict = {} + + print( + ' File: {}'.format(psystem_desc)) + + # Check exceptions + if version == 'v1.0': + if company == 'OctoML': + x = os.path.basename(presult) + psystem_desc = os.path.join( + psystems, + 'system_description_' + + system.replace( + '-', + '') + + '_' + + x + + '.json') + elif company == 'STMicroelectronics': + psystem_desc = os.path.join( + psystems, system, system + '_system_description.json') + if not os.path.isfile( + psystem_desc): + psystem_desc = os.path.join( + psystems, system, system.replace( + '-', '_') + '_system_description.json') + elif company == 'syntiant': + psystem_desc = os.path.join( + psystems, system, system + '.json') + elif company == 'hls4ml': + psystem_desc = os.path.join( + psystems, 'system_description_pynq.json') + elif version == 'v0.7': + if company == 'renesas': + psystem_desc = os.path.join( + psystems, system + '_System_Description.json') + elif company == 'STMicroelectronics': + psystem_desc = os.path.join( + psystems, system, system + '_system_description.json') + if not os.path.isfile( + psystem_desc): + psystem_desc = os.path.join( + psystems, system, system.replace( + '-', '_') + '_system_description.json') + elif company == 'syntiant': + psystem_desc = os.path.join( + psystems, system, system + '.json') + elif company == 
'hls4ml-finn': + psystem_desc = os.path.join( + psystems, 'system_description_' + system[:4] + '.json') + + if os.path.isfile(psystem_desc): + x = '' + if version == 'v1.0': + if company == 'OctoML': + x = '}\n\t"' + elif company == 'syntiant': + x = '"\n\t"' + elif company == 'hls4ml': + x = 'dummy' + elif version == 'v0.7': + if company == 'syntiant': + x = '"\n\t"' + + if x != '': + r = utils.load_txt(psystem_desc) + if r['return'] > 0: + return r + + s = r['string'] + + j = s.find(x) + if j >= 0: + s = s[:j + 1] + ',' + s[j + 1:] + + if s.endswith(',\n'): + s = s[:-2] + '}' + + psystem_dict = json.loads(s) + + else: + r = utils.load_json(psystem_desc) + if r['return'] > 0: + return r + psystem_dict = r['meta'] + + else: + print( + ' * Warning: system description not found in {}'.format(psystem_desc)) + input( + ' Press to continue') + + for benchmark in os.listdir(psystem): + pbenchmark = os.path.join( + psystem, benchmark) + if os.path.isdir(pbenchmark): + print( + ' * Processing benchmark: {}'.format(benchmark)) + + models = [''] + + # May have retrained models + pperf = os.path.join( + pbenchmark, 'performance', 'results.txt') + if not os.path.isfile(pperf): + pperf = os.path.join( + pbenchmark, 'performance', 'performance_results.txt') + + if not os.path.isfile(pperf): + # likely models + models = [] + + for model in os.listdir( + pbenchmark): + pmodel = os.path.join( + pbenchmark, model) + if os.path.isdir(pmodel): + models.append(model) + + for model in models: + + results = {} + + if model != '': + print( + ' * Processing model: {}'.format(model)) + pbenchmark = os.path.join( + psystem, benchmark, model) + + perf_file_type = 0 + pperf = os.path.join( + pbenchmark, 'performance', 'results.txt') + if not os.path.isfile(pperf): + pperf = os.path.join( + pbenchmark, 'performance', 'performance_results.txt') + perf_file_type = 1 # outdated/weird + + paccuracy = os.path.join( + pbenchmark, 'accuracy', 'results.txt') + if not os.path.isfile( + paccuracy): + paccuracy = os.path.join( + pbenchmark, 'accuracy', 'accuracy_results.txt') + + penergy = os.path.join( + pbenchmark, 'energy', 'results.txt') + + if os.path.isfile( + pperf) and os.path.isfile(paccuracy): + r = utils.load_txt(pperf) + if r['return'] > 0: + return r + + s = r['string'] + + median_throughput = 0 + + x1 = 'Median throughput is ' if perf_file_type == 0 else 'Throughput :' + x2 = 21 if perf_file_type == 0 else 18 + + j = s.find(x1) + if j >= 0: + j1 = s.find( + ' inf./sec.', j) + if j1 >= 0: + median_throughput = float( + s[j + x2:j1].strip()) + results['median_throughput'] = median_throughput + results['median_throughput_metric'] = 'inf./sec.' 
+ results['Result'] = median_throughput + results['_Result'] = median_throughput + + if median_throughput == 0: + print( + ' * Warning: median_throughput was not detected in {}'.format(pperf)) + input( + ' Press to continue') + + r = utils.load_txt( + paccuracy, split=True) + if r['return'] > 0: + return r + + lines = r['list'] + + found = False + + for line in lines: + j = line.find( + 'ulp-mlperf: ') + if j >= 0: + j1 = line.find( + ':', j + 12) + if j1 >= 0: + accuracy_key = 'accuracy_' + \ + line[j + 12:j1] + value = line[j1 + 2:] + + if value.endswith( + '%'): + value = value[:-1] + results[accuracy_key + + '_metric'] = '%' + + value = float( + value) + + results[accuracy_key] = value + + if not found: + # first + # value + results['Accuracy'] = value + results['_Accuracy'] = value + + found = True + + if not found: + print( + ' * Warning: accuracy not found in the file {}'.format(paccuracy)) + input( + ' Press to continue') + + else: + print( + ' * Warning: performance or accuracy files are not present in this submission') + input( + ' Press to continue') + + if os.path.isfile(penergy): + r = utils.load_txt(penergy) + if r['return'] > 0: + return r + + s = r['string'] + + median_throughput = 0 + + j = s.find( + 'Median throughput is ') + if j >= 0: + j1 = s.find( + ' inf./sec.', j) + if j1 >= 0: + median_throughput = float( + s[j + 21:j1]) + + results['median_energy_median_throughput'] = median_throughput + results['median_energy_median_throughput_metric'] = 'inf./sec.' + + if median_throughput == 0: + print( + ' * Warning: median_throughput was not detected in {}'.format(penergy)) + input( + ' Press to continue') + else: + median_energy_cost = 0 + + j = s.find( + 'Median energy cost is ') + if j >= 0: + j1 = s.find( + ' uJ/inf.', j) + if j1 >= 0: + median_energy_cost = float( + s[j + 22:j1]) + + results['median_energy_cost'] = median_energy_cost + results['median_energy_cost_metric'] = 'uj/inf.' 
+ + if median_energy_cost == 0: + print( + ' * Warning: median_energy_cost was not detected in {}'.format(penergy)) + input( + ' Press to continue') + + print( + ' * Results dict: {}'.format(results)) + + # Finalizing keys + results.update(psystem_dict) + + xbenchmark = benchmark if benchmark not in fix_benchmark_names else fix_benchmark_names[ + benchmark] + + results['git_url'] = url + \ + '/tree/master/' + division + '/' + company + + results['version'] = version + results['__version'] = version + results['Organization'] = company + results['__Organization'] = company + results['Division'] = division + results['Benchmark'] = xbenchmark + results['__System'] = system + + if model != '': + results['Model'] = model + results['__Model'] = model + + # Prepare experiment name + cm_name = 'mlperf-tiny--{}--' + division + '--' + xbenchmark + print( + ' * CM experiment name: {}'.format(cm_name)) + + name_all = cm_name.format( + 'all') + name_ver = cm_name.format( + version) + + for name in [ + name_all, name_ver]: + if name not in experiments: + experiments[name] = [] + experiments[name].append( + results) + + else: + print( + ' * Warning: some directories are not present in this submission') + input(' Press to continue') + + os.chdir(cur_dir) + + r = utils.save_json(file_summary_json, experiments) + if r['return'] > 0: + return r + + env_target_repo = env.get('CM_IMPORT_TINYMLPERF_TARGET_REPO', '').strip() + target_repo = '' if env_target_repo == '' else env_target_repo + ':' + + # Checking experiment + print('') + for name in experiments: + print(' Preparing experiment artifact "{}"'.format(name)) + + tags = name.split('--') + if 'mlperf' not in tags: + tags.insert(0, 'mlperf') + + # Checking if experiment already exists + r = cm.access({'action': 'find', + 'automation': 'experiment,a0a2d123ef064bcb', + 'artifact': target_repo + name}) + if r['return'] > 0: + return r + + lst = r['list'] + + if len(lst) == 0: + r = cm.access({'action': 'add', + 'automation': 'experiment,a0a2d123ef064bcb', + 'artifact': target_repo + name, + 'tags': tags}) + if r['return'] > 0: + return r + + path = r['path'] + else: + path = lst[0].path + + results = experiments[name] + + # Check if already date directory + dirs = os.listdir(path) + + path2 = '' + for d in dirs: + dd = os.path.join(path, d) + if os.path.isdir(dd): + path2 = dd + break + + if path2 == '': + + r = utils.get_current_date_time({}) + if r['return'] > 0: + return r + + date_time = r['iso_datetime'].replace(':', '-').replace('T', '.') + + path2 = os.path.join(path, date_time) + + os.makedirs(path2) + + # Check if cm-result.json + fresult = os.path.join(path2, file_result) + + if os.path.isfile(fresult): + r = utils.load_json(fresult) + if r['return'] > 0: + return r + + existing_results = r['meta'] + + # Need to check which ones to add + for result in existing_results: + found = False + + # New results + for result2 in results: + matched = True + + # Need to iterate over keys in the new results since old + # results can have more keys (derivates, etc) + for k in result2: + if k != 'uid': + if k not in result or result2[k] != result[k]: + matched = False + break + + if matched: + found = True + break + + if not found: + results.append(result) + + # Check extra keys + final_results = [] + for result in results: + # Generate UID + if 'uid' not in result: + r = utils.gen_uid() + if r['return'] > 0: + return r + + result['uid'] = r['uid'] + + # Write results + r = utils.save_json(fresult, results) + if r['return'] > 0: + return r + + return {'return': 0} 
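For orientation, each experiment entry written by the importer above stores its results as a plain JSON list in `cm-result.json`, one dict per result, so the data can be inspected without CM. Below is a minimal sketch, assuming an entry named `mlperf-tiny--v1.0--closed--ic` created by the code above (the name pattern comes from `cm_name` above; the concrete path is a hypothetical example):

```python
# Minimal sketch (not upstream code): list the imported results from one
# CM experiment entry. Key names follow the 'results' dict populated above;
# the entry path is a hypothetical example.
import json
import os

experiment_path = 'mlperf-tiny--v1.0--closed--ic'  # hypothetical entry location

for d in sorted(os.listdir(experiment_path)):
    fresult = os.path.join(experiment_path, d, 'cm-result.json')
    if not os.path.isfile(fresult):
        continue  # skip anything that is not a date-time results directory
    with open(fresult) as f:
        results = json.load(f)
    for r in results:
        # 'Result' holds the median throughput in inf./sec. (see above)
        print(r.get('__Organization'), r.get('__System'), r.get('Result'))
```

The double-underscore keys (`__Organization`, `__System`, `__version`) appear to be raw values kept alongside the display fields, as populated in `convert_repo_to_experiment` above.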
diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/README-extra.md
new file mode 100644
index 000000000..05b4f592d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/README-extra.md
@@ -0,0 +1,54 @@
+# About
+
+This portable script converts raw results from the [MLPerf™ Training benchmark](https://github.com/mlcommons/training)
+to the [MLCommons CM format](https://github.com/mlcommons/ck) for the [Collective Knowledge Playground](https://x.cKnowledge.org).
+
+The goal is to make it easier for the community to analyze MLPerf results,
+add derived metrics such as performance/Watt and constraints,
+and link reproducibility reports.
+
+Aggregated results are available in [this MLCommons repository](https://github.com/mlcommons/cm4mlperf-results).
+
+You can see these results at the [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-training,all).
+
+## Usage
+
+We have tested this portable CM script on Ubuntu.
+
+Install the [MLCommons CM automation language](https://github.com/mlcommons/ck/blob/master/docs/installation.md).
+
+Pull the MLCommons CK repository with automation recipes for interoperable MLOps:
+```bash
+cm pull repo mlcommons@cm4mlops --checkout=dev
+```
+
+Install the repositories with raw MLPerf training benchmark results:
+```bash
+cmr "get git repo _repo.https://github.com/mlcommons/training_results_v1.0" --extra_cache_tags=mlperf-training-results,version-1.0 --branch=master --depth=""
+cmr "get git repo _repo.https://github.com/mlcommons/training_results_v1.1" --extra_cache_tags=mlperf-training-results,version-1.1 --branch=main --depth=""
+cmr "get git repo _repo.https://github.com/mlcommons/training_results_v2.0" --extra_cache_tags=mlperf-training-results,version-2.0 --branch=main --depth=""
+cmr "get git repo _repo.https://github.com/mlcommons/training_results_v2.1" --extra_cache_tags=mlperf-training-results,version-2.1 --branch=main
+cmr "get git repo _repo.https://github.com/mlcommons/training_results_v3.0" --extra_cache_tags=mlperf-training-results,version-3.0
+```
+
+You can install a private submission repository as follows:
+```bash
+cm run script "get git repo _repo.https://github.com/mlcommons/submissions_training_v3.0" --extra_cache_tags=mlperf-training-results,version-3.0-private --branch=main --depth=4
+```
+
+Convert raw MLPerf training results into CM experiment entries:
+```bash
+cmr "import mlperf training to-experiment"
+```
+
+Visualize the results on your local machine via the CK playground GUI:
+```bash
+cmr "gui _playground"
+```
+
+These results are also available in the [public CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-training,all).
+
+# Contact us
+
+This project is maintained by the [MLCommons taskforce on automation and reproducibility](https://cKnowledge.org/mlcommons-taskforce).
+Join our [Discord server](https://discord.gg/JjWNWXKxwT) to ask questions, provide your feedback and participate in further developments. diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/README.md b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/README.md new file mode 100644 index 000000000..8f5e4779b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/import-mlperf-training-to-experiment](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/import-mlperf-training-to-experiment) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/_cm.yaml new file mode 100644 index 000000000..7c7013e5e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/_cm.yaml @@ -0,0 +1,39 @@ +# Identification of this CM script +alias: import-mlperf-training-to-experiment +uid: b13d9b7337414f17 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "MLPerf benchmark support" + +developers: "[Grigori Fursin](https://cKnowledge.org/gfursin)" + +# User-friendly tags to find this CM script +tags: + - import + - mlperf + - training + - mlperf-training + - experiment + - 2experiment + - to-experiment + +input_mapping: + target_repo: CM_IMPORT_MLPERF_TRAINING_TARGET_REPO + +# Dependencies on other CM scripts +deps: + + # Detect host OS features + - tags: detect,os + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + - names: + - python + - python3 + tags: get,python3 + + - tags: get,mlperf,logging diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/customize.py b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/customize.py new file mode 100644 index 000000000..457fabe84 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/customize.py @@ -0,0 +1,364 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +import cmind as cm +from cmind import utils + +import os +import subprocess +import csv +import json +import copy + + +file_summary = 'summary.csv' +file_summary_json = 'mlperf-training-summary-{}.json' +file_summary2 = 'summary.xlsx' +file_result = 'cm-result.json' + +model2task = { + "resnet": "image-classification", + "maskrcnn": "object-detection-heavy-weight", + "ssd": "object-detection-light-weight", + "minigo": "reinforcement-learning", + "rnnt": "speech-recognition", + "bert": "language-processing", + "dlrm": "recommendation", + "3dunet": "image-segmentation" +} + +model2dataset = { + "resnet": "ImageNet", + "maskrcnn": "COCO", + "ssd": "OpenImages", + "minigo": "Go", + "rnnt": "LibriSpeech", + "bert": "Wikipedia", + "dlrm": "1TB Clickthrough", + "3dunet": "KiTS19" +} + + +model2accuracy = { + "resnet": 75.9, + "maskrcnn": 0.377, + "ssd": 34.0, + "minigo": 50, + "rnnt": 0.058, + "bert": 
0.72, + "dlrm": 0.8025, + "3dunet": 0.908 +} + +model2accuracy_metric = { + "resnet": "% classification", + "maskrcnn": "Box min AP", + "ssd": "% mAP", + "minigo": "% win rate vs. checkpoint", + "rnnt": "Word Error Rate", + "bert": "Mask-LM accuracy", + "dlrm": "AUC", + "3dunet": "Mean DICE score" +} + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + cur_dir = os.getcwd() + + # Clean summary files + for f in [file_summary, file_summary2]: + if os.path.isfile(f): + os.remove(f) + + # Query cache for results dirs + r = cm.access({'action': 'find', + 'automation': 'cache,541d6f712a6b464e', + 'tags': 'get,repo,mlperf-training-results'}) + if r['return'] > 0: + return r + + lst = r['list'] + + for c in lst: + path = os.path.join(c.path, 'repo') + + if os.path.isdir(path): + + meta = c.meta + + tags = meta['tags'] + + version = '' + for t in tags: + if t.startswith('version-'): + version = t[8:] + break + + # Run MLPerf logger + run_script_input = i['run_script_input'] + automation = i['automation'] + + env['CM_MLPERF_TRAINING_REPO_PATH'] = path + env['CM_MLPERF_TRAINING_CURRENT_DIR'] = cur_dir + env['CM_MLPERF_TRAINING_REPO_VERSION'] = version + + print('') + print('Repo path: {}'.format(path)) + print('Repo version: {}'.format(version)) + + r = automation.run_native_script({'run_script_input': run_script_input, + 'env': env, + 'script_name': 'run_mlperf_logger'}) + if r['return'] > 0: + return r + + r = convert_summary_csv_to_experiment(path, version, env) + if r['return'] > 0: + return r + + return {'return': 0} + + +def convert_summary_csv_to_experiment(path, version, env): + print('* Processing MLPerf training results repo in cache path: {}'.format(path)) + + cur_dir = os.getcwd() + + # Get Git URL + os.chdir(path) + + burl = subprocess.check_output( + ['git', 'config', '--get', 'remote.origin.url']) + url = burl.decode('UTF-8').strip() + + print(' Git URL: {}'.format(url)) + + os.chdir(cur_dir) + + if not os.path.isfile(file_summary): + return {'return': 1, + 'error': '{} was not created'.format(file_summary)} + else: + summary = [] + + with open(file_summary, encoding='utf-8') as fcsv: + csv_reader = csv.DictReader(fcsv) + + for rows in csv_reader: + result = {} + + keys = rows.keys() + + for k in keys: + v = rows[k] + + if v == 'False': + v = False + elif v == 'True': + v = True + else: + try: + v = float(v) + + if v == int(v): + v = int(v) + except ValueError: + pass + + result[k] = v + + # Add extra tags + if url != '': + result['git_url'] = url + + location = result.get('Location', '') + if location != '': + result['url'] = url + '/tree/master/' + location + + if result.get('Accuracy', 0) > 0: + result['Accuracy_div_100'] = float( + '{:.5f}'.format(result['Accuracy'] / 100)) + + # Add ratios + + # Append to summary + summary.append(result) + + r = utils.save_json(file_summary_json.format(version), summary) + if r['return'] > 0: + return r + + # Create virtual experiment entries + experiment = {} + + for result in summary: + + for model in model2task: + if result.get(model, '') != '': + result1 = {} + + result1['Result'] = result[model] + result1['Result_Units'] = 'min.' 
+ result1['Accuracy'] = model2accuracy[model] + result1['Accuracy_Metric'] = model2accuracy_metric[model] + result1['Task'] = model2task[model] + result1['Benchmark'] = model2task[model] + result1['Dataset'] = model2dataset[model] + result1['Model_ID'] = model + + result1['_Result'] = result[model] + result1['_Result_Units'] = 'min.' + result1['_Accuracy'] = model2accuracy[model] + result1['_Accuracy_Metric'] = model2accuracy_metric[model] + result1['_Task'] = model2task[model] + result1['_Dataset'] = model2dataset[model] + result1['_Model_ID'] = model + + result1['version'] = version + result1['_version'] = version + result1['Organization'] = result['submitter'] + result1['_Organization'] = result['submitter'] + result1['_System'] = result['system'] + + for k in result: + if k == model or k not in model2task: + result1[k] = result[k] + + xdivision = result['division'] + + name = 'mlperf-training--{}--' + \ + xdivision + '--' + model2task[model] + + name_all = name.format('all') + name_ver = name.format(version) + + for name in [name_all, name_ver]: + if name not in experiment: + experiment[name] = [] + experiment[name].append(result1) + + # Create or update experiment entries + env_target_repo = env.get( + 'CM_IMPORT_MLPERF_TRAINING_TARGET_REPO', '').strip() + target_repo = '' if env_target_repo == '' else env_target_repo + ':' + + print('') + for name in experiment: + print(' Preparing experiment artifact "{}"'.format(name)) + + tags = name.split('--') + if 'mlperf' not in tags: + tags.insert(0, 'mlperf') + + # Checking if experiment already exists + r = cm.access({'action': 'find', + 'automation': 'experiment,a0a2d123ef064bcb', + 'artifact': target_repo + name}) + if r['return'] > 0: + return r + + lst = r['list'] + + if len(lst) == 0: + r = cm.access({'action': 'add', + 'automation': 'experiment,a0a2d123ef064bcb', + 'artifact': target_repo + name, + 'tags': tags}) + if r['return'] > 0: + return r + + path = r['path'] + else: + path = lst[0].path + + results = experiment[name] + + # Check if a date directory already exists + dirs = os.listdir(path) + + path2 = '' + for d in dirs: + dd = os.path.join(path, d) + if os.path.isdir(dd): + path2 = dd + break + + if path2 == '': + + r = utils.get_current_date_time({}) + if r['return'] > 0: + return r + + date_time = r['iso_datetime'].replace( + ':', '-').replace('T', '.') + + path2 = os.path.join(path, date_time) + + os.makedirs(path2) + + # Check if cm-result.json already exists + fresult = os.path.join(path2, file_result) + + if os.path.isfile(fresult): + r = utils.load_json(fresult) + if r['return'] > 0: + return r + + existing_results = r['meta'] + + # Need to check which ones to add + for result in existing_results: + found = False + + # New results + for result2 in results: + matched = True + + # Need to iterate over keys in the new results since + # old results can have more keys (derivatives, etc.) + for k in result2: + if k != 'uid': + if k not in result or result2[k] != result[k]: + matched = False + break + + if matched: + found = True + break + + if not found: + results.append(result) + + # Finalize results and assign UIDs + final_results = [] + for result in results: + # Generate UID + if 'uid' not in result: + r = utils.gen_uid() + if r['return'] > 0: + return r + + result['uid'] = r['uid'] + + # Write results + r = utils.save_json(fresult, results) + if r['return'] > 0: + return r + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/run_mlperf_logger.sh
b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/run_mlperf_logger.sh new file mode 100644 index 000000000..112395d5f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/import-mlperf-training-to-experiment/run_mlperf_logger.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +echo "" +${CM_PYTHON_BIN_WITH_PATH} -m mlperf_logging.result_summarizer "${CM_MLPERF_TRAINING_REPO_PATH}/{*}" training ${CM_MLPERF_TRAINING_REPO_VERSION}.0 -csv summary.csv +# --xls summary.xlsx # Doesn't work with the latest pandas (needs .close() instead of .save()) + +#${CM_MLPERF_LOGGING_SRC_PATH}/scripts/verify_for_v${CM_MLPERF_TRAINING_REPO_VERSION}_training.sh "${CM_MLPERF_TRAINING_REPO_PATH}/ASUSTeK" + +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/install-apt-package/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-apt-package/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-apt-package/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-apt-package/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-apt-package/_cm.yaml new file mode 100644 index 000000000..d5ba87af7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-apt-package/_cm.yaml @@ -0,0 +1,21 @@ +alias: install-apt-package +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_env: + CM_CLEAN_DIRS: bin + CM_SUDO: sudo +deps: +- tags: detect,os +new_env_keys: [] +tags: +- get +- install +- apt-package +- package +uid: 3688efcd8f324546 +variations: + package.#: + env: + CM_APT_PACKAGE_NAME: '#' diff --git a/cmx4mlops/cmx4mlops/repo/script/install-apt-package/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-apt-package/customize.py new file mode 100644 index 000000000..608d7b063 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-apt-package/customize.py @@ -0,0 +1,38 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import re + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + state = i['state'] + package_name = env['CM_APT_PACKAGE_NAME'] + + install_cmd = env.get('CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD') + if not install_cmd: + return { + 'return': 1, 'error': 'Package manager installation command not detected for the given OS'} + + sudo = env.get('CM_SUDO', '') + + env['CM_APT_INSTALL_CMD'] = sudo + ' ' + install_cmd + ' ' + package_name + + if env.get('CM_APT_CHECK_CMD', + '') != '' and env['CM_APT_INSTALL_CMD'] != '': + env['CM_APT_INSTALL_CMD'] = f"""{env['CM_APT_CHECK_CMD']} || {env['CM_APT_INSTALL_CMD']}""" + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-apt-package/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-apt-package/run.sh new file mode 100644 index 000000000..d72b2c9d6 --- /dev/null +++
b/cmx4mlops/cmx4mlops/repo/script/install-apt-package/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +cmd=${CM_APT_INSTALL_CMD} +echo $cmd +eval $cmd +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/install-aws-cli/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-aws-cli/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-aws-cli/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-aws-cli/README.md b/cmx4mlops/cmx4mlops/repo/script/install-aws-cli/README.md new file mode 100644 index 000000000..bf8abb0f4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-aws-cli/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Cloud-automation/install-aws-cli](https://docs.mlcommons.org/cm4mlops/scripts/Cloud-automation/install-aws-cli) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-aws-cli/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-aws-cli/_cm.yaml new file mode 100644 index 000000000..6fda1f71e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-aws-cli/_cm.yaml @@ -0,0 +1,21 @@ +alias: install-aws-cli +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Cloud automation +deps: +- tags: detect,os +env: + CM_CURL_URL: https://awscli.amazonaws.com/awscli-exe-[OS]-[PLATFORM].zip +post_deps: +- skip_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + tags: get,aws-cli +tags: +- install +- script +- aws-cli +- aws +- cli +uid: 4d3efd333c3f4d36 diff --git a/cmx4mlops/cmx4mlops/repo/script/install-aws-cli/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-aws-cli/customize.py new file mode 100644 index 000000000..d7fcced37 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-aws-cli/customize.py @@ -0,0 +1,29 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-aws-cli/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-aws-cli/run.sh new file mode 100644 index 000000000..cc3abf3f9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-aws-cli/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +CUR_DIR=$PWD +echo "******************************************************" +echo $CM_CURL_URL +CM_CURL_URL=${CM_CURL_URL//"[OS]"/${CM_HOST_OS_TYPE}} +CM_CURL_URL=${CM_CURL_URL//"[PLATFORM]"/${CM_HOST_PLATFORM_FLAVOR}} +echo $CM_CURL_URL +echo "CM_CURL_URL=${CM_CURL_URL}" >> tmp-run-env.out +FILE="awscliv2.zip" +rm -rf ${FILE} +curl "${CM_CURL_URL}" -o "${FILE}" +unzip ${FILE} +sudo ./aws/install diff --git 
a/cmx4mlops/cmx4mlops/repo/script/install-bazel/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-bazel/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-bazel/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-bazel/README.md b/cmx4mlops/cmx4mlops/repo/script/install-bazel/README.md new file mode 100644 index 000000000..350319de1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-bazel/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/install-bazel](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/install-bazel) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-bazel/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-bazel/_cm.yaml new file mode 100644 index 000000000..65d913604 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-bazel/_cm.yaml @@ -0,0 +1,22 @@ +alias: install-bazel +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_version: 7.0.2 +deps: +- tags: detect,os +env: + CM_WGET_URL: https://github.com/bazelbuild/bazel/releases/download/[VERSION]/bazel-[VERSION]-installer-[OS]-[PLATFORM].sh +new_env_keys: +- CM_BAZEL_* +post_deps: +- skip_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + tags: get,bazel +tags: +- install +- script +- bazel +uid: dfd3d2bf5b764175 diff --git a/cmx4mlops/cmx4mlops/repo/script/install-bazel/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-bazel/customize.py new file mode 100644 index 000000000..d6c2e1d87 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-bazel/customize.py @@ -0,0 +1,73 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + need_version = env.get('CM_VERSION', '') + if need_version == '': + return {'return': 1, + 'error': 'internal problem - CM_VERSION is not defined in env'} + + print(recursion_spaces + ' # Requested version: {}'.format(need_version)) + +# if 'CM_GIT_CHECKOUT' not in env: +# env['CM_GIT_CHECKOUT'] = 'releases/gcc-' + need_version + + if os_info['platform'] == 'windows': + prefix = '' + xos = 'windows' + platform = 'x86_64' + ext = '.exe' + else: + prefix = 'installer-' + xos = env['CM_HOST_OS_TYPE'] + platform = env['CM_HOST_PLATFORM_FLAVOR'] + ext = '.sh' + + filename = 'bazel-{}-{}{}-{}{}'.format(need_version, + prefix, + xos, + platform, + ext) + + url = 'https://github.com/bazelbuild/bazel/releases/download/{}/{}'.format( + need_version, filename) + + cur_dir = os.getcwd() + + if os_info['platform'] == 'windows': + bazel_bin = 'bazel.exe' + path = cur_dir + else: + bazel_bin = 
'bazel' + path = os.path.join(cur_dir, 'install', 'bin') + + env['CM_BAZEL_DOWNLOAD_URL'] = url + env['CM_BAZEL_DOWNLOAD_FILE'] = filename + + env['CM_BAZEL_INSTALLED_PATH'] = path + env['CM_BAZEL_BIN_WITH_PATH'] = os.path.join(path, bazel_bin) + + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-bazel/run-aarch64.sh b/cmx4mlops/cmx4mlops/repo/script/install-bazel/run-aarch64.sh new file mode 100644 index 000000000..0b8753243 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-bazel/run-aarch64.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +CUR_DIR=$PWD +echo "******************************************************" + +CM_WGET_URL=${CM_WGET_URL//"[OS]"/${CM_HOST_OS_TYPE}} +CM_WGET_URL=${CM_WGET_URL//"[PLATFORM]"/arm64} +CM_WGET_URL=${CM_WGET_URL//"[VERSION]"/${CM_VERSION}} +CM_WGET_URL=${CM_WGET_URL//"-installer-"/-} +CM_WGET_URL=${CM_WGET_URL//".sh"/} +echo "CM_WGET_URL=${CM_WGET_URL}" > tmp-run-env.out +BAZEL_SCRIPT="bazel-${CM_VERSION}-${CM_HOST_OS_TYPE}-arm64" + +INSTALL_DIR=${CUR_DIR} +rm -rf ${INSTALL_DIR}/bin +wget -c ${CM_WGET_URL} +if [ "${?}" != "0" ]; then exit 1; fi +chmod +x ${BAZEL_SCRIPT} +ln -s ${BAZEL_SCRIPT} bazel +if [ "${?}" != "0" ]; then exit 1; fi + +echo "CM_BAZEL_INSTALLED_PATH=${INSTALL_DIR}" >>tmp-run-env.out +echo "CM_BAZEL_BIN_WITH_PATH=${INSTALL_DIR}/${BAZEL_SCRIPT}" >>tmp-run-env.out + +echo "Bazel is installed to ${INSTALL_DIR} ..." diff --git a/cmx4mlops/cmx4mlops/repo/script/install-bazel/run.bat b/cmx4mlops/cmx4mlops/repo/script/install-bazel/run.bat new file mode 100644 index 000000000..7108a4758 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-bazel/run.bat @@ -0,0 +1,9 @@ +@echo off + +del /Q /S %CM_BAZEL_DOWNLOAD_FILE% +del /Q /S bazel.exe + +wget -c %CM_BAZEL_DOWNLOAD_URL% -O %CM_BAZEL_DOWNLOAD_FILE% --no-check-certificate +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +ren %CM_BAZEL_DOWNLOAD_FILE% bazel.exe diff --git a/cmx4mlops/cmx4mlops/repo/script/install-bazel/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-bazel/run.sh new file mode 100644 index 000000000..e5fe4651d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-bazel/run.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +CUR_DIR=$PWD + +echo "******************************************************" + +CM_WGET_URL=${CM_WGET_URL//"[OS]"/${CM_HOST_OS_TYPE}} +CM_WGET_URL=${CM_WGET_URL//"[PLATFORM]"/${CM_HOST_PLATFORM_FLAVOR}} +CM_WGET_URL=${CM_WGET_URL//"[VERSION]"/${CM_VERSION}} + +echo "CM_WGET_URL=${CM_WGET_URL}" >> tmp-run-env.out + +BAZEL_SCRIPT="bazel-${CM_VERSION}-installer-${CM_HOST_OS_TYPE}-${CM_HOST_PLATFORM_FLAVOR}.sh" + +INSTALL_DIR=${CUR_DIR} + +rm -rf ${INSTALL_DIR}/bin + +wget -c ${CM_WGET_URL} --no-check-certificate + +if [ "${?}" != "0" ]; then exit 1; fi + +chmod +x ${BAZEL_SCRIPT} + +./${BAZEL_SCRIPT} --bin=${INSTALL_DIR}"/bin" --base=${INSTALL_DIR}"/install" +if [ "${?}" != "0" ]; then exit 1; fi + +echo "Bazel is installed to ${INSTALL_DIR} ..." diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
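The install-bazel run scripts above all rely on the same convention: `CM_WGET_URL` from `_cm.yaml` carries `[OS]`, `[PLATFORM]`, and `[VERSION]` placeholders that each run script substitutes with bash `${var//pattern/replacement}` expansion. A quick sketch of the same expansion in Python; the URL template is the one from install-bazel/_cm.yaml, while the `linux`/`x86_64` values are illustrative:

```python
# Illustrative expansion of the [OS]/[PLATFORM]/[VERSION] placeholders
# used by CM_WGET_URL in install-bazel/_cm.yaml.
def expand_url(template: str, os_type: str, platform: str, version: str) -> str:
    return (template.replace('[OS]', os_type)
                    .replace('[PLATFORM]', platform)
                    .replace('[VERSION]', version))

template = ('https://github.com/bazelbuild/bazel/releases/download/'
            '[VERSION]/bazel-[VERSION]-installer-[OS]-[PLATFORM].sh')
print(expand_url(template, 'linux', 'x86_64', '7.0.2'))
# .../download/7.0.2/bazel-7.0.2-installer-linux-x86_64.sh
```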
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/README.md b/cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/README.md new file mode 100644 index 000000000..6113568d6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/install-cmake-prebuilt](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/install-cmake-prebuilt) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/_cm.yaml new file mode 100644 index 000000000..4107931fc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/_cm.yaml @@ -0,0 +1,26 @@ +alias: install-cmake-prebuilt +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_version: 3.28.3 +deps: +- tags: detect,os +new_env_keys: +- CM_CMAKE_* +- CM_GET_DEPENDENT_CACHED_PATH +- +PATH +- +LD_LIBRARY_PATH +- +C_INCLUDE_PATH +post_deps: +- skip_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + tags: get,cmake +tags: +- install +- prebuilt +- cmake +- prebuilt-cmake +- install-prebuilt-cmake +uid: 5a39ef05992b4103 diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/customize.py new file mode 100644 index 000000000..257b27ba1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/customize.py @@ -0,0 +1,135 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + need_version = env.get('CM_VERSION', '') + if need_version == '': + return {'return': 1, + 'error': 'internal problem - CM_VERSION is not defined in env'} + + print(recursion_spaces + ' # Requested version: {}'.format(need_version)) + + version_split = need_version.split(".") + while len(version_split) < 3: + version_split.append("0") + + need_version = ".".join(version_split) + + host_os_bits = env['CM_HOST_OS_BITS'] + + if os_info['platform'] != 'windows': + host_os_machine = env['CM_HOST_OS_MACHINE'] # ABI + + # Prepare package name + if os_info['platform'] == 'darwin': + if host_os_bits != '64': + return {'return': 1, + 'error': 'this package doesn\'t support non 64-bit MacOS'} + + package_name = 'cmake-' + need_version + '-macos-universal.tar.gz' + + elif os_info['platform'] == 'windows': + package_name = 'cmake-' + need_version + '-windows-' + + if host_os_bits == '64': + package_name += 'x86_64' + else: + package_name += 'i386' + + package_name += '.zip' + + else: + package_name = 'cmake-' + need_version + '-linux-' + + if host_os_machine.startswith( + 'arm') or host_os_machine.startswith('aarch'): + if host_os_bits == '64': + package_name += 'aarch64' + else: + return {'return': 1, 'error': 
'this script doesn\'t support armv7'} + else: + package_name += 'x86_64' + + package_name += '.tar.gz' + + package_url = 'https://github.com/Kitware/CMake/releases/download/v' + \ + need_version + '/' + package_name + + print(recursion_spaces + ' # Prepared package URL: {}'.format(package_url)) + + print('') + print('Downloading from {} ...'.format(package_url)) + + cm = automation.cmind + + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': package_url}) + if r['return'] > 0: + return r + + filename = r['filename'] + + # Check what to do with this file depending on OS + if os_info['platform'] == 'windows': + print('Unzipping file {}'.format(filename)) + + r = cm.access({'action': 'unzip_file', + 'automation': 'utils,dc2743f8450541e3', + 'strip_folders': 1, + 'filename': filename}) + if r['return'] > 0: + return r + + if os.path.isfile(filename): + print('Removing file {}'.format(filename)) + os.remove(filename) + + path_bin = os.path.join(os.getcwd(), 'bin') + path_include = os.path.join(os.getcwd(), 'include') + elif os_info['platform'] == 'darwin': + path_bin = os.path.join(os.getcwd(), 'CMake.app', 'Contents', 'bin') + path_include = os.path.join( + os.getcwd(), 'CMake.app', 'Contents', 'include') + else: + path_bin = os.path.join(os.getcwd(), 'bin') + path_include = os.path.join(os.getcwd(), 'include') + + env['CM_CMAKE_PACKAGE'] = filename + + env['CM_CMAKE_INSTALLED_PATH'] = path_bin + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() + + bin_name = 'cmake.exe' if os_info['platform'] == 'windows' else 'cmake' + + env['CM_CMAKE_BIN_WITH_PATH'] = os.path.join(path_bin, bin_name) + + # We don't need to check default paths here because we force install to + # cache + env['+PATH'] = [env['CM_CMAKE_INSTALLED_PATH']] + + if os.path.isdir(path_include): + env['+C_INCLUDE_PATH'] = [path_include] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/run.sh new file mode 100644 index 000000000..a7b91ddd2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cmake-prebuilt/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +echo "" +echo "Unarchiving ${CM_CMAKE_PACKAGE} ..." + +tar --strip 1 -xf ${CM_CMAKE_PACKAGE} +test $? -eq 0 || exit 1 + +rm -f ${CM_CMAKE_PACKAGE} +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/README.md b/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/README.md new file mode 100644 index 000000000..48a49d06e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/CUDA-automation/install-cuda-package-manager](https://docs.mlcommons.org/cm4mlops/scripts/CUDA-automation/install-cuda-package-manager) for the documentation of this CM script. 
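For reference, the package-name logic in install-cmake-prebuilt/customize.py above can be condensed as follows. This is a sketch of the branches actually present there, not an exhaustive map of Kitware's release naming:

```python
# Condensed sketch of the package-name branches in
# install-cmake-prebuilt/customize.py above.
def cmake_package_name(version: str, platform: str, bits: str, machine: str) -> str:
    if platform == 'darwin':                  # 64-bit only in the script above
        return f'cmake-{version}-macos-universal.tar.gz'
    if platform == 'windows':
        arch = 'x86_64' if bits == '64' else 'i386'
        return f'cmake-{version}-windows-{arch}.zip'
    if machine.startswith(('arm', 'aarch')):  # 64-bit only; armv7 is rejected
        return f'cmake-{version}-linux-aarch64.tar.gz'
    return f'cmake-{version}-linux-x86_64.tar.gz'

print('https://github.com/Kitware/CMake/releases/download/v3.28.3/'
      + cmake_package_name('3.28.3', 'linux', '64', 'x86_64'))
```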
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/_cm.yaml new file mode 100644 index 000000000..3acae7227 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/_cm.yaml @@ -0,0 +1,19 @@ +alias: install-cuda-package-manager +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: CUDA automation +deps: +- tags: detect,os +post_deps: +- skip_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + tags: get,cuda +tags: +- install +- package-manager +- cuda +- package-manager-cuda +- install-pm-cuda +uid: c1afdff8542f45be diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/customize.py new file mode 100644 index 000000000..72daf39cc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/customize.py @@ -0,0 +1,27 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/run-ubuntu.sh b/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/run-ubuntu.sh new file mode 100644 index 000000000..ff5bb8e19 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/run-ubuntu.sh @@ -0,0 +1 @@ +sudo apt-get install nvidia-cuda-toolkit diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/run.sh new file mode 100644 index 000000000..d52681cbf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cuda-package-manager/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash +CUR=${PWD} +echo "Package installation script not available yet for ${CM_HOST_OS_FLAVOR}" +exit 1 + diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/README-extra.md new file mode 100644 index 000000000..ca9a792ad --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/README-extra.md @@ -0,0 +1,4 @@ +# Notes + +This script is at an early prototyping (alpha) stage. It needs to be considerably updated and unified!
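Note how install-cuda-package-manager ships an OS-specific run-ubuntu.sh while its generic run.sh simply reports that no script is available yet and exits with an error. A simplified sketch of that flavor-based fallback follows; it is an assumption about how a CM-style dispatcher could select run scripts, not CM's actual implementation:

```python
# Simplified sketch (assumption, not CM's real dispatcher): prefer an
# OS-flavor-specific run script and fall back to the generic run.sh.
import os

def pick_run_script(script_dir: str, os_flavor: str) -> str:
    candidate = os.path.join(script_dir, 'run-{}.sh'.format(os_flavor))
    if os.path.isfile(candidate):
        return candidate
    return os.path.join(script_dir, 'run.sh')

print(pick_run_script('.', 'ubuntu'))
```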
+ diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/README.md b/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/README.md new file mode 100644 index 000000000..c8cd8831c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/CUDA-automation/install-cuda-prebuilt](https://docs.mlcommons.org/cm4mlops/scripts/CUDA-automation/install-cuda-prebuilt) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/_cm.yaml new file mode 100644 index 000000000..4d22a2d0c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/_cm.yaml @@ -0,0 +1,91 @@ +alias: install-cuda-prebuilt +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: CUDA automation +default_env: + CM_SUDO: sudo +default_version: 11.8.0 +deps: +- tags: detect,os +docker: + run: true +input_mapping: + install_prefix: CM_CUDA_INSTALL_PREFIX + local_run_file_path: CUDA_RUN_FILE_LOCAL_PATH + override-driver-check: CM_CUDA_DRIVER_INSTALL_OVERRIDE + skip_sudo: CUDA_SKIP_SUDO +new_env_keys: +- CM_CUDA_* +- CM_NVCC_* +post_deps: +- skip_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + tags: get,cuda +prehook_deps: +- env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_CUDA_RUN_FILE_PATH + CM_DOWNLOAD_LOCAL_FILE_PATH: <<<CUDA_RUN_FILE_LOCAL_PATH>>> + extra_cache_tags: cuda,run,file + force_cache: true + names: + - download-script + tags: download,file + update_tags_from_env_with_prefix: + _url.: + - WGET_URL +tags: +- install +- prebuilt +- cuda +- prebuilt-cuda +- install-prebuilt-cuda +uid: 14eadcd42ba340c3 +variations: + driver: + env: + CM_CUDA_INSTALL_DRIVER: 'yes' + group: install-driver + no-driver: + default: true + env: + CM_CUDA_INSTALL_DRIVER: 'no' + group: install-driver +versions: + 11.7.0: + env: + CM_CUDA_LINUX_FILENAME: cuda_11.7.0_515.43.04_linux.run + 11.8.0: + env: + CM_CUDA_LINUX_FILENAME: cuda_11.8.0_520.61.05_linux.run + 12.0.0: + env: + CM_CUDA_LINUX_FILENAME: cuda_12.0.0_525.60.13_linux.run + 12.1.1: + env: + CM_CUDA_LINUX_FILENAME: cuda_12.1.1_530.30.02_linux.run + 12.2.0: + env: + CM_CUDA_LINUX_FILENAME: cuda_12.2.0_535.54.03_linux.run + 12.3.2: + env: + CM_CUDA_LINUX_FILENAME: cuda_12.3.2_545.23.08_linux.run + 12.4.0: + env: + CM_CUDA_LINUX_FILENAME: cuda_12.4.0_550.54.14_linux.run + 12.4.1: + env: + CM_CUDA_LINUX_FILENAME: cuda_12.4.1_550.54.15_linux.run + 12.5.0: + env: + CM_CUDA_LINUX_FILENAME: cuda_12.5.0_555.42.02_linux.run + 12.5.1: + env: + CM_CUDA_LINUX_FILENAME: cuda_12.5.1_555.42.06_linux.run + 12.6.0: + env: + CM_CUDA_LINUX_FILENAME: cuda_12.6.0_560.28.03_linux.run + 12.6.1: + env: + CM_CUDA_LINUX_FILENAME: cuda_12.6.1_560.35.03_linux.run diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/customize.py new file mode 100644 index 000000000..63f43727d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/customize.py @@ -0,0 +1,66 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors:
https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + if str(env.get('CUDA_SKIP_SUDO', '')).lower() == 'true': + env['CM_SUDO'] = '' + + meta = i['meta'] + automation = i['automation'] + version = env.get('CM_VERSION') + + if version not in env.get('CM_CUDA_LINUX_FILENAME', ''): + supported_versions = list(meta['versions'].keys()) + return {'return': 1, 'error': "Only CUDA versions {} are supported now".format( + ', '.join(supported_versions))} + + install_prefix = env.get('CM_CUDA_INSTALL_PREFIX', os.getcwd()) + + env['CM_CUDA_INSTALL_PREFIX'] = install_prefix + + extra_install_args = '' + + if str(env.get('CM_CUDA_DRIVER_INSTALL_OVERRIDE', '')) != '': + extra_install_args += ' --override-driver-check' + + recursion_spaces = i['recursion_spaces'] + nvcc_bin = "nvcc" + + env['WGET_URL'] = "https://developer.download.nvidia.com/compute/cuda/" + \ + env['CM_VERSION'] + "/local_installers/" + \ + env['CM_CUDA_LINUX_FILENAME'] + + extra_options = env.get('CUDA_ADDITIONAL_INSTALL_OPTIONS', '') + if env.get('CM_CUDA_INSTALL_DRIVER', '') == "yes": + extra_options += " --driver" + env['CUDA_ADDITIONAL_INSTALL_OPTIONS'] = extra_options + + env['CM_CUDA_INSTALLED_PATH'] = os.path.join(install_prefix, 'install') + env['CM_NVCC_BIN_WITH_PATH'] = os.path.join( + install_prefix, 'install', 'bin', nvcc_bin) + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_NVCC_BIN_WITH_PATH'] + + env['CM_CUDA_EXTRA_INSTALL_ARGS'] = extra_install_args + + # Set CUDA_RUN_FILE_LOCAL_PATH to empty if not set for backwards + # compatibility in download file + env['CUDA_RUN_FILE_LOCAL_PATH'] = env.get('CUDA_RUN_FILE_LOCAL_PATH', '') + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/run.sh new file mode 100644 index 000000000..c13e96b3b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-cuda-prebuilt/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +INSTALL_DIR=${CM_CUDA_INSTALL_PREFIX}/install + +cmd="${CM_SUDO} bash ${CM_CUDA_RUN_FILE_PATH} --toolkitpath=${INSTALL_DIR} --defaultroot=${INSTALL_DIR} --toolkit ${CUDA_ADDITIONAL_INSTALL_OPTIONS} --silent --override ${CM_CUDA_EXTRA_INSTALL_ARGS}" +echo "${cmd}" +eval "${cmd}" +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
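The version gate in install-cuda-prebuilt/customize.py above only accepts a CM_VERSION for which the `versions:` map in `_cm.yaml` provides a run-file name. A trimmed sketch of that check; only two of the supported versions are shown:

```python
# Trimmed sketch of the version gate in install-cuda-prebuilt/customize.py.
versions = {
    '11.8.0': 'cuda_11.8.0_520.61.05_linux.run',  # default_version in _cm.yaml
    '12.6.1': 'cuda_12.6.1_560.35.03_linux.run',
}

def check_version(requested: str) -> dict:
    # Mirrors: if version not in env.get('CM_CUDA_LINUX_FILENAME', '')
    if requested not in versions.get(requested, ''):
        return {'return': 1,
                'error': 'Only CUDA versions {} are supported now'.format(
                    ', '.join(versions))}
    return {'return': 0, 'filename': versions[requested]}

print(check_version('11.8.0'))   # accepted
print(check_version('10.2.89'))  # rejected with the supported-versions error
```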
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/_cm.yaml new file mode 100644 index 000000000..f2194fd66 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/_cm.yaml @@ -0,0 +1,79 @@ +alias: install-diffusers-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +deps: +- tags: detect,os +- tags: detect,cpu +- names: + - python + - python3 + skip_if_env: + CM_CONDA_ENV: + - 'yes' + tags: get,python3 +- names: + - compiler + tags: get,compiler +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_DIFFUSERS_SRC_REPO_PATH + extra_cache_tags: diffusers,diffusers-src,src,diffusers-src,diffusers-src-repo + names: + - diffusers-src-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_CHECKOUT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +env: + CM_GIT_URL: https://github.com/huggingface/diffusers.git +name: Build diffusers from sources +new_env_keys: +- CM_DIFFUSERS_* +prehook_deps: [] +sort: 1000 +tags: +- install +- get +- src +- from.src +- diffusers +- src-diffusers +uid: b2ddda995f63412f +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + for-intel-mlperf-inference-v4.0-sdxl: + base: + - tag.v0.25.1 + env: + CM_INTEL_MLPERF_INFERENCE_v4_0_STABLE_DIFFUSION_PATCH: 'yes' + python.#: + env: + CM_PYTHON_BIN_WITH_PATH: '#' + repo.#: + env: + CM_GIT_URL: '#' + group: repo + repo.https://github.com/huggingface/diffusers: + default: true + env: + CM_GIT_URL: https://github.com/huggingface/diffusers + group: repo + sha.#: + env: + CM_GIT_CHECKOUT_SHA: '#' + tag.#: + ad: + diffusers-src-repo: + tags: _no-recurse-submodules,_full-history + env: + CM_GIT_CHECKOUT_TAG: '#' +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/customize.py new file mode 100644 index 000000000..bcb4fa9ca --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/customize.py @@ -0,0 +1,33 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return': 0} + + +def postprocess(i): + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/run.sh new file mode 100644 index 000000000..8d5ca084a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-diffusers-from-src/run.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +CUR_DIR=$PWD +rm -rf diffusers +cp -r ${CM_DIFFUSERS_SRC_REPO_PATH} diffusers +test "${?}" -eq "0" || exit $? 
+cd diffusers +rm -rf build + +if [[ ${CM_INTEL_MLPERF_INFERENCE_v4_0_STABLE_DIFFUSION_PATCH} == "yes" ]]; then + wget -nc https://raw.githubusercontent.com/mlcommons/inference_results_v4.0/main/closed/Intel/code/stable-diffusion-xl/pytorch-cpu/diffusers.patch + test "${?}" -eq "0" || exit $? + git apply diffusers.patch + test "${?}" -eq "0" || exit $? +fi + +${CM_PYTHON_BIN_WITH_PATH} -m pip install . +test "${?}" -eq "0" || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/install-gcc-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-gcc-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-gcc-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-gcc-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-gcc-src/README.md new file mode 100644 index 000000000..e18d6d4fd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-gcc-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-gcc-src](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-gcc-src) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-gcc-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-gcc-src/_cm.yaml new file mode 100644 index 000000000..2c72a9734 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-gcc-src/_cm.yaml @@ -0,0 +1,25 @@ +alias: install-gcc-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +default_version: '12' +deps: +- tags: detect,os +env: + CM_GIT_URL: git://gcc.gnu.org/git/gcc.git +post_deps: +- skip_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + tags: get,gcc +tags: +- install +- src +- gcc +- src-gcc +uid: faae0ebd6e1242db +versions: + master: + env: + CM_GIT_CHECKOUT: master diff --git a/cmx4mlops/cmx4mlops/repo/script/install-gcc-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-gcc-src/customize.py new file mode 100644 index 000000000..e428b949e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-gcc-src/customize.py @@ -0,0 +1,41 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + need_version = env.get('CM_VERSION', '') + if need_version == '': + return {'return': 1, + 'error': 'internal problem - CM_VERSION is not defined in env'} + + print(recursion_spaces + ' # Requested version: {}'.format(need_version)) + + if 'CM_GIT_CHECKOUT' not in env: + env['CM_GIT_CHECKOUT'] = 'releases/gcc-' + need_version + + env['CM_GCC_INSTALLED_PATH'] = os.path.join(os.getcwd(), 'install', 'bin') + + return {'return': 0} diff 
--git a/cmx4mlops/cmx4mlops/repo/script/install-gcc-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-gcc-src/run.sh new file mode 100644 index 000000000..472f4e9c1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-gcc-src/run.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +CUR_DIR=$PWD + +echo "******************************************************" + +if [ ! -d "src" ]; then + echo "Cloning GCC from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT}..." + git clone -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} src + if [ "${?}" != "0" ]; then exit 1; fi +fi + +mkdir -p install +mkdir -p build + +INSTALL_DIR="${CUR_DIR}/install" + +echo "******************************************************" +cd src +./contrib/download_prerequisites +cd ../build + +../src/configure --prefix="${INSTALL_DIR}" --with-gcc-major-version-only --disable-multilib + +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" +CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}} +CM_MAKE_CORES=${CM_MAKE_CORES:-2} + +make -j${CM_MAKE_CORES} +if [ "${?}" != "0" ]; then exit 1; fi +make install +if [ "${?}" != "0" ]; then exit 1; fi + +# Clean build directory (too large) +cd ${CUR_DIR} +rm -rf build + +echo "******************************************************" +echo "GCC was built and installed to ${INSTALL_DIR} ..." diff --git a/cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/README.md b/cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/README.md new file mode 100644 index 000000000..a1a03cd3b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Python-automation/install-generic-conda-package](https://docs.mlcommons.org/cm4mlops/scripts/Python-automation/install-generic-conda-package) for the documentation of this CM script. 
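Recipes like install-gcc-src above are normally invoked through the CM interface rather than by running their shell scripts directly. A hedged usage sketch via the cmind Python API follows; the tag list and version key come from the `_cm.yaml` shown earlier, but treat the exact call shape as an assumption, roughly equivalent to `cm run script "install src gcc" --version=12` on the CLI:

```python
# Hedged usage sketch: run the install-gcc-src recipe via the cmind API.
import cmind

r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'install,src,gcc',
                  'version': '12'})
if r['return'] > 0:
    print('Error: {}'.format(r.get('error', '')))
```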
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/_cm.yaml new file mode 100644 index 000000000..8beed5da9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/_cm.yaml @@ -0,0 +1,52 @@ +alias: install-generic-conda-package +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Python automation +clean_files: [] +deps: +- tags: detect,os +- tags: detect,cpu +- names: + - conda + tags: get,conda +- names: + - conda + tags: get,conda +extra_cache_tags_from_env: +- env: CM_PYTHON_CACHE_TAGS + prefix: python- +local_env_keys: +- CM_GENERIC_PYTHON_PACKAGE_VARIANT +new_env_keys: +- CM_PYTHONLIB_* +tags: +- get +- install +- generic +- generic-conda-lib +- conda-lib +- conda-package +- generic-conda-package +uid: d9275487f5314195 +variations: + name.#: + ad: + conda: + tags: _name.# + package.#: + env: + CM_CONDA_PKG_NAME: '#' + package.python: + env: + CM_CONDA_PKG_NAME: python + new_env_keys: + - CM_PYTHON_BIN_WITH_PATH + source.#: + env: + CM_CONDA_PKG_SRC: '#' + group: package-source + source.intel: + env: + CM_CONDA_PKG_SRC: https://software.repos.intel.com/python/conda/ + group: package-source diff --git a/cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/customize.py new file mode 100644 index 000000000..b9fce2df8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/customize.py @@ -0,0 +1,57 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import cmind as cm + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + meta = i['meta'] + automation = i['automation'] + run_script_input = i['run_script_input'] + + version_string = env.get('CM_TMP_PIP_VERSION_STRING', '').strip() + package_name = env['CM_CONDA_PKG_NAME'].strip() + + install_cmd = env['CM_CONDA_BIN_WITH_PATH'] + " install -y " + if env.get('CM_CONDA_PKG_SRC', '') != '': + install_cmd += " -c " + env['CM_CONDA_PKG_SRC'] + " " + + install_cmd += package_name + install_cmd += version_string + + env['CM_CONDA_PKG_INSTALL_CMD'] = install_cmd + + return {'return': 0} + + +def detect_version(i): + # TBD: real version detection is not implemented yet; + version = i['env'].get('CM_VERSION', '') + print(i['recursion_spaces'] + ' Detected version: {}'.format(version)) + + return {'return': 0, 'version': version} + + +def postprocess(i): + + env = i['env'] + version = env.get('CM_VERSION', '') + + if env['CM_CONDA_PKG_NAME'] == "python": + env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join( + os.path.dirname(env['CM_CONDA_BIN_WITH_PATH']), "python") + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/run.sh new file mode 100644 index 000000000..68a48d9ee --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-generic-conda-package/run.sh @@ -0,0 +1,7 @@ +#!/bin/bash + + +cmd="${CM_CONDA_PKG_INSTALL_CMD}" +echo $cmd +eval $cmd +test $? -eq 0 || exit $?
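The command assembled by install-generic-conda-package/customize.py above reduces to a one-liner. A sketch using the `_source.intel` channel from `_cm.yaml`; the `==3.10` version suffix is illustrative, since the real suffix comes from CM_TMP_PIP_VERSION_STRING:

```python
# Sketch of the CM_CONDA_PKG_INSTALL_CMD assembly in customize.py above.
def conda_install_cmd(conda_bin, package, channel='', version_string=''):
    cmd = conda_bin + ' install -y '
    if channel:
        cmd += ' -c ' + channel + ' '
    return cmd + package + version_string

print(conda_install_cmd('conda', 'python',
                        channel='https://software.repos.intel.com/python/conda/',
                        version_string='==3.10'))  # illustrative version suffix
```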
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/_cm.yaml new file mode 100644 index 000000000..9dabcd42a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/_cm.yaml @@ -0,0 +1,61 @@ +alias: install-gflags-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +deps: +- tags: detect,os +- tags: detect,cpu +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_GFLAGS_SRC_REPO_PATH + extra_cache_tags: gflags,src,gflags-src,gflags-src-repo + names: + - gflags-src-repo + - gflags-src + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_CHECKOUT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +env: {} +name: Build gflags from sources +new_env_keys: +- CM_GFLAGS_* +prehook_deps: [] +sort: 1000 +tags: +- install +- get +- src +- from.src +- gflags +- src-gflags +uid: f311366ff15e4cdf +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + repo.#: + env: + CM_GIT_URL: '#' + group: repo + repo.https://github.com/gflags/gflags: + default: true + env: + CM_GIT_URL: https://github.com/gflags/gflags + group: repo + sha.#: + env: + CM_GIT_CHECKOUT_SHA: '#' + tag.#: + ad: + gflags-src-repo: + tags: _no-recurse-submodules,_full-history + env: + CM_GIT_CHECKOUT_TAG: '#' +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/customize.py new file mode 100644 index 000000000..ff527b454 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/customize.py @@ -0,0 +1,39 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['CM_GFLAGS_BUILD_PATH'] = os.path.join(os.getcwd(), "gflags", "build") + env['CM_DEPENDENT_CACHED_PATH'] = env['CM_GFLAGS_BUILD_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/run.sh new file mode 100644 index 000000000..79cd02608 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-gflags-from-src/run.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=$PWD +rm -rf gflags +cp -r 
${CM_GFLAGS_SRC_REPO_PATH} gflags +cd gflags +test "${?}" -eq "0" || exit $? +rm -rf build + +mkdir build +cd build +cmake .. +test "${?}" -eq "0" || exit $? +make -j${CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET} +test "${?}" -eq "0" || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/install-gflags/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-gflags/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-gflags/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-gflags/README.md b/cmx4mlops/cmx4mlops/repo/script/install-gflags/README.md new file mode 100644 index 000000000..a92f5255d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-gflags/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/install-gflags](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/install-gflags) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-gflags/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-gflags/_cm.yaml new file mode 100644 index 000000000..349560315 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-gflags/_cm.yaml @@ -0,0 +1,23 @@ +alias: install-gflags +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_version: 2.2.2 +deps: +- tags: detect,os +- tags: detect,cpu +- tags: get,cmake + version_min: '3.1' +env: {} +new_env_keys: [] +tags: +- install +- src +- get +- gflags +uid: 10bb562c29ea459e +versions: + 2.2.2: + env: + CM_VERSION: 2.2.2 diff --git a/cmx4mlops/cmx4mlops/repo/script/install-gflags/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-gflags/customize.py new file mode 100644 index 000000000..12e8ee957 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-gflags/customize.py @@ -0,0 +1,42 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + need_version = env.get('CM_VERSION', '') + if need_version == '': + return {'return': 1, + 'error': 'internal problem - CM_VERSION is not defined in env'} + + print(recursion_spaces + ' # Requested version: {}'.format(need_version)) + + return {'return': 0} + + +def postprocess(i): + inp = i['input'] + env = i['env'] + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-gflags/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-gflags/run.sh new file mode 100644 index 000000000..881eb6b75 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-gflags/run.sh @@ -0,0 +1,18 @@ 
+#!/bin/bash + +CUR_DIR=$PWD + +echo "***********************************************************" +CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}} +CM_MAKE_CORES=${CM_MAKE_CORES:-2} +CM_WGET_URL=https://github.com/gflags/gflags/archive/refs/tags/v${CM_VERSION}.tar.gz +wget -nc ${CM_WGET_URL} +test $? -eq 0 || exit 1 +tar -xzf "v${CM_VERSION}.tar.gz" && cd gflags-${CM_VERSION} +test $? -eq 0 || exit 1 +rm -rf build +mkdir build && cd build +cmake .. +make -j${CM_MAKE_CORES} +test $? -eq 0 || exit 1 +sudo make install diff --git a/cmx4mlops/cmx4mlops/repo/script/install-github-cli/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-github-cli/README.md b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/README.md new file mode 100644 index 000000000..f58c3bb99 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/install-github-cli](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/install-github-cli) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-github-cli/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/_cm.yaml new file mode 100644 index 000000000..3be78588b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/_cm.yaml @@ -0,0 +1,15 @@ +alias: install-github-cli +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +clean_files: [] +deps: +- tags: detect,os +tags: +- install +- gh +- github +- cli +- github-cli +uid: cd948ec309344bf8 diff --git a/cmx4mlops/cmx4mlops/repo/script/install-github-cli/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/customize.py new file mode 100644 index 000000000..2d5062251 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/customize.py @@ -0,0 +1,25 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + env['CM_TMP_PATH'] = os.path.join(os.getcwd(), 'install', 'bin') + env['CM_TMP_FAIL_IF_NOT_FOUND'] = 'yes' + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-github-cli/run-macos.sh b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/run-macos.sh new file mode 100644 index 000000000..6a329e98c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/run-macos.sh @@ -0,0 +1 @@ +brew install gh diff --git a/cmx4mlops/cmx4mlops/repo/script/install-github-cli/run-rhel.sh b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/run-rhel.sh new 
file mode 100644 index 000000000..e3ef08f5c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/run-rhel.sh @@ -0,0 +1,3 @@ +sudo dnf install -y 'dnf-command(config-manager)' +sudo dnf config-manager --add-repo https://cli.github.com/packages/rpm/gh-cli.repo +sudo dnf install -y gh diff --git a/cmx4mlops/cmx4mlops/repo/script/install-github-cli/run.bat b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/run.bat new file mode 100644 index 000000000..2ec8b9718 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/run.bat @@ -0,0 +1 @@ +choco install gh diff --git a/cmx4mlops/cmx4mlops/repo/script/install-github-cli/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/run.sh new file mode 100644 index 000000000..74aa873d1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-github-cli/run.sh @@ -0,0 +1,7 @@ +#!/bin/bash +curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \ +&& sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \ +&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null \ +&& sudo apt update \ +&& sudo apt install gh -y +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/README.md new file mode 100644 index 000000000..9b277f70e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/install-intel-neural-speed-from-src](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/install-intel-neural-speed-from-src) for the documentation of this CM script. 
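The install-github-cli script above ships one runner per platform: `run.sh` (Debian/Ubuntu apt repository flow), `run-rhel.sh` (dnf), `run-macos.sh` (Homebrew) and `run.bat` (Chocolatey); CM dispatches to the right one based on the `detect,os` dependency. A hedged sketch of the invocation and a quick check:

```bash
# CM selects run.sh / run-rhel.sh / run-macos.sh / run.bat per detected OS
cm run script --tags=install,gh,github,cli

# Sanity check with the installed CLI itself
gh --version
```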
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/_cm.yaml new file mode 100644 index 000000000..da1f041a2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/_cm.yaml @@ -0,0 +1,76 @@ +alias: install-intel-neural-speed-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +deps: +- tags: detect,os +- tags: detect,cpu +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_INTEL_NEURAL_SPEED_SRC_REPO_PATH + extra_cache_tags: intel-neural-speed,neural-speed-src,src,intel-neural-speed-src,neural-speed-src-repo + names: + - neural-speed-src-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_CHECKOUT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +env: + CM_GIT_URL: https://github.com/intel/neural-speed +name: Build Intel Neural Speed from sources +new_env_keys: +- CM_INTEL_NEURAL_SPEED_* +sort: 1000 +tags: +- install +- src +- from.src +- neural-speed +- intel-neural-speed +uid: b5477fdc929744ce +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + for-intel-mlperf-inference-v4.0-gptj: + adr: + conda-package: + tags: _name.gptj-pt + deps: + - names: + - conda + tags: get,conda,_name.gptj-pt + - names: + - conda-package + - python3 + tags: get,generic,conda-package,_package.python + version: '3.9' + - names: + - conda-package + - wheel + tags: get,generic,conda-package,_package.wheel,_source.conda-forge + repo.#: + env: + CM_GIT_URL: '#' + group: repo + repo.https://github.com/intel/neural-speed: + default: true + env: + CM_GIT_URL: https://github.com/intel/neural-speed + group: repo + sha.#: + env: + CM_GIT_CHECKOUT_SHA: '#' + tag.#: + ad: + neural-speed-src-repo: + tags: _no-recurse-submodules,_full-history + env: + CM_GIT_CHECKOUT_TAG: '#' +warnings: [] diff --git a/cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/customize.py new file mode 100644 index 000000000..a8e9b5fdd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/customize.py @@ -0,0 +1,34 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join( + env['CM_CONDA_BIN_PATH'], "python") + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + env['+PATH'] = [] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/run.sh new file mode 100644 index 000000000..7068890f3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-intel-neural-speed-from-src/run.sh @@ -0,0 +1,14 @@ 
+#!/bin/bash + +CUR_DIR=$PWD +echo $PWD +rm -rf neural-speed +cmd="cp -r ${CM_INTEL_NEURAL_SPEED_SRC_REPO_PATH} neural-speed" +echo "$cmd" +eval "$cmd" +${CM_PYTHON_BIN_WITH_PATH} -m pip install -r neural-speed/requirements.txt +test $? -eq 0 || exit $? +CMAKE_ARGS="-DNS_PROFILING=ON" ${CM_PYTHON_BIN_WITH_PATH} -m pip install -ve ./neural-speed +test $? -eq 0 || exit $? + +echo "******************************************************" diff --git a/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/README.md new file mode 100644 index 000000000..84a8cad68 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-ipex-from-src](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-ipex-from-src) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/_cm.yaml new file mode 100644 index 000000000..590467ec6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/_cm.yaml @@ -0,0 +1,346 @@ +alias: install-ipex-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +deps: +- tags: detect,os +- tags: detect,cpu +- names: + - python + - python3 + skip_if_env: + CM_CONDA_ENV: + - 'yes' + tags: get,python3 +- names: + - pytorch + skip_if_env: + CM_IPEX_SKIP_PYTORCH: + - 'yes' + tags: get,pytorch,from.src +- tags: get,generic,conda-package,_package.ninja +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_IPEX_SRC_REPO_PATH + extra_cache_tags: ipex,src,ipex-src,ipex-src-repo + names: + - ipex-src-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_CHECKOUT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +env: {} +name: Build IPEX from sources +new_env_keys: +- CM_IPEX_* +prehook_deps: [] +sort: 1000 +tags: +- install +- get +- src +- from.src +- ipex +- src-ipex +uid: 09364fff2bf04516 +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + for-intel-mlperf-inference-3d-unet: + adr: + conda-package: + tags: _name.3d-unet-pt + pytorch: + tags: _for-intel-mlperf-inference-3d-unet + base: + - branch.1.9.0-rc + deps: + - tags: get,generic-sys-util,_libffi7 + - names: + - conda + tags: get,conda,_name.3d-unet-pt + - names: + - conda-package + - python3 + tags: get,generic,conda-package,_package.python + version: '3.8' + - names: + - conda-package + - wheel + tags: get,generic,conda-package,_package.wheel,_source.conda-forge + - names: + - conda-package + - setuptools + tags: get,generic,conda-package,_package.setuptools,_source.conda-forge + version: 69.5.1 + - names: + - conda-package + - typing-extensions + tags: get,generic,conda-package,_package.typing-extensions,_source.conda-forge + - names: + - conda-package + - sympy + tags: 
get,generic,conda-package,_package.sympy,_source.conda-forge + - tags: get,cmake + version_max: 3.26.4 + - tags: get,gcc + version_max: '12.3' + version_max_usable: '12.3' + env: + CM_CONDA_ENV: 'yes' + CM_INTEL_IPEX_3D_UNET_PATCH: 'yes' + CM_IPEX_SKIP_PYTORCH: 'yes' + for-intel-mlperf-inference-resnet50: + adr: + conda-package: + tags: _name.resnet50-pt + pytorch: + tags: _for-intel-mlperf-inference-resnet50 + base: + - tag.v1.12.0 + deps: + - names: + - conda + tags: get,conda,_name.resnet50-pt + - names: + - conda-package + - python3 + tags: get,generic,conda-package,_package.python + version: '3.9' + - names: + - conda-package + - wheel + tags: get,generic,conda-package,_package.wheel,_source.conda-forge + - names: + - conda-package + - setuptools + tags: get,generic,conda-package,_package.setuptools,_source.conda-forge + version: 69.5.1 + - names: + - conda-package + - typing-extensions + tags: get,generic,conda-package,_package.typing-extensions,_source.conda-forge + - names: + - conda-package + - sympy + tags: get,generic,conda-package,_package.sympy,_source.conda-forge + - tags: get,cmake + version_max: 3.26.4 + - tags: get,gcc + version_max: '12.3' + version_max_usable: '12.3' + env: + CM_CONDA_ENV: 'yes' + CM_INTEL_IPEX_RESNET50_PATCH: 'yes' + for-intel-mlperf-inference-retinanet: + adr: + conda-package: + tags: _name.retinanet-pt + pytorch: + tags: _for-intel-mlperf-inference-retinanet + base: + - tag.v1.12.0 + deps: + - names: + - conda + tags: get,conda,_name.retinanet-pt + - names: + - conda-package + - python3 + tags: get,generic,conda-package,_package.python + version: '3.9' + - names: + - conda-package + - wheel + tags: get,generic,conda-package,_package.wheel,_source.conda-forge + - names: + - conda-package + - setuptools + tags: get,generic,conda-package,_package.setuptools,_source.conda-forge + version: 69.5.1 + - names: + - conda-package + - typing-extensions + tags: get,generic,conda-package,_package.typing-extensions,_source.conda-forge + - names: + - conda-package + - sympy + tags: get,generic,conda-package,_package.sympy,_source.conda-forge + - tags: get,cmake + version_max: 3.26.4 + - tags: get,gcc + version_max: '12.3' + version_max_usable: '12.3' + env: + CM_CONDA_ENV: 'yes' + CM_INTEL_IPEX_RETINANET_PATCH: 'yes' + for-intel-mlperf-inference-sdxl: + alias: for-intel-mlperf-inference-v4.0-sdxl + for-intel-mlperf-inference-v3.1-3d-unet: + alias: for-intel-mlperf-inference-3d-unet + for-intel-mlperf-inference-v3.1-dlrm-v2: + adr: + conda-package: + tags: _name.dlrm-v2-pt + pytorch: + tags: _for-intel-mlperf-inference-v3.1-dlrm-v2 + base: + - sha.7256d0848ba81bb802dd33fca0e33049a751db58 + deps: + - names: + - conda + tags: get,conda,_name.dlrm-v2-pt + - names: + - conda-package + - python3 + tags: get,generic,conda-package,_package.python + version: '3.9' + - names: + - conda-package + - wheel + tags: get,generic,conda-package,_package.wheel,_source.conda-forge + - names: + - conda-package + - setuptools + tags: get,generic,conda-package,_package.setuptools,_source.conda-forge + version: 69.5.1 + - names: + - conda-package + - typing-extensions + tags: get,generic,conda-package,_package.typing-extensions,_source.conda-forge + - names: + - conda-package + - sympy + tags: get,generic,conda-package,_package.sympy,_source.conda-forge + - names: + - pip-package + - numpy + tags: get,generic-python-lib,_package.numpy + version: 1.23.5 + - names: + - pytorch + - torch + tags: install,pytorch,from-src,_for-intel-mlperf-inference-v3.1-dlrm-v2 + env: + CM_CONDA_ENV: 'yes' + 
CM_INTEL_IPEX_DLRM_V2_PATCH: 'yes' + CM_IPEX_SKIP_PYTORCH: 'yes' + for-intel-mlperf-inference-v3.1-gptj: + adr: + conda-package: + tags: _name.gptj-pt + pytorch: + tags: _for-intel-mlperf-inference-v3.1-gptj + base: + - branch.v2.1.0.dev+cpu.llm.mlperf + deps: + - names: + - conda + tags: get,conda,_name.gptj-pt + - names: + - conda-package + - python3 + tags: get,generic,conda-package,_package.python + version: '3.9' + - names: + - conda-package + - wheel + tags: get,generic,conda-package,_package.wheel,_source.conda-forge + - names: + - conda-package + - setuptools + tags: get,generic,conda-package,_package.setuptools,_source.conda-forge + version: 69.5.1 + - names: + - conda-package + - typing-extensions + tags: get,generic,conda-package,_package.typing-extensions,_source.conda-forge + - names: + - conda-package + - sympy + tags: get,generic,conda-package,_package.sympy,_source.conda-forge + - tags: install,llvm,src,_for-intel-mlperf-inference-v3.1-gptj + env: + CM_CONDA_ENV: 'yes' + CM_IPEX_SKIP_PYTORCH: 'yes' + CM_USE_LLVM_FOR_IPEX: 'yes' + for-intel-mlperf-inference-v3.1-resnet50: + alias: for-intel-mlperf-inference-resnet50 + for-intel-mlperf-inference-v3.1-retinanet: + alias: for-intel-mlperf-inference-retinanet + for-intel-mlperf-inference-v4.0-sdxl: + adr: + conda-package: + tags: _name.sdxl-pt + pytorch: + tags: _for-intel-mlperf-inference-sdxl + base: + - sha.f27c8d42a734ae0805de2bd0d8396ce205638329 + deps: + - names: + - conda + tags: get,conda,_name.sdxl-pt + - names: + - conda-package + - python3 + tags: get,generic,conda-package,_package.python + version: '3.9' + - names: + - pip-package + - pip-torch + tags: get,generic-python-lib,_package.torch,_path.https://download.pytorch.org/whl/nightly/cpu/torch-2.3.0.dev20231214%2Bcpu-cp39-cp39-linux_x86_64.whl + - names: + - pip-package + - pip-torchvision + tags: get,generic-python-lib,_package.torchvision,_path.https://download.pytorch.org/whl/nightly/cpu/torchvision-0.18.0.dev20231214%2Bcpu-cp39-cp39-linux_x86_64.whl + - names: + - conda-package + - wheel + tags: get,generic,conda-package,_package.wheel,_source.conda-forge + - names: + - conda-package + - setuptools + tags: get,generic,conda-package,_package.setuptools,_source.conda-forge + version: 69.5.1 + - names: + - conda-package + - typing-extensions + tags: get,generic,conda-package,_package.typing-extensions,_source.conda-forge + - names: + - conda-package + - sympy + tags: get,generic,conda-package,_package.sympy,_source.conda-forge + - tags: get,cmake + version_max: 3.26.4 + - tags: get,gcc + version_max: '12.3' + version_max_usable: '12.3' + env: + CM_CONDA_ENV: 'yes' + CM_IPEX_SKIP_PYTORCH: 'yes' + repo.#: + env: + CM_GIT_URL: '#' + group: repo + repo.https://github.com/intel/intel-extension-for-pytorch: + default: true + env: + CM_GIT_URL: https://github.com/intel/intel-extension-for-pytorch + group: repo + sha.#: + env: + CM_GIT_CHECKOUT_SHA: '#' + tag.#: + ad: + ipex-src-repo: + tags: _no-recurse-submodules,_full-history + env: + CM_GIT_CHECKOUT_TAG: '#' +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/apply_intel_3d-unet_patch.sh b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/apply_intel_3d-unet_patch.sh new file mode 100644 index 000000000..f5a1b315a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/apply_intel_3d-unet_patch.sh @@ -0,0 +1,5 @@ +rm -rf unet3d.patch +wget -nc https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/3d-unet-99/pytorch-cpu/unet3d.patch 
+test $? -eq 0 || exit $? +git apply unet3d.patch +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/apply_intel_resnet50_patch.sh b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/apply_intel_resnet50_patch.sh new file mode 100644 index 000000000..c30944979 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/apply_intel_resnet50_patch.sh @@ -0,0 +1,5 @@ +rm input_output_aligned_scales.patch +wget -nc https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/resnet50/pytorch-cpu/input_output_aligned_scales.patch +test $? -eq 0 || exit $? +git apply input_output_aligned_scales.patch +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/apply_intel_retinanet_patch.sh b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/apply_intel_retinanet_patch.sh new file mode 100644 index 000000000..d219846fc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/apply_intel_retinanet_patch.sh @@ -0,0 +1,5 @@ +rm runtime_ignore_dequant_check.patch +wget -nc https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/retinanet/pytorch-cpu/runtime_ignore_dequant_check.patch +test $? -eq 0 || exit $? +git apply runtime_ignore_dequant_check.patch +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/customize.py new file mode 100644 index 000000000..ab6a15554 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/customize.py @@ -0,0 +1,51 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + env['IPEX_DIR'] = env['CM_IPEX_SRC_REPO_PATH'] + + if env.get('CM_USE_LLVM_FOR_IPEX', '') == 'yes': + env['DNNL_GRAPH_BUILD_COMPILER_BACKEND'] = 1 + env['USE_LLVM'] = env['CM_LLVM_INSTALLED_PATH'] + env['LLVM_DIR'] = os.path.join( + env['CM_LLVM_INSTALLED_PATH'], "lib", "cmake", "llvm") + + run_cmd = "python setup.py clean && python setup.py install" + + env['CM_RUN_CMD'] = run_cmd + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + env['CM_IPEX_BUILD_PATH'] = os.path.join(os.getcwd(), "ipex_src", "build") + env['CM_IPEX_INSTALLED_PATH'] = os.path.join( + env['CM_IPEX_BUILD_PATH'], + "Release", + "packages", + "intel_extension_for_pytorch") + env['CM_DEPENDENT_CACHED_PATH'] = env['CM_IPEX_INSTALLED_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/run.sh new file mode 100644 index 000000000..572455359 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-ipex-from-src/run.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +#export PATH=${CM_CONDA_BIN_PATH}:${PATH} +#echo $LD_LIBRARY_PATH +#exit 1 +rm -rf ipex_src +cp -r ${IPEX_DIR} ipex_src +cd ipex_src +pwd + 
+git submodule sync +git submodule update --init --recursive + +if [[ ${CM_INTEL_IPEX_RESNET50_PATCH} == "yes" ]]; then + bash ${CM_TMP_CURRENT_SCRIPT_PATH}/apply_intel_resnet50_patch.sh + test "$?" -eq 0 || exit "$?" + +elif [[ ${CM_INTEL_IPEX_RETINANET_PATCH} == "yes" ]]; then + bash ${CM_TMP_CURRENT_SCRIPT_PATH}/apply_intel_retinanet_patch.sh + test "$?" -eq 0 || exit "$?" + +elif [[ ${CM_INTEL_IPEX_3D_UNET_PATCH} == "yes" ]]; then + cd third_party/mkl-dnn + git fetch --tags && git checkout v2.7 + test "$?" -eq 0 || exit "$?" + cd ../../ + bash ${CM_TMP_CURRENT_SCRIPT_PATH}/apply_intel_3d-unet_patch.sh + test "$?" -eq 0 || exit "$?" + +elif [[ ${CM_INTEL_IPEX_DLRM_V2_PATCH} == "yes" ]]; then + export LD_LIBRARY_PATH="" + wget https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/dlrm-v2-99/pytorch-cpu-int8/ipex.patch + test "$?" -eq 0 || exit "$?" + git apply ipex.patch + test "$?" -eq 0 || exit "$?" + cd third_party/libxsmm + git checkout c21bc5ddb4 + test "$?" -eq 0 || exit "$?" + cd ../ideep && rm -rf mkl-dnn && git checkout b5eadff696 + test "$?" -eq 0 || exit "$?" + git submodule sync && git submodule update --init --recursive + test "$?" -eq 0 || exit "$?" + cd mkl-dnn + wget https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/dlrm-v2-99/pytorch-cpu-int8/onednngraph.patch + test "$?" -eq 0 || exit "$?" + git apply -p1 onednngraph.patch + test "$?" -eq 0 || exit "$?" + cd ../../../ +fi + +rm -rf build +echo ${CM_RUN_CMD} +eval ${CM_RUN_CMD} + +test "$?" -eq 0 || exit "$?" + +echo "******************************************************" + diff --git a/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/README-extra.md new file mode 100644 index 000000000..1ad1e122b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/README-extra.md @@ -0,0 +1,99 @@ +# Get LLVM +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) detects the installed llvm on the system and if not found calls the [install script for llvm](../script/install-llvm-prebuilt). + +## Exported Variables +* `CM_LLVM_CLANG_BIN` +* `CM_LLVM_CLANG_BIN_WITH_PATH` +* `CM_C_COMPILER_BIN` +* `CM_C_COMPILER_WITH_PATH` +* `CM_CXX_COMPILER_BIN` +* `CM_CXX_COMPILER_WITH_PATH` +* `CM_COMPILER_*` + +## Supported and Tested OS +1. Ubuntu 18.04, 20.04, 22.04 +2. RHEL 9 +3. 
Windows 10, 11 + +# CLI + +## Default +```bash +cm run script "install llvm prebuilt" +``` +or +```bash +cm run script --tags=get,llvm +``` + +## Version + +```bash +cm run script "install llvm prebuilt" --version=14.0.0 +``` + +## Version min +```bash +cm run script "install llvm prebuilt" --version_min=12.0.0 +``` + +## Version max +```bash +cm run script "install llvm prebuilt" --version_max=13.999.999 --version_max_usable=13.0.0 +``` + +## Force new detection even if llvm is already found and cached +```bash +cm run script "install llvm prebuilt" --new +``` + +## Test + +```bash +cm run script "app image corner-detection" +``` + +## Reproducibility matrix + +*Test detection and installation on different platforms* + +* Windows, Linux, MacOS + +### Ubuntu 22.04 + +* 17.0.6 +* 17.0.5 +* 17.0.4 +* 17.0.2 +* 16.0.4 +* 16.0.0 + `sudo apt install libncurses5` +* 15.0.6 +* 14.0.0 + + +### RHEL 9 + +#### v14.0.0: ✓ + +```bash +cm rm cache -f +cm run script "install llvm prebuilt" --version=14.0.0 +cm run script "app image corner-detection" +``` + +#### v13.0.0: Need special command + +```bash +cm rm cache -f +cm run script "install llvm prebuilt" --version=13.0.0 --env.CM_LLVM_PACKAGE=clang+llvm-13.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz +cm run script "app image corner-detection" +``` + +#### v12.0.0: Need special command + +```bash +cm rm cache -f +cm run script "install llvm prebuilt" --version=12.0.0 --env.CM_LLVM_PACKAGE=clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz +cm run script "app image corner-detection" +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/README.md b/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/README.md new file mode 100644 index 000000000..884ba531f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-llvm-prebuilt](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-llvm-prebuilt) for the documentation of this CM script. 
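Beyond the RHEL overrides shown above, the same `--env.CM_LLVM_PACKAGE` knob can select a non-x86 asset; the aarch64 filename pattern below is the one synthesized in `customize.py` further down, but whether a given version actually publishes that asset on the LLVM releases page is an assumption to verify:

```bash
# Assumption: clang+llvm-17.0.6-aarch64-linux-gnu.tar.xz exists for this release
cm rm cache -f
cm run script "install llvm prebuilt" --version=17.0.6 \
    --env.CM_LLVM_PACKAGE=clang+llvm-17.0.6-aarch64-linux-gnu.tar.xz
```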
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/_cm.yaml new file mode 100644 index 000000000..a389387a0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/_cm.yaml @@ -0,0 +1,28 @@ +alias: install-llvm-prebuilt +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +default_version: 15.0.6 +deps: +- tags: detect,os +name: Install prebuilt LLVM compiler +new_env_keys: +- CM_LLVM_* +- CM_COMPILER_NAME +- +PATH +- +LD_LIBRARY_PATH +- +C_INCLUDE_PATH +- +CPLUS_INCLUDE_PATH +post_deps: +- skip_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + tags: get,llvm +tags: +- install +- prebuilt +- llvm +- prebuilt-llvm +- install-prebuilt-llvm +uid: cda9094971724a0a diff --git a/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/customize.py new file mode 100644 index 000000000..0b181c194 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/customize.py @@ -0,0 +1,232 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + need_version = env.get('CM_VERSION', '') + clang_file_name = "clang" + if need_version == '': + return {'return': 1, + 'error': 'internal problem - CM_VERSION is not defined in env'} + + print(recursion_spaces + ' # Requested version: {}'.format(need_version)) + + host_os_bits = env['CM_HOST_OS_BITS'] + + if os_info['platform'] != 'windows': + host_os_machine = env['CM_HOST_OS_MACHINE'] # ABI + + # Prepare package name + # First check if it is forced by external environment + package_name = env.get('CM_LLVM_PACKAGE', '').strip() + if package_name == '': + need_version_split = need_version.split('.') + + # If package_name if not forced, attempt to synthesize it based on OS + # and arch + if os_info['platform'] == 'darwin': + force_arch = env.get( + 'CM_LLVM_PACKAGE_FORCE_ARCH', + '') # To allow x86_64 if needed + if force_arch == '': + force_arch = 'arm64' + force_darwin_version = env.get( + 'CM_LLVM_PACKAGE_FORCE_DARWIN_VERSION', '') + if force_darwin_version == '': + if len(need_version_split) > 0: + hver = 0 + try: + hver = int(need_version_split[0]) + except BaseException: + pass + + if hver > 0 and hver < 16: + force_darwin_version = '21.0' + else: + force_darwin_version = '22.0' + package_name = 'clang+llvm-' + need_version + '-' + force_arch + \ + '-apple-darwin' + force_darwin_version + '.tar.xz' + + elif os_info['platform'] == 'windows': + package_name = 'LLVM-' + need_version + '-win' + host_os_bits + '.exe' + clang_file_name = "clang.exe" + + print('') + print('WARNING: Please copy the following path and then paste it') + print(' when LLVM installer asks you about the "Destination Folder":') + print('') + print(os.getcwd()) + print('') + input('Press Enter to continue!') + + else: + if host_os_machine.startswith( + 'arm') or 
host_os_machine.startswith('aarch'): + if host_os_bits == '64': + package_name = 'clang+llvm-' + need_version + '-aarch64-linux-gnu.tar.xz' + else: + package_name = 'clang+llvm-' + need_version + '-armv7a-linux-gnueabihf.tar.xz' + else: + host_os_flavor = env['CM_HOST_OS_FLAVOR'] + + host_os_version = env['CM_HOST_OS_VERSION'] + +# if 'debian' in host_os_flavor: +# return {'return':1, 'error':'debian is not supported yet'} +# +# else: + # Treat all Linux flavours as Ubuntu for now ... + + if True: + default_os = '22.04' + + if len(need_version_split) > 0: + hver = 0 + try: + hver = int(need_version_split[0]) + except BaseException: + pass + + if hver > 0: + if hver < 16: + default_os = '18.04' + else: + default_os = '22.04' + + if need_version == '10.0.1': + default_os = '16.04' + + elif need_version == '11.0.0': + default_os = '20.04' + + elif need_version == '11.0.1': + default_os = '16.04' + if host_os_version == '20.10': + default_os = '20.10' + + elif need_version == '12.0.0': + default_os = '16.04' + if host_os_version == '20.04' or host_os_version == '20.10': + default_os = '20.04' + + elif need_version == '12.0.1': + default_os = '16.04' + # if host_os_version.startswith('18') or host_os_version.startswith('20'): + # default_os = '18.04' + + elif need_version == '13.0.0': + default_os = '16.04' + if host_os_version.startswith('20'): + default_os = '20.04' + + elif need_version == '13.0.1': + default_os = '18.04' + + elif need_version == '14.0.0': + default_os = '18.04' + + elif need_version == '15.0.6': + default_os = '18.04' + + elif need_version == '16.0.0': + default_os = '18.04' + + elif need_version == '16.0.4': + default_os = '22.04' + + elif need_version == '17.0.2': + default_os = '22.04' + + elif need_version == '17.0.4': + default_os = '22.04' + + elif need_version == '17.0.5': + default_os = '22.04' + + elif need_version == '17.0.6': + default_os = '22.04' + + package_name = 'clang+llvm-' + need_version + \ + '-x86_64-linux-gnu-ubuntu-' + default_os + '.tar.xz' + + package_url = 'https://github.com/llvm/llvm-project/releases/download/llvmorg-' + \ + need_version + '/' + package_name + + print(recursion_spaces + ' # Prepared package URL: {}'.format(package_url)) + + print('') + print('Downloading from {} ...'.format(package_url)) + + cm = automation.cmind + + r = cm.access({'action': 'download_file', + 'automation': 'utils,dc2743f8450541e3', + 'url': package_url}) + if r['return'] > 0: + return r + + # 'clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz' # f['filename'] + filename = r['filename'] + + env['CM_LLVM_PACKAGE'] = filename + env['CM_LLVM_INSTALLED_PATH'] = os.path.join(os.getcwd(), 'bin') + env['CM_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join( + os.getcwd(), 'bin', clang_file_name) + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_LLVM_CLANG_BIN_WITH_PATH'] + + # We don't need to check default paths here because we force install to + # cache + env['+PATH'] = [env['CM_LLVM_INSTALLED_PATH']] + + path_include = os.path.join(os.getcwd(), 'include') + if os.path.isdir(path_include): + env['+C_INCLUDE_PATH'] = [path_include] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + version = env['CM_VERSION'] + os_info = i['os_info'] + +# cur_dir = os.getcwd() +# cur_dir_include = os.path.join(cur_dir, 'include') + +# if os.path.isdir(cur_dir_include): +# if os_info['platform'] == 'darwin': +# if '+C_INCLUDE_PATH' not in env: +# env['+C_INCLUDE_PATH'] = [] +# if cur_dir_include not in 
env['+C_INCLUDE_PATH']: +# env['+C_INCLUDE_PATH'].append(cur_dir_include) +# +# if '+CPLUS_INCLUDE_PATH' not in env: +# env['+CPLUS_INCLUDE_PATH'] = [] +# if cur_dir_include not in env['+CPLUS_INCLUDE_PATH']: +# env['+CPLUS_INCLUDE_PATH'].append(cur_dir_include) + + return {'return': 0, 'version': version} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/run.bat b/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/run.bat new file mode 100644 index 000000000..922a0d8ed --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/run.bat @@ -0,0 +1,3 @@ +echo Running %CM_LLVM_PACKAGE% ... + +%CM_LLVM_PACKAGE% --help diff --git a/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/run.sh new file mode 100644 index 000000000..1ace2bb27 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-llvm-prebuilt/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +echo "" +echo "Unarchiving ${CM_LLVM_PACKAGE} ..." + +tar --strip 1 -xf ${CM_LLVM_PACKAGE} +test $? -eq 0 || exit 1 + +rm -f ${CM_LLVM_PACKAGE} +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/README.md new file mode 100644 index 000000000..cab4a397c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-llvm-src](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-llvm-src) for the documentation of this CM script. 
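The `_cm.yaml` that follows wires the git checkout variations (`tag.#`, `sha.#`, `branch.#`) and build options (`runtimes.#`, `clang`, `release`/`debug`) into the CMake command assembled in `customize.py`. A plausible from-source build at a release tag; note that `runtimes.#` takes a colon-separated list, which `customize.py` rewrites to the semicolon form expected by `-DLLVM_ENABLE_RUNTIMES`:

```bash
# Build clang at llvmorg-16.0.6 in Release mode with two runtimes enabled
cm run script --tags=install,src,llvm,_tag.llvmorg-16.0.6,_runtimes.compiler-rt:openmp
```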
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/_cm.yaml new file mode 100644 index 000000000..91589e8ce --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/_cm.yaml @@ -0,0 +1,210 @@ +alias: install-llvm-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +deps: +- tags: detect,os +- tags: detect,cpu +- skip_if_env: + CM_LLVM_CONDA_ENV: + - 'yes' + tags: get,cmake +- skip_if_env: + CM_LLVM_CONDA_ENV: + - 'yes' + tags: get,generic-sys-util,_ninja-build +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_LLVM_SRC_REPO_PATH + extra_cache_tags: llvm,src,llvm-src,llvm-src-repo + force_env_keys: + - CM_GIT_* + names: + - llvm-src-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_CHECKOUT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG + _tag.llvmorg-: + - CM_VERSION +env: + CM_GIT_URL: https://github.com/llvm/llvm-project +name: Build LLVM compiler from sources (can take >30 min) +new_env_keys: +- CM_LLVM_* +- CM_GET_DEPENDENT_CACHED_PATH +- +PATH +- +C_INCLUDE_PATH +post_deps: +- skip_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + tags: get,llvm +prehook_deps: [] +sort: 1000 +tags: +- install +- src +- llvm +- from.src +- src-llvm +uid: 2af16e9a6c5f4702 +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + clang: + default: true + env: + CM_LLVM_ENABLE_PROJECTS: clang + group: clang + debug: + env: + CM_LLVM_BUILD_TYPE: debug + group: build-type + for-intel-mlperf-inference-v3.1-bert: + adr: + conda-package: + tags: _name.bert-pt + base: + - tag.llvmorg-15.0.7 + - clang + - release + deps: + - tags: get,gcc + - names: + - conda + tags: get,conda,_name.bert-pt + - names: + - conda-package + - ncurses + tags: get,conda-package,_package.ncurses,_source.conda-forge + - names: + - conda-package + - ninja + tags: get,generic,conda-package,_package.ninja + - names: + - conda-package + - cmake + tags: get,generic,conda-package,_package.cmake + - tags: get,conda-package,_package.llvm-openmp,_source.conda-forge + - tags: get,conda-package,_package.chardet + - names: + - conda-package + - libstdcxx-ng + tags: get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge + env: + CM_LLVM_CONDA_ENV: 'yes' + for-intel-mlperf-inference-v3.1-gptj: + adr: + conda-package: + tags: _name.gptj-pt + base: + - tag.llvmorg-16.0.6 + - clang + - release + deps: + - tags: get,generic-sys-util,_g++-12 + - tags: get,gcc + version_min: '12.1' + - names: + - conda + tags: get,conda,_name.gptj-pt + - names: + - conda-package + - python + tags: get,generic,conda-package,_package.python + version: '3.9' + - names: + - conda-package + - ncurses + tags: get,conda-package,_package.ncurses,_source.conda-forge + - tags: get,conda-package,_package.chardet + - names: + - conda-package + - libstdcxx-ng + tags: get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge + - names: + - conda-package + - mkl + tags: get,generic,conda-package,_package.mkl,_source.intel + version: 2023.1.0 + - names: + - conda-package + - mkl-include + tags: get,generic,conda-package,_package.mkl-include,_source.intel + version: 2023.1.0 + - names: + - conda-package + - intel-openmp + tags: get,generic,conda-package,_package.intel-openmp,_source.intel + version: 2023.1.0 + - names: + - conda-package + - gperftools + tags: get,generic,conda-package,_package.gperftools,_source.conda-forge + - names: + - conda-package + - pybind11 + 
tags: get,generic,conda-package,_package.pybind11,_source.conda-forge + version: 2.10.4 + - env: + + CXXFLAGS: + - -Wno-nonnull + - -Wno-maybe-uninitialized + - -Wno-uninitialized + - -Wno-free-nonheap-object + CM_PYTHON_BIN_WITH_PATH: <<<CM_CONDA_BIN_PATH>>>/python3 + tags: get,generic-python-lib,_custom-python,_package.torch,_url.git+https://github.com/pytorch/pytorch.git@927dc662386af052018212c7d01309a506fc94cd + - names: + - conda-package + - typing-extensions + tags: get,generic,conda-package,_package.typing-extensions,_source.conda-forge + - names: + - conda-package + - sympy + tags: get,generic,conda-package,_package.sympy,_source.conda-forge + - env: + CM_PYTHON_BIN_WITH_PATH: <<<CM_CONDA_BIN_PATH>>>/python3 + tags: get,generic-python-lib,_custom-python,_package.setuptools + version_max: 69.9.999 + version_max_usable: 58.2.0 + - env: + CM_PYTHON_BIN_WITH_PATH: <<<CM_CONDA_BIN_PATH>>>/python3 + tags: get,generic-python-lib,_custom-python,_package.neural-compressor,_url.git+https://github.com/intel/neural-compressor.git@a2931eaa4052eec195be3c79a13f7bfa23e54473 + env: + CM_LLVM_16_INTEL_MLPERF_INFERENCE: 'yes' + CM_LLVM_CONDA_ENV: 'yes' + CUDA_VISIBLE_DEVICES: '' + USE_CUDA: '0' + full-history: + ad: + llvm-src-repo: + tags: _full-history + release: + default: true + env: + CM_LLVM_BUILD_TYPE: release + group: build-type + repo.#: + env: + CM_GIT_URL: '#' + group: repo + runtimes.#: + env: + CM_LLVM_ENABLE_RUNTIMES: '#' + sha.#: + env: + CM_GIT_CHECKOUT_SHA: '#' + tag.#: + base: + - full-history + env: + CM_GIT_CHECKOUT_TAG: '#' +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/customize.py new file mode 100644 index 000000000..94832b3e4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/customize.py @@ -0,0 +1,87 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + clang_file_name = "clang" + extra_cmake_options = '' + + install_prefix = os.path.join(os.getcwd(), "install") + + if env.get('CM_LLVM_CONDA_ENV', '') == "yes": + install_prefix = env['CM_CONDA_PREFIX'] + extra_cmake_options = f"-DCMAKE_SHARED_LINKER_FLAGS=-L{install_prefix} -Wl,-rpath,{install_prefix}" + + if env.get('CM_LLVM_16_INTEL_MLPERF_INFERENCE', '') == "yes": + env['CM_REQUIRE_INSTALL'] = 'yes' + i['run_script_input']['script_name'] = "install-llvm-16-intel-mlperf-inference" + clang_file_name = "llvm-link" + # env['USE_LLVM'] = install_prefix + # env['LLVM_DIR'] = os.path.join(env['USE_LLVM'], "lib", "cmake", "llvm") + else: + if env.get('CM_LLVM_ENABLE_RUNTIMES', '') != '': + enable_runtimes = env['CM_LLVM_ENABLE_RUNTIMES'].replace(":", ";") + else: + enable_runtimes = '' + + if env.get('CM_LLVM_ENABLE_PROJECTS', '') != '': + enable_projects = env['CM_LLVM_ENABLE_PROJECTS'].replace(":", ";") + else: + enable_projects = '' + + llvm_build_type = env['CM_LLVM_BUILD_TYPE'] + + cmake_cmd = "cmake " + 
os.path.join(env["CM_LLVM_SRC_REPO_PATH"], "llvm") + " -GNinja -DCMAKE_BUILD_TYPE=" + llvm_build_type + " -DLLVM_ENABLE_PROJECTS=" + enable_projects + " -DLLVM_ENABLE_RUNTIMES='" + \ + enable_runtimes + "' -DCMAKE_INSTALL_PREFIX=" + install_prefix + \ + " -DLLVM_ENABLE_RTTI=ON -DLLVM_INSTALL_UTILS=ON -DLLVM_TARGETS_TO_BUILD=X86 " + \ + extra_cmake_options + + env['CM_LLVM_CMAKE_CMD'] = cmake_cmd + + need_version = env.get('CM_VERSION', '') + + # print(cmake_cmd) + + env['CM_LLVM_INSTALLED_PATH'] = install_prefix + env['CM_LLVM_CLANG_BIN_WITH_PATH'] = os.path.join( + env['CM_LLVM_INSTALLED_PATH'], "bin", clang_file_name) + + # env['+PATH'] = [] + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_LLVM_CLANG_BIN_WITH_PATH'] + + if env.get('CM_LLVM_CONDA_ENV', '') != "yes": + # We don't need to check default paths here because we force install to + # cache + env['+PATH'] = [os.path.join(env['CM_LLVM_INSTALLED_PATH'], "bin")] + + path_include = os.path.join(env['CM_LLVM_INSTALLED_PATH'], 'include') + if os.path.isdir(path_include): + env['+C_INCLUDE_PATH'] = [path_include] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/install-llvm-16-intel-mlperf-inference.sh b/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/install-llvm-16-intel-mlperf-inference.sh new file mode 100644 index 000000000..30b612b2b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/install-llvm-16-intel-mlperf-inference.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:${PATH} +#export LD_LIBRARY_PATH=${CM_CONDA_LIB_PATH}:${LD_LIBRARY_PATH} +ABI=$(python -c "import torch; print(int(torch._C._GLIBCXX_USE_CXX11_ABI))") +test $? -eq 0 || exit $? +export ABI=$ABI +mkdir -p llvm-project && cd llvm-project +wget -nc https://github.com/llvm/llvm-project/releases/download/llvmorg-16.0.6/cmake-16.0.6.src.tar.xz +wget -nc https://github.com/llvm/llvm-project/releases/download/llvmorg-16.0.6/llvm-16.0.6.src.tar.xz +tar -xf cmake-16.0.6.src.tar.xz +test $? -eq 0 || exit $? +mv cmake-16.0.6.src cmake +tar -xf llvm-16.0.6.src.tar.xz +mv llvm-16.0.6.src llvm +rm -rf build +mkdir -p build +cd build +export DEB_BUILD_MAINT_OPTIONS=hardening=-format +export CC=${CM_C_COMPILER_WITH_PATH} +export CXX=${CM_CXX_COMPILER_WITH_PATH} +cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-D_GLIBCXX_USE_CXX11_ABI=${ABI}" -DLLVM_TARGETS_TO_BUILD=X86 -DLLVM_ENABLE_TERMINFO=OFF -DLLVM_INCLUDE_TESTS=OFF -DLLVM_INCLUDE_EXAMPLES=OFF -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_INCLUDE_BENCHMARKS=OFF ../llvm/ + +test $? -eq 0 || exit $? +cmake --build . -j $(nproc) +test $? -eq 0 || exit $? +export LLVM_ROOT=$CONDA_PREFIX +cmake -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -DCMAKE_SHARED_LINKER_FLAGS="-L$CONDA_PREFIX -Wl,-rpath,$CONDA_PREFIX" -P cmake_install.cmake +test $? -eq 0 || exit $? +ln -sf ${LLVM_ROOT}/bin/llvm-config ${LLVM_ROOT}/bin/llvm-config-13 +test $? -eq 0 || exit $? 
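The helper script above is not run directly: `customize.py` swaps it in as the script name whenever `CM_LLVM_16_INTEL_MLPERF_INFERENCE` is set, which the `for-intel-mlperf-inference-v3.1-gptj` variation does. A sketch of the trigger:

```bash
# Selects install-llvm-16-intel-mlperf-inference.sh instead of run.sh
cm run script --tags=install,src,llvm,_for-intel-mlperf-inference-v3.1-gptj
```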
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/run.sh new file mode 100644 index 000000000..60c0efea6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-llvm-src/run.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +CUR_DIR=$PWD + +INSTALL_DIR="${CM_LLVM_INSTALLED_PATH}" +echo "INSTALL_DIR=${INSTALL_DIR}" + +if [[ ${CM_LLVM_CONDA_ENV} != "yes" ]]; then + cmd="rm -rf ${INSTALL_DIR}" + echo "$cmd" + eval "$cmd" +else + export PATH=${CM_CONDA_BIN_PATH}:$PATH +fi + +if [[ ${CM_CLEAN_BUILD} == "yes" ]]; then + rm -rf build +fi + +mkdir -p build + +# If install exist, then configure was done +if [ ! -d "${INSTALL_DIR}" ] || [ ${CM_LLVM_CONDA_ENV} == "yes" ]; then + echo "******************************************************" + + cd build + if [ "${?}" != "0" ]; then exit 1; fi + + echo "${CM_LLVM_CMAKE_CMD}" + eval "${CM_LLVM_CMAKE_CMD}" + ninja + if [ "${?}" != "0" ]; then exit 1; fi + ninja install + if [ "${?}" != "0" ]; then exit 1; fi + + mkdir -p ${INSTALL_DIR} +fi + +# Clean build directory (too large) +cd ${CUR_DIR} +rm -rf build + +echo "******************************************************" +echo "LLVM is built and installed to ${INSTALL_DIR} ..." diff --git a/cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/README.md new file mode 100644 index 000000000..b803e73df --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/install-mlperf-logging-from-src](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/install-mlperf-logging-from-src) for the documentation of this CM script. 
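A plausible invocation for the script documented above, assuming `--version` maps onto the `versions` block (`master`, `v3.1`) declared in the `_cm.yaml` that follows:

```bash
# Editable install of mlcommons/logging from the cached git clone
cm run script --tags=install,mlperf,logging,from.src --version=v3.1
```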
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/_cm.yaml new file mode 100644 index 000000000..c4d8f86bf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/_cm.yaml @@ -0,0 +1,36 @@ +alias: install-mlperf-logging-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +deps: + - tags: get,python3 + names: + - python + - python3 + - tags: get,git,repo,_repo.https://github.com/mlcommons/logging + extra_cache_tags: mlperf_logging + env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLPERF_LOGGING_REPO_PATH +docker_input_mapping: +input_description: +new_env_keys: + - CM_MLPERF_LOGGING_REPO_PATH +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- install +- mlperf +- logging +- from.src +uid: f67cb84a5dc942c3 +variations: {} +versions: + master: + env: + CM_MLPERF_LOGGING_VERSION: master + v3.1: + env: + CM_MLPERF_LOGGING_VERSION: v3.1 diff --git a/cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/customize.py new file mode 100644 index 000000000..89236cec9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/customize.py @@ -0,0 +1,35 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/run.sh new file mode 100644 index 000000000..de622c9f3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-mlperf-logging-from-src/run.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" + +run "${CM_PYTHON_BIN_WITH_PATH} -m pip install -e ${CM_MLPERF_LOGGING_REPO_PATH}" diff --git a/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
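Since the `run.sh` for install-mlperf-logging-from-src above performs an editable `pip install -e` of the cached clone, a quick smoke test is to import the package from the same Python. The module name `mlperf_logging` is what the mlcommons/logging repository exposes, but treat it as an assumption if the upstream layout changes:

```bash
# Should print a path inside the cached CM_MLPERF_LOGGING_REPO_PATH clone
python3 -c "import mlperf_logging; print(mlperf_logging.__file__)"
```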
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/README.md b/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/README.md new file mode 100644 index 000000000..4211bc82f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts//install-nccl-libs](https://docs.mlcommons.org/cm4mlops/scripts//install-nccl-libs) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/_cm.yaml new file mode 100644 index 000000000..8011ab3ad --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/_cm.yaml @@ -0,0 +1,13 @@ +alias: install-nccl-libs +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +tags: +- install +- nccl +- libs +uid: d1c76da2adb44201 +variations: + cuda: + deps: + - tags: get,cuda diff --git a/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/customize.py new file mode 100644 index 000000000..89236cec9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/customize.py @@ -0,0 +1,35 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/run-ubuntu.sh b/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/run-ubuntu.sh new file mode 100644 index 000000000..e56074a51 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/run-ubuntu.sh @@ -0,0 +1,2 @@ +CM_SUDO=${CM_SUDO:-sudo} +${CM_SUDO} apt install -y --allow-downgrades libnccl2=2.18.3-1+cuda${CM_CUDA_VERSION} libnccl-dev=2.18.3-1+cuda${CM_CUDA_VERSION} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/run.sh new file mode 100644 index 000000000..3a584c10c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-nccl-libs/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... 
+# run "$CM_RUN_CMD" diff --git a/cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/README.md new file mode 100644 index 000000000..023a3c524 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/install-numactl-from-src](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/install-numactl-from-src) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/_cm.yaml new file mode 100644 index 000000000..3257c2cc4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/_cm.yaml @@ -0,0 +1,61 @@ +alias: install-numactl-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +deps: +- tags: detect,os +- tags: detect,cpu +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_NUMACTL_SRC_REPO_PATH + extra_cache_tags: numactl,src,numactl-src,numactl-src-repo + names: + - numactl-src-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_CHECKOUT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +env: + CM_GIT_URL: https://github.com/numactl/numactl +name: Build numactl from sources +new_env_keys: +- CM_NUMACTL_* +- +PATH +sort: 1000 +tags: +- install +- src +- from.src +- numactl +- src-numactl +uid: 4f355ae8ca1948b2 +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + repo.#: + env: + CM_GIT_URL: '#' + group: repo + repo.https://github.com/numactl/numactl: + default: true + env: + CM_GIT_URL: https://github.com/numactl/numactl + group: repo + sha.#: + env: + CM_GIT_CHECKOUT_SHA: '#' + tag.#: + ad: + pytorch-src-repo: + tags: _no-recurse-submodules,_full-history + env: + CM_GIT_CHECKOUT_TAG: '#' +warnings: +- This CM script will need sudo to install numactl! 
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/customize.py new file mode 100644 index 000000000..157be4d9b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/customize.py @@ -0,0 +1,35 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + run_cmd = "python setup.py install" + + env['CM_RUN_CMD'] = run_cmd + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + env['+PATH'] = [] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/run.sh new file mode 100644 index 000000000..606b5d965 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-numactl-from-src/run.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +CUR_DIR=$PWD +echo $PWD +rm -rf numactl +cmd="cp -r ${CM_NUMACTL_SRC_REPO_PATH} numactl" +echo "$cmd" +eval "$cmd" +cd numactl +./autogen.sh +./configure +if [ "${?}" != "0" ]; then exit 1; fi +make +if [ "${?}" != "0" ]; then exit 1; fi +#make install DESTDIR=$CUR_DIR +sudo make install +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" diff --git a/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/README.md new file mode 100644 index 000000000..ad1458a75 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-onednn-from-src](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-onednn-from-src) for the documentation of this CM script. 
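The commented-out `DESTDIR` line in the numactl `run.sh` above hints at a sudo-free alternative; a sketch under the assumption that a local prefix is acceptable for downstream consumers:

```bash
# Hedged sketch: stage numactl under a local install directory instead of
# the system prefix, so no sudo is required (the resulting paths would then
# have to be exported through new_env_keys / +PATH).
cd numactl
./autogen.sh
./configure --prefix="${PWD}/../install"
make -j"$(nproc)"
make install
```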
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/_cm.yaml new file mode 100644 index 000000000..6a86bde63 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/_cm.yaml @@ -0,0 +1,79 @@ +alias: install-onednn-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +deps: +- tags: detect,os +- tags: detect,cpu +- names: + - python + - python3 + skip_if_env: + CM_CONDA_ENV: + - 'yes' + tags: get,python3 +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_ONEDNN_SRC_REPO_PATH + extra_cache_tags: onednn,src,onednn-src,onednn-src-repo + names: + - onednn-src-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_CHECKOUT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +env: + CM_GIT_URL: https://github.com/oneapi-src/oneDNN +name: Build oneDNN from sources +new_env_keys: +- CM_ONEDNN_* +prehook_deps: [] +sort: 1000 +tags: +- install +- get +- src +- from.src +- onednn +- src-onednn +uid: fe3a652e315f4c8f +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + for-intel-mlperf-inference-v3.1-bert: + adr: + conda-package: + tags: _name.bert-pt + onednn-src-repo: + tags: _no-recurse-submodules + base: + - tag.v2.6 + env: + CM_CONDA_ENV: 'yes' + CM_FOR_INTEL_MLPERF_INFERENCE_BERT: 'yes' + repo.#: + env: + CM_GIT_URL: '#' + group: repo + repo.https://github.com/oneapi-src/oneDNN: + default: true + env: + CM_GIT_URL: https://github.com/oneapi-src/oneDNN + group: repo + sha.#: + env: + CM_GIT_CHECKOUT_SHA: '#' + tag.#: + ad: + onednn-src-repo: + tags: _no-recurse-submodules,_full-history + env: + CM_GIT_CHECKOUT_TAG: '#' +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/customize.py new file mode 100644 index 000000000..216391d33 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/customize.py @@ -0,0 +1,44 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + run_cmd = "" + + env['CM_RUN_CMD'] = run_cmd + env['CM_ONEDNN_INSTALLED_PATH'] = os.path.join(os.getcwd(), "onednn") + + if env.get('CM_FOR_INTEL_MLPERF_INFERENCE_BERT', '') == "yes": + i['run_script_input']['script_name'] = "run-intel-mlperf-inference-bert" + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/run-intel-mlperf-inference-bert.sh b/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/run-intel-mlperf-inference-bert.sh new file mode 100644 index 000000000..77bff6883 --- /dev/null +++
b/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/run-intel-mlperf-inference-bert.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +CUR_DIR=$PWD +rm -rf onednn +cp -r ${CM_ONEDNN_SRC_REPO_PATH} onednn +cd onednn +rm -rf build +pwd +wget -nc --no-check-certificate https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/bert-99/pytorch-cpu/patches/onednnv2_6.patch +if [ "${?}" != "0" ]; then exit 1; fi +cmd="git apply onednnv2_6.patch" + +echo ${cmd} +eval ${cmd} + +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" diff --git a/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/run-intel-mlperf-inference.sh b/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/run-intel-mlperf-inference.sh new file mode 100644 index 000000000..77bff6883 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/run-intel-mlperf-inference.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +CUR_DIR=$PWD +rm -rf onednn +cp -r ${CM_ONEDNN_SRC_REPO_PATH} onednn +cd onednn +rm -rf build +pwd +wget -nc --no-check-certificate https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/bert-99/pytorch-cpu/patches/onednnv2_6.patch +if [ "${?}" != "0" ]; then exit 1; fi +cmd="git apply onednnv2_6.patch" + +echo ${cmd} +eval ${cmd} + +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" diff --git a/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/run.sh new file mode 100644 index 000000000..fbdd90f92 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-onednn-from-src/run.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=$PWD +rm -rf onednn +cp -r ${CM_ONEDNN_SRC_REPO_PATH} onednn +cd onednn +test "${?}" -eq "0" || exit $? +rm -rf build + +mkdir build +cd build +cmake .. +test "${?}" -eq "0" || exit $? +make -j${CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET} +test "${?}" -eq "0" || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/README.md new file mode 100644 index 000000000..6de5269ab --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-onnxruntime-from-src](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-onnxruntime-from-src) for the documentation of this CM script. 
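As a usage note for the oneDNN script documented above, the Intel MLPerf variation declared in its `_cm.yaml` is selected by tag; a hedged example:

```bash
# Hypothetical: pins oneDNN to tag v2.6 inside the bert-pt conda environment,
# as encoded by the variation's `base` list and adr entries above.
cm run script --tags=install,get,src,onednn,_for-intel-mlperf-inference-v3.1-bert
```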
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/_cm.yaml new file mode 100644 index 000000000..20c58c5b6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/_cm.yaml @@ -0,0 +1,76 @@ +alias: install-onnxruntime-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +deps: +- tags: detect,os +- tags: detect,cpu +- tags: fail,filter,_windows +- names: + - python + - python3 + skip_if_env: + CM_CONDA_ENV: + - 'yes' + tags: get,python3 +- tags: get,cmake + version_min: 3.26.0 +- tags: get,gcc + version_max: 11.9.999 + version_max_usable: '11.0' +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_ONNXRUNTIME_SRC_REPO_PATH + extra_cache_tags: onnxruntime,src,onnxruntime-src,onnxruntime-src-repo + names: + - onnxruntime-src-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_CHECKOUT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +env: {} +name: Build onnxruntime from sources +new_env_keys: +- CM_ONNXRUNTIME_* +prehook_deps: [] +sort: 1000 +tags: +- install +- get +- src +- from.src +- onnxruntime +- src-onnxruntime +uid: 9798c7e7a5944cee +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + cuda: + deps: + - names: + - cuda + tags: get,cuda,_cudnn + env: + CM_ONNXRUNTIME_GPU: 'yes' + repo.https://github.com/Microsoft/onnxruntime: + default: true + env: + CM_GIT_URL: https://github.com/Microsoft/onnxruntime + group: repo + sha.#: + env: + CM_GIT_CHECKOUT_SHA: '#' + tag.#: + ad: + onnxruntime-src-repo: + tags: _no-recurse-submodules,_full-history + env: + CM_GIT_CHECKOUT_TAG: '#' +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/customize.py new file mode 100644 index 000000000..12c42eafc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/customize.py @@ -0,0 +1,34 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + run_cmd = "./build.sh --config RelWithDebInfo --build_wheel --parallel --allow_running_as_root --skip_tests " + + if env.get('CM_ONNXRUNTIME_GPU', '') == "yes": + cuda_home = env['CUDA_HOME'] + run_cmd += f"--use_cuda --cuda_home {cuda_home} --cudnn_home {cuda_home}" + + env['CM_RUN_DIR'] = env['CM_ONNXRUNTIME_SRC_REPO_PATH'] + env['CM_RUN_CMD'] = run_cmd + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/run.sh new file mode 100644 index 000000000..4a2381af7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-onnxruntime-from-src/run.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +export CC=${CM_GCC_BIN_WITH_PATH} +export 
CXX=${CM_GCC_INSTALLED_PATH}/g++ + +echo "cd ${CM_RUN_DIR}" +cd ${CM_RUN_DIR} +test $? -eq 0 || exit $? +rm -rf build + +echo ${CM_RUN_CMD} +eval ${CM_RUN_CMD} +test $? -eq 0 || exit $? + +exit 0 diff --git a/cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/_cm.yaml new file mode 100644 index 000000000..83b253f2a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/_cm.yaml @@ -0,0 +1,62 @@ +alias: install-opencv-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +deps: +- tags: detect,os +- tags: detect,cpu +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_OPENCV_SRC_REPO_PATH + extra_cache_tags: opencv,src,opencv-src,opencv-src-repo + names: + - opencv-src-repo + - opencv-src + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_CHECKOUT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +env: + CM_GIT_URL: https://github.com/opencv/opencv +name: Build opencv from sources +new_env_keys: +- CM_OPENCV_* +prehook_deps: [] +sort: 1000 +tags: +- install +- get +- src +- from.src +- opencv +- src-opencv +uid: 98552486a0bc4214 +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + repo.#: + env: + CM_GIT_URL: '#' + group: repo + repo.https://github.com/opencv/opencv: + default: true + env: + CM_GIT_URL: https://github.com/opencv/opencv + group: repo + sha.#: + env: + CM_GIT_CHECKOUT_SHA: '#' + tag.#: + ad: + opencv-src-repo: + tags: _no-recurse-submodules,_full-history + env: + CM_GIT_CHECKOUT_TAG: '#' +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/customize.py new file mode 100644 index 000000000..1d7883e9f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/customize.py @@ -0,0 +1,39 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + env['CM_OPENCV_BUILD_PATH'] = os.path.join(os.getcwd(), "opencv", "build") + env['CM_DEPENDENT_CACHED_PATH'] = env['CM_OPENCV_BUILD_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/run.sh
b/cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/run.sh new file mode 100644 index 000000000..34c2b4dba --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-opencv-from-src/run.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=$PWD +rm -rf opencv +cp -r ${CM_OPENCV_SRC_REPO_PATH} opencv +cd opencv +test "${?}" -eq "0" || exit $? +rm -rf build + +mkdir build +cd build +cmake .. +test "${?}" -eq "0" || exit $? +make -j${CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET} +test "${?}" -eq "0" || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/install-openssl/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-openssl/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-openssl/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-openssl/README.md b/cmx4mlops/cmx4mlops/repo/script/install-openssl/README.md new file mode 100644 index 000000000..a4df1ecb8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-openssl/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/install-openssl](https://docs.mlcommons.org/cm4mlops/scripts/Detection-or-installation-of-tools-and-artifacts/install-openssl) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-openssl/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-openssl/_cm.yaml new file mode 100644 index 000000000..e478a2ecf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-openssl/_cm.yaml @@ -0,0 +1,28 @@ +alias: install-openssl +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +default_version: 1.1.1 +deps: +- tags: detect,os +- tags: detect,cpu +env: {} +new_env_keys: +- CM_OPENSSL_* +- +LD_LIBRARY_PATH +post_deps: +- skip_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + tags: get,openssl +tags: +- install +- src +- openssl +- openssl-lib +uid: be472d3b1d014169 +versions: + 1.1.1: + env: + CM_VERSION: 1.1.1 diff --git a/cmx4mlops/cmx4mlops/repo/script/install-openssl/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-openssl/customize.py new file mode 100644 index 000000000..fea55e323 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-openssl/customize.py @@ -0,0 +1,58 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + need_version = env.get('CM_VERSION', '') + if need_version == '': + return {'return': 1, + 'error': 'internal problem - CM_VERSION is not defined in env'} + + print(recursion_spaces + ' # Requested version: {}'.format(need_version)) + + return 
{'return': 0} + + +def postprocess(i): + inp = i['input'] + env = i['env'] + tags = inp['tags'] + tag_list = tags.split(",") + install_path = os.path.join( + os.getcwd(), + 'openssl-' + + env['CM_VERSION'] + + 'g', + 'install') + path_lib = os.path.join(install_path, 'lib') + if '+LD_LIBRARY_PATH' not in env: + env['+LD_LIBRARY_PATH'] = [] + env['+LD_LIBRARY_PATH'].append(path_lib) + bin_name = "openssl" + path_bin = os.path.join(install_path, 'bin') + env['CM_OPENSSL_INSTALLED_PATH'] = path_bin + env['CM_OPENSSL_BIN_WITH_PATH'] = os.path.join(path_bin, bin_name) + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-openssl/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-openssl/run.sh new file mode 100644 index 000000000..2e6502c07 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-openssl/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +CUR_DIR=$PWD + +echo "***********************************************************" +CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}} +CM_MAKE_CORES=${CM_MAKE_CORES:-2} +CM_WGET_URL=https://www.openssl.org/source/openssl-${CM_VERSION}g.tar.gz +wget -nc ${CM_WGET_URL} +test $? -eq 0 || exit 1 +tar -xzf openssl-${CM_VERSION}g.tar.gz && cd openssl-${CM_VERSION}g +test $? -eq 0 || exit 1 +mkdir -p install +./config --prefix=`pwd`/install +make -j${CM_MAKE_CORES} +test $? -eq 0 || exit 1 +make install diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/README.md b/cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/README.md new file mode 100644 index 000000000..a8a717348 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts//install-pip-package-for-cmind-python](https://docs.mlcommons.org/cm4mlops/scripts//install-pip-package-for-cmind-python) for the documentation of this CM script. 
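The openssl `postprocess()` above derives all exported paths from the `openssl-<version>g/install` layout produced by `run.sh`; a minimal sketch of consuming them, assuming the default 1.1.1 version:

```bash
# Hedged sketch: mirror what CM exports via +LD_LIBRARY_PATH and
# CM_OPENSSL_BIN_WITH_PATH for the default version.
INSTALL="$PWD/openssl-1.1.1g/install"
export LD_LIBRARY_PATH="$INSTALL/lib:$LD_LIBRARY_PATH"
"$INSTALL/bin/openssl" version
```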
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/_cm.yaml new file mode 100644 index 000000000..765500d91 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/_cm.yaml @@ -0,0 +1,17 @@ +alias: install-pip-package-for-cmind-python +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +tags: +- install +- pip +- package +- pip-package +- for-cmind-python +- for.cmind-python +uid: b16ed087abab459c + +variations: + package.#: + env: + CM_PIP_PACKAGE_NAME: "#" diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/customize.py new file mode 100644 index 000000000..1fa0b99f5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pip-package-for-cmind-python/customize.py @@ -0,0 +1,53 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import subprocess +import sys + + +def install(package): + additional_install_options = [] + r = subprocess.run([sys.executable, "-m", "pip", + "--version"], check=True, capture_output=True) + r = r.stdout.decode("utf-8") + if "pip" in r: + out_split = r.split(" ") + if len(out_split) < 2: + return {'return': 1, 'error': 'Pip version detection failed'} + pip_version = out_split[1].split(".") + if pip_version and len(pip_version) > 1 and int(pip_version[0]) >= 23: + additional_install_options.append("--break-system-packages") + run_cmd = [sys.executable, "-m", "pip", "install", package] + run_cmd += additional_install_options + r = subprocess.run(run_cmd, check=True) + + return {'return': 0} + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + if env.get('CM_PIP_PACKAGE_NAME', '') != '': + r = install(env['CM_PIP_PACKAGE_NAME']) + if r['return'] > 0: + return r + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-python-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-python-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-python-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-python-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-python-src/README.md new file mode 100644 index 000000000..a0032bf7f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-python-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Python-automation/install-python-src](https://docs.mlcommons.org/cm4mlops/scripts/Python-automation/install-python-src) for the documentation of this CM script. 
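The `install()` helper above probes the pip version to decide whether `--break-system-packages` is needed (pip >= 23 can refuse to modify an externally managed environment); a shell sketch of the same check (the package name is illustrative):

```bash
# Same decision as customize.py: extract the pip major version and add
# --break-system-packages for pip 23 and newer.
PIP_MAJOR=$(python3 -m pip --version | cut -d' ' -f2 | cut -d'.' -f1)
EXTRA=""
if [ "${PIP_MAJOR}" -ge 23 ]; then EXTRA="--break-system-packages"; fi
python3 -m pip install ${EXTRA} some-package   # hypothetical package name
```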
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-python-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-python-src/_cm.yaml new file mode 100644 index 000000000..c0a618346 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-python-src/_cm.yaml @@ -0,0 +1,83 @@ +alias: install-python-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Python automation +default_env: + CM_CUSTOM_SSL: 'no' + CM_ENABLE_SSL: 'no' + CM_PYTHON_LTO_FLAG: '' + CM_PYTHON_OPTIMIZATION_FLAG: '' + CM_SHARED_BUILD: 'no' + CM_WGET_URL: https://www.python.org/ftp/python/[PYTHON_VERSION]/Python-[PYTHON_VERSION].tgz +default_version: 3.10.13 +deps: +- tags: detect,os +- tags: detect,cpu +- tags: get,generic-sys-util,_libffi-dev +- tags: get,generic-sys-util,_libbz2-dev +- tags: get,generic-sys-util,_libssl-dev +- enable_if_env: + CM_HOST_OS_FLAVOR: + - ubuntu + tags: get,generic-sys-util,_liblzma-dev +- enable_if_env: + CM_HOST_OS_FLAVOR: + - ubuntu + tags: get,generic-sys-util,_libncurses-dev +- enable_if_env: + CM_HOST_OS_FLAVOR: + - ubuntu + tags: get,generic-sys-util,_libreadline-dev +- enable_if_env: + CM_HOST_OS_FLAVOR: + - ubuntu + tags: get,generic-sys-util,_libsqlite3-dev +new_env_keys: +- CM_PYTHON_INSTALL_PATH +- CM_PYTHON_BIN_WITH_PATH +- +PATH +- +LD_LIBRARY_PATH +- +C_INCLUDE_PATH +post_deps: +- inherit_variation_tags: 'True' + names: + - python + - python3 + reuse_version: true + skip_if_env: + CM_REQUIRE_INSTALL: + - 'yes' + tags: get,python3 +tags: +- install +- src +- python +- python3 +- src-python3 +- src-python +uid: 12d3a608afe14a1e +variations: + lto: + env: + CM_PYTHON_INSTALL_CACHE_TAGS: with-lto + CM_PYTHON_LTO_FLAG: ' --lto' + optimized: + env: + CM_PYTHON_INSTALL_CACHE_TAGS: optimized + CM_PYTHON_OPTIMIZATION_FLAG: ' --enable-optimizations' + shared: + env: + CM_PYTHON_INSTALL_CACHE_TAGS: shared + CM_SHARED_BUILD: 'yes' + with-custom-ssl: + deps: + - tags: get,openssl + env: + CM_CUSTOM_SSL: 'yes' + CM_PYTHON_INSTALL_CACHE_TAGS: with-custom-ssl + with-ssl: + env: + CM_ENABLE_SSL: 'yes' + CM_PYTHON_INSTALL_CACHE_TAGS: with-ssl + group: ssl diff --git a/cmx4mlops/cmx4mlops/repo/script/install-python-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-python-src/customize.py new file mode 100644 index 000000000..26641f5d2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-python-src/customize.py @@ -0,0 +1,62 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + need_version = env.get('CM_VERSION', '') + if need_version == '': + return {'return': 1, + 'error': 'internal problem - CM_VERSION is not defined in env'} + + print(recursion_spaces + ' # Requested version: {}'.format(need_version)) + + path_bin = os.path.join(os.getcwd(), 'install', 'bin') + + env['CM_PYTHON_INSTALLED_PATH'] = path_bin + + return {'return': 0} + + 
+def postprocess(i): + + env = i['env'] + variation_tags = i['variation_tags'] + + path_lib = os.path.join(os.getcwd(), 'install', 'lib') + env['+LD_LIBRARY_PATH'] = [path_lib] + + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.getcwd() + + env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join( + env['CM_PYTHON_INSTALLED_PATH'], 'python3') + + # We don't need to check default paths here because we force install to + # cache + env['+PATH'] = [env['CM_PYTHON_INSTALLED_PATH']] + path_include = os.path.join(os.getcwd(), 'install', 'include') + env['+C_INCLUDE_PATH'] = [path_include] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-python-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-python-src/run.sh new file mode 100644 index 000000000..d151283e7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-python-src/run.sh @@ -0,0 +1,71 @@ +#!/bin/bash + +CUR_DIR=$PWD + +echo "***********************************************************" +export PYTHON_VERSION=${CM_VERSION} +CM_WGET_URL="${CM_WGET_URL//"[PYTHON_VERSION]"/$PYTHON_VERSION}" + +echo "CM_WGET_URL=${CM_WGET_URL}" >> tmp-run-env.out +echo "wget Python src from ${CM_WGET_URL} for version ${PYTHON_VERSION}..." + +CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES}} +CM_MAKE_CORES=${CM_MAKE_CORES:-2} + +if [[ ${CM_SHARED_BUILD} == "yes" ]]; then + SHARED_BUILD_FLAGS=" --enable-shared" +else + SHARED_BUILD_FLAGS="" +fi + +EXTRA_FLAGS="" + +if [[ ${CM_ENABLE_SSL} == "yes" ]]; then + EXTRA_FLAGS="${EXTRA_FLAGS} --enable-ssl" +fi + + +if [[ ${CM_CUSTOM_SSL} == "yes" ]]; then + EXTRA_FLAGS="${EXTRA_FLAGS} --with-openssl=${CM_OPENSSL_INSTALLED_PATH} --with-openssl-rpath=auto" +fi + +rm -rf src +mkdir src + +rm -rf install +mkdir install + +cd src + +pwd +wget -nc ${CM_WGET_URL} + +if [ "${?}" != "0" ]; then exit 1; fi + +tar xzf Python-${PYTHON_VERSION}.tgz +if [ "${?}" != "0" ]; then exit 1; fi + + +rm -f Python-${PYTHON_VERSION}.tgz +if [ "${?}" != "0" ]; then exit 1; fi + +cd Python-${PYTHON_VERSION} + +./configure ${CM_PYTHON_OPTIMIZATION_FLAG} ${CM_PYTHON_LTO_FLAG} ${SHARED_BUILD_FLAGS} ${EXTRA_FLAGS} --with-ensurepip=install --prefix="${CUR_DIR}/install" +if [ "${?}" != "0" ]; then exit 1; fi + +make -j${CM_MAKE_CORES} +make -j${CM_MAKE_CORES} install +if [ "${?}" != "0" ]; then exit 1; fi + +echo "Removing src files" +cd "${CUR_DIR}" && \ +rm -rf src + +if [ "${?}" != "0" ]; then exit 1; fi + +cd "${CUR_DIR}/install/bin" && ln -s python3 python +cd "${CUR_DIR}/install/bin" && ln -s pip3 pip + +echo "********************************************************" +echo "Python was built and installed to ${CUR_DIR}/install ..." diff --git a/cmx4mlops/cmx4mlops/repo/script/install-python-venv/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-python-venv/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-python-venv/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
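The `[PYTHON_VERSION]` placeholder in `CM_WGET_URL` above is expanded in `run.sh` with bash pattern substitution; a self-contained sketch of that idiom:

```bash
# Every literal [PYTHON_VERSION] in the template is replaced, exactly as in
# install-python-src/run.sh.
URL='https://www.python.org/ftp/python/[PYTHON_VERSION]/Python-[PYTHON_VERSION].tgz'
PYTHON_VERSION=3.10.13
echo "${URL//"[PYTHON_VERSION]"/$PYTHON_VERSION}"
# -> https://www.python.org/ftp/python/3.10.13/Python-3.10.13.tgz
```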
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-python-venv/README.md b/cmx4mlops/cmx4mlops/repo/script/install-python-venv/README.md new file mode 100644 index 000000000..e9b62b7db --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-python-venv/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Python-automation/install-python-venv](https://docs.mlcommons.org/cm4mlops/scripts/Python-automation/install-python-venv) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-python-venv/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-python-venv/_cm.yaml new file mode 100644 index 000000000..f914d7d57 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-python-venv/_cm.yaml @@ -0,0 +1,31 @@ +alias: install-python-venv +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Python automation +clean_files: [] +deps: +- inherit_variation_tags: true + reuse_version: true + tags: get,python,-virtual +new_env_keys: +- CM_VIRTUAL_ENV_* +- CM_PYTHON_BIN_WITH_PATH +new_state_keys: +- script_prefix +post_deps: +- names: + - register-python + tags: get,python3 +tags: +- install +- python +- get-python-venv +- python-venv +uid: 7633ebada4584c6c +variations: + lto: {} + optimized: {} + shared: {} + with-custom-ssl: {} + with-ssl: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-python-venv/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-python-venv/customize.py new file mode 100644 index 000000000..1e6afb71c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-python-venv/customize.py @@ -0,0 +1,101 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + # Add extra tags to python + add_extra_cache_tags = [] # for this script + add_python_extra_cache_tags = ['virtual'] # for get-python script + + name = env.get('CM_NAME', '') + if not quiet and name == '': + print('') + x = input( + 'Enter some tag to describe this virtual env (mlperf-inf,octoml-bench,etc): ') + x = x.strip() + + if x != '': + name = x + + directory_name = 'venv' + if name != '': + directory_name = name.strip().lower() + name_tag = 'name-' + directory_name + + add_extra_cache_tags.append(name_tag) + add_python_extra_cache_tags.append(name_tag) + + env['CM_VIRTUAL_ENV_DIR'] = directory_name + env['CM_VIRTUAL_ENV_PATH'] = os.path.join(os.getcwd(), directory_name) + + s = 'Scripts' if os_info['platform'] == 'windows' else 'bin' + env['CM_VIRTUAL_ENV_SCRIPTS_PATH'] = os.path.join( + env['CM_VIRTUAL_ENV_PATH'], s) + + env['CM_TMP_PATH'] = env['CM_VIRTUAL_ENV_SCRIPTS_PATH'] + env['CM_TMP_FAIL_IF_NOT_FOUND'] = 'yes' + + r = automation.update_deps({'deps': meta['post_deps'], + 'update_deps': {'register-python': + {'extra_cache_tags': ','.join(add_python_extra_cache_tags)}}}) + if r['return'] > 0: + return r + + env['CM_PYTHON_INSTALLED_PATH'] = 
env['CM_VIRTUAL_ENV_SCRIPTS_PATH'] + + return {'return': 0, 'add_extra_cache_tags': add_extra_cache_tags} + + +def postprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + state = i['state'] + + script_prefix = state.get('script_prefix', []) + + path_to_activate = os.path.join( + env['CM_VIRTUAL_ENV_SCRIPTS_PATH'], 'activate') + + # If windows, download here otherwise use run.sh + if os_info['platform'] == 'windows': + path_to_activate += '.bat' + + s = os_info['run_bat'].replace('${bat_file}', '"' + path_to_activate + '"') + + script_prefix.append(s) + state['script_prefix'] = script_prefix + + python_name = 'python.exe' if os_info['platform'] == 'windows' else 'python3' + + # Will be passed to get-python to finalize registering of the new python + env['CM_PYTHON_BIN_WITH_PATH'] = os.path.join( + env['CM_PYTHON_INSTALLED_PATH'], python_name) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-python-venv/run.bat b/cmx4mlops/cmx4mlops/repo/script/install-python-venv/run.bat new file mode 100644 index 000000000..6c48e1bdc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-python-venv/run.bat @@ -0,0 +1,5 @@ +%CM_PYTHON_BIN_WITH_PATH% -m pip install virtualenv +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +%CM_PYTHON_BIN_WITH_PATH% -m venv %CM_VIRTUAL_ENV_DIR% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/install-python-venv/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-python-venv/run.sh new file mode 100644 index 000000000..87dfcaf10 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-python-venv/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +#PIP_EXTRA=`${CM_PYTHON_BIN} -c "import pkg_resources; print(' --break-system-packages ' if int(pkg_resources.get_distribution('pip').version.split('.')[0]) >= 23 else '')"` +PIP_EXTRA=`${CM_PYTHON_BIN} -c "import importlib.metadata; print(' --break-system-packages ' if int(importlib.metadata.version('pip').split('.')[0]) >= 23 else '')"` + +${CM_PYTHON_BIN_WITH_PATH} -m pip install virtualenv ${PIP_EXTRA} +test $? -eq 0 || exit 1 + +${CM_PYTHON_BIN_WITH_PATH} -m venv ${CM_VIRTUAL_ENV_DIR} +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/README.md new file mode 100644 index 000000000..5208545a3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-pytorch-from-src](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-pytorch-from-src) for the documentation of this CM script. 
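For context, a hedged usage sketch of the install-python-venv script above (assuming `--name` feeds `CM_NAME`, which becomes the venv directory and a `name-...` cache tag; the name shown is illustrative):

```bash
# Hypothetical: creates ./mlperf via `python -m venv` and re-registers the
# new interpreter through the get-python3 post-dependency.
cm run script --tags=install,python-venv --name=mlperf
```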
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/_cm.yaml new file mode 100644 index 000000000..479fa9e01 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/_cm.yaml @@ -0,0 +1,288 @@ +alias: install-pytorch-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +deps: +- tags: detect,os +- tags: detect,cpu +- names: + - python + - python3 + skip_if_env: + CM_CONDA_ENV: + - 'yes' + tags: get,python3 +- names: + - compiler + tags: get,compiler +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_PYTORCH_SRC_REPO_PATH + extra_cache_tags: pytorch,src,pytorch-src,pytorch-src-repo + names: + - pytorch-src-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_CHECKOUT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +env: + CM_GIT_URL: https://github.com/pytorch/pytorch +name: Build pytorch from sources +new_env_keys: +- CM_PYTORCH_* +prehook_deps: [] +sort: 1000 +tags: +- install +- get +- src +- from-src +- from.src +- pytorch +- src-pytorch +uid: 64eaf3e81de94f41 +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + cherrypicks.#: + ad: + pytorch-src-repo: + tags: _cherrypicks.# + env: {} + cuda: + deps: + - names: + - cuda + tags: get,cuda,_cudnn + env: + CUDA_HOME: <<>> + CUDA_NVCC_EXECUTABLE: <<>> + CUDNN_INCLUDE_PATH: <<>> + CUDNN_LIBRARY_PATH: <<>> + TORCH_CUDA_ARCH_LIST: Ampere Ada Hopper + TORCH_CXX_FLAGS: -D_GLIBCXX_USE_CXX11_ABI=1 + USE_CUDA: '1' + USE_CUDNN: '1' + for-intel-mlperf-inference-resnet50: + adr: + conda-package: + tags: _name.resnet50-pt + base: + - tag.v1.12.0-rc7 + - pr-to-apply.pull/76869/head + deps: + - names: + - conda + tags: get,conda,_name.resnet50-pt + - names: + - conda-package + - ncurses + tags: get,generic,conda-package,_package.ncurses,_source.conda-forge + - names: + - conda-package + - python3 + tags: get,generic,conda-package,_package.python + version: '3.9' + - tags: get,gcc + version_max: '11.9' + version_max_usable: '11.3' + - names: + - conda-package + - cmake + tags: get,generic,conda-package,_package.cmake + version_min: '3.26' + - names: + - pip-package + - numpy + tags: get,generic-python-lib,_package.numpy + version_max: 1.26.4 + - names: + - conda-package + - libstdcxx-ng + tags: get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge + env: + CM_CONDA_ENV: 'yes' + CM_MLPERF_INFERENCE_INTEL: 'yes' + CM_MLPERF_INFERENCE_INTEL_MODEL: resnet50 + USE_CUDA: '0' + for-intel-mlperf-inference-retinanet: + adr: + conda-package: + tags: _name.retinanet-pt + base: + - tag.v1.12.0-rc7 + - pr-to-apply.pull/89925/head + - cherrypicks.78cad998e505b667d25ac42f8aaa24409f5031e1 + deps: + - names: + - conda + tags: get,conda,_name.retinanet-pt + - names: + - conda-package + - ncurses + tags: get,generic,conda-package,_package.ncurses,_source.conda-forge + - names: + - conda-package + - python3 + tags: get,generic,conda-package,_package.python + version: '3.9' + - tags: get,gcc + version_max: '11.9' + version_max_usable: '11.3' + - names: + - conda-package + - cmake + tags: get,generic,conda-package,_package.cmake + version_min: '3.26' + - names: + - pip-package + - numpy + tags: get,generic-python-lib,_package.numpy + version_max: 1.26.4 + - names: + - conda-package + - libstdcxx-ng + tags: get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge + env: + CM_CONDA_ENV: 'yes' + CM_MLPERF_INFERENCE_INTEL: 
'yes' + CM_MLPERF_INFERENCE_INTEL_MODEL: retinanet + USE_CUDA: '0' + for-intel-mlperf-inference-v3.1-bert: + adr: + conda-package: + tags: _name.bert-pt + base: + - tag.v1.12.0 + deps: + - tags: get,generic-sys-util,_libffi7 + - names: + - conda + tags: get,conda,_name.bert-pt + - names: + - conda-package + - ncurses + tags: get,generic,conda-package,_package.ncurses,_source.conda-forge + - names: + - conda-package + - python3 + tags: get,generic,conda-package,_package.python + version: '3.8' + - tags: install,llvm,src,_tag.llvmorg-15.0.7,_runtimes.libcxx:libcxxabi:openmp,_clang,_release,_for-intel-mlperf-inference-v3.1-bert + - names: + - conda-package + - ninja + tags: get,generic,conda-package,_package.ninja + - names: + - conda-package + - cmake + tags: get,generic,conda-package,_package.cmake + - names: + - conda-package + - mkl + tags: get,generic,conda-package,_package.mkl,_source.intel + version: 2023.1.0 + - names: + - conda-package + - mkl-include + tags: get,generic,conda-package,_package.mkl-include,_source.intel + version: 2023.1.0 + - names: + - conda-package + - intel-openmp + tags: get,generic,conda-package,_package.intel-openmp,_source.intel + version: 2023.1.0 + - names: + - conda-package + - llvm-openmp + tags: get,generic,conda-package,_package.llvm-openmp,_source.conda-forge + - names: + - conda-package + - jemalloc + tags: get,generic,conda-package,_package.jemalloc,_source.conda-forge + - names: + - conda-package + - wheel + tags: get,generic,conda-package,_package.wheel,_source.conda-forge + - names: + - conda-package + - setuptools + tags: get,generic,conda-package,_package.setuptools,_source.conda-forge + version: 69.5.1 + - names: + - conda-package + - future + tags: get,generic,conda-package,_package.future,_source.conda-forge + - names: + - conda-package + - libstdcxx-ng + tags: get,generic,conda-package,_package.libstdcxx-ng,_source.conda-forge + env: + CM_CONDA_ENV: 'yes' + CM_MLPERF_INFERENCE_INTEL: 'yes' + CM_MLPERF_INFERENCE_INTEL_LANGUAGE_MODEL: 'yes' + USE_CUDA: '0' + for-intel-mlperf-inference-v3.1-dlrm-v2: + ad: + pytorch-src-repo: + tags: _no-recurse-submodules,_full-history + base: + - sha.927dc662386af052018212c7d01309a506fc94cd + deps: + - tags: get,cmake + version_min: 3.25.0 + for-nvidia-mlperf-inference-v3.1: + ad: + pytorch-src-repo: + tags: _no-recurse-submodules,_full-history + base: + - sha.b5021ba9 + - cuda + deps: + - tags: get,cmake + version_min: 3.25.0 + for-nvidia-mlperf-inference-v4.0: + ad: + pytorch-src-repo: + tags: _no-recurse-submodules,_full-history + base: + - sha.32f93b1 + - cuda + deps: + - tags: get,cmake + version_min: 3.25.0 + - tags: get,generic-python-lib,_package.numpy + version: 1.22.4 + - tags: get,generic-python-lib,_package.networkx + version: '3.1' + pr-to-apply.#: + ad: + pytorch-src-repo: + tags: _pr-to-apply.# + env: {} + repo.#: + env: + CM_GIT_URL: '#' + group: repo + repo.https://github.com/pytorch/pytorch: + default: true + env: + CM_GIT_URL: https://github.com/pytorch/pytorch + group: repo + sha.#: + env: + CM_GIT_CHECKOUT_SHA: '#' + tag.#: + ad: + pytorch-src-repo: + tags: _no-recurse-submodules,_full-history + env: + CM_GIT_CHECKOUT_TAG: '#' +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/customize.py new file mode 100644 index 000000000..38d6582fc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/customize.py @@ -0,0 +1,49 @@ +# +# Copyright: 
https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + if env.get('CM_MLPERF_INFERENCE_INTEL_LANGUAGE_MODEL', '') == "yes": + i['run_script_input']['script_name'] = "run-intel-mlperf-inference-v3_1" + run_cmd = "CC=clang CXX=clang++ USE_CUDA=OFF python -m pip install -e . " + + env['CM_RUN_CMD'] = run_cmd + elif env.get('CM_MLPERF_INFERENCE_INTEL_MODEL', '') in ["resnet50", "retinanet"]: + i['run_script_input']['script_name'] = "run-intel-mlperf-inference-vision" + run_cmd = f"CC={env['CM_C_COMPILER_WITH_PATH']} CXX={env['CM_CXX_COMPILER_WITH_PATH']} USE_CUDA=OFF python -m pip install -e . " + + env['CM_RUN_CMD'] = run_cmd + + if not env.get('+ CFLAGS', []): + env['+ CFLAGS'] = [] + if not env.get('+ CXXFLAGS', []): + env['+ CXXFLAGS'] = [] + + env['+ CFLAGS'] += ["-Wno-error=uninitialized", + "-Wno-error=maybe-uninitialized", "-fno-strict-aliasing"] + env['+ CXXFLAGS'] += ["-Wno-error=uninitialized", + "-Wno-error=maybe-uninitialized", "-fno-strict-aliasing"] + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh new file mode 100644 index 000000000..7ad6fbd61 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +CUR_DIR=$PWD +rm -rf pytorch +cp -r ${CM_PYTORCH_SRC_REPO_PATH} pytorch +cd pytorch +rm -rf build + +git submodule sync +git submodule update --init --recursive +if [ "${?}" != "0" ]; then exit 1; fi +pushd third_party/gloo +wget -nc --no-check-certificate https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/bert-99/pytorch-cpu/patches/gloo.patch +if [ "${?}" != "0" ]; then exit 1; fi +git apply gloo.patch +if [ "${?}" != "0" ]; then exit 1; fi +popd + +pushd third_party/ideep/mkl-dnn +wget -nc --no-check-certificate https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/bert-99/pytorch-cpu/patches/clang_mkl_dnn.patch +if [ "${?}" != "0" ]; then exit 1; fi +git apply clang_mkl_dnn.patch +if [ "${?}" != "0" ]; then exit 1; fi +popd + +wget -nc --no-check-certificate https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/bert-99/pytorch-cpu/patches/pytorch_official_1_12.patch +if [ "${?}" != "0" ]; then exit 1; fi +git apply pytorch_official_1_12.patch +if [ "${?}" != "0" ]; then exit 1; fi +pip install -r requirements.txt + +cmd="${CM_RUN_CMD}" +echo ${cmd} +eval ${cmd} + +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/run-intel-mlperf-inference-vision.sh 
b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/run-intel-mlperf-inference-vision.sh new file mode 100644 index 000000000..f3bd3d771 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/run-intel-mlperf-inference-vision.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +#export PATH=${CM_CONDA_BIN_PATH}:$PATH +#export LIBRARY_PATH=${CM_CONDA_LIB_PATH}:$LIBRARY_PATH + +CUR_DIR=$PWD +rm -rf pytorch +cp -r ${CM_PYTORCH_SRC_REPO_PATH} pytorch +cd pytorch +rm -rf build + +git submodule sync +git submodule update --init --recursive +if [ "${?}" != "0" ]; then exit 1; fi +pip install -r requirements.txt + +cmd="${CM_RUN_CMD}" +echo ${cmd} +eval ${cmd} + +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/run.sh new file mode 100644 index 000000000..08ddde105 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-from-src/run.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +gcc() +{ + ${CM_GCC_BIN_WITH_PATH} "$@" +} +export -f gcc + +CUR_DIR=$PWD +if [[ ! -e pytorch/dist/torch*.whl ]]; then + rm -rf pytorch + cp -r ${CM_PYTORCH_SRC_REPO_PATH} pytorch + cd pytorch + git submodule sync + git submodule update --init --recursive + rm -rf build + + ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r requirements.txt + test $? -eq 0 || exit $? + ${CM_PYTHON_BIN_WITH_PATH} setup.py bdist_wheel + test $? -eq 0 || exit $? +else + cd pytorch +fi + +cd dist +${CM_PYTHON_BIN_WITH_PATH} -m pip install torch-2.*linux_x86_64.whl +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/README.md new file mode 100644 index 000000000..36bde21b6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-pytorch-kineto-from-src](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-pytorch-kineto-from-src) for the documentation of this CM script. 
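The `gcc()` function exported at the top of the pytorch `run.sh` above shadows the compiler for the whole build tree; a standalone sketch of why `export -f` matters (the wrapper path is a stand-in for the CM-detected gcc):

```bash
#!/bin/bash
# An exported function wins over PATH lookup in child bash processes, so
# every sub-make that invokes `gcc` hits the pinned compiler.
CM_GCC_BIN_WITH_PATH=/usr/bin/gcc    # assumption: stand-in path
gcc() { ${CM_GCC_BIN_WITH_PATH} "$@"; }
export -f gcc
bash -c 'gcc --version'   # resolves to the exported wrapper
```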
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/_cm.yaml new file mode 100644 index 000000000..11a5dd8ff --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/_cm.yaml @@ -0,0 +1,85 @@ +alias: install-pytorch-kineto-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +deps: +- tags: detect,os +- tags: detect,cpu +- names: + - python + - python3 + skip_if_env: + CM_CONDA_ENV: + - 'yes' + tags: get,python3 +- tags: get,cmake + version_min: 3.25.0 +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_PYTORCH_KINETO_SRC_REPO_PATH + extra_cache_tags: pytorch-kineto,kineto,src,pytorch-kineto-src,pytorch-kineto-src-repo + names: + - pytorch-kineto-src-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_CHECKOUT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +env: + CM_GIT_URL: https://github.com/pytorch/kineto +name: Build pytorch kineto from sources +new_env_keys: +- CM_PYTORCH_KINETO_* +prehook_deps: [] +sort: 1000 +tags: +- install +- get +- src +- from.src +- pytorch-kineto +- kineto +- src-pytorch-kineto +uid: 98a4b061712d4483 +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + cuda: + deps: + - names: + - cuda + tags: get,cuda,_cudnn + env: + CUDA_HOME: <<>> + CUDA_NVCC_EXECUTABLE: <<>> + CUDNN_INCLUDE_PATH: <<>> + CUDNN_LIBRARY_PATH: <<>> + TORCH_CUDA_ARCH_LIST: Ampere Ada Hopper + TORCH_CXX_FLAGS: -D_GLIBCXX_USE_CXX11_ABI=1 + USE_CUDA: '1' + USE_CUDNN: '1' + repo.#: + env: + CM_GIT_URL: '#' + group: repo + repo.https://github.com/pytorch/kineto: + default: true + env: + CM_GIT_URL: https://github.com/pytorch/kineto + group: repo + sha.#: + env: + CM_GIT_CHECKOUT_SHA: '#' + tag.#: + ad: + pytorch-src-repo: + tags: _full-history + env: + CM_GIT_CHECKOUT_TAG: '#' +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/customize.py new file mode 100644 index 000000000..d7fcced37 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/customize.py @@ -0,0 +1,29 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/run.sh new file mode 100644 index 000000000..bd162e7f8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-pytorch-kineto-from-src/run.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=$PWD +rm -rf kineto +cp -r ${CM_PYTORCH_KINETO_SRC_REPO_PATH} kineto +cd kineto +rm -rf libkineto/build + +mkdir -p 
libkineto/build && cd libkineto/build +cmake .. +test $? -eq 0 || exit $? +make +test $? -eq 0 || exit $? +sudo make install +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/README.md new file mode 100644 index 000000000..d28e3a0b9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/install-qaic-compute-sdk-from-src](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/install-qaic-compute-sdk-from-src) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/_cm.yaml new file mode 100644 index 000000000..de3024209 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/_cm.yaml @@ -0,0 +1,80 @@ +alias: install-qaic-compute-sdk-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML frameworks +deps: +- extra_cache_tags: compute-sdk,qaic,from.src + names: + - qaic-software-git-repo + tags: get,git,repo,_repo.https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100-cc + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL +- names: + - cmake + tags: get,cmake + version_min: 3.24.0 +- names: + - llvm + tags: get,llvm,_from-src +- tags: get,generic,sys-util,_libudev-dev +- tags: get,generic,sys-util,_libpci-dev +- tags: get,google,test +- tags: get,generic-sys-util,_ninja-build +- tags: get,generic-sys-util,_rsync +- env: + CM_EXTRACT_FINAL_ENV_NAME: CM_HEXAGON_TOOLS_INSTALLED_DIR + extra_cache_tags: hexagon-compiler + force_cache: true + names: + - dae + tags: download-and-extract,_extract,_url.https://codelinaro.jfrog.io/artifactory/codelinaro-toolchain-for-hexagon/v15.0.5/clang+llvm-15.0.5-cross-hexagon-unknown-linux-musl.tar.xz +input_description: {} +input_mapping: {} +new_env_keys: +- +PATH +- CM_QAIC_COMPUTE_SDK_PATH +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- get +- qaic +- from.src +- software +- compute +- compute-sdk +- qaic-compute-sdk +- sdk +uid: 9701bdda97fa4045 +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + debug: + env: + CM_QAIC_COMPUTE_SDK_INSTALL_MODE: debug + group: installation-mode + release: + default: true + env: + CM_QAIC_COMPUTE_SDK_INSTALL_MODE: release + group: installation-mode + release-assert: + env: + CM_QAIC_COMPUTE_SDK_INSTALL_MODE: release-assert + group: installation-mode + repo.#: + env: + CM_GIT_URL: '#' + group: repo-source + repo.quic: + default: true + env: + CM_GIT_URL: https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100-cc + group: repo-source +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/customize.py
b/cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/customize.py new file mode 100644 index 000000000..c2d1f3d80 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/customize.py @@ -0,0 +1,61 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + env['CM_QAIC_COMPUTE_SDK_PATH'] = env['CM_GIT_CHECKOUT_PATH'] + + ''' + if env.get('+PATH', []) == []: + env['+PATH'] = [] + env['+PATH'].append(env['CM_LLVM_INSTALLED_PATH']) + + if env.get('+LD_LIBRARY_PATH', []) == []: + env['+LD_LIBRARY_PATH'] = [] + env['+LD_LIBRARY_PATH'].append(os.path.join(env['CM_LLVM_INSTALLED_PATH'], "..", "lib")) + ''' + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + # env['CM_QAIC_RUNNER_PATH'] = os.path.join(env['CM_QAIC_SOFTWARE_KIT_PATH'], "build", "utils", "qaic-runner") + + if '+PATH' not in env: + env['+PATH'] = [] + + env['CM_QAIC_COMPUTE_SDK_INSTALL_PATH'] = os.path.join( + os.getcwd(), + "src", + "install", + "qaic-compute-" + + env['CM_QAIC_COMPUTE_SDK_INSTALL_MODE']) + + env['QAIC_COMPUTE_INSTALL_DIR'] = env['CM_QAIC_COMPUTE_SDK_INSTALL_PATH'] + + env['+PATH'].append(os.path.join(env['CM_QAIC_COMPUTE_SDK_INSTALL_PATH'], "exec")) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/run.sh new file mode 100644 index 000000000..734fe01b9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-qaic-compute-sdk-from-src/run.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +function cmake() { +${CM_CMAKE_BIN_WITH_PATH} $@ +} + +export CC=${CM_C_COMPILER_WITH_PATH} +export CXX=${CM_CXX_COMPILER_WITH_PATH} + +export -f cmake +export HEXAGON_TOOLS_DIR=${CM_HEXAGON_TOOLS_INSTALLED_DIR}/clang+llvm-15.0.5-cross-hexagon-unknown-linux-musl/x86_64-linux-gnu + +mkdir -p src +rsync -avz --exclude=.git ${CM_QAIC_COMPUTE_SDK_PATH}/ src/ +cd src + +if [[ ${CM_CLEAN_BUILD} == "yes" ]]; then + rm -rf build +fi + +./scripts/build.sh --${CM_QAIC_COMPUTE_SDK_INSTALL_MODE} --install +test $? -eq 0 || exit $? + +cd - diff --git a/cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
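Note: the qaic run.sh above relies on a bash idiom worth spelling out: a function named after the tool shadows whatever binary is on PATH, and `export -f` propagates that function to child bash processes, so the vendor's `./scripts/build.sh` transparently resolves `cmake` to the CM-provided binary. A self-contained sketch of the pattern (paths are assumptions):

    #!/bin/bash
    # Shadow 'cmake' with a wrapper around the CM-resolved binary
    cmake() { "${CM_CMAKE_BIN_WITH_PATH}" "$@"; }
    # Make the function visible to child *bash* processes
    export -f cmake
    # Any bash script launched from here now calls the wrapper
    ./scripts/build.sh --release --install

Quoting `"$@"` in the wrapper also preserves arguments containing spaces, which the unquoted `$@` in the script above does not.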
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/_cm.yaml new file mode 100644 index 000000000..b754a02d6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/_cm.yaml @@ -0,0 +1,61 @@ +alias: install-rapidjson-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +deps: +- tags: detect,os +- tags: detect,cpu +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_RAPIDJSON_SRC_REPO_PATH + extra_cache_tags: rapidjson,src,rapidjson-src,rapidjson-src-repo + names: + - rapidjson-src-repo + - rapidjson-src + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_CHECKOUT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +env: {} +name: Build rapidjson from sources +new_env_keys: +- CM_RAPIDJSON_* +prehook_deps: [] +sort: 1000 +tags: +- install +- get +- src +- from.src +- rapidjson +- src-rapidjson +uid: 5171e69b4bb94989 +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + repo.#: + env: + CM_GIT_URL: '#' + group: repo + repo.https://github.com/Tencent/rapidjson: + default: true + env: + CM_GIT_URL: https://github.com/Tencent/rapidjson + group: repo + sha.#: + env: + CM_GIT_CHECKOUT_SHA: '#' + tag.#: + ad: + rapidjson-src-repo: + tags: _no-recurse-submodules,_full-history + env: + CM_GIT_CHECKOUT_TAG: '#' +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/customize.py new file mode 100644 index 000000000..bcb4fa9ca --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/customize.py @@ -0,0 +1,33 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return': 0} + + +def postprocess(i): + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/run.sh new file mode 100644 index 000000000..4a6b2ec7d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-rapidjson-from-src/run.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=$PWD +rm -rf rapidjson +cp -r ${CM_RAPIDJSON_SRC_REPO_PATH} rapidjson +cd rapidjson +test "${?}" -eq "0" || exit $? +rm -rf build + +mkdir build +cd build +cmake .. +test "${?}" -eq "0" || exit $? +make -j${CM_HOST_CPU_PHYSICAL_CORES_PER_SOCKET} +test "${?}" -eq "0" || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/install-rocm/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-rocm/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-rocm/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. 
+ +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-rocm/README.md b/cmx4mlops/cmx4mlops/repo/script/install-rocm/README.md new file mode 100644 index 000000000..e96171c66 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-rocm/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/install-rocm](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/install-rocm) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-rocm/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-rocm/_cm.yaml new file mode 100644 index 000000000..395ed8764 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-rocm/_cm.yaml @@ -0,0 +1,19 @@ +alias: install-rocm +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML frameworks +clean_files: [] +default_version: 5.7.1 +deps: +- tags: detect,os +env: {} +new_env_keys: +- CM_ROCM_* +- +PATH +tags: +- install +- rocm +- install-rocm +uid: 9d13f90463ce4545 +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-rocm/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-rocm/customize.py new file mode 100644 index 000000000..66277942a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-rocm/customize.py @@ -0,0 +1,32 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + installed_path = "/opt/rocm/bin" + env['CM_ROCM_INSTALLED_PATH'] = installed_path + env['CM_ROCM_BIN_WITH_PATH'] = os.path.join(installed_path, "rocminfo") + env['+PATH'] = [installed_path] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-rocm/run-rhel.sh b/cmx4mlops/cmx4mlops/repo/script/install-rocm/run-rhel.sh new file mode 100644 index 000000000..10f8a6789 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-rocm/run-rhel.sh @@ -0,0 +1,27 @@ +# Add the amdgpu module repository for RHEL +repo1="[amdgpu] +name=amdgpu +baseurl=https://repo.radeon.com/amdgpu/${CM_VERSION}/rhel/${CM_HOST_OS_VERSION}/main/x86_64 +enabled=1 +gpgcheck=1 +gpgkey=https://repo.radeon.com/rocm/rocm.gpg.key +" +echo "${repo1}" | sudo tee /etc/yum.repos.d/amdgpu.repo + +# Add the rocm repository for RHEL +mainversion="${CM_HOST_OS_VERSION%%.*}" +repo2="[rocm] +name=rocm +baseurl=https://repo.radeon.com/rocm/rhel${mainversion}/latest/main +enabled=1 +priority=50 +gpgcheck=1 +gpgkey=https://repo.radeon.com/rocm/rocm.gpg.key +" +echo "${repo2}" | sudo tee /etc/yum.repos.d/rocm.repo + +sudo yum clean all + +sudo yum install amdgpu-dkms + +sudo yum install rocm-hip-libraries diff --git a/cmx4mlops/cmx4mlops/repo/script/install-rocm/run-ubuntu.sh b/cmx4mlops/cmx4mlops/repo/script/install-rocm/run-ubuntu.sh new file mode 100644 index 000000000..400ba5fa7 --- 
/dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-rocm/run-ubuntu.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Make the directory if it doesn't exist yet. +# This location is recommended by the distribution maintainers. +sudo mkdir --parents --mode=0755 /etc/apt/keyrings +# Download the key, convert the signing-key to a full +# keyring required by apt and store in the keyring directory +wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | \ + gpg --dearmor | sudo tee /etc/apt/keyrings/rocm.gpg > /dev/null + +ubuntuflavor="jammy" +if [[ ${CM_HOST_OS_VERSION} == "22.04" ]]; then + ubuntuflavor="jammy" +elif [[ ${CM_HOST_OS_VERSION} == "20.04" ]]; then + ubuntuflavor="focal" +fi + +# Kernel driver repository +deb1="deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/amdgpu/${CM_VERSION}/ubuntu ${ubuntuflavor} main" +echo $deb1 | sudo tee /etc/apt/sources.list.d/amdgpu.list + +# ROCm repository +deb2="deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/debian ${ubuntuflavor} main" +echo $deb2 | sudo tee /etc/apt/sources.list.d/rocm.list + +# Prefer packages from the rocm repository over system packages +echo -e 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' | sudo tee /etc/apt/preferences.d/rocm-pin-600 + +sudo apt update + +sudo apt install amdgpu-dkms + +sudo apt install rocm-hip-libraries diff --git a/cmx4mlops/cmx4mlops/repo/script/install-rocm/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-rocm/run.sh new file mode 100644 index 000000000..05a7907cf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-rocm/run.sh @@ -0,0 +1,2 @@ +#!/bin/bash + diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/README.md b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/README.md new file mode 100644 index 000000000..95660a7a2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/install-tensorflow-for-c](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/install-tensorflow-for-c) for the documentation of this CM script. 
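Note: run-ubuntu.sh above pins packages from repo.radeon.com at apt priority 600 before installing the DKMS driver and HIP libraries, and `_cm.yaml` declares `default_version: 5.7.1`, which feeds `${CM_VERSION}` into the repository URLs. A version-pinned run could look like this (a sketch, assuming the standard `--version` input of `cm run script`):

    cm run script --tags=install,rocm --version=5.7.1
    # customize.py then exports /opt/rocm/bin via +PATH (e.g. for rocminfo)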
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/_cm.yaml new file mode 100644 index 000000000..31133a887 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/_cm.yaml @@ -0,0 +1,15 @@ +alias: install-tensorflow-for-c +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML frameworks +clean_files: [] +default_version: 2.8.0 +deps: +- tags: detect,os +tags: +- install +- tensorflow +- lib +- lang-c +uid: d73783d8302547d7 diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/customize.py new file mode 100644 index 000000000..4ab11f899 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/customize.py @@ -0,0 +1,43 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', + '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + # 20221024: we save and restore env in the main script and can clean env here for determinism + # if key not in env: + env[key] = [] + + env['+C_INCLUDE_PATH'].append(os.path.join(os.getcwd(), + 'install', 'include')) + env['+CPLUS_INCLUDE_PATH'].append(os.path.join(os.getcwd(), + 'install', 'include')) + + lib_path = os.path.join(os.getcwd(), 'install', 'lib') + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/run.sh new file mode 100644 index 000000000..2f7c3957b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-for-c/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +CM_VERSION=${CM_VERSION:-2.8.0} +if [[ ${CM_HOST_PLATFORM_FLAVOR} != 'x86_64' ]]; then + echo "Platform ${CM_HOST_PLATFORM_FLAVOR} is not supported yet!"; + exit 1 +fi +mkdir install +FILENAME=libtensorflow-cpu-${CM_HOST_OS_TYPE}-x86_64-${CM_VERSION}.tar.gz +wget -q --no-check-certificate https://storage.googleapis.com/tensorflow/libtensorflow/${FILENAME} +tar -C install -xzf ${FILENAME} + +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
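Note: for `install-tensorflow-for-c` above, run.sh unpacks the prebuilt library into `./install` and customize.py exports the `include` and `lib` subdirectories. A hypothetical smoke test against that layout (the test file is an assumption; `TF_Version()` is part of the public TensorFlow C API):

    # Compile and run a minimal program against the unpacked library
    cat > hello_tf.c <<'EOF'
    #include <stdio.h>
    #include <tensorflow/c/c_api.h>
    int main() { printf("TF C library %s\n", TF_Version()); return 0; }
    EOF
    gcc hello_tf.c -Iinstall/include -Linstall/lib -ltensorflow -o hello_tf
    LD_LIBRARY_PATH=install/lib ./hello_tf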
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/README.md new file mode 100644 index 000000000..c3d184ac0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/install-tensorflow-from-src](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/install-tensorflow-from-src) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/_cm.yaml new file mode 100644 index 000000000..31542404e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/_cm.yaml @@ -0,0 +1,346 @@ +alias: install-tensorflow-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML frameworks +clean_files: [] +default_env: + CM_GIT_DEPTH: '1' + CM_GIT_URL: https://github.com/tensorflow/tensorflow + CM_TFLITE: 'off' +default_version: master +deps: +- tags: detect,cpu +- tags: detect,os +- enable_if_env: + CM_HOST_OS_FLAVOR: + - ubuntu + CM_HOST_OS_VERSION: + - '18.04' + tags: get,generic-sys-util,_zlib +- tags: get,generic-python-lib,_package.numpy +extra_cache_tags_from_env: +- env: CM_PYTHON_CACHE_TAGS + prefix: python- +new_env_keys: +- +C_INCLUDE_PATH +- +CPLUS_INCLUDE_PATH +- +LD_LIBRARY_PATH +- +DYLD_FALLBACK_LIBRARY_PATH +tags: +- get +- install +- tensorflow +- lib +- source +- from-source +- from-src +- src +- from.src +uid: a974533c4c854597 +variations: + tflite: + env: + CM_TFLITE: 'on' +versions: + master: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.12.999 + version_min: 3.9.0 + - tags: get,llvm + version_max: 17.9.99 + version_max_usable: 17.0.6 + version_min: 17.0.6 + - tags: get,bazel + version: 6.5.0 + env: + CM_GIT_CHECKOUT: master + v1.15.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: '3.7' + version_min: '3.3' + - tags: gcc,get + version_max: 7.3.1 + version_min: '7' + - tags: get,bazel + version: 0.26.1 + env: + CM_GIT_CHECKOUT: v1.15.0 + v2.0.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.7.999 + version_max_usable: 3.7.12 + version_min: 3.3.0 + - tags: gcc,get + version_max: 7.3.1 + version_min: '7' + - tags: get,bazel + version: 0.26.1 + env: + CM_GIT_CHECKOUT: v2.0.0 + v2.1.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.7.999 + version_max_usable: 3.7.12 + version_min: 3.3.0 + - tags: gcc,get + version_max: 7.3.1 + version_min: '7' + - tags: get,bazel + version: 0.27.1 + env: + CM_GIT_CHECKOUT: v2.1.0 + v2.10.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.10.999 + version_max_usable: 3.10.12 + version_min: 3.7.0 + - tags: gcc,get + version_max: '10' + version_min: '9' + - tags: get,bazel + version: 5.1.1 + env: + CM_GIT_CHECKOUT: v2.10.0 + v2.11.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.10.999 + version_max_usable: 3.10.12 + version_min: 3.7.0 + - tags: gcc,get + version_max: '10' + version_min: '9' + - tags: get,bazel + version: 5.3.0 + env: + CM_GIT_CHECKOUT: v2.11.0 + v2.12.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.11.999 + version_max_usable: 3.11.12 + version_min: 3.7.0 + - tags: gcc,get + version_max: '12' + version_min: '9' + - tags: 
get,bazel + version: 5.3.0 + env: + CM_GIT_CHECKOUT: v2.12.0 + v2.13.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.11.999 + version_max_usable: 3.11.12 + version_min: 3.8.0 + - tags: get,llvm + version: 16.0.0 + - tags: get,bazel + version: 5.3.0 + env: + CM_GIT_CHECKOUT: v2.13.0 + v2.14.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.11.999 + version_max_usable: 3.11.12 + version_min: 3.9.0 + - tags: get,llvm + version: 16.0.0 + - tags: get,bazel + version: 6.1.0 + env: + CM_GIT_CHECKOUT: v2.14.0 + v2.15.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.11.999 + version_max_usable: 3.11.12 + version_min: 3.9.0 + - tags: get,llvm + version: 16.0.0 + - tags: get,bazel + version: 6.1.0 + env: + CM_GIT_CHECKOUT: v2.15.0 + v2.16.1: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.12.999 + version_max_usable: 3.11.12 + version_min: 3.9.0 + - tags: get,llvm + version_max: 17.9.999 + version_max_usable: 17.0.6 + version_min: 16.0.0 + - tags: get,bazel + version: 6.5.0 + env: + CM_GIT_CHECKOUT: v2.16.1 + v2.2.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.8.999 + version_max_usable: 3.8.12 + version_min: 3.5.0 + - tags: gcc,get + version_max: 7.3.1 + version_min: '7' + - tags: get,bazel + version: 2.0.0 + env: + CM_GIT_CHECKOUT: v2.2.0 + v2.3.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.8.999 + version_max_usable: 3.8.12 + version_min: 3.5.0 + - tags: gcc,get + version_max: 7.3.1 + version_min: '7' + - tags: get,bazel + version: 3.1.0 + env: + CM_GIT_CHECKOUT: v2.3.0 + v2.4.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.8.999 + version_max_usable: 3.8.12 + version_min: 3.6.0 + - tags: gcc,get + version_max: 7.3.1 + version_min: '7' + - tags: get,bazel + version: 3.1.0 + env: + CM_GIT_CHECKOUT: v2.4.0 + v2.5.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.9.999 + version_max_usable: 3.9.12 + version_min: 3.6.0 + - tags: gcc,get + version_max: 7.3.1 + version_min: '7' + - tags: get,bazel + version: 3.7.2 + env: + CM_GIT_CHECKOUT: v2.5.0 + v2.6.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.9.999 + version_max_usable: 3.9.12 + version_min: 3.6.0 + - tags: gcc,get + version_max: 7.3.1 + version_min: '7' + - tags: get,bazel + version: 3.7.2 + env: + CM_GIT_CHECKOUT: v2.6.0 + v2.7.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.9.999 + version_max_usable: 3.9.12 + version_min: 3.7.0 + - tags: gcc,get + version_max: 7.3.1 + version_min: '7' + - tags: get,bazel + version: 3.7.2 + env: + CM_GIT_CHECKOUT: v2.7.0 + v2.8.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.10.999 + version_max_usable: 3.10.12 + version_min: 3.7.0 + - tags: gcc,get + version_max: 7.3.1 + version_min: '7' + - tags: get,bazel + version: 4.2.1 + env: + CM_GIT_CHECKOUT: v2.8.0 + v2.9.0: + deps: + - names: + - python + - python3 + tags: get,python + version_max: 3.10.999 + version_max_usable: 3.10.12 + version_min: 3.7.0 + - tags: gcc,get + version_max: '10' + version_min: '9' + - tags: get,bazel + version: 5.0.0 + env: + CM_GIT_CHECKOUT: v2.9.0 diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/customize.py new file mode 100644 index 000000000..0019fc216 --- /dev/null +++ 
b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/customize.py @@ -0,0 +1,82 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + env['CC'] = env['CM_C_COMPILER_WITH_PATH'] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', + '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + # 20221024: we save and restore env in the main script and can clean env here for determinism + # if key not in env: + env[key] = [] + bazel_install_root = os.path.join(os.getcwd(), "src", "bazel-out") + bazel_install_bin = os.path.join(os.getcwd(), "src", "bazel-bin") + inc_paths = [] + inc_paths.append(os.path.join(os.getcwd(), "src")) + inc_paths.append(bazel_install_bin) + inc_paths.append( + os.path.join( + bazel_install_bin, + "external", + "flatbuffers", + "_virtual_includes", + "flatbuffers")) + inc_paths.append( + os.path.join( + bazel_install_bin, + "external", + "FP16", + "_virtual_includes", + "FP16")) + inc_paths.append( + os.path.join( + bazel_install_bin, + "external", + "pthreadpool", + "_virtual_includes", + "pthreadpool")) + inc_paths.append( + os.path.join( + bazel_install_bin, + "external", + "cpuinfo", + "_virtual_includes", + "cpuinfo")) + + env['+C_INCLUDE_PATH'] = inc_paths + env['+CPLUS_INCLUDE_PATH'] = inc_paths + + tflite_lib = env.get("CM_TFLITE", "") + if tflite_lib == "on": + lib_path = os.path.join(bazel_install_bin, 'tensorflow', 'lite') + else: + lib_path = os.path.join(bazel_install_bin, 'tensorflow') + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/run.sh new file mode 100644 index 000000000..d9090bf7d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tensorflow-from-src/run.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +CUR_DIR=${PWD:-tmp} +if [ ! -d "src" ]; then + echo "Cloning Tensorflow from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} --depth ${CM_GIT_DEPTH}..." 
+ git clone --recursive -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} --depth ${CM_GIT_DEPTH} src +fi +CM_PYTHON_BIN=${CM_PYTHON_BIN:-python3} + +INSTALL_DIR="${CUR_DIR}" + +echo "******************************************************" +cd src +#./configure +#if [ "${?}" != "0" ]; then exit 1; fi + +if [ "${CM_TFLITE}" == "on" ]; then + cmd="${CM_BAZEL_BIN_WITH_PATH} build -c opt --define tflite_with_xnnpack=true //tensorflow/lite:libtensorflowlite.so" + echo $cmd + eval $cmd + if [ "${?}" != "0" ]; then exit 1; fi + exit 0 +fi +./configure +if [ "${?}" != "0" ]; then exit 1; fi +echo "******************************************************" +cmd="${CM_BAZEL_BIN_WITH_PATH} build //tensorflow/tools/pip_package:build_pip_package" +echo $cmd +eval $cmd +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" +./bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg +if [ "${?}" != "0" ]; then exit 1; fi + + +# Clean build directory (too large) +cd ${INSTALL_DIR} +if [ "${CM_TENSORFLOW_CLEAN_BUILD}" != "no" ]; then + rm -rf build +fi + +echo "******************************************************" +echo "Tensorflow is built and installed to ${INSTALL_DIR} ..." diff --git a/cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/README.md new file mode 100644 index 000000000..3615b4cfc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Cloud-automation/install-terraform-from-src](https://docs.mlcommons.org/cm4mlops/scripts/Cloud-automation/install-terraform-from-src) for the documentation of this CM script. 
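Note: the tensorflow-from-src run.sh above stops after `build_pip_package` writes a wheel into `/tmp/tensorflow_pkg` and does not install it. A typical follow-up step, not performed by the script itself (the wheel name is a placeholder):

    ${CM_PYTHON_BIN:-python3} -m pip install /tmp/tensorflow_pkg/tensorflow-*.whl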
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/_cm.yaml new file mode 100644 index 000000000..a2cb2e446 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/_cm.yaml @@ -0,0 +1,24 @@ +alias: install-terraform-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Cloud automation +clean_files: [] +default_version: main +deps: +- tags: detect,cpu +- tags: get,tool,go +env: + CM_GIT_URL: https://github.com/hashicorp/terraform.git +new_env_keys: +- CM_TERRAFORM_* +- +PATH +tags: +- install +- terraform +- from-src +uid: d79d47a074f34428 +versions: + main: + env: + CM_GIT_CHECKOUT: main diff --git a/cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/customize.py new file mode 100644 index 000000000..b17e4237b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/customize.py @@ -0,0 +1,33 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + installed_path = os.path.join(os.getcwd(), 'bin') + env['CM_TERRAFORM_INSTALLED_PATH'] = installed_path + env['CM_TERRAFORM_BIN_WITH_PATH'] = os.path.join( + installed_path, "terraform") + env['+PATH'] = [installed_path] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/run.sh new file mode 100644 index 000000000..8cdb88302 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-terraform-from-src/run.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +CUR_DIR=${PWD} +if [ ! -d "terraform" ]; then + echo "Cloning Terraform from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT}..." + git clone -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} terraform +fi +test $? -eq 0 || exit 1 + +export GOPATH=$CUR_DIR +cd terraform +go install +test $? -eq 0 || exit 1 + +echo "******************************************************" +echo "Terraform is built and installed to ${GOPATH}/bin/terraform ..." diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
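Note: the terraform run.sh above counts on `go install` placing the binary under `${GOPATH}/bin`, which customize.py then exports through `+PATH`. A hypothetical end-to-end check once the script has completed:

    cm run script --tags=install,terraform,from-src
    terraform version   # resolved via the +PATH entry set in postprocess()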
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/README.md new file mode 100644 index 000000000..eff64d47f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/install-tflite-from-src](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-frameworks/install-tflite-from-src) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/_cm.yaml new file mode 100644 index 000000000..6825db569 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/_cm.yaml @@ -0,0 +1,40 @@ +alias: install-tflite-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: AI/ML frameworks +clean_files: [] +default_env: + CM_GIT_DEPTH: '1' +default_version: master +deps: +- tags: detect,cpu +- names: + - compiler + tags: get,compiler +- tags: get,cmake +env: + CM_GIT_URL: https://github.com/tensorflow/tensorflow +extra_cache_tags_from_env: +- env: CM_PYTHON_CACHE_TAGS + prefix: python- +new_env_keys: +- +C_INCLUDE_PATH +- +CPLUS_INCLUDE_PATH +- +LD_LIBRARY_PATH +- +DYLD_FALLBACK_LIBRARY_PATH +tags: +- get +- install +- tflite-cmake +- tensorflow-lite-cmake +- from-src +uid: 5c72dab5eb88407c +versions: + master: + ad: + compiler: + tags: gcc + version_min: 10.0.0 + env: + CM_GIT_CHECKOUT: master diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/customize.py new file mode 100644 index 000000000..6d625a8d8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/customize.py @@ -0,0 +1,41 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + for key in ['+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', + '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']: + # 20221024: we save and restore env in the main script and can clean env here for determinism + # if key not in env: + env[key] = [] + + env['+C_INCLUDE_PATH'].append(os.path.join(os.getcwd(), 'src')) + env['+CPLUS_INCLUDE_PATH'].append(os.path.join(os.getcwd(), 'src')) + + lib_path = os.path.join(os.getcwd(), 'build') + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/run.sh new file mode 100644 index 000000000..fb453f2e6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tflite-from-src/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +CUR_DIR=${PWD:-tmp} +if [ ! 
-d "src" ]; then + echo "Cloning Tensorflow from ${CM_GIT_URL} with branch ${CM_GIT_CHECKOUT} --depth ${CM_GIT_DEPTH}..." + git clone --recursive -b "${CM_GIT_CHECKOUT}" ${CM_GIT_URL} --depth ${CM_GIT_DEPTH} src +fi + +INSTALL_DIR="${CUR_DIR}" +rm -rf ${INSTALL_DIR}/build + +cd ${INSTALL_DIR} +mkdir -p build +mkdir -p install + +echo "******************************************************" +cd build +cmake ../src/tensorflow/lite/c +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" +cmake --build . -j${CM_MAKE_CORES} +if [ "${?}" != "0" ]; then exit 1; fi + + +echo "******************************************************" +echo "Tflite is built to ${INSTALL_DIR}/build ..." diff --git a/cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/README.md new file mode 100644 index 000000000..334d7bd95 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-torchvision-from-src](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-torchvision-from-src) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/_cm.yaml new file mode 100644 index 000000000..5e6bf9681 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/_cm.yaml @@ -0,0 +1,108 @@ +alias: install-torchvision-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +deps: +- tags: detect,os +- tags: detect,cpu +- names: + - python + - python3 + skip_if_env: + CM_CONDA_ENV: + - 'yes' + tags: get,python3 +- names: + - compiler + tags: get,compiler +- enable_if_env: + CM_TORCHVISION_NEEDS_PNG: + - 'yes' + tags: get,generic-sys-util,_libpng-dev +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_PYTORCH_VISION_SRC_REPO_PATH + extra_cache_tags: pytorchvision,torchvision,torchvision-src,src,pytorchvision-src,pytorchvision-src-repo + names: + - pytorchision-src-repo + - torchision-src-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_CHECKOUT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +env: + CM_GIT_URL: https://github.com/pytorch/vision +name: Build pytorchvision from sources +new_env_keys: +- CM_PYTORCHVISION_* +prehook_deps: [] +sort: 1000 +tags: +- install +- get +- src +- from.src +- pytorchvision +- torchvision +- src-pytorchvision +uid: 68b855780d474546 +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + cuda: + deps: + - names: + - cuda + tags: get,cuda,_cudnn + env: + CUDA_HOME: <<>> + CUDA_NVCC_EXECUTABLE: <<>> + CUDNN_INCLUDE_PATH: <<>> + CUDNN_LIBRARY_PATH: <<>> + TORCH_CUDA_ARCH_LIST: Ampere Ada Hopper + TORCH_CXX_FLAGS: -D_GLIBCXX_USE_CXX11_ABI=1 + USE_CUDA: '1' + USE_CUDNN: '1' + 
for-nvidia-mlperf-inference-v3.1: + base: + - sha.657027f3 + - cuda + deps: + - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1 + env: {} + for-nvidia-mlperf-inference-v4.0: + base: + - sha.657027f3 + - cuda + deps: + - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v4.0 + env: {} + python.#: + env: + CM_PYTHON_BIN_WITH_PATH: '#' + repo.#: + env: + CM_GIT_URL: '#' + group: repo + repo.https://github.com/pytorch/vision: + default: true + env: + CM_GIT_URL: https://github.com/pytorch/vision + group: repo + sha.#: + env: + CM_GIT_CHECKOUT_SHA: '#' + tag.#: + ad: + pytorch-src-repo: + tags: _no-recurse-submodules,_full-history + env: + CM_GIT_CHECKOUT_TAG: '#' +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/customize.py new file mode 100644 index 000000000..bcb4fa9ca --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/customize.py @@ -0,0 +1,33 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return': 0} + + +def postprocess(i): + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/run.sh new file mode 100644 index 000000000..3ba73deee --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-torchvision-from-src/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +CUR_DIR=$PWD +rm -rf pytorchvision +cp -r ${CM_PYTORCH_VISION_SRC_REPO_PATH} pytorchvision +cd pytorchvision +test "${?}" -eq "0" || exit $? +rm -rf build + +${CM_PYTHON_BIN_WITH_PATH} setup.py bdist_wheel +test "${?}" -eq "0" || exit $? +cd dist +${CM_PYTHON_BIN_WITH_PATH} -m pip install torchvision*linux_x86_64.whl +test "${?}" -eq "0" || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
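Note: the torchvision run.sh above builds a wheel with `setup.py bdist_wheel` and installs it from `dist/`. A quick import check using the same interpreter the script resolves (a sketch, not part of the script):

    ${CM_PYTHON_BIN_WITH_PATH} -c "import torchvision; print(torchvision.__version__)"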
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/README.md b/cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/README.md new file mode 100644 index 000000000..c66bbeb6f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-tpp-pytorch-extension](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-tpp-pytorch-extension) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/_cm.yaml new file mode 100644 index 000000000..07ac48e4e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/_cm.yaml @@ -0,0 +1,102 @@ +alias: install-tpp-pytorch-extension +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +deps: +- tags: detect,os +- tags: detect,cpu +- names: + - python + - python3 + skip_if_env: + CM_CONDA_ENV: + - 'yes' + tags: get,python3 +- names: + - pytorch + skip_if_env: + CM_CONDA_ENV: + - 'yes' + tags: get,pytorch,from.src +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_TPP_PEX_SRC_REPO_PATH + extra_cache_tags: tpp,tpp-pex,src,tpp-pex-src,tpp-pex-src-repo + names: + - tpp-pex-src-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_CHECKOUT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +env: {} +name: Build TPP-PEX from sources +new_env_keys: +- CM_TPP_PEX_* +prehook_deps: [] +sort: 1000 +tags: +- install +- get +- src +- from.src +- tpp-pex +- src-tpp-pex +uid: 1701d2f5f4e84d42 +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + for-intel-mlperf-inference-v3.1-gptj: + adr: + conda-package: + tags: _name.gptj-pt + pytorch: + tags: _for-intel-mlperf-inference-v3.1-gptj + base: + - branch.mlperf_infer_31 + deps: + - names: + - conda + tags: get,conda,_name.gptj-pt + - names: + - conda-package + - python3 + tags: get,generic,conda-package,_package.python + version: '3.9' + - names: + - conda-package + - wheel + tags: get,generic,conda-package,_package.wheel,_source.conda-forge + - names: + - conda-package + - setuptools + tags: get,generic,conda-package,_package.setuptools,_source.conda-forge + version: 69.5.1 + - tags: install,llvm,src,_for-intel-mlperf-inference-v3.1-gptj + env: + CM_CONDA_ENV: 'yes' + repo.#: + env: + CM_GIT_URL: '#' + group: repo + repo.https://github.com/libxsmm/tpp-pytorch-extension: + default: true + env: + CM_GIT_URL: https://github.com/libxsmm/tpp-pytorch-extension + group: repo + sha.#: + env: + CM_GIT_CHECKOUT_SHA: '#' + tag.#: + ad: + pytorch-src-repo: + tags: _no-recurse-submodules,_full-history + env: + CM_GIT_CHECKOUT_TAG: '#' +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/customize.py new file mode 100644 index 000000000..fa39e8c95 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/customize.py @@ -0,0 +1,36 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: 
https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + env['TPP_PEX_DIR'] = env['CM_TPP_PEX_SRC_REPO_PATH'] + env['DNNL_GRAPH_BUILD_COMPILER_BACKEND'] = 1 + env['USE_LLVM'] = env['CM_LLVM_INSTALLED_PATH'] + env['LLVM_DIR'] = os.path.join( + env['CM_LLVM_INSTALLED_PATH'], "lib", "cmake", "llvm") + + run_cmd = "python setup.py clean && python setup.py install" + + env['CM_RUN_DIR'] = env['TPP_PEX_DIR'] + env['CM_RUN_CMD'] = run_cmd + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/run.sh new file mode 100644 index 000000000..d426d4004 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-tpp-pytorch-extension/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:${PATH} + +cd ${CM_RUN_DIR} +echo ${CM_RUN_CMD} +eval ${CM_RUN_CMD} + +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" diff --git a/cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/README.md b/cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/README.md new file mode 100644 index 000000000..5a5e14cfa --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-transformers-from-src](https://docs.mlcommons.org/cm4mlops/scripts/Compiler-automation/install-transformers-from-src) for the documentation of this CM script. 
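Note: customize.py above assembles the TPP extension build entirely from CM-resolved env; unrolled into plain shell, the equivalent sequence would be roughly the following (a sketch; every variable is provided by the deps declared in `_cm.yaml`):

    export USE_LLVM="${CM_LLVM_INSTALLED_PATH}"
    export LLVM_DIR="${CM_LLVM_INSTALLED_PATH}/lib/cmake/llvm"
    export DNNL_GRAPH_BUILD_COMPILER_BACKEND=1
    cd "${CM_TPP_PEX_SRC_REPO_PATH}"
    python setup.py clean && python setup.py install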
diff --git a/cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/_cm.yaml new file mode 100644 index 000000000..d2c411c97 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/_cm.yaml @@ -0,0 +1,100 @@ +alias: install-transformers-from-src +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Compiler automation +deps: +- tags: detect,os +- tags: detect,cpu +- names: + - python + - python3 + skip_if_env: + CM_CONDA_ENV: + - 'yes' + tags: get,python3 +- names: + - pytorch + skip_if_env: + CM_CONDA_ENV: + - 'yes' + tags: get,pytorch,from.src +- env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_TRANSFORMERS_SRC_REPO_PATH + extra_cache_tags: transformers,src,transformers-src,transformers-src-repo + names: + - transformers-src-repo + tags: get,git,repo + update_tags_from_env_with_prefix: + _branch.: + - CM_GIT_CHECKOUT + _repo.: + - CM_GIT_URL + _sha.: + - CM_GIT_CHECKOUT_SHA + _tag.: + - CM_GIT_CHECKOUT_TAG +env: + CM_GIT_URL: https://github.com/huggingface/transformers +name: Build transformers from sources +new_env_keys: +- CM_TRANSFORMERS_* +prehook_deps: [] +sort: 1000 +tags: +- install +- src +- from.src +- transformers +- src-transformers +uid: 88512c48ea5c4186 +variations: + branch.#: + env: + CM_GIT_CHECKOUT: '#' + for-intel-mlperf-inference-v3.1-bert: + adr: + conda-package: + tags: _name.bert-pt + pytorch: + tags: _for-intel-mlperf-inference-v3.1-bert + base: + - sha.9f4e0c23d68366985f9f584388874477ad6472d8 + deps: + - names: + - conda + tags: get,conda,_name.bert-pt + - names: + - conda-package + - python3 + tags: get,generic,conda-package,_package.python + version: '3.8' + - names: + - conda-package + - wheel + tags: get,generic,conda-package,_package.wheel,_source.conda-forge + - names: + - conda-package + - setuptools + tags: get,generic,conda-package,_package.setuptools,_source.conda-forge + env: + CM_CONDA_ENV: 'yes' + repo.#: + env: + CM_GIT_URL: '#' + group: repo + repo.https://github.com/huggingface/transformers: + default: true + env: + CM_GIT_URL: https://github.com/huggingface/transformers + group: repo + sha.#: + env: + CM_GIT_CHECKOUT_SHA: '#' + tag.#: + ad: + pytorch-src-repo: + tags: _no-recurse-submodules,_full-history + env: + CM_GIT_CHECKOUT_TAG: '#' +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/customize.py b/cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/customize.py new file mode 100644 index 000000000..37321608e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/customize.py @@ -0,0 +1,33 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i):  +  + os_info = i['os_info']  +  + if os_info['platform'] == 'windows':  + return {'return': 1, 'error': 'Windows is not supported in this script yet'}  +  + env = i['env']  +  + run_cmd = "python setup.py install"  +  + env['CM_RUN_CMD'] = run_cmd  +  + automation = i['automation']  +  + recursion_spaces = i['recursion_spaces']  +  + return {'return': 0} diff --git
a/cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/run.sh b/cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/run.sh new file mode 100644 index 000000000..8af8c6c77 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/install-transformers-from-src/run.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +export PATH=${CM_CONDA_BIN_PATH}:$PATH + +CUR_DIR=$PWD +echo $PWD +rm -rf transformers +cmd="cp -r ${CM_TRANSFORMERS_SRC_REPO_PATH} transformers" +echo "$cmd" +eval "$cmd" +cd transformers +rm -rf build + +wget -nc --no-check-certificate https://raw.githubusercontent.com/mlcommons/inference_results_v3.1/main/closed/Intel/code/bert-99/pytorch-cpu/patches/transformers.patch +if [ "${?}" != "0" ]; then exit 1; fi +git apply transformers.patch +if [ "${?}" != "0" ]; then exit 1; fi + +echo ${CM_RUN_CMD} +eval ${CM_RUN_CMD} + +if [ "${?}" != "0" ]; then exit 1; fi + +echo "******************************************************" diff --git a/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/README-extra.md new file mode 100644 index 000000000..3854e8ecb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/README-extra.md @@ -0,0 +1,3 @@ +# CM script + +Universal benchmark launcher via Collective Mind diff --git a/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/README.md b/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/README.md new file mode 100644 index 000000000..26234efb0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Collective-benchmarking/launch-benchmark](https://docs.mlcommons.org/cm4mlops/scripts/Collective-benchmarking/launch-benchmark) for the documentation of this CM script. 
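The `_cm.yaml` for install-transformers-from-src above leans on wildcard variations (`branch.#`, `sha.#`, `tag.#`, `repo.#`) whose `#` placeholder is filled from the user-supplied tag and exported as an environment variable. A rough illustration of that resolution step (function and data are illustrative, not the actual CM implementation):

```python
def resolve_variation(tag, variations):
    # Match a user tag such as 'sha.9f4e0c23' against wildcard variations like
    # 'sha.#' and export the mapped env keys with '#' substituted.
    for name, meta in variations.items():
        if not name.endswith('#'):
            continue
        prefix = name[:-1]                      # e.g. 'sha.'
        if tag.startswith(prefix) and len(tag) > len(prefix):
            value = tag[len(prefix):]           # e.g. '9f4e0c23'
            return {k: v.replace('#', value)
                    for k, v in meta.get('env', {}).items()}
    return {}

variations = {'sha.#': {'env': {'CM_GIT_CHECKOUT_SHA': '#'}},
              'branch.#': {'env': {'CM_GIT_CHECKOUT': '#'}}}
print(resolve_variation('sha.9f4e0c23', variations))
# -> {'CM_GIT_CHECKOUT_SHA': '9f4e0c23'}
```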
diff --git a/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/_cm.yaml new file mode 100644 index 000000000..f45606bc2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/_cm.yaml @@ -0,0 +1,15 @@ +alias: launch-benchmark +uid: 5dc7662804bc4cad + +automation_alias: script +automation_uid: 5b4e0237da074764 + +tags: +- launch +- benchmark + +category: "Collective benchmarking" + +gui: + title: "Launch benchmark" + use_customize_func: "gui" diff --git a/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/customize.py b/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/customize.py new file mode 100644 index 000000000..9301357f0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/customize.py @@ -0,0 +1,742 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +import cmind +import os +import copy + +base_path = {} +base_path_meta = {} + +########################################################################## + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + +########################################################################## + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} + + +########################################################################## +def load_cfg(i): + + tags = i.get('tags', '') + artifact = i.get('artifact', '') + + key = i.get('key', '') + + ii = {'action': 'find', + 'automation': 'cfg'} + if artifact != '': + ii['artifact'] = artifact + elif tags != '': + ii['tags'] = tags + + r = cmind.access(ii) + if r['return'] > 0: + return r + + lst = r['list'] + + prune = i.get('prune', {}) + prune_key = prune.get('key', '') + prune_key_uid = prune.get('key_uid', '') + prune_uid = prune.get('uid', '') + prune_list = prune.get('list', []) + + # Checking individual files inside CM entry + selection = [] + + if i.get('skip_files', False): + for l in lst: + meta = l.meta + full_path = l.path + + meta['full_path'] = full_path + + add = True + + if prune_key != '' and prune_key_uid != '': + if prune_key_uid not in meta.get(prune_key, []): + add = False + + if add: + selection.append(meta) + else: + for l in lst: + path = l.path + + main_meta = l.meta + all_tags = main_meta.get('tags', []) + + files = os.listdir(path) + + for f in files: + if key != '' and not f.startswith(key): + continue + + if f.startswith('_') or (not f.endswith( + '.json') and not f.endswith('.yaml')): + continue + + full_path = os.path.join(path, f) + + full_path_without_ext = full_path[:-5] + + r = cmind.utils.load_yaml_and_json(full_path_without_ext) + if r['return'] > 0: + print('Warning: problem loading file {}'.format(full_path)) + else: + meta = r['meta'] + + # Check base + r = process_base(meta, full_path) + if r['return'] > 0: + return r + meta = r['meta'] + + uid = meta['uid'] + + # Check pruning + add = True + + if len(prune) > 0: + if prune_uid != '' and uid != prune_uid: + add = False + + if add and len( + prune_list) > 0 and uid not in 
prune_list: + add = False + + if add and prune_key != '' and prune_key_uid != '' and prune_key_uid != meta.get( + prune_key, None): + add = False + + if add: + meta['full_path'] = full_path + + add_all_tags = copy.deepcopy(all_tags) + + name = meta.get('name', '') + if name == '': + name = ' '.join(meta.get('tags', [])) + name = name.strip() + meta['name'] = name + + file_tags = meta.get('tags', '').strip() + if file_tags == '': + if name != '': + add_all_tags += [v.lower() + for v in name.split(' ')] + else: + add_all_tags += file_tags.split(',') + + meta['all_tags'] = add_all_tags + + meta['main_meta'] = main_meta + + selection.append(meta) + + return {'return': 0, 'lst': lst, 'selection': selection} + +########################################################################## + + +def process_base(meta, full_path): + + global base_path, base_path_meta + + _base = meta.get('_base', '') + if _base != '': + name = '' + + filename = _base + full_path_base = os.path.dirname(full_path) + + if not filename.endswith('.yaml') and not filename.endswith('.json'): + return {'return': 1, 'error': '_base file {} in {} must be .yaml or .json'.format( + filename, full_path)} + + if ':' in _base: + x = _base.split(':') + name = x[0] + + full_path_base = base_path.get(name, '') + if full_path_base == '': + + # Find artifact + r = cmind.access({'action': 'find', + 'automation': 'cfg', + 'artifact': name}) + if r['return'] > 0: + return r + + lst = r['list'] + + if len(lst) == 0: + return {'return': 1, 'error': '_base artifact {} not found in {}'.format( + name, full_path)} + + full_path_base = lst[0].path + + base_path[name] = full_path_base + + filename = x[1] + + # Load base + path = os.path.join(full_path_base, filename) + + if not os.path.isfile(path): + return {'return': 1, 'error': '_base file {} not found in {}'.format( + filename, full_path)} + + if path in base_path_meta: + base = copy.deepcopy(base_path_meta[path]) + else: + path_without_ext = path[:-5] + + r = cmind.utils.load_yaml_and_json(path_without_ext) + if r['return'] > 0: + return r + + base = r['meta'] + + base_path_meta[path] = copy.deepcopy(base) + + for k in meta: + v = meta[k] + + if k not in base: + base[k] = v + else: + if isinstance(v, str): + # Only merge a few special keys and overwrite the rest + if k in ['tags', 'name']: + base[k] += meta[k] + else: + base[k] = meta[k] + elif isinstance(v, list): + for vv in v: + base[k].append(vv) + elif isinstance(v, dict): + base[k].update(v) + + meta = base + + return {'return': 0, 'meta': meta} + + +########################################################################## +def get_with_complex_key(meta, key): + + j = key.find('.') + + if j < 0: + return meta.get(key) + + key0 = key[:j] + + if key0 not in meta: + return None + + return get_with_complex_key(meta[key0], key[j + 1:]) + +########################################################################## + +def get_with_complex_key_safe(meta, key): + v = get_with_complex_key(meta, key) + + if v is None: + v = '' + + return v + +########################################################################## + +def prepare_table(i): + + import pandas as pd + import numpy as np + + selection = i['selection'] + misc = i['misc_module'] + + html = '' + + all_data = [] + +# dimensions = [('input.model', 'MLPerf model'), +# ('input.implementation', 'MLPerf implementation'), +# ('input.framework', 'MLPerf framework')] + + dimensions = i.get('dimensions', []) + + dimension_values = {} + dimension_keys = [] + + if
len(dimensions) == 0: + keys = [('test', 'CM test', 400, 'leftAligned')] + else: + keys = [('test', 'CM test', 50, 'leftAligned')] + + for k in dimensions: + key = k[0] + + keys.append((k[0], k[1], 100, 'leftAligned')) + + dimension_values[key] = [] + dimension_keys.append(key) + +# # assemble all values +# for s in selection: +# for k in dimensions: +# key = k[0] +# +# value = get_with_complex_key(selection, key) +# +# if value!=None and value!='' and value not in dimension_values[key]: +# dimension_values.append(value) + + # If dimensions, sort by dimensions + for d in list(reversed(dimension_keys)): + selection = sorted(selection, key=lambda x: get_with_complex_key_safe(x, d)) + + + keys += [ + ('functional', 'Functional', 80, ''), + ('reproduced', 'Reproduced', 80, ''), + ('notes', 'Notes', 200, 'leftAligned'), + ] + + j = 0 + + badges_url = {'functional': 'https://cTuning.org/images/artifacts_evaluated_functional_v1_1_small.png', + 'reproduced': 'https://cTuning.org/images/results_reproduced_v1_1_small.png'} + + + for s in selection: + row = {} + + j += 1 + + uid = s['uid'] + + url = misc.make_url(uid, key='uid', action='howtorun', md=False) + + name = s.get('name', '') + if name == '': + name = uid + + if len(dimensions) == 0: + row['test'] = '<a href="{}">{}</a>'.format( + url, name) + else: + row['test'] = '<a href="{}">View</a>'.format(url) + for k in dimensions: + kk = k[0] + + v = get_with_complex_key_safe(s, kk) + + row[kk] = str(v) + + + # Check ACM/IEEE functional badge + x = '' + if s.get('functional', False): + x = '<a href="{}" target="_blank"><img src="{}"></a>'.format( + url, badges_url['functional']) + row['functional'] = x + + # Check ACM/IEEE reproduced badge + x = '' + if s.get('reproduced', False): + x = '<a href="{}" target="_blank"><img src="{}"></a>
    '.format( + url, badges_url['reproduced']) + row['reproduced'] = x + + # Check misc notes + row['notes'] = s.get('notes','') + + # Finish row + all_data.append(row) + + # Visualize table + pd_keys = [v[0] for v in keys] + pd_key_names = [v[1] for v in keys] + + pd_all_data = [] + for row in sorted(all_data, key=lambda row: (row.get('x1', 0))): + pd_row = [] + for k in pd_keys: + pd_row.append(row.get(k)) + pd_all_data.append(pd_row) + + df = pd.DataFrame(pd_all_data, columns= pd_key_names) + + df.index += 1 + + return {'return': 0, 'df':df} + + +########################################################################## +def gui(i): + + params = i['params'] + st = i['streamlit_module'] + misc = i['misc_module'] + meta = i['meta'] + gui_meta = meta['gui'] + skip_header = i.get('skip_title', False) + + end_html = '' + + if not skip_header: + # Title + title = gui_meta['title'] + + st.title('[Collective Mind](https://github.com/mlcommons/ck)') + + st.markdown('### {}'.format(title)) + + + # Check if test uid is specified + uid = '' + x = params.get('uid', ['']) + if len(x)>0 and x[0]!='': + uid = x[0].strip() + + bench_uid = '' + x = params.get('bench_uid', ['']) + if len(x)>0 and x[0]!='': + bench_uid = x[0].strip() + + compute_uid = '' + x = params.get('compute_uid', ['']) + if len(x)>0 and x[0]!='': + compute_uid = x[0].strip() + + + ############################################################## + # Check the first level of benchmarks + ii = {'tags': 'benchmark,run', 'skip_files':True, 'prune':{}} + + if uid != '': + ii['skip_files'] = False + ii['prune']['uid'] = uid + if bench_uid != '': + ii['artifact'] = bench_uid + if compute_uid != '': + ii['prune']['key'] = 'supported_compute' + ii['prune']['key_uid'] = compute_uid + + r = load_cfg(ii) + if r['return']>0: + return r + + lst = r['selection'] + + if len(lst) ==0: + st.markdown('Warning: no benchmarks found!') + return {'return': 0} + + test_meta = {} + + bench_id = 0 + + ########################################################################## + if uid != '': + if len(lst) ==0: + st.markdown('CM test with UID "{}" not found!'.format(uid)) + return {'return': 0} + elif len(lst) >1: + st.markdown( + 'Warning: More than 1 CM test found with UID "{}" - ambiguity!'.format(uid)) + return {'return': 0} + + test_meta = lst[0] + + bench_id = 1 + compute_uid = test_meta['compute_uid'] + bench_supported_compute = [compute_uid] + + if uid == '': + selection = sorted(lst, key= lambda v: v['name']) + bench_selection = [{'name': ''}] + selection + + if bench_uid != '': + bench_id_index = 1 + else: + # Check if want to force some benchmark by default + # 27c06c35bceb4059 == MLPerf inference v4.0 + + bench_id_index = 0 + + j = 0 + for b in bench_selection: + if b.get('uid','') =='27c06c35bceb4059': + bench_id_index = j + break + j += 1 + + bench_id = st.selectbox('Select benchmark:', + range(len(bench_selection)), + format_func=lambda x: bench_selection[x]['name'], + index = bench_id_index, + key = 'bench') + + bench_supported_compute = [] + bench_meta = {} + if bench_id >0: + bench_meta = bench_selection[bench_id] + bench_supported_compute = bench_meta.get('supported_compute', []) + + urls = bench_meta.get('urls', []) + if len(urls) >0: + x = '\n' + for u in urls: + name = u['name'] + url = u['url'] + + x += ' [ [{}]({}) ] '.format(name, url) + x += '\n' + + st.markdown(x) + + ########################################################################## + if True ==True: + ############################################################## + # Check 
compute + + ii = {'tags': 'benchmark,compute'} + if bench_id >0: + if compute_uid != '': + x = [compute_uid] + else: + x = bench_supported_compute + if len(x) == 0: + st.markdown('Warning: no supported compute selected!') + return {'return': 0} + + ii['prune'] = {'list':x} + + r = load_cfg(ii) + if r['return']>0: + return r + + selection = sorted(r['selection'], key= lambda v: v['name']) + + if len(selection) == 0: + st.markdown('Warning: no supported compute found!') + return {'return': 0} + + compute_selection = [{'name': ''}] + if len(selection) >0: + compute_selection += selection + + compute_id_index = 0 if compute_uid == '' else 1 + + if uid == '': + compute_id = st.selectbox('Select target hardware to benchmark:', + range(len(compute_selection)), + format_func=lambda x: compute_selection[x]['name'], + index = compute_id_index, + key = 'compute') + + compute = {} + if compute_id >0: + compute = compute_selection[compute_id] + compute_uid = compute['uid'] + + compute_meta = {} + for c in compute_selection: + if c.get('uid','') !='': + compute_meta[c['uid']] = c + + ########################################################################## + if uid == '': + + ############################################################## + # Check tests + ii = {'tags': 'benchmark,run'} + + if bench_id >0: + bench_uid = bench_selection[bench_id]['uid'] + ii['artifact'] = bench_uid + if compute_uid !='': + ii['prune'] = {'key':'compute_uid', 'key_uid':compute_uid} + + r = load_cfg(ii) + if r['return']>0: + return r + + selection = sorted(r['selection'], key= lambda v: v['name']) + + # Check how many and prune + if len(selection) == 0: + st.markdown('No CM tests found') + return {'return': 0} + + for s in selection: + c_uid = s.get('compute_uid', '') + if c_uid !='': + c_tags = compute_meta[c_uid].get('tags', '') + if c_tags !='': + s['all_tags'] += c_tags.split(',') + + s['compute_meta'] = compute_meta[c_uid] + + + if len(selection) >1: + # Update selection with compute tags + test_tags = '' + x = params.get('tags', ['']) + if len(x)>0 and x[0]!='': + test_tags = x[0].strip() + + test_tags = st.text_input('Found {} CM tests. 
Prune them by tags:'.format( + str(len(selection))), value=test_tags, key='test_tags').strip() + + if test_tags !='': + test_tags_list = test_tags.replace(' ', ',').split(',') + + pruned_selection = [] + + for s in selection: + all_tags = s['all_tags'] + + add = True + + for t in test_tags_list: + if t not in all_tags: + add = False + break + + if add: + pruned_selection.append(s) + + selection = pruned_selection + + test_selection = [{'name': ''}] + selection + + + + if len(selection) <200: + # Creating compute selector + test_id_index = 1 if len(selection) == 1 else 0 + + test_id = st.selectbox('Select a test from {}:'.format(str(len(selection))), + range(len(test_selection)), + format_func=lambda x: test_selection[x]['name'], + index = test_id_index, + key = 'test') + + + if test_id > 0: + test_meta = test_selection[test_id] + else: + ############################################################### + # View many (table) + ii = {'selection': selection, + 'misc_module': misc} + + # Check if dimensions in the bench + dimensions = bench_meta.get('dimensions', []) + if len(dimensions) >0: + viewer_selection = ['benchmark specific', 'universal'] + + viewer = st.selectbox('Viewer:', viewer_selection, key= 'viewer') + + if viewer == 'benchmark specific': + ii['dimensions'] = dimensions + + else: + st.markdown('---') + + r = prepare_table(ii) + if r['return']>0: + return r + + df = r['df'] + + html = df.to_html(escape=False, justify='left') + st.write(html, unsafe_allow_html= True) + +# st.dataframe(df, unsafe_allow_html = True) + + + ############################################################## + # Show individual test + if len(test_meta) >0: + if uid != '': + c_uid = test_meta.get('compute_uid', '') + if c_uid !='': + c_tags = compute_meta[c_uid].get('tags', '') + if c_tags !='': + test_meta['all_tags'] += c_tags.split(',') + + test_meta['compute_meta'] = compute_meta[c_uid] + + if uid == '': + st.markdown('---') + + uid = test_meta['uid'] + + # First, check if there is a README + test_path = test_meta['full_path'] + + test_md = test_meta['full_path'][:-5] +'.md' + if os.path.isfile(test_md): + + r = cmind.utils.load_txt(test_md) + if r['return']>0: + return r + + s = r['string'] + + st.markdown(s) + + # Next print some info (for now JSON) + import json + x = """ +--- +**CM test dictionary:** +```json +{} +``` + """.format(json.dumps(test_meta, indent=2)) + st.markdown(x) + + + # Create self link + # This misc module is in CM "gui" script + x1 = misc.make_url(uid, key='uid', action='howtorun', md=False) + end_html = '
<a href="{}" target="_blank">Self link</a>
    '.format(x1) + + return {'return': 0, 'end_html': end_html} diff --git a/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/tests/debug.py b/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/tests/debug.py new file mode 100644 index 000000000..7c8bab4b7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/launch-benchmark/tests/debug.py @@ -0,0 +1,6 @@ +import cmind + +r = cmind.access({'action': 'gui', + 'automation': 'script', + 'artifact': 'launch benchmark'}) +print(r) diff --git a/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/README-extra.md new file mode 100644 index 000000000..204c394fa --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/README-extra.md @@ -0,0 +1,2 @@ +Useful info: +* https://medium.com/@yushantripleseven/managing-multiple-cuda-cudnn-installations-ba9cdc5e2654 diff --git a/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/_cm.yaml new file mode 100644 index 000000000..da6f26635 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/_cm.yaml @@ -0,0 +1,60 @@ +alias: plug-prebuilt-cudnn-to-cuda +uid: 894452315a3247ef + +automation_alias: script +automation_uid: 5b4e0237da074764 + +tags: + - plug + - prebuilt-cudnn + - to-cuda + +cache: true + +category: CUDA automation + +default_env: + CM_SUDO: sudo + +default_version: 9.3.0 + +deps: + - tags: detect,os + - tags: detect,cpu + - tags: get,cuda + +docker: + run: true + +input_description: + tar_file: + desc: Full path to the cuDNN Tar file downloaded from Nvidia website (https://developer.nvidia.com/cudnn) + +input_mapping: + tar_file: CM_CUDNN_TAR_FILE_PATH + skip_sudo: CUDA_SKIP_SUDO + +new_env_keys: +- CM_CUDNN_* + +prehook_deps: +#- tags: get,generic-sys-util,_xz +- tags: download,file + env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_CUDNN_TAR_FILE_PATH + extra_cache_tags: cudnn,archive + force_cache: true + names: + - download-script + skip_if_env: + CM_CUDNN_TAR_FILE_PATH: + - True + update_tags_from_env_with_prefix: + _url.: + - WGET_URL + +versions: + 9.3.0: + env: + CM_CUDNN_TAR_FILE_NAME_TEMPLATE: cudnn-linux-x86_64-9.3.0.75_cuda{{CUDA_MAJOR_VERSION}}-archive.tar.xz + CM_CUDNN_TAR_MD5SUM: 2fa73268de8bbdab5560f4aa1a5a73ab diff --git a/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/customize.py b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/customize.py new file mode 100644 index 000000000..2e246e453 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/customize.py @@ -0,0 +1,63 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: 
https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + recursion_spaces = i['recursion_spaces'] + + cur_dir = os.getcwd() + + os_info = i['os_info'] + + env = i['env'] + + if str(env.get('CUDA_SKIP_SUDO', '')).lower() == 'true': + env['CM_SUDO'] = '' + + meta = i['meta'] + automation = i['automation'] + version = env.get('CM_VERSION') + + supported_versions = list(meta['versions'].keys()) + + if version not in supported_versions: + return {'return': 1, 'error': "Only cuDNN versions {} are supported now".format( + ', '.join(supported_versions))} + + env['CM_CUDNN_VERSION'] = version + + filename = env['CM_CUDNN_TAR_FILE_NAME_TEMPLATE'] + cudnn_md5sum = env.get('CM_CUDNN_TAR_MD5SUM', '') + + cuda_version_split = env['CM_CUDA_VERSION'].split('.') + cuda_version_major = cuda_version_split[0] + + filename = filename.replace('{{CUDA_MAJOR_VERSION}}', cuda_version_major) + + env['CM_CUDNN_TAR_FILE_NAME'] = filename + + cudnn_dir = filename[:-7] + + cudnn_url = f'https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/{filename}' + + print('') + print(f'URL to download cuDNN: {cudnn_url}') + + env['CM_CUDNN_TAR_DIR'] = cudnn_dir + env['CM_CUDNN_UNTAR_PATH'] = os.path.join(cur_dir, cudnn_dir) + env['WGET_URL'] = cudnn_url + env['CM_DOWNLOAD_CHECKSUM'] = cudnn_md5sum + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/run.sh b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/run.sh new file mode 100644 index 000000000..bf6e72ec3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cudnn-to-cuda/run.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +CUR=${PWD} +INSTALL_DIR=${CUR}/install + +echo "******************************************" +echo "${CUR}" +echo "${CM_CUDNN_TAR_FILE_PATH}" +echo "${CM_CUDNN_TAR_DIR}" +echo "${CM_CUDNN_UNTAR_PATH}" +echo "${CUDA_HOME}" +echo "${CM_CUDA_PATH_INCLUDE}" +echo "${CM_CUDA_PATH_LIB}" +echo "******************************************" + +echo "Untarring file ..." +echo "" +tar -xf ${CM_CUDNN_TAR_FILE_PATH} +test $? -eq 0 || exit $? + +echo "Copying include files ..." +echo "" +${CM_SUDO} cp -P ${CM_CUDNN_TAR_DIR}/include/cudnn*.h ${CM_CUDA_PATH_INCLUDE} +${CM_SUDO} chmod a+r ${CM_CUDA_PATH_INCLUDE}/cudnn*.h + +echo "Copying lib files ..." +echo "" +${CM_SUDO} cp -P ${CM_CUDNN_TAR_DIR}/lib/libcudnn* ${CM_CUDA_PATH_LIB} +${CM_SUDO} chmod a+r ${CM_CUDA_PATH_LIB}/libcudnn* + +echo "Adding marker file to record that cuDNN is installed ..." +echo "" +if [ "${CM_SUDO}" == "sudo" ]; then + ${CM_SUDO} sh -c "echo '${CM_VERSION}' > ${CUDA_HOME}/cm_installed_cudnn.txt" +else + echo "${CM_VERSION}" > ${CUDA_HOME}/cm_installed_cudnn.txt +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
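The plug-prebuilt-* customize.py modules in this patch (cuDNN above, cuSPARSELt below) all derive the tarball name, cache directory and download URL from a version template. A minimal sketch of that templating (the CUDA version value is an assumption):

```python
# Hypothetical values illustrating the filename templating used above: the
# CUDA major version is spliced into the template before the URL is composed.
template = 'cudnn-linux-x86_64-9.3.0.75_cuda{{CUDA_MAJOR_VERSION}}-archive.tar.xz'
cuda_version = '12.4'                                  # assumed CM_CUDA_VERSION
major = cuda_version.split('.')[0]
filename = template.replace('{{CUDA_MAJOR_VERSION}}', major)
archive_dir = filename[:-len('.tar.xz')]               # mirrors filename[:-7]
url = ('https://developer.download.nvidia.com/compute/cudnn/redist/'
       f'cudnn/linux-x86_64/{filename}')
print(url)          # .../cudnn-linux-x86_64-9.3.0.75_cuda12-archive.tar.xz
print(archive_dir)  # cudnn-linux-x86_64-9.3.0.75_cuda12-archive
```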
diff --git a/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/_cm.yaml new file mode 100644 index 000000000..b542a31f6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/_cm.yaml @@ -0,0 +1,60 @@ +alias: plug-prebuilt-cusparselt-to-cuda +uid: d87ae2182d364483 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +tags: + - plug + - prebuilt-cusparselt + - to-cuda + +cache: true + +category: CUDA automation + +default_env: + CM_SUDO: sudo + +default_version: 0.6.2.3 + +deps: + - tags: detect,os + - tags: detect,cpu + - tags: get,cuda + +docker: + run: true + +input_description: + tar_file: + desc: Full path to the cuSPARSELt Tar file downloaded from Nvidia website (https://developer.nvidia.com/cusparselt-downloads) + +input_mapping: + tar_file: CM_CUSPARSELT_TAR_FILE_PATH + skip_sudo: CUDA_SKIP_SUDO + +new_env_keys: +- CM_CUSPARSELT_* + +prehook_deps: +#- tags: get,generic-sys-util,_xz +- tags: download,file + env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_CUSPARSELT_TAR_FILE_PATH + extra_cache_tags: cusparselt,archive + force_cache: true + names: + - download-script + skip_if_env: + CM_CUSPARSELT_TAR_FILE_PATH: + - True + update_tags_from_env_with_prefix: + _url.: + - WGET_URL + +versions: + 0.6.2.3: + env: + CM_CUSPARSELT_TAR_FILE_NAME_TEMPLATE: libcusparse_lt-linux-x86_64-0.6.2.3-archive.tar.xz + CM_CUSPARSELT_TAR_MD5SUM: 2fa73268de8bbdab5560f4aa1a5a73ab diff --git a/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/customize.py b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/customize.py new file mode 100644 index 000000000..077bb670e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/customize.py @@ -0,0 +1,63 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + recursion_spaces = i['recursion_spaces'] + + cur_dir = os.getcwd() + + os_info = i['os_info'] + + env = i['env'] + + if str(env.get('CUDA_SKIP_SUDO', '')).lower() == 'true': + env['CM_SUDO'] = '' + + meta = i['meta'] + automation = i['automation'] + version = env.get('CM_VERSION') + + supported_versions = list(meta['versions'].keys()) + + if version not in supported_versions: + return {'return': 1, 'error': "Only CUSPARSELT versions {} are supported now".format( + ', '.join(supported_versions))} + + env['CM_CUSPARSELT_VERSION'] = version + + filename = env['CM_CUSPARSELT_TAR_FILE_NAME_TEMPLATE'] + cusparselt_md5sum = env.get('CM_CUSPARSELT_TAR_MD5SUM', '') + + cuda_version_split = env['CM_CUDA_VERSION'].split('.') + cuda_version_major = cuda_version_split[0] + + filename = filename.replace('{{CUDA_MAJOR_VERSION}}', cuda_version_major) + + env['CM_CUSPARSELT_TAR_FILE_NAME'] = filename + + cusparselt_dir = filename[:-7] + + cusparselt_url = f'https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-x86_64/{filename}' + + print('') + print(f'URL to download CUSPARSELT: {cusparselt_url}') + + env['CM_CUSPARSELT_TAR_DIR'] = cusparselt_dir + 
env['CM_CUSPARSELT_UNTAR_PATH'] = os.path.join(cur_dir, cusparselt_dir) + env['WGET_URL'] = cusparselt_url + env['CM_DOWNLOAD_CHECKSUM'] = cusparselt_md5sum + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/run.sh b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/run.sh new file mode 100644 index 000000000..d500f56c1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/plug-prebuilt-cusparselt-to-cuda/run.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +CUR=${PWD} +INSTALL_DIR=${CUR}/install + +echo "******************************************" +echo "${CUR}" +echo "${CM_CUSPARSELT_TAR_FILE_PATH}" +echo "${CM_CUSPARSELT_TAR_DIR}" +echo "${CM_CUSPARSELT_UNTAR_PATH}" +echo "${CUDA_HOME}" +echo "${CM_CUDA_PATH_INCLUDE}" +echo "${CM_CUDA_PATH_LIB}" +echo "******************************************" + +echo "Untarring file ..." +echo "" +tar -xf ${CM_CUSPARSELT_TAR_FILE_PATH} +test $? -eq 0 || exit $? + +echo "Copying include files ..." +echo "" +${CM_SUDO} cp -P ${CM_CUSPARSELT_TAR_DIR}/include/cusparseLt*.h ${CM_CUDA_PATH_INCLUDE} +${CM_SUDO} chmod a+r ${CM_CUDA_PATH_INCLUDE}/cusparseLt*.h + +echo "Copying lib files ..." +echo "" +${CM_SUDO} cp -P ${CM_CUSPARSELT_TAR_DIR}/lib/libcusparseLt* ${CM_CUDA_PATH_LIB} +${CM_SUDO} chmod a+r ${CM_CUDA_PATH_LIB}/libcusparseLt* + +echo "Adding marker file to record that cuSPARSELt is installed ..." +echo "" +if [ "${CM_SUDO}" == "sudo" ]; then + ${CM_SUDO} sh -c "echo '${CM_VERSION}' > ${CUDA_HOME}/cm_installed_cusparselt.txt" +else + echo "${CM_VERSION}" > ${CUDA_HOME}/cm_installed_cusparselt.txt +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/README.md b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/README.md new file mode 100644 index 000000000..b4919a758 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/prepare-training-data-bert](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/prepare-training-data-bert) for the documentation of this CM script.
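Downloads in this patch are pinned with MD5 checksums through CM_DOWNLOAD_CHECKSUM (the plug-prebuilt-* scripts above, the BERT data recipe below). A hedged sketch of the kind of verification this implies (the helper name is illustrative, not CM's actual downloader):

```python
import hashlib

def md5_ok(path, expected_md5):
    # Compare a fetched file's MD5 against the value pinned in _cm.yaml,
    # reading in chunks so large archives do not load into memory at once.
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            h.update(chunk)
    return h.hexdigest() == expected_md5

# e.g. md5_ok('bert_config.json', '7f59165e21b7d566db610ff6756c926b')
```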
diff --git a/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/_cm.yaml new file mode 100644 index 000000000..de1a41141 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/_cm.yaml @@ -0,0 +1,107 @@ +alias: prepare-training-data-bert +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +deps: [] +input_description: {} +input_mapping: + clean: CM_MLPERF_TRAINING_CLEAN_TFRECORDS + data_dir: CM_DATA_DIR +new_env_keys: +- CM_MLPERF_TRAINING_BERT_* +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: +- env: + CM_DOWNLOAD_CHECKSUM: 7f59165e21b7d566db610ff6756c926b + CM_DOWNLOAD_FILENAME: bert_config.json + CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CONFIG_FILE_PATH + CM_DOWNLOAD_PATH: <<>> + extra_cache_tags: mlperf,training,bert,config + force_cache: true + tags: download,file,_gdown,_url.https://drive.google.com/uc?id=1fbGClQMi2CoMv7fwrwTC5YYPooQBdcFW +- env: + CM_DOWNLOAD_CHECKSUM: 64800d5d8528ce344256daf115d4965e + CM_DOWNLOAD_FILENAME: vocab.txt + CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_VOCAB_FILE_PATH + CM_DOWNLOAD_PATH: <<>> + extra_cache_tags: bert,vocab + force_cache: true + tags: download,file,_gdown,_url.https://drive.google.com/uc?id=1USK108J6hMM_d27xCHi738qBL8_BT1u1 +- env: + CM_DOWNLOAD_CHECKSUM: 7d3a0619cb8bf7e829af99fa5c29daa8 + CM_DOWNLOAD_FILENAME: bert_reference_results_text_md5.txt + CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_REFERENCE_RESULTS_TEXT_MD5_FILE_PATH + CM_DOWNLOAD_PATH: <<>> + extra_cache_tags: bert,data,results,md5 + force_cache: true + tags: download,file,_gdown,_url.https://drive.google.com/uc?id=1tmMgLwoBvbEJEHXh77sqrXYw5RpqT8R_ +- env: + CM_DOWNLOAD_CHECKSUM: '' + CM_DOWNLOAD_FILENAME: results_text.tar.gz + CM_DOWNLOAD_PATH: <<>> + CM_EXTRACT_EXTRACTED_CHECKSUM_FILE: <<>> + CM_EXTRACT_EXTRACTED_FILENAME: results4 + CM_EXTRACT_FINAL_ENV_NAME: CM_BERT_TRAINING_DATA_PATH + CM_EXTRACT_PATH: <<>> + extra_cache_tags: bert,data,results + force_cache: true + tags: download-and-extract,file,_gdown,_extract,_url.https://drive.google.com/uc?id=14xV2OUGSQDG_yDBrmbSdcDC-QGeqpfs_ +- env: + CM_DOWNLOAD_CHECKSUM: 50797acd537880bfb5a7ade80d976129 + CM_DOWNLOAD_FILENAME: model.ckpt-28252.data-00000-of-00001 + CM_DOWNLOAD_FINAL_ENV_NAME: CM_BERT_CHECKPOINT_FILE_PATH + CM_DOWNLOAD_PATH: <<>> + extra_cache_tags: bert,checkpoint,data + force_cache: true + tags: download,file,_gdown,_url.https://drive.google.com/uc?id=1chiTBljF0Eh1U5pKs6ureVHgSbtU8OG_ +- env: + CM_DOWNLOAD_CHECKSUM: f97de3ae180eb8d479555c939d50d048 + CM_DOWNLOAD_FILENAME: model.ckpt-28252.index + CM_DOWNLOAD_PATH: <<>> + extra_cache_tags: bert,checkpoint,index + force_cache: true + tags: download,file,_gdown,_url.https://drive.google.com/uc?id=1Q47V3K3jFRkbJ2zGCrKkKk-n0fvMZsa0 +- env: + CM_DOWNLOAD_CHECKSUM: dbd16c731e8a8113bc08eeed0326b8e7 + CM_DOWNLOAD_FILENAME: model.ckpt-28252.meta + CM_DOWNLOAD_PATH: <<>> + extra_cache_tags: bert,checkpoint,meta + force_cache: true + tags: download,file,_gdown,_url.https://drive.google.com/uc?id=1vAcVmXSLsLeQ1q7gvHnQUSth5W_f_pwv +tags: +- prepare +- mlperf +- training +- data +- input +- bert +uid: 1e06a7abe23545eb +variations: + nvidia: + default: true + deps: + - extra_cache_tags: mlperf,training,results + tags: get,git,repo,_repo.https://github.com/wchen61/training_results_v2.1,_branch.fix_bert_prepare_data + env: + CM_TMP_VARIATION: nvidia + group: implementation + reference: + deps: + - names: + - 
mlperf-training-src + tags: get,mlperf,training,src + - names: + - python3 + tags: get,python3 + - tags: get,generic-python-lib,_tensorflow + version: 2.4.0 + - tags: get,generic-python-lib,_protobuf + version_max: 3.20.1 + version_max_usable: 3.20.1 + env: + CM_TMP_VARIATION: reference + group: implementation +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/customize.py b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/customize.py new file mode 100644 index 000000000..6c74e84e6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/customize.py @@ -0,0 +1,77 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + datadir = env.get('CM_DATA_DIR', os.getcwd()) + env['CM_DATA_DIR'] = datadir + + env['CM_BERT_CONFIG_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") + env['CM_BERT_VOCAB_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") + env['CM_BERT_DATA_DOWNLOAD_DIR'] = os.path.join(datadir, "download") + + env['CM_BERT_CHECKPOINT_DOWNLOAD_DIR'] = os.path.join(datadir, "phase1") + + if env.get("CM_TMP_VARIATION", "") == "nvidia": + code_path = os.path.join( + env['CM_GIT_REPO_CHECKOUT_PATH'], + 'NVIDIA', + 'benchmarks', + 'bert', + 'implementations', + 'pytorch-22.09') + env['CM_RUN_DIR'] = code_path + elif env.get("CM_TMP_VARIATION", "") == "reference": + code_path = os.path.join( + env['CM_MLPERF_TRAINING_SOURCE'], + 'language_model', + 'tensorflow', + 'bert', + 'cleanup_scripts') + env['CM_RUN_DIR'] = code_path + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + data_dir = env['CM_DATA_DIR'] + env['CM_MLPERF_TRAINING_BERT_DATA_PATH'] = data_dir + + if env.get("CM_TMP_VARIATION", "") == "nvidia": + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.path.join( + data_dir, "hdf5", "eval", "eval_all.hdf5") + elif env.get("CM_TMP_VARIATION", "") == "reference": + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.path.join( + data_dir, "tfrecords", "eval_10k") + env['CM_MLPERF_TRAINING_BERT_TFRECORDS_PATH'] = os.path.join( + data_dir, "tfrecords") + + env['CM_MLPERF_TRAINING_BERT_VOCAB_PATH'] = env['CM_BERT_VOCAB_FILE_PATH'] + env['CM_MLPERF_TRAINING_BERT_CONFIG_PATH'] = env['CM_BERT_CONFIG_FILE_PATH'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run-nvidia.sh b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run-nvidia.sh new file mode 100644 index 000000000..23cd41289 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run-nvidia.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? 
+} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" + +CUR=${CM_DATA_DIR:-"$PWD/data"} +run "cd \"${CM_RUN_DIR}\"" +run "docker build --pull -t mlperf-nvidia:language_model ." +run "ID=`docker run -dt --runtime=nvidia --ipc=host -v $CUR:/workspace/bert_data mlperf-nvidia:language_model bash`" +run "docker exec $ID bash -c 'cd /workspace/bert && ./input_preprocessing/prepare_data.sh -s --outputdir /workspace/bert_data'" diff --git a/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run-reference.sh b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run-reference.sh new file mode 100644 index 000000000..97524312f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run-reference.sh @@ -0,0 +1,81 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" +CUR=$PWD +DATA_DIR=${CM_DATA_DIR:-"$PWD/data"} + +cd ${CM_RUN_DIR} +mkdir -p ${DATA_DIR}/tfrecords +for i in $(seq -f "%05g" 0 499) +do + FILENAME="${DATA_DIR}/tfrecords/part-${i}-of-00500" + if [[ ${CM_MLPERF_TRAINING_CLEAN_TFRECORDS} != "yes" && -f ${FILENAME} && $(stat -c%s "$FILENAME") -gt 500000000 ]] ; then + echo "Skipping regenerating existing ${FILENAME}" + continue; + fi + cmd="python3 create_pretraining_data.py \ + --input_file=${CM_BERT_DATA_DOWNLOAD_DIR}/results4/part-${i}-of-00500 \ + --output_file=${DATA_DIR}/tfrecords/part-${i}-of-00500 \ + --vocab_file=${CM_BERT_VOCAB_FILE_PATH} \ + --do_lower_case=True \ + --max_seq_length=512 \ + --max_predictions_per_seq=76 \ + --masked_lm_prob=0.15 \ + --random_seed=12345 \ + --dupe_factor=10" + run "$cmd" +done + +FILENAME="${DATA_DIR}/eval_intermediate" +if [[ ${CM_MLPERF_TRAINING_CLEAN_TFRECORDS} != "yes" && -f ${FILENAME} && $(stat -c%s "$FILENAME") -gt 800000000 ]] ; then + echo "Skipping regenerating existing ${FILENAME}" +else + cmd="python3 create_pretraining_data.py \ + --input_file=${CM_BERT_DATA_DOWNLOAD_DIR}/results4/eval.txt \ + --output_file=${DATA_DIR}/eval_intermediate \ + --vocab_file=${CM_BERT_VOCAB_FILE_PATH} \ + --do_lower_case=True \ + --max_seq_length=512 \ + --max_predictions_per_seq=76 \ + --masked_lm_prob=0.15 \ + --random_seed=12345 \ + --dupe_factor=10" + + run "$cmd" +fi + +FILENAME=${DATA_DIR}/tfrecords/eval_10k +if [[ ${CM_MLPERF_TRAINING_CLEAN_TFRECORDS} != "yes" && -f ${FILENAME} && $(stat -c%s "$FILENAME") -gt 25000000 ]] ; then + echo "Skipping regenerating existing ${FILENAME}" +else + cmd="python3 pick_eval_samples.py \ + --input_tfrecord=${DATA_DIR}/eval_intermediate \ + --output_tfrecord=${DATA_DIR}/tfrecords/eval_10k \ + --num_examples_to_pick=10000" + + run "$cmd" +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run.sh b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run.sh new file mode 100644 index 000000000..ea6fd8aca --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +#CM Script location: 
${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" + +CUR=${CM_DATA_DIR:-"$PWD/data"} +cd ${CM_RUN_DIR} + +if [[ ${CM_TMP_VARIATION} == "nvidia" ]]; then + bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-nvidia.sh +elif [[ ${CM_TMP_VARIATION} == "reference" ]]; then + bash ${CM_TMP_CURRENT_SCRIPT_PATH}/run-reference.sh +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run_config.yml b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run_config.yml new file mode 100644 index 000000000..e39692ebc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-bert/run_config.yml @@ -0,0 +1,13 @@ +docker: + build: true + docker_os: ubuntu + docker_os_version: "22.04" + fake_run_deps: true + mounts: + - ${{ CM_DATA_DIR }}:${{ CM_DATA_DIR }} + +run_with_default_inputs: true #if false the script won't run automatic tests + +minimum_system_requirements: + ram: 512 #in GB + disk_space: 900 #in GB diff --git a/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/README.md b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/README.md new file mode 100644 index 000000000..5893a4508 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/prepare-training-data-resnet](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/prepare-training-data-resnet) for the documentation of this CM script. 
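run-reference.sh above only regenerates a tfrecord shard when no sufficiently large output exists, unless CM_MLPERF_TRAINING_CLEAN_TFRECORDS forces a clean rebuild. The same idempotency guard, sketched in Python (size thresholds mirror the shell script; the function name is mine):

```python
import os

def needs_regeneration(path, min_bytes, clean=False):
    # True when the shard must be (re)built: either a clean rebuild was
    # requested, or the file is missing or below the expected minimum size.
    if clean:
        return True
    return not (os.path.isfile(path) and os.path.getsize(path) > min_bytes)

# e.g. needs_regeneration('data/tfrecords/part-00000-of-00500', 500_000_000)
```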
diff --git a/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/_cm.yaml new file mode 100644 index 000000000..25925f56d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/_cm.yaml @@ -0,0 +1,78 @@ +alias: prepare-training-data-resnet +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: MLPerf benchmark support +deps: +- names: + - imagenet-train + tags: get,dataset,imagenet,train +- names: + - imagenet-val + tags: get,dataset,imagenet,val,original,_full +- tags: get,generic-sys-util,_rsync +input_description: {} +input_mapping: + data_dir: CM_DATA_DIR +new_env_keys: +- CM_MLPERF_TRAINING_RESNET_* +- CM_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: +- env: + CM_DOWNLOAD_CHECKSUM: '' + CM_DOWNLOAD_FINAL_ENV_NAME: CM_IMAGENET_LABELS_FILE_PATH + CM_DOWNLOAD_PATH: <<>> + CM_DOWNLOAD_RENAME_FILE: synset_labels.txt + extra_cache_tags: imagenet,val,labels + force_cache: true + tags: download,file,_wget,_url.https://raw.githubusercontent.com/tensorflow/models/master/research/slim/datasets/imagenet_2012_validation_synset_labels.txt +- enable_if_env: + CM_TMP_VARIATION: + - reference + env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_IMAGENET_TO_GCS_SCRIPT_PATH + extra_cache_tags: imagenet_to_gcs,script + force_cache: true + tags: download,file,_wget,_url.https://raw.githubusercontent.com/tensorflow/tpu/master/tools/datasets/imagenet_to_gcs.py +tags: +- prepare +- mlperf +- training +- data +- input +- resnet +uid: d42a8a8ca2704f9f +variations: + mxnet.#: + env: + CM_MXNET_VERSION: '#' + nvidia: + default: true + deps: + - names: + - nvidia-training-code + tags: get,mlperf,training,nvidia,code + - env: + CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_NVIDIA_DEEPLEARNING_EXAMPLES_REPO_PATH + extra_cache_tags: nvidia,deeplearning_examples + tags: get,git,repo,_repo.https://github.com/NVIDIA/DeepLearningExamples,_sha.81ee705868a11d6fe18c12d237abe4a08aab5fd6 + env: + CM_TMP_VARIATION: nvidia + group: implementation + reference: + deps: + - names: + - mlperf-training-src + tags: get,mlperf,training,src + - names: + - python3 + tags: get,python3 + - tags: get,generic-python-lib,_tensorflow + - tags: get,generic-python-lib,_protobuf + env: + CM_TMP_VARIATION: reference + group: implementation +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/customize.py b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/customize.py new file mode 100644 index 000000000..f85dae70c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/customize.py @@ -0,0 +1,74 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + datadir = env.get('CM_DATA_DIR', os.getcwd()) + env['CM_DATA_DIR'] = datadir + + env['MXNET_VER'] = env.get('CM_MXNET_VER', 
'22.08').replace("-", ".") + + env['CM_IMAGENET_LABELS_DOWNLOAD_DIR'] = env['CM_DATASET_IMAGENET_TRAIN_PATH'] + + if env.get("CM_TMP_VARIATION", "") == "nvidia": + code_path = os.path.join( + env['CM_NVIDIA_DEEPLEARNING_EXAMPLES_REPO_PATH'], + 'MxNet', + 'Classification', + 'RN50v1.5') + env['CM_RUN_DIR'] = code_path + i['run_script_input']['script_name'] = "run-nvidia" + + elif env.get("CM_TMP_VARIATION", "") == "reference": + code_path = os.path.join( + env['CM_MLPERF_TRAINING_SOURCE'], + 'image_classification', + 'tensorflow2') + env['CM_RUN_DIR'] = code_path + i['run_script_input']['script_name'] = "run-reference" + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + data_dir = env['CM_DATA_DIR'] + env['CM_MLPERF_TRAINING_RESNET_DATA_PATH'] = data_dir + + env['CM_MLPERF_TRAINING_IMAGENET_PATH'] = env['CM_DATASET_IMAGENET_TRAIN_PATH'] + + if env.get("CM_TMP_VARIATION", "") == "nvidia": + env['CM_GET_DEPENDENT_CACHED_PATH'] = data_dir + env['CM_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH'] = data_dir + + elif env.get("CM_TMP_VARIATION", "") == "reference": + env['CM_GET_DEPENDENT_CACHED_PATH'] = os.path.join( + data_dir, "tfrecords") + env['CM_MLPERF_TRAINING_RESNET_TFRECORDS_PATH'] = os.path.join( + data_dir, "tfrecords") + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/run-nvidia.sh b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/run-nvidia.sh new file mode 100644 index 000000000..e7ffdb741 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/run-nvidia.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" +mkdir -p train_data/train +mkdir -p train_data/val +rsync -avz ${CM_DATASET_IMAGENET_TRAIN_PATH}/ train_data/train/ +rsync -avz ${CM_DATASET_IMAGENET_VAL_PATH}/ train_data/val/ +cd train_data/train +find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done +cd ../val +run "wget --no-check-certificate -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash" +cd ../../ +DATA_DIR=`pwd`/train_data + +CUR=${CM_DATA_DIR} +run "cd \"${CM_RUN_DIR}\"" +run "docker build --build-arg FROM_IMAGE_NAME=nvcr.io/nvidia/mxnet:${MXNET_VER}-py3 -t nvidia_rn50_mx ." 
+run "ID=`docker run -dt --gpus all --runtime=nvidia --ipc=host -v ${DATA_DIR}:/data -v ${CUR}:/preprocessed nvidia_rn50_mx bash`" +run "docker exec $ID bash -c './scripts/prepare_imagenet.sh /data /preprocessed'" diff --git a/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/run-reference.sh b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/run-reference.sh new file mode 100644 index 000000000..332da70cc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/run-reference.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" +CUR=$PWD +DATA_DIR=${CM_DATA_DIR:-"$PWD/data"} + +cd ${CM_RUN_DIR} +mkdir -p ${DATA_DIR}/tfrecords +cmd="python3 ${CM_IMAGENET_TO_GCS_SCRIPT_PATH} \ + --raw_data_dir=${CM_DATASET_IMAGENET_TRAIN_PATH} \ + --local_scratch_dir=${DATA_DIR}/tfrecords \ + --nogcs_upload" +run "$cmd" diff --git a/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/run_config.yml b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/run_config.yml new file mode 100644 index 000000000..688f811ea --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prepare-training-data-resnet/run_config.yml @@ -0,0 +1,13 @@ +docker: + build: true + docker_os: ubuntu + docker_os_version: "22.04" + fake_run_deps: true + mounts: + - ${{ CM_DATA_DIR }}:${{ CM_DATA_DIR }} + +run_with_default_inputs: true #if false the script won't run automatic tests + +minimum_system_requirements: + ram: 512 #in GB + disk_space: 200 #in GB diff --git a/cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/README.md b/cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/README.md new file mode 100644 index 000000000..33d72b82a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/preprocess-mlperf-inference-submission](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/preprocess-mlperf-inference-submission) for the documentation of this CM script. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/_cm.yaml new file mode 100644 index 000000000..40ff0c669 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/_cm.yaml @@ -0,0 +1,37 @@ +alias: preprocess-mlperf-inference-submission +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: MLPerf benchmark support +clean_files: [] +deps: +- names: + - python + - python3 + tags: get,python3 +- names: + - inference-src + - submission-checker-src + tags: get,mlcommons,inference,src +- names: + - get-mlperf-submission-dir + skip_if_env: + CM_MLPERF_INFERENCE_SUBMISSION_DIR: + - 'on' + tags: get,mlperf,submission,dir +input_mapping: + input: CM_MLPERF_INFERENCE_SUBMISSION_DIR + submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR + submitter: CM_MLPERF_SUBMITTER +tags: +- run +- mlc +- mlcommons +- mlperf +- inference +- submission +- mlperf-inference +- processor +- preprocessor +- preprocess +uid: c23068394a314266 diff --git a/cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/customize.py b/cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/customize.py new file mode 100644 index 000000000..28ceaf7f8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/customize.py @@ -0,0 +1,63 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import cmind as cm +import os +from os.path import exists +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + submission_dir = env.get("CM_MLPERF_INFERENCE_SUBMISSION_DIR", "") + + if submission_dir == "": + print("Please set --env.CM_MLPERF_INFERENCE_SUBMISSION_DIR") + return {'return': 1, + 'error': 'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not specified'} + + if not os.path.exists(submission_dir): + print("Please set --env.CM_MLPERF_INFERENCE_SUBMISSION_DIR to a valid submission directory") + return {'return': 1, + 'error': 'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not existing'} + + submission_dir = submission_dir.rstrip(os.path.sep) + submitter = env.get("CM_MLPERF_SUBMITTER", "MLCommons") + submission_processed = f"{submission_dir}_processed" + + if os.path.exists(submission_processed): + print(f"Cleaning {submission_processed}") + shutil.rmtree(submission_processed) + + CMD = env['CM_PYTHON_BIN'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission", + "preprocess_submission.py") + "' --input '" + submission_dir + "' --submitter '" + submitter + "' --output '" + submission_processed + "'" + env['CM_RUN_CMD'] = CMD + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + submission_dir = env["CM_MLPERF_INFERENCE_SUBMISSION_DIR"] + import datetime + submission_backup = submission_dir + "_backup_" + \ + '{date:%Y-%m-%d_%H:%M:%S}'.format(date=datetime.datetime.now()) + + submission_processed = submission_dir + "_processed" + shutil.copytree(submission_dir, submission_backup) + shutil.rmtree(submission_dir) + 
os.rename(submission_processed, submission_dir) + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/run.sh b/cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/run.sh new file mode 100644 index 000000000..1b3c5c3c0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/preprocess-mlperf-inference-submission/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash +cmd=${CM_RUN_CMD} +echo "${cmd}" +eval "${cmd}" +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/print-any-text/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/print-any-text/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-any-text/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/print-any-text/README.md b/cmx4mlops/cmx4mlops/repo/script/print-any-text/README.md new file mode 100644 index 000000000..ae23369cd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-any-text/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-any-text](https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-any-text) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/print-any-text/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/print-any-text/_cm.yaml new file mode 100644 index 000000000..2fd9bba2c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-any-text/_cm.yaml @@ -0,0 +1,34 @@ +alias: print-any-text +uid: f4bf2d1d33c24e31 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: Tests + +developers: "Grigori Fursin" + +default_env: + CM_PRINT_ANY_TEXT: '' + +input_mapping: + text: CM_PRINT_ANY_TEXT + cm_env_keys: CM_PRINT_ANY_CM_ENV_KEYS + os_env_keys: CM_PRINT_ANY_OS_ENV_KEYS + +tags: +- print +- any-text + +variations: + text.#: + env: + CM_PRINT_ANY_TEXT: "#" + + cm_env.#: + env: + CM_PRINT_ANY_CM_ENV_KEYS: "#" + + os_env.#: + env: + CM_PRINT_ANY_OS_ENV_KEYS: "#" diff --git a/cmx4mlops/cmx4mlops/repo/script/print-any-text/customize.py b/cmx4mlops/cmx4mlops/repo/script/print-any-text/customize.py new file mode 100644 index 000000000..ff114354d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-any-text/customize.py @@ -0,0 +1,41 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +# Developer(s): Grigori Fursin + +from cmind import utils +import os + + +def postprocess(i): + + env = i['env'] + + cm_env_keys = env.get('CM_PRINT_ANY_CM_ENV_KEYS', '').strip() + os_env_keys = env.get('CM_PRINT_ANY_OS_ENV_KEYS', '').strip() + + printed = False + for k, e, t in [(cm_env_keys, env, 'CM_ENV'), + (os_env_keys, os.environ, 'OS_ENV')]: + + if k != '': + for kk in k.split(','): + kk = kk.strip() + if kk != '': + vv = e.get(kk) + + print('{}[{}]: {}'.format(t, kk, vv)) + printed = True + + if printed: + print('') + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/print-any-text/run.bat 
b/cmx4mlops/cmx4mlops/repo/script/print-any-text/run.bat new file mode 100644 index 000000000..be97ff0a2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-any-text/run.bat @@ -0,0 +1,5 @@ +if "%CM_PRINT_ANY_TEXT%" == "" ( + echo. +) else ( + echo %CM_PRINT_ANY_TEXT% +) diff --git a/cmx4mlops/cmx4mlops/repo/script/print-any-text/run.sh b/cmx4mlops/cmx4mlops/repo/script/print-any-text/run.sh new file mode 100644 index 000000000..7f04767d6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-any-text/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +echo "${CM_PRINT_ANY_TEXT}" diff --git a/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/README-extra.md new file mode 100644 index 000000000..a3c638caa --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/README-extra.md @@ -0,0 +1,16 @@ +# MLCommons CM automation recipe + +## Print [Croissant](https://github.com/mlcommons/croissant) description from metadata URL + +```bash +pip install cmind + +cm pull repo ctuning@mlcommons-ck + +cmr "print croissant desc" --url="https://raw.githubusercontent.com/mlcommons/croissant/main/datasets/1.0/gpt-3/metadata.json" +``` + +## About + +* Code snippet taken from https://github.com/mlcommons/croissant/pull/564/files ([@mkuchnik](https://github.com/mkuchnik)) +* CM automation recipe added by [@gfursin](https://github.com/gfursin). \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/README.md b/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/README.md new file mode 100644 index 000000000..3b4b1561b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-croissant-desc](https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-croissant-desc) for the documentation of this CM script.
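The `cmr "print croissant desc" --url=...` call from README-extra.md can also be driven from Python through the `cmind` API; a hedged sketch, assuming `cmind` is installed and that `--url` maps to `CM_PRINT_CROISSANT_URL` via the script's input_mapping:

```python
import cmind

# Equivalent of: cmr "print croissant desc" --url=... (sketch)
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'print,croissant,desc',
                  'env': {'CM_PRINT_CROISSANT_URL':
                          'https://raw.githubusercontent.com/mlcommons/croissant/main/datasets/1.0/gpt-3/metadata.json'},
                  'out': 'con'})
if r['return'] > 0:
    print(r.get('error', ''))
```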
diff --git a/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/_cm.yaml new file mode 100644 index 000000000..ef4d2a7ba --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/_cm.yaml @@ -0,0 +1,29 @@ +alias: print-croissant-desc +uid: 59116d5c98a04d4f + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: Tests + +input_mapping: + url: CM_PRINT_CROISSANT_URL + +default_env: + CM_PRINT_CROISSANT_URL: "https://raw.githubusercontent.com/mlcommons/croissant/main/datasets/1.0/gpt-3/metadata.json" + +deps: +- tags: detect,os +- tags: get,sys-utils-cm +- names: + - python + - python3 + tags: get,python3 +- names: + - croissant + tags: get,croissant + +tags: +- print +- croissant +- desc diff --git a/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/code.py b/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/code.py new file mode 100644 index 000000000..c53ad5fdf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/code.py @@ -0,0 +1,29 @@ +# Taken from https://github.com/mlcommons/croissant/pull/564/files (@mkuchnik) + +import os +import mlcroissant as mlc + + +def main(): + + url = os.environ.get('CM_PRINT_CROISSANT_URL', '') + + if url == '': + print('Error: --url is not specified') + exit(1) + + ds = mlc.Dataset(url) + metadata = ds.metadata.to_json() + + print('') + print('Croissant meta data URL: {}'.format(url)) + print('') + print(f"{metadata['name']}: {metadata['description']}") + + print('') + for x in ds.records(record_set="default"): + print(x) + + +if __name__ == '__main__': + main() diff --git a/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/run.bat b/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/run.bat new file mode 100644 index 000000000..37f249b0f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/run.bat @@ -0,0 +1,2 @@ +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/run.sh b/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/run.sh new file mode 100644 index 000000000..9b94917d9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-croissant-desc/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/README.md b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/README.md new file mode 100644 index 000000000..063f8afda --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world-java](https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world-java) for the documentation of this CM script. 
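code.py above exercises three `mlcroissant` entry points: `mlc.Dataset(url)`, `metadata.to_json()` and `records(record_set="default")`. A short sketch reusing only those calls, trimming the record dump with `itertools.islice` (the record-set name `default` is taken from the snippet above):

```python
from itertools import islice

import mlcroissant as mlc

URL = ('https://raw.githubusercontent.com/mlcommons/croissant/'
       'main/datasets/1.0/gpt-3/metadata.json')

ds = mlc.Dataset(URL)
metadata = ds.metadata.to_json()
print(metadata['name'])

# Print only the first few records instead of the whole set
for record in islice(ds.records(record_set='default'), 3):
    print(record)
```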
diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/_cm.yaml new file mode 100644 index 000000000..b38990817 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/_cm.yaml @@ -0,0 +1,17 @@ +alias: print-hello-world-java +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Tests +deps: +- tags: detect,os +- names: + - java + tags: get,java +tags: +- print +- hello world +- hello-world +- hello +- world +- java +uid: 3b62dc46cce3489c diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/code.java b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/code.java new file mode 100644 index 000000000..4bb917c9e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/code.java @@ -0,0 +1,27 @@ +/* + Developer: Grigori Fursin +*/ + +//Import libraries... +import java.io.*; + +public class hello_world +{ + static int N=16; + static double[][] A=new double [N][N]; + static double[][] B=new double [N][N]; + static double[][] C=new double [N][N]; + + // ******************************************************************* + public static void main(String args[]) + { + System.out.println("Hello world!"); + System.out.println(""); + + String env=System.getenv("CM_VAR1"); + System.out.println("CM_VAR1="+env); + + env=System.getenv("CM_VAR2"); + System.out.println("CM_VAR2="+env); + } +} diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/run.bat b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/run.bat new file mode 100644 index 000000000..f57f2084b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/run.bat @@ -0,0 +1,4 @@ +echo %CM_JAVA_BIN_WITH_PATH% + +%CM_JAVA_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.java +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/run.sh b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/run.sh new file mode 100644 index 000000000..7c5ab3f6a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-java/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +which ${CM_JAVA_BIN_WITH_PATH} + +${CM_JAVA_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.java +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/README.md b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/README.md new file mode 100644 index 000000000..e07f0c290 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world-javac](https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world-javac) for the documentation of this CM script. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/_cm.yaml new file mode 100644 index 000000000..8afdf4d26 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/_cm.yaml @@ -0,0 +1,17 @@ +alias: print-hello-world-javac +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Tests +deps: +- tags: detect,os +- names: + - javac + tags: get,javac +tags: +- print +- hello world +- hello-world +- hello +- world +- javac +uid: 040fafd538104819 diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/code.java b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/code.java new file mode 100644 index 000000000..9eb859cda --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/code.java @@ -0,0 +1,27 @@ +/* + Developer: Grigori Fursin +*/ + +//Import libraries... +import java.io.*; + +public class code +{ + static int N=16; + static double[][] A=new double [N][N]; + static double[][] B=new double [N][N]; + static double[][] C=new double [N][N]; + + // ******************************************************************* + public static void main(String args[]) + { + System.out.println("Hello world!"); + System.out.println(""); + + String env=System.getenv("CM_VAR1"); + System.out.println("CM_VAR1="+env); + + env=System.getenv("CM_VAR2"); + System.out.println("CM_VAR2="+env); + } +} diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/run.bat b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/run.bat new file mode 100644 index 000000000..583b89804 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/run.bat @@ -0,0 +1,8 @@ +echo "%CM_JAVA_BIN_WITH_PATH%" +echo. + +"%CM_JAVAC_BIN_WITH_PATH%" %CM_TMP_CURRENT_SCRIPT_PATH%\code.java +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +"%CM_JAVA_BIN_WITH_PATH%" -classpath "%CM_TMP_CURRENT_SCRIPT_PATH%" code +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/run.sh b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/run.sh new file mode 100644 index 000000000..c7fb26cbc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-javac/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +echo "${CM_JAVAC_BIN_WITH_PATH}" +echo "" + +${CM_JAVAC_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.java +test $? -eq 0 || exit 1 + +${CM_JAVA_BIN_WITH_PATH} -classpath "${CM_TMP_CURRENT_SCRIPT_PATH}" code +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/README.md b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/README.md new file mode 100644 index 000000000..8bfc479e6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world-py](https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world-py) for the documentation of this CM script. 
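The javac variant compiles `code.java` first and then runs the resulting `code` class with an explicit classpath, unlike the single-file `java code.java` launch used by print-hello-world-java. The same two-step flow, sketched in Python with the environment variables these scripts rely on (assumed to be resolved by the `get,javac` and `get,java` dependencies):

```python
import os
import subprocess

# Mirrors run.sh: compile code.java, then run the "code" class (sketch)
javac = os.environ.get('CM_JAVAC_BIN_WITH_PATH', 'javac')
java = os.environ.get('CM_JAVA_BIN_WITH_PATH', 'java')
src_dir = os.environ.get('CM_TMP_CURRENT_SCRIPT_PATH', '.')

subprocess.run([javac, os.path.join(src_dir, 'code.java')], check=True)
subprocess.run([java, '-classpath', src_dir, 'code'], check=True)
```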
diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/_cm.yaml new file mode 100644 index 000000000..b927a85dd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/_cm.yaml @@ -0,0 +1,24 @@ +alias: print-hello-world-py +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Tests +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python3 +- skip_if_env: + CM_SKIP_PRINT: + - 'True' + CM_SKIP_PRINT2: + - 'True' + tags: print,python-version +tags: +- print +- hello world +- hello-world +- hello +- world +- python +uid: d83274c7eb754d90 diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/app.py b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/app.py new file mode 100644 index 000000000..12382ac80 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/app.py @@ -0,0 +1,20 @@ +def main(): + print('') + + # Import cmind to test break points + import cmind.utils + import os + if os.environ.get('CM_TMP_DEBUG_UID', '') == 'f52670e5f3f345a2': + cmind.utils.debug_here( + __file__, + port=5678, + text='Debugging main.py!').breakpoint() + + print('HELLO WORLD from Python') + + x = 1 + print(x) + + +if __name__ == '__main__': + main() diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/customize.py b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/customize.py new file mode 100644 index 000000000..3dc768d9b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/customize.py @@ -0,0 +1,31 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +# Developer(s): Grigori Fursin + +import os + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + meta = i['meta'] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/run.bat b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/run.bat new file mode 100644 index 000000000..c0980c59b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/run.bat @@ -0,0 +1,8 @@ +IF NOT DEFINED CM_TMP_CURRENT_SCRIPT_PATH SET CM_TMP_CURRENT_SCRIPT_PATH=%CD% + +rem %CM_PYTHON_BIN_WITH_PATH% --version + +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\app.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo CM_NEW_VAR_FROM_RUN=XYZ > tmp-run-env.out diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/run.sh b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/run.sh new file mode 100644 index 000000000..bc7e2c301 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world-py/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} + +#which ${CM_PYTHON_BIN_WITH_PATH} +#${CM_PYTHON_BIN_WITH_PATH} --version + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/app.py +test $? -eq 0 || exit $? 
+ +echo "CM_NEW_VAR_FROM_RUN=$MLPERF_XYZ" > tmp-run-env.out diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/print-hello-world/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world/README.md b/cmx4mlops/cmx4mlops/repo/script/print-hello-world/README.md new file mode 100644 index 000000000..8fad99c80 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world](https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-hello-world) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/print-hello-world/_cm.yaml new file mode 100644 index 000000000..12b26efd4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world/_cm.yaml @@ -0,0 +1,48 @@ +alias: print-hello-world +uid: b9f0acba4aca4baa + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: Tests + +default_env: + CM_ENV_TEST1: TEST1 + +env: + CM_ENV_TEST2: TEST2 + +input_mapping: + test1: CM_ENV_TEST1 + +new_env_keys: +- CM_ENV_TEST* + +new_state_keys: +- hello_world* + +tags: +- print +- hello-world +- hello world +- hello +- world +- native-script +- native +- script + +variations: + text.#: + env: + CM_PRINT_HELLO_WORLD_TEXT: "#" + + skip_print_env: + env: + CM_PRINT_HELLO_WORLD_SKIP_PRINT_ENV: 'yes' + +docker: + skip_run_cmd: 'yes' + skip_cm_sys_upgrade: 'yes' + cm_repo_flags: '--checkout=dev' + use_host_group_id: 'yes' + image_tag_extra: '-cm-dev' diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world/run.bat b/cmx4mlops/cmx4mlops/repo/script/print-hello-world/run.bat new file mode 100644 index 000000000..8ce95fc1a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world/run.bat @@ -0,0 +1,16 @@ +if not "%CM_PRINT_HELLO_WORLD_SKIP_PRINT_ENV%" == "yes" ( + echo. + echo CM_ENV_TEST1 = %CM_ENV_TEST1% + echo CM_ENV_TEST2 = %CM_ENV_TEST2% + echo CM_ENV_TEST3 = %CM_ENV_TEST3% +) + +echo. +echo HELLO WORLD! +if not "%CM_PRINT_HELLO_WORLD_TEXT%" == "" ( + + echo. + echo %CM_PRINT_HELLO_WORLD_TEXT% + +) +echo. diff --git a/cmx4mlops/cmx4mlops/repo/script/print-hello-world/run.sh b/cmx4mlops/cmx4mlops/repo/script/print-hello-world/run.sh new file mode 100644 index 000000000..fcb42d00e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-hello-world/run.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +if [[ ${CM_PRINT_HELLO_WORLD_SKIP_PRINT_ENV} != "yes" ]]; then + echo "" + echo "CM_ENV_TEST1 = ${CM_ENV_TEST1}" + echo "CM_ENV_TEST2 = ${CM_ENV_TEST2}" + echo "CM_ENV_TEST3 = ${CM_ENV_TEST3}" +fi + +echo "" +echo "HELLO WORLD!" +if [[ ${CM_PRINT_HELLO_WORLD_TEXT} != "" ]]; then + + echo "" + echo "${CM_PRINT_HELLO_WORLD_TEXT}" + +fi +echo "" diff --git a/cmx4mlops/cmx4mlops/repo/script/print-python-version/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/print-python-version/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-python-version/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. 
+ +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/print-python-version/README.md b/cmx4mlops/cmx4mlops/repo/script/print-python-version/README.md new file mode 100644 index 000000000..b0794039f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-python-version/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-python-version](https://docs.mlcommons.org/cm4mlops/scripts/Tests/print-python-version) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/print-python-version/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/print-python-version/_cm.yaml new file mode 100644 index 000000000..bc2497ea5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-python-version/_cm.yaml @@ -0,0 +1,15 @@ +alias: print-python-version +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Tests +deps: +- names: + - python + - python3 + tags: get,python3 +tags: +- print +- python +- version +- python-version +uid: d3a538fa4abb464b diff --git a/cmx4mlops/cmx4mlops/repo/script/print-python-version/run.bat b/cmx4mlops/cmx4mlops/repo/script/print-python-version/run.bat new file mode 100644 index 000000000..e79030343 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-python-version/run.bat @@ -0,0 +1,8 @@ +echo. + +echo CM_PYTHON_BIN = %CM_PYTHON_BIN% +echo CM_PYTHON_BIN_WITH_PATH = %CM_PYTHON_BIN_WITH_PATH% + +echo. + +%CM_PYTHON_BIN_WITH_PATH% --version diff --git a/cmx4mlops/cmx4mlops/repo/script/print-python-version/run.sh b/cmx4mlops/cmx4mlops/repo/script/print-python-version/run.sh new file mode 100644 index 000000000..3c54cd68e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/print-python-version/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +echo "" + +echo "CM_PYTHON_BIN = ${CM_PYTHON_BIN}" +echo "CM_PYTHON_BIN_WITH_PATH = ${CM_PYTHON_BIN_WITH_PATH}" + +echo "" + +${CM_PYTHON_BIN_WITH_PATH} --version + diff --git a/cmx4mlops/cmx4mlops/repo/script/process-ae-users/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/process-ae-users/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/process-ae-users/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/process-ae-users/README.md b/cmx4mlops/cmx4mlops/repo/script/process-ae-users/README.md new file mode 100644 index 000000000..5d1056b61 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/process-ae-users/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Reproducibility-and-artifact-evaluation/process-ae-users](https://docs.mlcommons.org/cm4mlops/scripts/Reproducibility-and-artifact-evaluation/process-ae-users) for the documentation of this CM script.
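print-python-version simply reports which interpreter the `get,python3` dependency resolved via `CM_PYTHON_BIN_WITH_PATH`. An equivalent check from Python, as an illustrative sketch that falls back to the current interpreter when the variable is unset:

```python
import os
import subprocess
import sys

python_bin = os.environ.get('CM_PYTHON_BIN_WITH_PATH', sys.executable)
print('CM_PYTHON_BIN_WITH_PATH =', python_bin)
subprocess.run([python_bin, '--version'], check=True)
```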
diff --git a/cmx4mlops/cmx4mlops/repo/script/process-ae-users/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/process-ae-users/_cm.yaml new file mode 100644 index 000000000..f79331947 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/process-ae-users/_cm.yaml @@ -0,0 +1,17 @@ +alias: process-ae-users +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: Reproducibility and artifact evaluation +deps: +- names: + - python3 + - python + tags: get,python3 +input_mapping: + file: CM_PROCESS_AE_USERS_INPUT_FILE +tags: +- process +- ae +- users +uid: 5800f1ed677e4efb diff --git a/cmx4mlops/cmx4mlops/repo/script/process-ae-users/code.py b/cmx4mlops/cmx4mlops/repo/script/process-ae-users/code.py new file mode 100644 index 000000000..7f3626fe4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/process-ae-users/code.py @@ -0,0 +1,80 @@ +import os +import csv +import json +import cmind + + +def main(): + f = os.environ.get('CM_PROCESS_AE_USERS_INPUT_FILE', '') + + print('Input CSV file: {}'.format(f)) + + users = [] + with open(f, 'r') as ff: + csvreader = csv.DictReader(ff) + for row in csvreader: + if len(row) > 0: + users.append(row) + + print('') + html = '
<ul>\n' + for user in sorted(users, key=lambda u: ( + u['last'].lower(), u['first'].lower())): + + full_name = user['first'] + ' ' + user['last'] + + name = full_name + ' (' + user['affiliation'] + ')' + + print(name) + + html += '
 <li> ' + name + '\n' + + # Checking contributor + r = cmind.access({'action': 'find', + 'automation': 'contributor', + 'artifact': full_name}) + if r['return'] > 0: + return r + + lst = r['list'] + + if len(lst) == 0: + print(' CM contributor not found!') + + meta = { + 'challenges': [ + 'ae-micro2023' + ], + 'last_participation_date': '202309', + 'name': full_name, + 'organization': user['affiliation'] + } + + print(' Adding to mlcommons@ck ...') + r = cmind.access({'out': 'con', + 'action': 'add', + # Need UID since using common function + 'automation': 'contributor,68eae17b590d4f8f', + 'artifact': 'mlcommons@ck:' + full_name, + 'meta': meta, + 'common': True + }) + if r['return'] > 0: + return r + + html += '
</ul>\n' + + fo = f + '.html' + + print('') + print('Saved HTML to {}'.format(fo)) + + cmind.utils.save_txt(fo, html) + + return {'return': 0} + + +if __name__ == '__main__': + r = main() + if r['return'] > 0: + cmind.error(r) diff --git a/cmx4mlops/cmx4mlops/repo/script/process-ae-users/customize.py b/cmx4mlops/cmx4mlops/repo/script/process-ae-users/customize.py new file mode 100644 index 000000000..197b3c27b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/process-ae-users/customize.py @@ -0,0 +1,22 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import cmind as cm +import os + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/process-ae-users/run.bat b/cmx4mlops/cmx4mlops/repo/script/process-ae-users/run.bat new file mode 100644 index 000000000..37f249b0f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/process-ae-users/run.bat @@ -0,0 +1,2 @@ +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/process-ae-users/run.sh b/cmx4mlops/cmx4mlops/repo/script/process-ae-users/run.sh new file mode 100644 index 000000000..9b94917d9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/process-ae-users/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/README.md b/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/README.md new file mode 100644 index 000000000..191591ab4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/process-mlperf-accuracy](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/process-mlperf-accuracy) for the documentation of this CM script.
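process-ae-users/customize.py above is the minimal shape of a CM customization: `preprocess(i)` (and optionally `postprocess(i)`) receives a dictionary with `os_info`, `env` and related keys, and returns `{'return': 0}` on success or a non-zero `return` plus an `error` string, as preprocess-mlperf-inference-submission does earlier in this patch. A hedged sketch with a made-up input variable `CM_EXAMPLE_INPUT`:

```python
def preprocess(i):
    env = i['env']

    # Fail early with the CM error convention if a required input is missing
    if env.get('CM_EXAMPLE_INPUT', '') == '':  # hypothetical variable
        return {'return': 1, 'error': 'CM_EXAMPLE_INPUT is not set'}

    # Pass a value to run.sh / run.bat through the environment
    env['CM_RUN_CMD'] = 'echo processing ' + env['CM_EXAMPLE_INPUT']

    return {'return': 0}


def postprocess(i):
    return {'return': 0}
```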
diff --git a/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/_cm.yaml new file mode 100644 index 000000000..f6d9acd5e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/_cm.yaml @@ -0,0 +1,263 @@ +alias: process-mlperf-accuracy +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: MLPerf benchmark support +clean_files: [] +deps: +- names: + - python3 + - python + tags: get,python3 +- names: + - inference-src + - accuracy-check-src + tags: get,mlcommons,inference,src +input_mapping: + rerun: CM_RERUN + result_dir: CM_MLPERF_ACCURACY_RESULTS_DIR +new_state_keys: +- app_mlperf_inference_accuracy* +tags: +- run +- mlperf +- mlcommons +- accuracy +- mlc +- process +- process-accuracy +uid: 6e809013816b42ea +variations: + cnndm: + deps: + - tags: get,dataset,cnndm,_validation + - names: + - pip-package + - datasets + tags: get,generic-python-lib,_package.datasets + - names: + - pip-package + - rouge-score + tags: get,generic-python-lib,_package.rouge_score + - names: + - pip-package + - nltk + tags: get,generic-python-lib,_package.nltk + version_max: 3.8.1 + version_max_usable: 3.8.1 + - names: + - pip-package + - evaluate + tags: get,generic-python-lib,_package.evaluate + - names: + - pip-package + - absl-py + tags: get,generic-python-lib,_package.absl-py + - enable_if_env: + CM_MLPERF_IMPLEMENTATION: + - intel + env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_MLPERF_INFERENCE_INTEL_GPTJ_ACCURACY_FILE_WITH_PATH + extra_cache_tags: intel,accuracy,file,gptj,mlperf,inference + force_cache: true + tags: download,file,_url.https://raw.githubusercontent.com/mlcommons/inference_results_v4.0/main/closed/Intel/code/gptj-99/ITREX/evaluation.py + - enable_if_env: + CM_MLPERF_IMPLEMENTATION: + - intel + env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_FILE_WITH_PATH + extra_cache_tags: intel,dataset,file,gptj,mlperf,inference + force_cache: true + tags: download,file,_url.https://raw.githubusercontent.com/mlcommons/inference_results_v4.0/main/closed/Intel/code/gptj-99/ITREX/dataset.py + - enable_if_env: + CM_MLPERF_IMPLEMENTATION: + - intel + env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_ITEM_FILE_WITH_PATH + extra_cache_tags: intel,dataset,item,file,gptj,mlperf,inference + force_cache: true + tags: download,file,_url.https://raw.githubusercontent.com/mlcommons/inference_results_v4.0/main/closed/Intel/code/gptj-99/ITREX/item.py + - enable_if_env: + CM_MLPERF_IMPLEMENTATION: + - intel + tags: get,ml-model,gptj,_fp32,_pytorch + env: + CM_DATASET: cnndm + group: dataset + coco2014: + deps: + - names: + - coco2014-dataset + - coco2014-original + tags: get,dataset,coco2014,original + - tags: get,generic-python-lib,_package.ijson + - tags: get,generic-python-lib,_package.Pillow + - tags: get,generic-python-lib,_package.pandas + - tags: get,generic-python-lib,_package.torch + - tags: get,generic-python-lib,_package.open-clip-torch + - tags: get,generic-python-lib,_package.scipy + - names: + - pip-package + - numpy + tags: get,generic-python-lib,_package.numpy + env: + CM_DATASET: coco2014 + group: dataset + default-pycocotools: + default: true + group: coco-evaluation-tool + default-pycocotools,openimages: + deps: + - tags: get,generic-python-lib,_pycocotools + - names: + - for-pycocotools + - accuracy-check-src + tags: get,mlcommons,mlperf,inference,src,-_openimages-nvidia-pycocotools + float16: + env: + CM_ACCURACY_DTYPE: float16 + group: 
precision + float32: + default: 'true' + env: + CM_ACCURACY_DTYPE: float32 + group: precision + float64: + env: + CM_ACCURACY_DTYPE: float64 + group: precision + imagenet: + default: 'true' + deps: + - tags: get,dataset-aux,image-classification,imagenet-aux + - tags: get,generic-python-lib,_numpy + env: + CM_DATASET: imagenet + group: dataset + int16: + env: + CM_ACCURACY_DTYPE: int16 + group: precision + int32: + env: + CM_ACCURACY_DTYPE: int32 + group: precision + int64: + env: + CM_ACCURACY_DTYPE: int64 + group: precision + int8: + env: + CM_ACCURACY_DTYPE: int8 + group: precision + kits19: + deps: + - tags: get,dataset,preprocessed,medical-imaging,kits19 + - tags: get,generic-python-lib,_pandas + version_max: 1.53.0 + version_max_usable: 1.53.0 + env: + CM_DATASET: kits19 + group: dataset + librispeech: + deps: + - tags: get,dataset,preprocessed,speech-recognition,librispeech + env: + CM_DATASET: librispeech + group: dataset + nvidia-pycocotools: + group: coco-evaluation-tool + nvidia-pycocotools,openimages: + deps: + - tags: get,generic-python-lib,_nvidia-pycocotools + - names: + - for-pycocotools + - accuracy-check-src + tags: get,mlcommons,mlperf,inference,src,_openimages-nvidia-pycocotools + open-orca: + deps: + - names: + - openorca-dataset + tags: get,dataset,openorca,preprocessed + - names: + - llama2-model + skip_if_env: + CM_MLPERF_INFERENCE_API_SERVER: + - 'on' + tags: get,ml-model,llama2 + env: + CM_DATASET: openorca + group: dataset + openimages: + deps: + - enable_if_env: + CM_MLPERF_RUN_STYLE: + - valid + tags: get,dataset-aux,openimages,annotations + - names: + - openimages-original + skip_if_env: + CM_MLPERF_RUN_STYLE: + - valid + tags: get,dataset,openimages,original + - tags: get,generic-python-lib,_package.kiwisolver + env: + CM_DATASET: openimages + group: dataset + openorca-gsm8k-mbxp: + deps: + - names: + - pip-package + - rouge-score + tags: get,generic-python-lib,_package.rouge_score + - names: + - openorca-gsm8k-mbxp-combined + skip_if_env: + CM_MLPERF_DATASET_MIXTRAL_8X7B_DOWNLOAD_TO_HOST: + - 'yes' + tags: get,dataset-mixtral,openorca-mbxp-gsm8k-combined + - names: + - mixtral-8x7b-model + skip_if_env: + CM_MLPERF_MODEL_MIXTRAL_8X7B_DOWNLOAD_TO_HOST: + - 'yes' + tags: get,ml-model,mixtral + env: + CM_DATASET: openorca-gsm8k-mbxp-combined + group: dataset + squad: + add_deps_recursive: + inference-src: + tags: _deeplearningexamples + deps: + - tags: get,generic-python-lib,_boto3 + - tags: get,generic-python-lib,_package.transformers + - skip_if_env: + CM_DATASET_SQUAD_VAL_PATH: [] + tags: get,dataset,squad,language-processing + - skip_if_env: + CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH: + - 'on' + tags: get,dataset-aux,squad-vocab + - skip_if_env: + CM_MLPERF_DEVICE: + - cuda + - gpu + tags: get,generic-python-lib,_torch + - enable_if_env: + CM_MLPERF_DEVICE: + - cuda + - gpu + tags: get,generic-python-lib,_torch_cuda + - tags: get,generic-python-lib,_tokenization + env: + CM_DATASET: squad + group: dataset + terabyte: + deps: + - tags: get,generic-python-lib,_ujson + - tags: get,generic-python-lib,_scikit-learn + - tags: get,generic-python-lib,_numpy + env: + CM_DATASET: terabyte + group: dataset diff --git a/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/customize.py b/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/customize.py new file mode 100644 index 000000000..1e4363da6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/customize.py @@ -0,0 +1,245 @@ +# +# Copyright: 
https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import cmind as cm +import os + + +def preprocess(i): + + os_info = i['os_info'] + + xsep = ';' if os_info['platform'] == 'windows' else ':' + + env = i['env'] + results_dir = env.get("CM_MLPERF_ACCURACY_RESULTS_DIR", "") + + if results_dir == "": + print("Please set CM_MLPERF_ACCURACY_RESULTS_DIR") + return {'return': -1} + + # In fact, we expect only 1 command line here + run_cmds = [] + + if env.get('CM_MAX_EXAMPLES', '') != '' and env.get( + 'CM_MLPERF_RUN_STYLE', '') != 'valid': + max_examples_string = " --max_examples " + env['CM_MAX_EXAMPLES'] + else: + max_examples_string = "" + + results_dir_split = results_dir.split(xsep) + dataset = env['CM_DATASET'] + regenerate_accuracy_file = env.get( + 'CM_MLPERF_REGENERATE_ACCURACY_FILE', env.get( + 'CM_RERUN', False)) + + for result_dir in results_dir_split: + + out_file = os.path.join(result_dir, 'accuracy.txt') + + if os.path.exists(out_file) and ( + os.stat(out_file).st_size != 0) and not regenerate_accuracy_file: + continue + + if dataset == "openimages": + if env.get('CM_DATASET_PATH_ROOT', '') != '': + dataset_dir = env['CM_DATASET_PATH_ROOT'] + if 'DATASET_ANNOTATIONS_FILE_PATH' in env: + del (env['DATASET_ANNOTATIONS_FILE_PATH']) + else: + env['DATASET_ANNOTATIONS_FILE_PATH'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + dataset_dir = os.getcwd() # not used, just to keep the script happy + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " " + "'" + os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + "accuracy-openimages.py") + "'" + " --mlperf-accuracy-file " + "'" + os.path.join(result_dir, + "mlperf_log_accuracy.json") + "'" + " --openimages-dir " + "'" + dataset_dir + "'" + " --verbose > " + "'" + \ + out_file + "'" + + elif dataset == "imagenet": + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH'], "tools", + "accuracy-imagenet.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, + "mlperf_log_accuracy.json") + "' --imagenet-val-file '" + os.path.join(env['CM_DATASET_AUX_PATH'], + "val.txt") + "' --dtype " + env.get('CM_ACCURACY_DTYPE', "float32") + " > '" + out_file + "'" + + elif dataset == "squad": + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_BERT_PATH'], + "accuracy-squad.py") + "' --val_data '" + env['CM_DATASET_SQUAD_VAL_PATH'] + \ + "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --vocab_file '" + env['CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH'] + \ + "' --out_file '" + os.path.join(result_dir, 'predictions.json') + \ + "' --features_cache_file '" + os.path.join(env['CM_MLPERF_INFERENCE_BERT_PATH'], 'eval_features.pickle') + \ + "' --output_dtype " + env['CM_ACCURACY_DTYPE'] + env.get( + 'CM_OUTPUT_TRANSPOSED', '') + max_examples_string + " > '" + out_file + "'" + + elif dataset == "cnndm": + if env.get('CM_MLPERF_IMPLEMENTATION', '') == 'intel': + accuracy_checker_file = env['CM_MLPERF_INFERENCE_INTEL_GPTJ_ACCURACY_FILE_WITH_PATH'] + env['+PYTHONPATH'] = 
[os.path.dirname(env['CM_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_FILE_WITH_PATH'])] + [ + os.path.dirname(env['CM_MLPERF_INFERENCE_INTEL_GPTJ_DATASET_ITEM_FILE_WITH_PATH'])] + env['+PYTHONPATH'] + suffix_string = " --model-name-or-path '" + \ + env['GPTJ_CHECKPOINT_PATH'] + "'" + else: + accuracy_checker_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "gpt-j", + "evaluation.py") + suffix_string = " --dtype " + \ + env.get('CM_ACCURACY_DTYPE', "float32") + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --dataset-file '" + \ + env['CM_DATASET_EVAL_PATH'] + "'" + \ + suffix_string + " > '" + out_file + "'" + + elif dataset == "openorca": + accuracy_checker_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "llama2-70b", + "evaluate-accuracy.py") + if env.get('CM_VLLM_SERVER_MODEL_NAME', '') == '': + checkpoint_path = env['CM_ML_MODEL_LLAMA2_FILE_WITH_PATH'] + else: + checkpoint_path = env['CM_VLLM_SERVER_MODEL_NAME'] + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + checkpoint_path + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --dataset-file '" + env['CM_DATASET_PREPROCESSED_PATH'] + "'" + " --dtype " + env.get( + 'CM_ACCURACY_DTYPE', "int32") + " > '" + out_file + "'" + + elif dataset == "openorca-gsm8k-mbxp-combined": + accuracy_checker_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "language", "mixtral-8x7b", + "evaluate-accuracy.py") + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + accuracy_checker_file + "' --checkpoint-path '" + env['MIXTRAL_CHECKPOINT_PATH'] + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --dataset-file '" + env['CM_DATASET_MIXTRAL_PREPROCESSED_PATH'] + "'" + \ + " --dtype " + env.get('CM_ACCURACY_DTYPE', + "float32") + " > '" + out_file + "'" + + elif dataset == "coco2014": + env['+PYTHONPATH'] = [ + os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "tools"), + os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "tools", + "fid")] + extra_options = "" + + if env.get('CM_SDXL_STATISTICS_FILE_PATH', '') != '': + extra_options += f" --statistics-path '{env['CM_SDXL_STATISTICS_FILE_PATH']}' " + + if env.get('CM_SDXL_COMPLIANCE_IMAGES_PATH', '') != '': + extra_options += f" --compliance-images-path '{env['CM_SDXL_COMPLIANCE_IMAGES_PATH']}' " + else: + extra_options += f""" --compliance-images-path '{os.path.join(result_dir, "images")}' """ + + if env.get('CM_COCO2014_SAMPLE_ID_PATH', '') != '': + extra_options += f" --ids-path '{env['CM_COCO2014_SAMPLE_ID_PATH']}' " + + if env.get('CM_SDXL_ACCURACY_RUN_DEVICE', '') != '': + extra_options += f" --device '{env['CM_SDXL_ACCURACY_RUN_DEVICE']}' " + + # env['DATASET_ANNOTATIONS_FILE_PATH'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools", + "accuracy_coco.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --caption-path '" + os.path.join( + env['CM_MLPERF_INFERENCE_SOURCE'], + "text_to_image", + "coco2014", + "captions", + "captions_source.tsv") + "'" + extra_options + " > '" + out_file + "'" + + elif dataset == "kits19": + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_3DUNET_PATH'], + 
"accuracy_kits.py") + \ + "' --preprocessed_data_dir '" + env['CM_DATASET_PREPROCESSED_PATH'] +\ + "' --postprocessed_data_dir '" + result_dir +\ + "' --log_file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ + "' --output_dtype " + \ + env['CM_ACCURACY_DTYPE'] + " > '" + out_file + "'" + + elif dataset == "librispeech": + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_RNNT_PATH'], + "accuracy_eval.py") + \ + "' --dataset_dir '" + os.path.join(env['CM_DATASET_PREPROCESSED_PATH'], "..") +\ + "' --manifest '" + env['CM_DATASET_PREPROCESSED_JSON'] +\ + "' --log_dir '" + result_dir + \ + "' --output_dtype " + \ + env['CM_ACCURACY_DTYPE'] + " > '" + out_file + "'" + + elif dataset == "terabyte": + extra_options = "" + if env.get('CM_DLRM_V2_AGGREGATION_TRACE_FILE_PATH', '') != '': + extra_options += f" --aggregation-trace-file '{env['CM_DLRM_V2_AGGREGATION_TRACE_FILE_PATH']}' " + if env.get('CM_DLRM_V2_DAY23_FILE_PATH', '') != '': + extra_options += f" --day-23-file '{env['CM_DLRM_V2_DAY23_FILE_PATH']}' " + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_DLRM_V2_PATH'], "pytorch", "tools", + "accuracy-dlrm.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, + "mlperf_log_accuracy.json") + "'" + extra_options + \ + " --dtype " + env.get('CM_ACCURACY_DTYPE', + "float32") + " > '" + out_file + "'" + + else: + return {'return': 1, 'error': 'Unsupported dataset'} + + run_cmds.append(CMD) + + if os_info['platform'] == 'windows': + env['CM_RUN_CMDS'] = ( + '\n'.join(run_cmds)).replace( + "'", + '"').replace( + '>', + '^>') + else: + env['CM_RUN_CMDS'] = "??".join(run_cmds) + + return {'return': 0} + + +def postprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + + xsep = ';' if os_info['platform'] == 'windows' else ':' + + results_dir = env.get("CM_MLPERF_ACCURACY_RESULTS_DIR", "") + + results_dir_split = results_dir.split(xsep) + + for result_dir in results_dir_split: + accuracy_file = os.path.join(result_dir, "accuracy.txt") + + if os.path.exists(accuracy_file): + print('') + print('Accuracy file: {}'.format(accuracy_file)) + print('') + + x = '' + with open(accuracy_file, "r") as fp: + x = fp.read() + + if x != '': + print(x) + + # Trying to extract accuracy dict + for y in x.split('\n'): + if y.startswith('{') and y.endswith('}'): + + import json + + try: + z = json.loads(y) + state['app_mlperf_inference_accuracy'] = z + + break + except ValueError as e: + pass + + print('') + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/run.bat b/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/run.bat new file mode 100644 index 000000000..82705126d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/run.bat @@ -0,0 +1,8 @@ +echo Running command: +echo. +echo %CM_RUN_CMDS% +echo. + +%CM_RUN_CMDS% + +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/run.sh b/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/run.sh new file mode 100644 index 000000000..6268860cb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/process-mlperf-accuracy/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +IFS="??" read -r -a cmd_array <<< "$CM_RUN_CMDS" +for cmd in "${cmd_array[@]}" +do + echo "${cmd}" + eval ${cmd} + test $? 
-eq 0 || exit 1 +done diff --git a/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/README-extra.md new file mode 100644 index 000000000..e98cb6332 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/README-extra.md @@ -0,0 +1 @@ +Moved [here](https://github.com/ctuning/cm4research/blob/main/script/reproduce-neurips-paper-2022-arxiv-2204.09656/README-extra.md). diff --git a/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/README.md b/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/README.md new file mode 100644 index 000000000..fe238377d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-optimization/prune-bert-models](https://docs.mlcommons.org/cm4mlops/scripts/AI-ML-optimization/prune-bert-models) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/_cm.yaml new file mode 100644 index 000000000..0c9f63297 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/_cm.yaml @@ -0,0 +1,48 @@ +alias: prune-bert-models +automation_alias: script +automation_uid: 5b4e0237da074764 +category: AI/ML optimization +default_env: + CM_BERT_PRUNE_CONSTRAINT: '0.5' + CM_BERT_PRUNE_MODEL_NAME: bert-large-uncased + CM_BERT_PRUNE_TASK: squad + CM_MODEL_ZOO_STUB: bert-large-uncased +deps: +- tags: get,python3 +- tags: get,generic-python-lib,_numpy +- tags: get,generic-python-lib,_scipy +- tags: get,generic-python-lib,_cupy +- tags: get,generic-python-lib,_tqdm +- tags: get,generic-python-lib,_torch_cuda +- tags: get,generic-python-lib,_datasets +- tags: get,generic-python-lib,_transformers +- tags: get,generic-python-lib,_scikit-learn +- env: + CM_GIT_ENV_KEY: BERT_PRUNER_NEURIPS_2022 + tags: get,git,repo,_repo.https://github.com/cknowledge/retraining-free-pruning +- names: + - get-model + tags: get,ml-model,model,zoo,model-zoo,huggingface,_prune +input_mapping: + constraint: CM_BERT_PRUNE_CONSTRAINT + output_dir: CM_BERT_PRUNE_OUTPUT_DIR +tags: +- prune +- bert-models +- bert-prune +- prune-bert-models +uid: 76182d4896414216 +variations: + model.#: + adr: + get-model: + tags: _model-stub.# + env: + CM_BERT_PRUNE_MODEL_NAME: '#' + CM_MODEL_ZOO_STUB: '#' + path.#: + env: + CM_BERT_PRUNE_CKPT_PATH: '#' + task.#: + env: + CM_BERT_PRUNE_TASK: '#' diff --git a/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/customize.py b/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/customize.py new file mode 100644 index 000000000..824d78a37 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/customize.py @@ -0,0 +1,64 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: 
https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + ckpt_path = env.get('CM_BERT_PRUNE_CKPT_PATH', '') + if ckpt_path == '': + p = env['CM_ML_MODEL_FILE_WITH_PATH'] + x = os.listdir(p) + for y in x: + if y.startswith('models--'): + z = os.path.join(p, y) + if os.path.isdir(z): + z1 = os.path.join(z, 'snapshots') + if os.path.isdir(z1): + z2 = os.listdir(z1) + if len(z2) > 0: + ckpt_path = os.path.join(z1, z2[0]) + + env['CM_BERT_PRUNE_CKPT_PATH'] = ckpt_path + + out_dir = env.get('CM_BERT_PRUNE_OUTPUT_DIR', '') + if out_dir == '': + out_dir = os.path.join(os.getcwd(), 'pruned-model-output') + env['CM_BERT_PRUNE_OUTPUT_DIR'] = out_dir + + print('') + print( + 'Local CM cache path to the updated BERT pruner src from NeurIPS 2022: ' + + env['CM_GIT_REPO_BERT_PRUNER_NEURIPS_2022_CHECKOUT_PATH']) + + print('') + for k in ["CM_ML_MODEL_FILE_WITH_PATH", + "CM_BERT_PRUNE_CKPT_PATH", "CM_BERT_PRUNE_OUTPUT_DIR"]: + print('ENV["{}"]: {}'.format(k, env[k])) + + print('') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + print("Entered postprocess") + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/run.sh b/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/run.sh new file mode 100644 index 000000000..68c077968 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prune-bert-models/run.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +echo "====================================================================" +echo "Start pruning ..." +echo "" + +CM_TMP_CURRENT_SCRIPT_PATH=${CM_TMP_CURRENT_SCRIPT_PATH:-$PWD} + +time ${CM_PYTHON_BIN_WITH_PATH} \ + ${CM_GIT_REPO_BERT_PRUNER_NEURIPS_2022_CHECKOUT_PATH}/main.py \ + --model_name ${CM_BERT_PRUNE_MODEL_NAME} \ + --task_name ${CM_BERT_PRUNE_TASK} \ + --ckpt_dir ${CM_BERT_PRUNE_CKPT_PATH} \ + --constraint ${CM_BERT_PRUNE_CONSTRAINT} \ + --output_dir ${CM_BERT_PRUNE_OUTPUT_DIR} + +test $? -eq 0 || exit $? + +echo "====================================================================" diff --git a/cmx4mlops/cmx4mlops/repo/script/prune-docker/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/prune-docker/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prune-docker/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/prune-docker/README.md b/cmx4mlops/cmx4mlops/repo/script/prune-docker/README.md new file mode 100644 index 000000000..6dacfb6b2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prune-docker/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Docker-automation/prune-docker](https://docs.mlcommons.org/cm4mlops/scripts/Docker-automation/prune-docker) for the documentation of this CM script. 
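prune-bert-models/customize.py above locates a checkpoint by walking the Hugging Face cache layout (`models--<org>--<model>/snapshots/<revision>`). The same walk isolated as a standalone sketch, assuming only that layout:

```python
import os


def find_hf_snapshot(cache_dir: str) -> str:
    """Return the first models--*/snapshots/<revision> directory (sketch)."""
    for entry in os.listdir(cache_dir):
        if not entry.startswith('models--'):
            continue
        snapshots = os.path.join(cache_dir, entry, 'snapshots')
        if os.path.isdir(snapshots):
            revisions = os.listdir(snapshots)
            if revisions:
                return os.path.join(snapshots, revisions[0])
    return ''
```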
diff --git a/cmx4mlops/cmx4mlops/repo/script/prune-docker/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/prune-docker/_cm.yaml new file mode 100644 index 000000000..0d07b1983 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prune-docker/_cm.yaml @@ -0,0 +1,8 @@ +alias: prune-docker +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Docker automation +tags: +- prune +- docker +uid: 27ead88809bb4d4e diff --git a/cmx4mlops/cmx4mlops/repo/script/prune-docker/run.bat b/cmx4mlops/cmx4mlops/repo/script/prune-docker/run.bat new file mode 100644 index 000000000..980baad8e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prune-docker/run.bat @@ -0,0 +1 @@ +docker system prune -a --volumes diff --git a/cmx4mlops/cmx4mlops/repo/script/prune-docker/run.sh b/cmx4mlops/cmx4mlops/repo/script/prune-docker/run.sh new file mode 100644 index 000000000..eb849e376 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/prune-docker/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +docker system prune -a --volumes diff --git a/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/README.md b/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/README.md new file mode 100644 index 000000000..02349cb61 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Dashboard-automation/publish-results-to-dashboard](https://docs.mlcommons.org/cm4mlops/scripts/Dashboard-automation/publish-results-to-dashboard) for the documentation of this CM script. 
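`docker system prune -a --volumes` deletes all unused images, containers, networks and volumes, so the two one-line run scripts above are destructive by design. A cautious wrapper sketch, assuming only the Docker CLI shown above and the standard library:

```python
import subprocess

# Ask before wiping unused Docker state (sketch)
if input('Prune ALL unused Docker data, including volumes? [y/N] ').lower() == 'y':
    subprocess.run(['docker', 'system', 'prune', '-a', '--volumes', '--force'],
                   check=True)
```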
diff --git a/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/_cm.yaml new file mode 100644 index 000000000..8f4ab4eda --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/_cm.yaml @@ -0,0 +1,14 @@ +alias: publish-results-to-dashboard +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Dashboard automation +deps: +- names: + - python3 + - python + tags: get,python3 +- tags: get,generic-python-lib,_wandb +tags: +- publish-results +- dashboard +uid: 4af3a2d09f14412b diff --git a/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/code.py b/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/code.py new file mode 100644 index 000000000..2ce02a9df --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/code.py @@ -0,0 +1,105 @@ +# Developer: Grigori Fursin + +import os + + +def main(): + # For now, a quick prototype hardwired to "summary.json" from MLPerf. + # Later it needs to be cleaned up and made universal. + + print('') + print('Reading summary.json ...') + print('') + + import json + filename = os.environ.get('MLPERF_INFERENCE_SUBMISSION_SUMMARY', '') + if filename == '': + filename = 'summary' + filename += '.json' + + f = open(filename) + + results = json.load(f) + + f.close() + + print('=========================================================') + print('Sending results to W&B dashboard ...') + print('') + + import wandb + + env = os.environ + + dashboard_user = env.get('CM_MLPERF_DASHBOARD_WANDB_USER', '') + if dashboard_user == '': + dashboard_user = 'cmind' + + dashboard_project = env.get('CM_MLPERF_DASHBOARD_WANDB_PROJECT', '') + if dashboard_project == '': + dashboard_project = 'cm-mlperf-dse-testing' + + for k in results: + + result = results[k] + + organization = str(result.get('Organization', '')) + if organization == '': + organization = 'anonymous' + + label = organization + + system_name = str(result.get('SystemName', '')) + if system_name != '': + label += '(' + system_name + ')' + + qps = result.get('Result', 0.0) + # since v4.1, mlperf results return key:value pairs for accuracy.
We + # are taking only the first key:value here + result_acc = result.get('Accuracy') + accuracy = 0.0 + if result_acc: + acc_split = result_acc.split(":") + if len(acc_split) > 1: + accuracy = float(acc_split[1]) / 100 + + result['performance'] = qps + result['qps'] = qps + result['accuracy'] = accuracy + + # Check extra env variables + x = { + "lang": "CM_MLPERF_LANG", + "device": "CM_MLPERF_DEVICE", + "submitter": "CM_MLPERF_SUBMITTER", + "backend": "CM_MLPERF_BACKEND", + "model": "CM_MLPERF_MODEL", + "run_style": "CM_MLPERF_RUN_STYLE", + "rerun": "CM_RERUN", + "hw_name": "CM_HW_NAME", + "max_batchsize": "CM_MLPERF_LOADGEN_MAX_BATCHSIZE", + "num_threads": "CM_NUM_THREADS", + "scenario": "CM_MLPERF_LOADGEN_SCENARIO", + "test_query_count": "CM_TEST_QUERY_COUNT", + "run_checker": "CM_RUN_SUBMISSION_CHECKER", + "skip_truncation": "CM_SKIP_TRUNCATE_ACCURACY" + } + + for k in x: + env_key = x[k] + if os.environ.get(env_key, '') != '': + result['cm_misc_input_' + k] = os.environ[env_key] + + wandb.init(entity=dashboard_user, + project=dashboard_project, + name=label) + + wandb.log(result) + + wandb.finish() + + print('=========================================================') + + +if __name__ == '__main__': + main() diff --git a/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/run.bat b/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/run.bat new file mode 100644 index 000000000..37f249b0f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/run.bat @@ -0,0 +1,2 @@ +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/run.sh b/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/run.sh new file mode 100644 index 000000000..288833adb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/publish-results-to-dashboard/run.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# For now login to WANDB anonymously +wandb login --anonymously --relogin + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/pull-git-repo/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/pull-git-repo/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/pull-git-repo/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/pull-git-repo/README.md b/cmx4mlops/cmx4mlops/repo/script/pull-git-repo/README.md new file mode 100644 index 000000000..f8e9ac7b3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/pull-git-repo/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/pull-git-repo](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/pull-git-repo) for the documentation of this CM script. 
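A minimal usage sketch (editorial addition) for the `pull-git-repo` script below, assuming the `path` input that its `_cm.yaml` maps to `CM_GIT_CHECKOUT_PATH` (the checkout path is a placeholder); the script then runs `git pull --rebase` inside that directory:

```bash
# Sketch: rebase-pull an existing local checkout via the CM interface
cm run script --tags=pull,git,repo --path=/path/to/existing/checkout
```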
diff --git a/cmx4mlops/cmx4mlops/repo/script/pull-git-repo/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/pull-git-repo/_cm.yaml new file mode 100644 index 000000000..f6d85da88 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/pull-git-repo/_cm.yaml @@ -0,0 +1,17 @@ +alias: pull-git-repo +automation_alias: script +automation_uid: 5b4e0237da074764 +category: DevOps automation +default_env: {} +deps: +- tags: detect,os +input_mapping: + path: CM_GIT_CHECKOUT_PATH +new_env_keys: [] +tags: +- pull +- git +- repo +- repository +uid: c23132ed65c4421d +variations: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/pull-git-repo/customize.py b/cmx4mlops/cmx4mlops/repo/script/pull-git-repo/customize.py new file mode 100644 index 000000000..7f7633ec2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/pull-git-repo/customize.py @@ -0,0 +1,40 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + meta = i['meta'] + + if 'CM_GIT_CHECKOUT_PATH' not in env: + return {'return': 1, 'error': 'CM_GIT_CHECKOUT_PATH is not set'} + + env['CM_GIT_PULL_CMD'] = "git pull --rebase" + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/pull-git-repo/run.sh b/cmx4mlops/cmx4mlops/repo/script/pull-git-repo/run.sh new file mode 100644 index 000000000..db8612d56 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/pull-git-repo/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +CUR_DIR=$PWD +SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH} + +path=${CM_GIT_CHECKOUT_PATH} +echo "cd $path" + +cd $path +test $? -eq 0 || exit $? + +echo ${CM_GIT_PULL_CMD} +eval ${CM_GIT_PULL_CMD} +#don't fail if there are local changes +#test $? -eq 0 || exit $? + +cd $CUR_DIR diff --git a/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/README.md b/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/README.md new file mode 100644 index 000000000..c57e18090 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/push-csv-to-spreadsheet](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/push-csv-to-spreadsheet) for the documentation of this CM script. 
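A hedged usage sketch (editorial addition) for the `push-csv-to-spreadsheet` script below; the flags map to `CM_CSV_FILE_PATH`, `CM_GOOGLE_SHEET_NAME` and `CM_GOOGLE_SPREADSHEET_ID` via the `input_mapping` in its `_cm.yaml`, and the CSV path and spreadsheet ID are placeholders. Note that the underlying `google_api.py` also expects a local `credentials.json` for the OAuth flow:

```bash
# Sketch: push a CSV file to a Google spreadsheet
cm run script --tags=push,google-spreadsheet \
    --csv_file=/path/to/results.csv \
    --sheet_name=Sheet1 \
    --spreadsheet_id=<your-spreadsheet-id>
```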
diff --git a/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/_cm.yaml new file mode 100644 index 000000000..028275906 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/_cm.yaml @@ -0,0 +1,23 @@ +alias: push-csv-to-spreadsheet +automation_alias: script +automation_uid: 5b4e0237da074764 +category: DevOps automation +default_env: + CM_GOOGLE_SPREADSHEET_ID: 1gMHjXmFmwZR4-waPPyxy5Pc3VARqX3kKUWxkP97Xa6Y +deps: +- names: + - python3 + - python + tags: get,python3 +- tags: get,generic-python-lib,_google-api-python-client +- tags: get,generic-python-lib,_google-auth-oauthlib +input_mapping: + csv_file: CM_CSV_FILE_PATH + sheet_name: CM_GOOGLE_SHEET_NAME + spreadsheet_id: CM_GOOGLE_SPREADSHEET_ID +tags: +- push +- google-spreadsheet +- spreadsheet +- push-to-google-spreadsheet +uid: 5ec9e5fa7feb4fff diff --git a/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/customize.py b/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/customize.py new file mode 100644 index 000000000..5d4fa830b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/customize.py @@ -0,0 +1,28 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import cmind as cm +import os + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + meta = i['meta'] + automation = i['automation'] + + return {'return': 0} + + +def postprocess(i): + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/google_api.py b/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/google_api.py new file mode 100644 index 000000000..24926daed --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/google_api.py @@ -0,0 +1,60 @@ +from __future__ import print_function + +import os.path +import os +import csv +from google.auth.transport.requests import Request +from google.oauth2.credentials import Credentials +from google_auth_oauthlib.flow import InstalledAppFlow +from googleapiclient.discovery import build +from googleapiclient.errors import HttpError + +# If modifying these scopes, delete the file token.json. +SCOPES = ['https://www.googleapis.com/auth/spreadsheets'] + +# The ID of a sample document. +DOCUMENT_ID = os.environ['CM_GOOGLE_SPREADSHEET_ID'] + + +def main(): + """Shows basic usage of the Docs API. + Prints the title of a sample document. + """ + creds = None + # The file token.json stores the user's access and refresh tokens, and is + # created automatically when the authorization flow completes for the first + # time. + if os.path.exists('token.json'): + creds = Credentials.from_authorized_user_file('token.json', SCOPES) + # If there are no (valid) credentials available, let the user log in. 
+ if not creds or not creds.valid: + if creds and creds.expired and creds.refresh_token: + creds.refresh(Request()) + else: + flow = InstalledAppFlow.from_client_secrets_file( + 'credentials.json', SCOPES) + creds = flow.run_local_server(port=0) + # Save the credentials for the next run + with open('token.json', 'w') as token: + token.write(creds.to_json()) + + try: + service = build("sheets", "v4", credentials=creds) + sheet_name = os.environ.get('CM_GOOGLE_SHEET_NAME', 'Sheet1') + csv_file = os.environ['CM_CSV_FILE_PATH'] + + f = open(csv_file, "r") + values = [r for r in csv.reader(f)] + request = service.spreadsheets().values().update( + spreadsheetId=DOCUMENT_ID, + range=sheet_name, + valueInputOption="USER_ENTERED", + body={ + "values": values}).execute() + + except HttpError as err: + print(err) + + +if __name__ == '__main__': + main() diff --git a/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/run.sh b/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/run.sh new file mode 100644 index 000000000..5ba4257d5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/push-csv-to-spreadsheet/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/google_api.py diff --git a/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/README.md b/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/README.md new file mode 100644 index 000000000..5b5f83efd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/push-mlperf-inference-results-to-github](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/push-mlperf-inference-results-to-github) for the documentation of this CM script. 
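A usage sketch (editorial addition) for the `push-mlperf-inference-results-to-github` script below; the values are placeholders, and omitted inputs fall back to the defaults in its `_cm.yaml` (for example, the v4.0 submissions repository URL):

```bash
# Sketch: commit and push an MLPerf inference submission directory to GitHub
cm run script --tags=push,mlperf,inference,submission,github \
    --submission_dir=/path/to/submission \
    --commit_message="Added new results"
```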
diff --git a/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/_cm.yaml new file mode 100644 index 000000000..9efeb0e24 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/_cm.yaml @@ -0,0 +1,37 @@ +alias: push-mlperf-inference-results-to-github +automation_alias: script +automation_uid: 5b4e0237da074764 +category: MLPerf benchmark support +default_env: + CM_MLPERF_RESULTS_GIT_REPO_URL: https://github.com/mlcommons/mlperf_inference_submissions_v4.0 +deps: +- names: + - python3 + - python + tags: get,python3 +- tags: get,generic-sys-util,_rsync +- names: + - get-mlperf-submission-dir + skip_if_env: + CM_MLPERF_INFERENCE_SUBMISSION_DIR: + - 'on' + tags: get,mlperf,submission,dir +input_mapping: + branch: CM_GIT_BRANCH + commit_message: CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE + repo_branch: CM_GIT_BRANCH + repo_url: CM_MLPERF_RESULTS_GIT_REPO_URL + submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR +prehook_deps: +- names: + - get-git-repo + tags: get,git,repo +tags: +- push +- mlperf +- mlperf-inference-results +- publish-results +- inference +- submission +- github +uid: 36c2ffd5df5d453a diff --git a/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/customize.py b/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/customize.py new file mode 100644 index 000000000..0ea2d2ce8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/customize.py @@ -0,0 +1,50 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import cmind as cm +import os + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + meta = i['meta'] + automation = i['automation'] + + repo = env.get('CM_MLPERF_RESULTS_GIT_REPO_URL', '') + if repo.strip() == '': + return {'return': 1, 'error': 'Invalid GIT_REPO_URL for MLPERF results'} + + branch = env.get('CM_GIT_BRANCH', '') + if branch: + extra_tags_string = f",_branch.{branch}" + else: + extra_tags_string = "" + + r = automation.update_deps({'deps': meta['prehook_deps'], + 'update_deps': { + 'get-git-repo': { + 'tags': "_repo." + repo + extra_tags_string + } + } + }) + if r['return'] > 0: + return r + env['CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE'] = env.get( + 'CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE', 'Added new results') + + return {'return': 0} + + +def postprocess(i): + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/run.bat b/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/run.bat new file mode 100644 index 000000000..2052eb564 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/run.bat @@ -0,0 +1,31 @@ +@echo off + +REM Check if CM_GIT_REPO_CHECKOUT_PATH is set +if not defined CM_GIT_REPO_CHECKOUT_PATH ( + echo "Error: CM_GIT_REPO_CHECKOUT_PATH is not set." 
+ exit /b 1 +) + +cd /d "%CM_GIT_REPO_CHECKOUT_PATH%" +if %errorlevel% neq 0 ( + echo "Error: Failed to change directory to %CM_GIT_REPO_CHECKOUT_PATH%" + exit /b 1 +) + +git pull +git add * + +REM Check if the CM_MLPERF_INFERENCE_SUBMISSION_DIR variable is set +if defined CM_MLPERF_INFERENCE_SUBMISSION_DIR ( + robocopy "%CM_MLPERF_INFERENCE_SUBMISSION_DIR%" "%CM_GIT_REPO_CHECKOUT_PATH%" /E /COPYALL /DCOPY:DAT + git add * +) + +REM Check if the previous command was successful +if %errorlevel% neq 0 exit /b %errorlevel% + +git commit -a -m "%CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE%" +git push + +REM Check if the previous command was successful +if %errorlevel% neq 0 exit /b %errorlevel% diff --git a/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/run.sh b/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/run.sh new file mode 100644 index 000000000..1eb4f663e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/push-mlperf-inference-results-to-github/run.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# Check if CM_GIT_REPO_CHECKOUT_PATH is set +if [ -z "${CM_GIT_REPO_CHECKOUT_PATH}" ]; then + echo "Error: CM_GIT_REPO_CHECKOUT_PATH is not set." + exit 1 +fi + +cd "${CM_GIT_REPO_CHECKOUT_PATH}" +git pull +git add * +if [[ -n ${CM_MLPERF_INFERENCE_SUBMISSION_DIR} ]]; then + rsync -avz "${CM_MLPERF_INFERENCE_SUBMISSION_DIR}/" "${CM_GIT_REPO_CHECKOUT_PATH}/" + git add * +fi +test $? -eq 0 || exit $? + +git commit -a -m "${CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE}" +git push +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/README-extra.md new file mode 100644 index 000000000..e69de29bb diff --git a/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/README.md b/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/README.md new file mode 100644 index 000000000..0b58b0e9b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Remote-automation/remote-run-commands](https://docs.mlcommons.org/cm4mlops/scripts/Remote-automation/remote-run-commands) for the documentation of this CM script. 
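A usage sketch (editorial addition) for the `remote-run-commands` script below; the flags map to the `CM_SSH_*` variables via the `input_mapping` in its `_cm.yaml`, the host, user and key values are placeholders, and the trailing comma in `--run_cmds,=` is assumed to follow CM's comma-separated list convention so that `CM_SSH_RUN_COMMANDS` arrives as a list:

```bash
# Sketch: run a command on a remote host over SSH
cm run script --tags=remote,run,cmds \
    --host=192.168.1.100 --user=ubuntu \
    --ssh_key_file=$HOME/.ssh/id_rsa \
    --run_cmds,="uname -a"
```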
diff --git a/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/_cm.yaml new file mode 100644 index 000000000..dd49f650b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/_cm.yaml @@ -0,0 +1,28 @@ +alias: remote-run-commands +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Remote automation +default_env: + CM_SSH_CLIENT_REFRESH: '10' + CM_SSH_HOST: localhost + CM_SSH_KEY_FILE: $HOME/.ssh/id_rsa + CM_SSH_PORT: '22' + CM_SSH_USER: $USER +input_mapping: + client_refresh: CM_SSH_CLIENT_REFRESH + host: CM_SSH_HOST + password: CM_SSH_PASSWORD + port: CM_SSH_PORT + run_cmds: CM_SSH_RUN_COMMANDS + skip_host_verify: CM_SSH_SKIP_HOST_VERIFY + ssh_key_file: CM_SSH_KEY_FILE + user: CM_SSH_USER +tags: +- remote +- run +- cmds +- remote-run +- remote-run-cmds +- ssh-run +- ssh +uid: b71e24b03c9d49cd diff --git a/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/customize.py b/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/customize.py new file mode 100644 index 000000000..0c71225ab --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/customize.py @@ -0,0 +1,61 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + cmd_string = '' + + # pre_run_cmds = env.get('CM_SSH_PRE_RUN_CMDS', ['source $HOME/cm/bin/activate']) + pre_run_cmds = env.get('CM_SSH_PRE_RUN_CMDS', []) + + run_cmds = env.get('CM_SSH_RUN_COMMANDS', []) + + run_cmds = pre_run_cmds + run_cmds + + for i, cmd in enumerate(run_cmds): + if 'cm ' in cmd: + # cmd=cmd.replace(":", "=") + cmd = cmd.replace(";;", ",") + run_cmds[i] = cmd + + cmd_string += " ; ".join(run_cmds) + user = env.get('CM_SSH_USER') + password = env.get('CM_SSH_PASSWORD', None) + host = env.get('CM_SSH_HOST') + if password: + password_string = " -p " + password + else: + password_string = "" + cmd_extra = '' + + if env.get("CM_SSH_SKIP_HOST_VERIFY"): + cmd_extra += " -o StrictHostKeyChecking=no" + if env.get("CM_SSH_KEY_FILE"): + cmd_extra += " -i " + env.get("CM_SSH_KEY_FILE") + + ssh_command = "ssh " + user + "@" + host + \ + password_string + cmd_extra + " '" + cmd_string + "'" + env['CM_SSH_CMD'] = ssh_command + + return {'return': 0} + + +def postprocess(i): + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/run.bat b/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/run.bat new file mode 100644 index 000000000..648302ca7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/run.sh b/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/run.sh new file mode 100644 index 000000000..f9fac760b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/remote-run-commands/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash +cmd=$CM_SSH_CMD +echo $cmd +eval $cmd diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/COPYRIGHT.md 
b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/README.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/README.md new file mode 100644 index 000000000..6b86e491d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/README.md @@ -0,0 +1,42 @@ +# CM script to run and reproduce experiments + +Original repository: https://github.com/UofT-EcoSystem/Grape-MICRO56-Artifact/wiki#installation + +### Reusability using MLCommons CM automation language + +Install MLCommons CM using [this guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md). + +Install reusable MLCommons automations: + +```bash +cm pull repo mlcommons@ck +``` + +Install this repository with the CM interface for the reproduced experiments: +```bash +cm pull repo ctuning@cm4research +``` + +### Install dependencies + +```bash +cmr "reproduce project micro-2023 22 _install_deps" +cmr "reproduce project micro-2023 22 _install_deps_driver" +cmr "reproduce project micro-2023 22 _install_deps_cuda" +cmr "reproduce project micro-2023 22 _install_deps_pytorch" +cmr "reproduce project micro-2023 22 _install_deps_transformers" +``` + +Please reboot the machine after the above installation steps so that the GPU driver installation takes effect. This can be verified by the message `NVRM: loading customized kernel module from Grape` appearing in the output of `sudo dmesg`.
If the message does not appear, please repeat the command + +```bash +cmr "reproduce project micro-2023 22 _install_deps_driver" +``` + +### Run experiments + +```bash +cmr "reproduce project micro-2023 22 _run_figure13" +cmr "reproduce project micro-2023 22 _run_figure11" +cmr "reproduce project micro-2023 22 _run_figure12" +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/_cm.yaml new file mode 100644 index 000000000..8f309ca88 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/_cm.yaml @@ -0,0 +1,45 @@ +alias: reproduce-ieee-acm-micro2023-paper-22 +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: Reproducibility and artifact evaluation +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python +- tags: get,git,repo,_repo.https://github.com/UofT-EcoSystem/Grape-MICRO56-Artifact + env: + CM_GIT_ENV_KEY: 'GRAPE_MICRO56' + extra_cache_tags: micro56,artifact,ae,grape +script_name: run +tags: +- reproduce +- project +- paper +- m +- micro +- micro-2023 +- '2023' +- '22' +uid: e26c9ce3e7b84526 +variations: + install_deps: + script_name: install_deps + install_deps_driver: + script_name: install_deps_driver + install_deps_cuda: + script_name: install_deps_cuda + install_deps_pytorch: + script_name: install_deps_pytorch + install_deps_transformers: + script_name: install_deps_transformers + run: + script_name: run + run_figure11: + script_name: run_figure11 + run_figure12: + script_name: run_figure12 + run_figure13: + script_name: run_figure13 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/customize.py b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/customize.py new file mode 100644 index 000000000..89236cec9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/customize.py @@ -0,0 +1,35 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps.sh new file mode 100644 index 000000000..c9d37d0ba --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to GRAPE repo: ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH} + +echo "" + +. scripts/Installation/0-install_build_essentials.sh +test $? 
-eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_cuda.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_cuda.sh new file mode 100644 index 000000000..f3a345ec9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_cuda.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to GRAPE repo: ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH} + +echo "" + +. scripts/Installation/2-install_CUDA.sh +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_driver.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_driver.sh new file mode 100644 index 000000000..3e6d33783 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_driver.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to GRAPE repo: ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH} + +echo "" + +. scripts/Installation/1-install_NVIDIA_GPU_driver.sh +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_pytorch.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_pytorch.sh new file mode 100644 index 000000000..f961aaa00 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_pytorch.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to GRAPE repo: ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH} + +echo "" + +. scripts/Installation/3-build_PyTorch.sh +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_transformers.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_transformers.sh new file mode 100644 index 000000000..effe47e97 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/install_deps_transformers.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to GRAPE repo: ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH} + +echo "" + +echo "git submodule update --init submodules/transformers" +git submodule update --init submodules/transformers + +test $? 
-eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run.sh new file mode 100644 index 000000000..6b50d1b81 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to GRAPE repo: ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH} + +echo "" + +source scripts/Installation/activate + +. ${CM_TMP_CURRENT_SCRIPT_PATH}/run_figure13.sh +. ${CM_TMP_CURRENT_SCRIPT_PATH}/run_figure11.sh +. ${CM_TMP_CURRENT_SCRIPT_PATH}/run_figure12.sh + + +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run_figure11.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run_figure11.sh new file mode 100644 index 000000000..bf2c7b0fc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run_figure11.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to GRAPE repo: ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH} + +echo "" + +source scripts/Installation/activate + +./scripts/Experiment_Workflow/2-test_runtime_performance.sh --model=gpt2 +./scripts/Experiment_Workflow/2-test_runtime_performance.sh --model=gptj +./scripts/Experiment_Workflow/2-test_runtime_performance.sh --model=wav2vec2 + +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run_figure12.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run_figure12.sh new file mode 100644 index 000000000..1d9ea8027 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run_figure12.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to GRAPE repo: ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH} + +echo "" + +source scripts/Installation/activate + +./scripts/Experiment_Workflow/3-test_runtime_breakdown.sh + +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run_figure13.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run_figure13.sh new file mode 100644 index 000000000..6d2f05bf3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-22/run_figure13.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to GRAPE repo: ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_GRAPE_MICRO56_CHECKOUT_PATH} + +echo "" + +source scripts/Installation/activate + +./scripts/Experiment_Workflow/1-test_metadata_compression.sh + + +test $? 
-eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/README.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/README.md new file mode 100644 index 000000000..c0b235ba2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/README.md @@ -0,0 +1,61 @@ +# CM script to run and reproduce experiments + +Original repository: https://github.com/neel-patel-1/XFM_MICRO2023.git + +### Reusability using MLCommons CM automation language + +Install MLCommons CM using [this guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md). + +Install reusable MLCommons automations: + +```bash +cm pull repo mlcommons@ck +``` + +Install this repository with the CM interface for the reproduced experiments: +```bash +cm pull repo ctuning@cm4research +``` + +### Regenerate Figures via CM interface + +1) Install deps: +```bash +cmr "reproduce project micro-2023 xfm _install_deps" +``` + +2) Run experiments: + +```bash +cmr "reproduce project micro-2023 xfm _run" +``` + +3) Plot results: + +```bash +cmr "reproduce project micro-2023 xfm _plot" +``` + +You should find `XFM_Access_Distribution.png` and `results.csv` in the `results` folder of the current directory. + +### Regenerate SPEC Workloads Experiments via CM Interface + +* If the SPEC 2017 image hosted for artifact evaluation purposes is no longer available, provide the path to a local copy of SPEC: + +1) (Optional) Provide the path to a local SPEC2017 .iso file +```bash +# if local spec is available, run below to avoid fetching remote SPEC, otherwise skip this step +cmr "download file _url.https://spec2017iso.s3.us-east-2.amazonaws.com/cpu2017-1_0_5.iso" --local_path=/path/to/local/cpu2017-1_0_5.iso +``` + +2) Install deps: +```bash +cmr "reproduce project micro-2023 xfm _install_spec_deps" +``` + +3) Run: +```bash +cmr "reproduce project micro-2023 xfm _run_spec" +``` + +You should find `results.txt` in the `results` folder of the current directory.
\ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/_cm.yaml new file mode 100644 index 000000000..e2ed10c86 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/_cm.yaml @@ -0,0 +1,40 @@ +alias: reproduce-ieee-acm-micro2023-paper-28 +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: Reproducibility and artifact evaluation +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python +- tags: get,generic-python-lib,_pandas +- tags: get,generic-python-lib,_matplotlib +- tags: get,git,repo,_repo.https://github.com/neel-patel-1/XFM_MICRO2023 +- tags: download,file,url.https://spec2017iso.s3.us-east-2.amazonaws.com/cpu2017-1_0_5.iso + env: + CM_GIT_ENV_KEY: 'XFM' + extra_cache_tags: micro23,artifact,ae,xfm,spec +  force_cache: true +script_name: run +tags: +- reproduce +- project +- paper +- micro +- micro-2023 +- 28 +- xfm +uid: 72c44b58be0e4e16 +variations: + install_deps: + script_name: install_deps + plot: + script_name: plot + run: + script_name: run + install_spec_deps: + script_name: install_spec_deps.sh + run_spec: + script_name: run_spec.sh diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/customize.py b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/customize.py new file mode 100644 index 000000000..89236cec9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/customize.py @@ -0,0 +1,35 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/install_deps.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/install_deps.sh new file mode 100644 index 000000000..aba23e8d4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/install_deps.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to XFM repo: ${CM_GIT_REPO_XFM_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_XFM_CHECKOUT_PATH} + +echo "" + +# Done via _cm.yaml +#${CM_PYTHON_BIN_WITH_PATH} -m pip install pandas +#${CM_PYTHON_BIN_WITH_PATH} -m pip install matplotlib + +git submodule update --init --recursive . +test $? -eq 0 || exit 1 + +cd memory_channel_interleave_ratios +test $? -eq 0 || exit 1 + +./build_gzip.sh +test $? -eq 0 || exit 1 + +./fetch_corpus.sh +test $? 
-eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/install_spec_deps.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/install_spec_deps.sh new file mode 100644 index 000000000..46488b66b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/install_spec_deps.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +CUR_DIR=${PWD} +SPEC_EXP_ROOT=${CM_GIT_REPO_XFM_CHECKOUT_PATH}/spec_workload_experiment +SPEC_INSTALL=${CM_GIT_REPO_XFM_CHECKOUT_PATH}/spec_workload_experiment/spec +SPEC_MNT=${CM_GIT_REPO_XFM_CHECKOUT_PATH}/spec_workload_experiment/spec_mnt + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "" +echo "SPEC ISO PATH:${SPEC_ISO}" +echo "Installing to ${SPEC_INSTALL}" + +mkdir -p ${SPEC_MNT} +test $? -eq 0 || exit 1 + +mkdir -p ${SPEC_INSTALL} +test $? -eq 0 || exit 1 + +sudo mount -t iso9660 -o ro,exec,loop ${SPEC_ISO:-/path/to/cpu2017-1_0_5.iso} ${SPEC_MNT} +test $? -eq 0 || exit 1 + +cd ${SPEC_MNT} +./install.sh -d ${SPEC_INSTALL} +test $? -eq 0 || exit 1 + +cp ${CM_GIT_REPO_XFM_CHECKOUT_PATH}/spec_workload_experiment/config/default.cfg ${SPEC_INSTALL}/config +test $? -eq 0 || exit 1 + +cd ${SPEC_EXP_ROOT} +./fetch_corpus.sh +test $? -eq 0 || exit 1 +cd lzbench +make -j BUILD_STATIC=1 +test $? -eq 0 || exit 1 \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/plot.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/plot.sh new file mode 100644 index 000000000..c79e24720 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/plot.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to XFM repo: ${CM_GIT_REPO_XFM_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_XFM_CHECKOUT_PATH} + +echo "" + +cd xfm_access_model + +${CM_PYTHON_BIN_WITH_PATH} xfm_access_model.py +test $? -eq 0 || exit 1 + +mkdir -p ${CUR_DIR}/results/XFM_Access_Results + +cp XFM_Access_Distribution.png ${CUR_DIR}/results/XFM_Access_Results diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/run.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/run.sh new file mode 100644 index 000000000..49ca2bc6f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/run.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to XFM repo: ${CM_GIT_REPO_XFM_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_XFM_CHECKOUT_PATH} + +echo "" + +cd memory_channel_interleave_ratios + +./run.sh +test $? -eq 0 || exit 1 + +mkdir -p ${CUR_DIR}/results/memory_channel_interleave_ratios +test $? -eq 0 || exit 1 + +cp results.csv ${CUR_DIR}/results/memory_channel_interleave_ratios +test $?
-eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/run_spec.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/run_spec.sh new file mode 100644 index 000000000..5de27e232 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-28/run_spec.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to XFM repo's SPEC2017 directory: ${CM_GIT_REPO_XFM_CHECKOUT_PATH}/spec_workload_experiment" +cd ${CM_GIT_REPO_XFM_CHECKOUT_PATH}/spec_workload_experiment + +./run.sh +test $? -eq 0 || exit 1 + +echo "" + +mkdir -p ${CUR_DIR}/results/spec +test $? -eq 0 || exit 1 + +./parse.sh | tee ${CUR_DIR}/results/spec/results.txt +test $? -eq 0 || exit 1 + + diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/README.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/README.md new file mode 100644 index 000000000..42d9809e9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/README.md @@ -0,0 +1,74 @@ +# CM script to run and reproduce experiments + +Original repository: https://github.com/filipmazurek/spa-artifact + +### Reusability using MLCommons CM automation language + +Install MLCommons CM using [this guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md). + +Install reusable MLCommons automations: + +```bash +cm pull repo mlcommons@ck +``` + +Install this repository with the CM interface for the reproduced experiments: +```bash +cm pull repo ctuning@cm4research +``` + +### Set up and start Docker container + +```bash +cmr "reproduce project m 2023 33 _install_deps" +``` + +You should be within the Docker container now. + +The next step is not yet fully automated by CM, so you need to set up the Conda environment manually: + +### Set up Conda + +```bash +cd /shared/ +bash ./in-docker-bash-scripts/set-up-conda.sh + +# Use conda with the bash shell +eval "$(/root/miniconda3/bin/conda shell.bash hook)" + +conda activate spa +``` + +### Install CM inside Conda to continue using the CM interface + +```bash +python3 -m pip install cmind +cm pull repo mlcommons@ck +cm pull repo ctuning@cm4research +``` + +### Download Ubuntu Image and Kernel + +```bash +cmr "reproduce project m 2023 33 _install_deps_kernel" +``` + +### Copy gem5 PARSEC Binaries + +```bash +cmr "reproduce project m 2023 33 _install_deps_gem5" +``` + +### Run experiments using gem5 + +```bash +cmr "reproduce project m 2023 33 _run" +``` + +### Collect data and reproduce results + +```bash +cmr "reproduce project m 2023 33 _plot" +``` + +All figures should be available in `/shared/paper-figures/`.
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/_cm.yaml new file mode 100644 index 000000000..4db4f4539 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/_cm.yaml @@ -0,0 +1,48 @@ +alias: reproduce-ieee-acm-micro2023-paper-33 +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: Reproducibility and artifact evaluation +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python +- tags: get,git,repo,_repo.https://github.com/filipmazurek/spa-artifact + env: + CM_GIT_ENV_KEY: 'SPA_ARTIFACT' + extra_cache_tags: micro23,artifact,ae,spa_artifact + skip_if_env: + CM_RUN_INSIDE_DOCKER: + - yes +script_name: run +tags: +- reproduce
- project
- paper
- m
- micro
- micro-2023
- '2023'
- '33'
+uid: 5dad99d41c0b422b +variations: + install_deps: + script_name: install_deps + install_deps_kernel: + script_name: install_deps_kernel + env: + CM_RUN_INSIDE_DOCKER: yes + install_deps_gem5: + script_name: install_deps_gem5 + env: + CM_RUN_INSIDE_DOCKER: yes + plot: + script_name: plot + env: + CM_RUN_INSIDE_DOCKER: yes + run: + script_name: run + env: + CM_RUN_INSIDE_DOCKER: yes diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/customize.py b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/customize.py new file mode 100644 index 000000000..89236cec9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/customize.py @@ -0,0 +1,35 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/install_deps.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/install_deps.sh new file mode 100644 index 000000000..1fa6f8b86 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/install_deps.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to SPA repo: ${CM_GIT_REPO_SPA_ARTIFACT_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_SPA_ARTIFACT_CHECKOUT_PATH} + +echo "" + +bash ./artifact-bash-scripts/set-up-docker.sh +test $?
-eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/install_deps_gem5.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/install_deps_gem5.sh new file mode 100644 index 000000000..667f6a768 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/install_deps_gem5.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +cd /shared/ +bash ./in-docker-bash-scripts/copy-parsec-binaries.sh + +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/install_deps_kernel.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/install_deps_kernel.sh new file mode 100644 index 000000000..973589a92 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/install_deps_kernel.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +cd /shared/ +bash ./in-docker-bash-scripts/download-disk.sh + +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/plot.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/plot.sh new file mode 100644 index 000000000..89c33b485 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/plot.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "" + +cd /shared/python-runners/ +${CM_PYTHON_BIN_WITH_PATH} convert-gem5-results-to-csv.py + +test $? -eq 0 || exit 1 + +cd /shared/paper-figures/ + +${CM_PYTHON_BIN_WITH_PATH} figure-1.py +${CM_PYTHON_BIN_WITH_PATH} figure-2.py +${CM_PYTHON_BIN_WITH_PATH} figure-4.py +${CM_PYTHON_BIN_WITH_PATH} figure-5.py +${CM_PYTHON_BIN_WITH_PATH} figure-6_7.py +${CM_PYTHON_BIN_WITH_PATH} figure-8_9.py +${CM_PYTHON_BIN_WITH_PATH} figure-10_11.py +${CM_PYTHON_BIN_WITH_PATH} figure-12.py +${CM_PYTHON_BIN_WITH_PATH} figure-13.py + +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/run.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/run.sh new file mode 100644 index 000000000..8e17e4544 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-33/run.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "" + +cd /shared/python-runners/ + +chmod 777 /shared/gem5/build/X86/gem5-mesi.fast +${CM_PYTHON_BIN_WITH_PATH} meta-runner.py + +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/README.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/README.md new file mode 100644 index 000000000..34ea8ce60 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/README.md @@ -0,0 +1,50 @@ +# CM script to run and reproduce experiments + +Original repository: https://github.com/HieronZhang/G10-Artifact + + +### Reusability using MLCommons CM automation language + +Install MLCommons CM using [this guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md). + +Install reusable MLCommons automations: + +```bash +cm pull repo mlcommons@ck +``` + +Install this repository with the CM interface for the reproduced experiments: +```bash +cm pull repo ctuning@cm4research +``` + +### Install Python virtual environment via CM + +```bash +cm run script "install python-venv" --name=reproducibility +export CM_SCRIPT_EXTRA_CMD="--adr.python.name=reproducibility" +``` + +### Run G10 via CM interface + +Perform the following steps to evaluate the G10 artifact with the MLCommons CM automation language: + +1) This command will install all the dependencies for G10 and requires sudo: + +```bash +cmr "reproduce project micro-2023 G10 _install_deps" +``` + +2) This command will prepare and run all experiments: + +```bash +cmr "reproduce project micro-2023 G10 _run" --max_process_num=[nthreads] +``` + +- The variable `max_process_num` is the maximum allowed number of parallel experiments in the script. Note that users need to specify `max_process_num` based on their machine's main memory capacity: each experiment process needs a peak of about 28.5 GB of memory. (We recommend reserving 30 GB per process to ensure that the program won't crash; for example, if your machine has 128 GB of main memory, `max_process_num` can be set to 4.) + +3) If the previous command succeeded, this command will generate plots to help you validate the results from the paper: + +```bash +cmr "reproduce project micro-2023 G10 _plot" +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/_cm.yaml new file mode 100644 index 000000000..a7de67b4e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/_cm.yaml @@ -0,0 +1,36 @@ +alias: reproduce-ieee-acm-micro2023-paper-38 +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: Reproducibility and artifact evaluation +default_env: + max_process_num: 1 +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python +- tags: get,git,repo,_repo.https://github.com/HieronZhang/G10-Artifact.git + env: + CM_GIT_ENV_KEY: 'G10' + extra_cache_tags: micro23,artifact,ae,G10 +input_mapping: + max_process_num: max_process_num +script_name: run +tags: +- reproduce
- project
- paper
- micro
- micro-2023
- g10
- G10
+uid: b6ec80696a364ff4 +variations: + install_deps: + script_name: install_deps + plot: + script_name: plot + run: + script_name: run diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/install_deps.bat b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/install_deps.bat new file mode 100644 index 000000000..47f7e7ce2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/install_deps.bat @@ -0,0 +1,18 @@ +@echo off + +set CUR_DIR=%cd% + +echo.
+echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( + + echo. + echo Installing requirements.txt ... + echo. + + %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% +) diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/install_deps.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/install_deps.sh new file mode 100644 index 000000000..02b1446fc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/install_deps.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +sudo apt-get update +sudo apt install flex bison tmux python3-pip + +${CM_PYTHON_BIN_WITH_PATH} -m pip install matplotlib networkx pandas PyPDF2 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/plot.bat b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/plot.bat new file mode 100644 index 000000000..7e786771a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/plot.bat @@ -0,0 +1,12 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +rem echo. +rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/plot.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/plot.sh new file mode 100644 index 000000000..6058cb5a3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/plot.sh @@ -0,0 +1,83 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to G10 repo: ${CM_GIT_REPO_G10_CHECKOUT_PATH}" +cd "${CM_GIT_REPO_G10_CHECKOUT_PATH}" + +cd src/resources + +# Collect all the numbers, store it in raw_output/data.json +${CM_PYTHON_BIN_WITH_PATH} gatherKernelInfo.py + +# Gather data for figure 11 +${CM_PYTHON_BIN_WITH_PATH} figureDrawingDataPrepOverallPerformance.py # The gathered data is stored in figure_drawing/overall_performance + +# Gather data for figure 12 +${CM_PYTHON_BIN_WITH_PATH} figureDrawingDataPrepBreakdown.py # The gathered data is stored in figure_drawing/overall_breakdown + +# Gather data for figure 13 +./figureDrawingDataPrepKernelCDF.sh # The gathered data is stored in figure_drawing/overall_slowdown_cdf + +# Gather data for figure 14 +${CM_PYTHON_BIN_WITH_PATH} figureDrawingDataPrepTraffic.py # The gathered data is stored in figure_drawing/overall_traffic + +# Gather data for figure 15 +${CM_PYTHON_BIN_WITH_PATH} figureDrawingDataPrep.py # The gathered data is stored in figure_drawing/overall_batchsize + +# Gather data for figure 16 +${CM_PYTHON_BIN_WITH_PATH} figureDrawingDataPrepCPUsensitivity.py # The gathered data is stored in figure_drawing/sensitivity_cpumem + +# Gather data for figure 17 +${CM_PYTHON_BIN_WITH_PATH} figureDrawingDataPrepCPUSensitivityCombined.py # The gathered data is stored in figure_drawing/sensitivity_cpumem_combined + +# Gather data for figure 18 +${CM_PYTHON_BIN_WITH_PATH} 
figureDrawingDataPrepSSD.py # The gathered data is stored in figure_drawing/sensitivity_ssdbw + +# Gather data for figure 19 +${CM_PYTHON_BIN_WITH_PATH} figureDrawingDataPrepVariation.py # The gathered data is stored in figure_drawing/sensitivity_variation + +cd figure_drawing + +# Plot figures for Figure 2-4, and Figure 20-21 (Appendix) + +${CM_PYTHON_BIN_WITH_PATH} plot_mem_consumption.py # Figure 2 is output/dnn_memconsumption.pdf + +${CM_PYTHON_BIN_WITH_PATH} plot_tensor_time_cdf.py # Figure 3 is output/tensor_time_cdf.pdf + +${CM_PYTHON_BIN_WITH_PATH} plot_tensor_period_distribution.py # Figure 4 is output/tensor_periods_distribution.pdf + +${CM_PYTHON_BIN_WITH_PATH} plot_detail_mem_breakdown_live.py # Figure 20 is output/dnn_mem_consumption_breakdown_live.pdf + +${CM_PYTHON_BIN_WITH_PATH} plot_detail_mem_breakdown_active.py # Figure 21 is output/dnn_mem_consumption_breakdown_active.pdf + +# Draw Figure 11 +${CM_PYTHON_BIN_WITH_PATH} overallPerf.py # Figure 11 is output/OverallPerfNew.pdf + +# Draw Figure 12 +${CM_PYTHON_BIN_WITH_PATH} overallBreakdown.py # Figure 12 is output/Breakdown.pdf + +# Draw Figure 13 +${CM_PYTHON_BIN_WITH_PATH} overallSlowdownCDF.py # Figure 13 is output/KernelTimeCDF.pdf + +# Draw Figure 14 +${CM_PYTHON_BIN_WITH_PATH} overallTraffic.py # Figure 14 is output/OverallTraffic.pdf + +# Draw Figure 15 +${CM_PYTHON_BIN_WITH_PATH} overallBatchSize.py # Figure 15 is output/OverallPerfBatchSize.pdf + +# Draw Figure 16 +${CM_PYTHON_BIN_WITH_PATH} sensitivityCPUMem.py # Figure 16 is output/OverallPerfCPUMem.pdf + +# Draw Figure 17 +${CM_PYTHON_BIN_WITH_PATH} sensitivityCPUMemCombined.py # Figure 17 is output/OverallPerfCPUMemCombined.pdf + +# Draw Figure 18 +${CM_PYTHON_BIN_WITH_PATH} sensitivitySSDbw.py # Figure 18 is output/OverallPerfSSDBW.pdf + +# Draw Figure 19 +${CM_PYTHON_BIN_WITH_PATH} SensitivityKernelVariation.py # Figure 19 is output/SensitivityVariation.pdf diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/run.bat b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/run.bat new file mode 100644 index 000000000..6c1274ce6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/run.bat @@ -0,0 +1,12 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +echo. +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/run.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/run.sh new file mode 100644 index 000000000..6475bf30f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-38/run.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +if [ -z "$max_process_num" ]; then + printf "\033[0;31m<--max_process_num> is not specified. 
Please specify it using --max_process_num=[nthreads]\033[0m\n"
+exit 1
+fi
+echo "Max number of processes: ${max_process_num}"
+
+echo "Changing to G10 repo: ${CM_GIT_REPO_G10_CHECKOUT_PATH}"
+cd "${CM_GIT_REPO_G10_CHECKOUT_PATH}"
+
+cd src
+make clean
+make -j"$(nproc)"
+
+cd resources
+${CM_PYTHON_BIN_WITH_PATH} genconfigs.py
+
+tmux kill-server > /dev/null 2> /dev/null
+
+# First run experiments for figure 11-14
+./run.sh -p "(BERT\/256|VIT\/1280|Inceptionv3\/1536|ResNet152\/1280|SENet154\/1024)-sim_(deepUM|prefetch_lru|FlashNeuron|G10GDSSSD|G10GDSFULL|lru)\.config" -dr -j $max_process_num
+# The time for running this is about 104m33.975s (for max_process_num=6)
+
+# Then run experiments for figure 15
+./run.sh -p "(BERT\/(128|256|512|768|1024)|VIT\/(256|512|768|1024|1280)|Inceptionv3\/(512|768|1024|1280|1536|1792)|ResNet152\/(256|512|768|1024|1280)|SENet154\/(256|512|768|1024))-sim_(deepUM|prefetch_lru|FlashNeuron|lru)\.config" -dr -j $max_process_num
+# The time for running this is about 155m11.104s (for max_process_num=6)
+
+# Then run experiments for figure 16
+./run.sh -p "(BERT\/(256|384|512|640)|VIT\/(768|1024|1280|1536)|Inceptionv3\/(512|1024|1280|1536)|ResNet152\/(768|1024|1280|1536)|SENet154\/(256|512|768|1024))-sim_prefetch_lru(-cpu(0|16|32|64|96|192|256))?\.config" -dr -j $max_process_num
+# The time for running this is about 406m30.954s (for max_process_num=6)
+
+# Then run experiments for figure 17
+./run.sh -p "(VIT\/1024|Inceptionv3\/1280)-sim_(deepUM|prefetch_lru|FlashNeuron)-cpu(0|16|32|64|256)\.config" -dr -j $max_process_num
+# The time for running this is about 24m8.144s (for max_process_num=6)
+
+# Then run experiments for figure 18
+./run.sh -p "(BERT\/512|VIT\/1280|Inceptionv3\/1536|ResNet152\/1280|SENet154\/1024)-sim_(deepUM|prefetch_lru|FlashNeuron|lru)-ssd(6_4|12_8|19_2|25_6|32)-.*\.config" -dr -j $max_process_num
+# The time for running this is about 354m40.747s (for max_process_num=6)
+
+# Then run experiments for figure 19
+./run.sh -p "(BERT\/256|VIT\/1280|Inceptionv3\/1536|ResNet152\/1280|SENet154\/1024)-sim_prefetch_lru-var0_(05|10|15|20|25)\.config" -dr -j $max_process_num
+# The time for running this is about 124m17.909s (for max_process_num=6)
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/README.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/README.md
new file mode 100644
index 000000000..637717712
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/README.md
@@ -0,0 +1,36 @@
+# CM script to run and reproduce experiments
+
+## Reusability using MLCommons CM automation language
+
+Install MLCommons CM using [this guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md).
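+
+For reference, on most Linux systems the guide's installation step reduces to installing the `cmind` package from PyPI, which provides the `cm` and `cmr` commands used below (a minimal sketch, assuming Python 3 and pip are already available):
+
+```bash
+python3 -m pip install cmind
+```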
+ +Install this repository with CM interface for reproduced experiments: + +```bash +cm pull repo ctuning@cm4research +``` + +## Install Python virtual environment via CM + +```bash +cm run script "install python-venv" --name=reproducibility +export CM_SCRIPT_EXTRA_CMD="--adr.python.name=reproducibility" +``` + +## Install dependencies + +```bash +cmr "reproduce paper m2023 5 _install_deps" +``` + +## Run and create graphs + +```bash +cmr "reproduce paper m2023 5" +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/_cm.yaml new file mode 100644 index 000000000..65a520d01 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/_cm.yaml @@ -0,0 +1,20 @@ +alias: reproduce-ieee-acm-micro2023-paper-5 +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: Reproducibility and artifact evaluation +tags: +- reproduce +- paper +- project +- micro +- micro-2023 +- m2023 +- '5' +uid: e3a42d0dc64b4f8f +variations: + install_deps: + script_name: install_deps + run: + script_name: run +versions: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/customize.py b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/customize.py new file mode 100644 index 000000000..89236cec9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/customize.py @@ -0,0 +1,35 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/install_deps.bat b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/install_deps.bat new file mode 100644 index 000000000..834ec600d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/install_deps.bat @@ -0,0 +1,4 @@ +rem native script + +echo "Windows is not supported yet" +exit /b 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/install_deps.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/install_deps.sh new file mode 100644 index 000000000..322d4671b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/install_deps.sh @@ -0,0 +1,24 @@ +echo "================== Install Docker container (you can skip if already installed)==================" + +sudo apt-get update +sudo apt-get -y install \ + apt-transport-https \ + ca-certificates \ + curl \ + gnupg \ + lsb-release \ + tar + +# Add Docker’s official GPG key +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg + +echo \ + "deb [arch=$(dpkg --print-architecture) 
signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+
+sudo apt-get update
+sudo apt-get -y install docker-ce docker-ce-cli containerd.io
+
+sudo usermod -aG docker $USER
+
+su - $USER
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/main.py b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/main.py
new file mode 100644
index 000000000..caa499bf0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/main.py
@@ -0,0 +1,10 @@
+import os
+
+if __name__ == "__main__":
+
+    print('')
+    print('Main script:')
+    print('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT', '')))
+    print('')
+
+    exit(0)
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/run.bat b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/run.bat
new file mode 100644
index 000000000..834ec600d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/run.bat
@@ -0,0 +1,4 @@
+rem native script
+
+echo "Windows is not supported yet"
+exit /b 1
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/run.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/run.sh
new file mode 100644
index 000000000..071e755eb
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-5/run.sh
@@ -0,0 +1,41 @@
+echo "====================== Artifacts Evaluation for MICRO23 paper=========================="
+echo "Sparse-DySta: Sparsity-Aware Dynamic and Static Scheduling for Sparse Multi-DNN Workloads"
+container="docker"
+
+echo "================== Run a container test to make sure container works =================="
+${container} run docker.io/hello-world
+
+echo "====================================================================================="
+
+echo "================== Pulling the Docker image to run the experiments =================="
+${container} pull hxfan/spar-dysta-micro23:ae
+
+echo "================== Creating Container to run the experiments =================="
+sudo ${container} run -it -d --name spar-dysta --gpus all hxfan/spar-dysta-micro23:ae /bin/bash # Create container
+
+
+echo "================== Generate Figure-12, Attention =================="
+sudo ${container} exec --workdir /workspace/dysta-sparse/dysta_scheduler spar-dysta script/attnn/dysta_comparison_sanger_tradeoff_analysis.sh
+${container} cp -r spar-dysta:/workspace/dysta-sparse/dysta_scheduler/Sanger_Tradeoff_slo10.0.pdf .
+echo "================== Generate Figure-12, CNN =================="
+sudo ${container} exec --workdir /workspace/dysta-sparse/dysta_scheduler spar-dysta script/cnn/dysta_comparison_eyerissv2_tradeoff_analysis.sh
+${container} cp -r spar-dysta:/workspace/dysta-sparse/dysta_scheduler/EyerissV2_Tradeoff_slo10.0.pdf .
+
+echo "================== Generate Figure-13, Attention =================="
+sudo ${container} exec --workdir /workspace/dysta-sparse/dysta_scheduler spar-dysta script/attnn/effect_sparsity_sanger.sh
+${container} cp -r spar-dysta:/workspace/dysta-sparse/dysta_scheduler/Sanger_Sparsity_Effect30_sample1000_across_slo10.0_prema.pdf .
+echo "================== Generate Figure-13 CNN ==================" +sudo ${container} exec --workdir /workspace/dysta-sparse/dysta_scheduler spar-dysta script/cnn/effect_sparsity_eyerissv2.sh +${container} cp -r spar-dysta:/workspace/dysta-sparse/dysta_scheduler/EyerissV2_Sparsity_Effect3_sample1000_across_slo10.0_prema.pdf . + + +echo "================== Generate Table5 & Figure-14, Attention ==================" +sudo ${container} exec --workdir /workspace/dysta-sparse/dysta_scheduler spar-dysta script/attnn/dysta_comparison_sanger_across_slo.sh +${container} cp -r spar-dysta:/workspace/dysta-sparse/dysta_scheduler/Sanger_Metrics_rate30_sample1000_across_slo.pdf . +${container} cp -r spar-dysta:/workspace/dysta-sparse/dysta_scheduler/Sanger_Metrics_rate40_sample1000_across_slo.pdf . +echo "================== Generate Table5 & Figure-14 CNN ==================" +sudo ${container} exec --workdir /workspace/dysta-sparse/dysta_scheduler spar-dysta script/cnn/dysta_comparison_eyerissv2_across_slo.sh +${container} cp -r spar-dysta:/workspace/dysta-sparse/dysta_scheduler/EyerissV2_Metrics_rate3_sample1000_across_slo.pdf . +${container} cp -r spar-dysta:/workspace/dysta-sparse/dysta_scheduler/EyerissV2_Metrics_rate4_sample1000_across_slo.pdf . + +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/.gitignore b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/.gitignore new file mode 100644 index 000000000..1377554eb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/.gitignore @@ -0,0 +1 @@ +*.swp diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/README.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/README.md new file mode 100644 index 000000000..c0f9d185c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/README.md @@ -0,0 +1,74 @@ +# CM script to run and reproduce experiments + +Original repository: [https://github.com/FPSG-UIUC/micro23-teaal-artifact](https://github.com/FPSG-UIUC/micro23-teaal-artifact) + +## Reusability using MLCommons CM automation language + +Install MLCommmons CM using [this guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md). + +Install this repository with CM interface for reproduced experiments: + +```bash +cm pull repo ctuning@cm4research +``` + +## Install Python virtual environment via CM + +```bash +cm run script "install python-venv" --name=reproducibility +export CM_SCRIPT_EXTRA_CMD="--adr.python.name=reproducibility" +``` + +## Run TeAAL via the CM interface + +To install dependencies, run: + +```bash +cmr "reproduce paper m 2023 8 _install_deps" +``` + +Note that the install script makes its best guess for the correct UID and GID +for the container to be using (the current user's UID and GID). If you would +like to change the UID and/or GID of the container, you can do so in the +artifact repository `/path/to//repo/docker-compose.yaml`. 
+Instructions for finding this repository are below. + +To check that the environment is correctly set up and evaluate each accelerator +configuration on a small example, run: + +```bash +cmr "reproduce paper m 2023 8 _check" +``` + +To run the real experiments, run: + +```bash +cmr "reproduce paper m 2023 8 _run" +``` + +To plot the results of the real experiments, run +```bash +cmr "reproduce paper m 2023 8 _plot" +``` + +The plots will be stored in the artifact repository at `/path/to//repo/data/plots`. Instructions for finding this repository are below. + +To plot pregenerated results (e.g., if you don't want to run the experiments +yourself), run: + +```bash +cmr "reproduce paper m 2023 8 _plot_pregenerated" +``` + +### Finding the Artifact Repository + +You can also find this directory via CM as follows: +```bash +cm show cache --tags=git,artifact,fpsg,teaal +``` +or +```bash +cm find cache --tags=git,artifact,fpsg,teaal +``` + diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/_cm.yaml new file mode 100644 index 000000000..79cdc1fa3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/_cm.yaml @@ -0,0 +1,40 @@ +alias: reproduce-ieee-acm-micro2023-paper-8 +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: Reproducibility and artifact evaluation +default_env: + CM_EXPERIMENT: '1' +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python +- tags: get,git,repo,_repo.https://github.com/FPSG-UIUC/micro23-teaal-artifact + env: + CM_GIT_ENV_KEY: 'FPSG_UIUC_TEAAL' + extra_cache_tags: artifact,fpsg,uiuc,teaal +input_mapping: + experiment: CM_EXPERIMENT +tags: +- reproduce +- project +- paper +- m +- micro +- micro-2023 +- '2023' +- '8' +uid: 1f15f5f53c6d469a +variations: + install_deps: + script_name: install_deps + check: + script_name: check + run: + script_name: run + plot: + script_name: plot + plot_pregenerated: + script_name: plot_pregenerated diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/check.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/check.sh new file mode 100644 index 000000000..edec77ffe --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/check.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "${CM_GIT_REPO_FPSG_UIUC_TEAAL_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_FPSG_UIUC_TEAAL_CHECKOUT_PATH} + +docker-compose run cl scripts/check.sh + +test $? 
-eq 0 || exit 1 + diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/customize.py b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/customize.py new file mode 100644 index 000000000..89236cec9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/customize.py @@ -0,0 +1,35 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/install_deps.bat b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/install_deps.bat new file mode 100644 index 000000000..47f7e7ce2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/install_deps.bat @@ -0,0 +1,18 @@ +@echo off + +set CUR_DIR=%cd% + +echo. +echo Current execution path: %CUR_DIR% +echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH% +echo ENV CM_EXPERIMENT: %CM_EXPERIMENT% + +if exist "%CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt" ( + + echo. + echo Installing requirements.txt ... + echo. + + %CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% +) diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/install_deps.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/install_deps.sh new file mode 100644 index 000000000..15c20da89 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/install_deps.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "${CM_GIT_REPO_FPSG_UIUC_TEAAL_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_FPSG_UIUC_TEAAL_CHECKOUT_PATH} + +# We install python venv via CM and cache it inside CM cache +# Path to python from venv will be in ${CM_PYTHON_BIN_WITH_PATH} +#python3 -m venv env +#source env/bin/activate + +${CM_PYTHON_BIN_WITH_PATH} -m pip install -r scripts/cm-requirements.txt + +cd scripts + +${CM_PYTHON_BIN_WITH_PATH} install_deps.py + +docker-compose > /dev/null 2> /dev/null +if [ $? -ne 0 ] +then + sh install_docker.sh +fi + +test $? 
-eq 0 || exit 1
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/main.py b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/main.py
new file mode 100644
index 000000000..caa499bf0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/main.py
@@ -0,0 +1,10 @@
+import os
+
+if __name__ == "__main__":
+
+    print('')
+    print('Main script:')
+    print('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT', '')))
+    print('')
+
+    exit(0)
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/plot.bat b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/plot.bat
new file mode 100644
index 000000000..7e786771a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/plot.bat
@@ -0,0 +1,12 @@
+@echo off
+
+set CUR_DIR=%cd%
+
+echo.
+echo Current execution path: %CUR_DIR%
+echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
+echo ENV CM_EXPERIMENT: %CM_EXPERIMENT%
+
+rem echo.
+rem %CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+rem IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/plot.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/plot.sh
new file mode 100644
index 000000000..8c11c44a2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/plot.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+CUR_DIR=${PWD}
+
+echo ""
+echo "Current execution path: ${CUR_DIR}"
+echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
+echo "ENV CM_EXPERIMENT: ${CM_EXPERIMENT}"
+
+echo "${CM_GIT_REPO_FPSG_UIUC_TEAAL_CHECKOUT_PATH}"
+cd ${CM_GIT_REPO_FPSG_UIUC_TEAAL_CHECKOUT_PATH}
+
+docker-compose run cl scripts/plot.sh
+
+test $? -eq 0 || exit 1
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/plot_pregenerated.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/plot_pregenerated.sh
new file mode 100644
index 000000000..9980e7ea4
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/plot_pregenerated.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+CUR_DIR=${PWD}
+
+echo ""
+echo "Current execution path: ${CUR_DIR}"
+echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
+
+echo "${CM_GIT_REPO_FPSG_UIUC_TEAAL_CHECKOUT_PATH}"
+cd ${CM_GIT_REPO_FPSG_UIUC_TEAAL_CHECKOUT_PATH}
+
+docker-compose run cl scripts/plot_pregenerated.sh
+
+test $? -eq 0 || exit 1
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/run.bat b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/run.bat
new file mode 100644
index 000000000..6c1274ce6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/run.bat
@@ -0,0 +1,12 @@
+@echo off
+
+set CUR_DIR=%cd%
+
+echo.
+echo Current execution path: %CUR_DIR%
+echo Path to script: %CM_TMP_CURRENT_SCRIPT_PATH%
+echo ENV CM_EXPERIMENT: %CM_EXPERIMENT%
+
+echo.
+%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\main.py
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/run.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/run.sh
new file mode 100644
index 000000000..b2c7c1e3c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-8/run.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+CUR_DIR=${PWD}
+
+echo ""
+echo "Current execution path: ${CUR_DIR}"
+echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
+
+echo "${CM_GIT_REPO_FPSG_UIUC_TEAAL_CHECKOUT_PATH}"
+cd ${CM_GIT_REPO_FPSG_UIUC_TEAAL_CHECKOUT_PATH}
+
+docker-compose run cl scripts/run.sh
+
+test $? -eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/Dockerfile b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/Dockerfile
new file mode 100644
index 000000000..62c2dcdae
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/Dockerfile
@@ -0,0 +1,28 @@
+#Bootstrap: docker
+FROM ubuntu:20.04
+
+#%post
+ RUN mkdir /root/artifact_evaluation
+ RUN apt-get -y clean
+ RUN apt-get -y update
+ RUN apt-get -y install python3 build-essential
+ RUN apt-get -y install git
+ RUN apt-get -y install vim pip
+ RUN pip install numpy
+ WORKDIR /root/artifact_evaluation
+ RUN git clone https://github.com/lchangxii/sampled-mgpu-sim.git
+ RUN git clone https://github.com/lchangxii/akita.git
+ RUN git clone https://github.com/lchangxii/dnn.git
+ RUN apt-get -y install wget
+ RUN wget https://go.dev/dl/go1.20.1.linux-amd64.tar.gz
+ RUN tar -xvzf go1.20.1.linux-amd64.tar.gz
+ ENV PATH="/root/artifact_evaluation/go/bin:$PATH"
+ ENV HOME /root
+ RUN git clone https://github.com/lchangxii/micro2023_figures.git
+ RUN pip install pandas
+ RUN pip install matplotlib
+ RUN pip install openpyxl
+#%environment
+#export PATH=/opt/riscv/:$PATH
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/README.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/README.md
new file mode 100644
index 000000000..05954766f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/README.md
@@ -0,0 +1,46 @@
+# CM script to run and reproduce experiments
+
+Original repository: https://github.com/lchangxii/photon
+
+
+### Reusability using MLCommons CM automation language
+
+Install MLCommons CM using [this guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md).
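+
+The steps below drive the experiments through Docker; a quick way to check up front whether Docker is already installed (in which case the `_install_deps` step further down can be skipped) is:
+
+```bash
+docker --version || echo "Docker not found - run the _install_deps step below"
+```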
+
+Install reusable MLCommons automations:
+
+```bash
+cm pull repo mlcommons@ck
+```
+
+Install this repository with CM interface for reproduced experiments:
+```bash
+cm pull repo ctuning@cm4research
+```
+
+### Run Photon via CM interface
+
+Perform the following steps to evaluate Photon with MLCommons CM automation language:
+
+1) This command will install system dependencies for Docker and requires sudo (skip it if you have Docker installed):
+```bash
+cmr "reproduce project m 2023 photon _install_deps"
+```
+
+2) This command will prepare and run all experiments via Docker:
+
+```bash
+cmr "reproduce project m 2023 photon _run"
+```
+
+3) If the previous command completed successfully, this command will generate plots to help you validate results from the article:
+
+```bash
+cmr "reproduce project m 2023 photon _plot"
+```
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/_cm.yaml
new file mode 100644
index 000000000..392e396b7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/_cm.yaml
@@ -0,0 +1,30 @@
+alias: reproduce-ieee-acm-micro2023-paper-85
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: false
+category: Reproducibility and artifact evaluation
+deps:
+- tags: detect,os
+- names:
+  - python
+  - python3
+  tags: get,python
+script_name: run
+tags:
+- reproduce
+- project
+- paper
+- micro
+- micro-2023
+- m
+- '2023'
+- '85'
+- photon
+uid: 9e0b8254b62c4349
+variations:
+  install_deps:
+    script_name: install_deps
+  plot:
+    script_name: plot
+  run:
+    script_name: run
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/customize.py b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/customize.py
new file mode 100644
index 000000000..89236cec9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/customize.py
@@ -0,0 +1,35 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    meta = i['meta']
+
+    automation = i['automation']
+
+    quiet = (env.get('CM_QUIET', False) == 'yes')
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/install_deps.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/install_deps.sh
new file mode 100644
index 000000000..04998192f
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/install_deps.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+CUR_DIR=${PWD}
+
+echo ""
+echo "Current execution path: ${CUR_DIR}"
+echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
+
+
+
+container="docker"
+
+
+if [ "${container}" = "docker" ]; then
+
+ echo "================== Install Docker container (you can skip if already installed)=================="
+
+ sudo apt-get update
+ sudo apt-get -y install \
+ apt-transport-https \
+
ca-certificates \ + curl \ + gnupg \ + lsb-release \ + tar + + # Add Docker’s official GPG key + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg + + echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + + sudo apt-get update + sudo apt-get -y install docker-ce docker-ce-cli containerd.io + + sudo usermod -aG docker $USER + + su - $USER + +else + +echo "================== Install Podman container (you can skip if already installed)==================" + +sudo apt-get update +sudo apt-get -y install podman +su - $USER + +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/plot.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/plot.sh new file mode 100644 index 000000000..b3c8f18d1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/plot.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + + + + + + + + +print_colorful_text() { + local text="$1" + local color_code="$2" + echo "\e[${color_code}m${text}\e[0m" +} + +container="docker" +image="micro2023-photon" + +echo "================== Run a container test to make sure container works ==================" + +#${container} run docker.io/hello-world + + +echo "================== Build the Docker image to run the experiments ==================" + +#${container} build -t ${image} -f "${CM_TMP_CURRENT_SCRIPT_PATH}/Dockerfile" . + +echo "================== Get All Results ==================" + +mkdir figures +##get all benchmarks +${container} run --rm -v $PWD/gpudata:/root/gpudata/ -v $PWD/figures:/root/figures/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testallbench.py -check;cd /root/artifact_evaluation/micro2023_figures/r9nano;./r9nano.py;./r9nanolevels.py;mv *.png /root/figures/;mv *.pdf /root/figures/" + +##get all benchmarks with architecture mi100 +${container} run --rm -v $PWD/gpudata:/root/gpudata/ -v $PWD/figures:/root/figures/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testallbench.py -arch=mi100 -check;cd /root/artifact_evaluation/micro2023_figures/mi100;./mi100.py;mv *.pdf /root/figures/;mv *.png /root/figures" +# +###vgg16 +${container} run --rm -v $PWD/gpudata:/root/gpudata/ -v $PWD/figures:/root/figures/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testdlapps.py -bench=vgg16 -check;cd /root/artifact_evaluation/micro2023_figures/vgg16;./vgg16.py;./vgg16speedup.py;mv *.pdf /root/figures/;mv *.png /root/figures" +###vgg19 +echo "Benchmarks MGPUSim-Simtime MGPUSim-Walltime Photon-Simtime Photon-Walltime" +${container} run --rm -v $PWD/gpudata:/root/gpudata/ -v $PWD/figures:/root/figures/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testdlapps.py -bench=vgg19 -check |grep Sum |awk -F Sum '{ printf \"vgg19\";print \$2}' " +###resnet18 +${container} run --rm -v $PWD/gpudata:/root/gpudata/ -v $PWD/figures:/root/figures/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testdlapps.py -bench=resnet18 -check |grep Sum|awk -F Sum '{printf 
\"resnet18\";print \$2}'" +####resnet32 +${container} run --rm -v $PWD/gpudata:/root/gpudata/ -v $PWD/figures:/root/figures/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testdlapps.py -bench=resnet32 -check |grep Sum|awk -F Sum '{printf \"resnet32\";print \$2}'" +####resnet50 +${container} run --rm -v $PWD/gpudata:/root/gpudata/ -v $PWD/figures:/root/figures/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testdlapps.py -bench=resnet50 -check|grep Sum |awk -F Sum '{printf \"resnet50\";print \$2}'" +####resnet101 +${container} run --rm -v $PWD/gpudata:/root/gpudata/ -v $PWD/figures:/root/figures/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testdlapps.py -bench=resnet101 -check|grep Sum|awk -F Sum '{printf \"resnet101\";print \$2}'" +####resnet152 +${container} run --rm -v $PWD/gpudata:/root/gpudata/ -v $PWD/figures:/root/figures/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testdlapps.py -bench=resnet152 -check|grep Sum|awk -F Sum '{printf \"resnet152\";print \$2}'" +## +#### +${container} run --rm -v $PWD/gpudata:/root/gpudata/ -v $PWD/figures:/root/figures/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testpagerank.py -check|grep pagerank|grep -v __pagerank" diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/run.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/run.sh new file mode 100644 index 000000000..885b63322 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-85/run.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + + + +print_colorful_text() { + local text="$1" + local color_code="$2" + echo "\e[${color_code}m${text}\e[0m" +} + +container="docker" +image="micro2023-photon" + +echo "================== Run a container test to make sure container works ==================" + +${container} run docker.io/hello-world + + +echo "================== Build the Docker image to run the experiments ==================" + +${container} build -t ${image} -f "${CM_TMP_CURRENT_SCRIPT_PATH}/Dockerfile" . 
+ +echo "================== Execute all benchmarks ==================" +mkdir gpudata +##run all benchmarks +${container} run --rm -v $PWD/gpudata:/root/gpudata/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testallbench.py" + +##run all benchmarks with architecture mi100 +${container} run --rm -v $PWD/gpudata:/root/gpudata/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testallbench.py -arch=mi100" + +##vgg16 +${container} run --rm -v $PWD/gpudata:/root/gpudata/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testdlapps.py -bench=vgg16" +##vgg19 +${container} run --rm -v $PWD/gpudata:/root/gpudata/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testdlapps.py -bench=vgg19" +##resnet18 +${container} run --rm -v $PWD/gpudata:/root/gpudata/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testdlapps.py -bench=resnet18" +##resnet32 +${container} run --rm -v $PWD/gpudata:/root/gpudata/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testdlapps.py -bench=resnet32" +##resnet50 +${container} run --rm -v $PWD/gpudata:/root/gpudata/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testdlapps.py -bench=resnet50" +##resnet101 +${container} run --rm -v $PWD/gpudata:/root/gpudata/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testdlapps.py -bench=resnet101" +##resnet152 +${container} run --rm -v $PWD/gpudata:/root/gpudata/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testdlapps.py -bench=resnet152" +##pagerank +${container} run --rm -v $PWD/gpudata:/root/gpudata/ ${image} /bin/bash -c "cd /root/artifact_evaluation/sampled-mgpu-sim/samples/sampledrunner;./testpagerank.py" + + diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_build_onikiri.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_build_onikiri.sh new file mode 100644 index 000000000..1cb9d45d6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_build_onikiri.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}" + +cd ${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}/Clockhands_Artifact_MICRO2023/ClockhandsPreliminaryExperiments/ + +cd onikiri2/project/gcc/ +make -j$(nproc) +cd ../../../ diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_create_binary.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_create_binary.sh new file mode 100644 index 000000000..0a6d2af25 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_create_binary.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}" + +cd ${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}/Clockhands_Artifact_MICRO2023/ClockhandsPreliminaryExperiments/ + +cd raytracing.github.io/build_micro2023_ae/ +sed s@~@../../../../ClockhandsEvaluation/A-riscv@ -i common.mk +make +cd ../../ +cp raytracing.github.io/build_micro2023_ae/InOneWeekend/a.out onikiri2/benchmark/RayTracing/riscv64/bin/InOneWeekend +cp raytracing.github.io/build_micro2023_ae/TheNextWeek/a.out onikiri2/benchmark/RayTracing/riscv64/bin/TheNextWeek +cp raytracing.github.io/build_micro2023_ae/TheRestOfYourLife/a.out onikiri2/benchmark/RayTracing/riscv64/bin/TheRestOfYourLife diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_experiment.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_experiment.sh new file mode 100644 index 000000000..f4b7c0d2f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_experiment.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}" + +cd ${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}/Clockhands_Artifact_MICRO2023/ClockhandsPreliminaryExperiments/ + +cd onikiri2/tool/AutoRunTools/ +sed s@/path/to@$(realpath ../../../)@ -i cfg.xml + +# You can change this! +GigaInsns=1 + +echo "Register lifetimes experiment for $GigaInsns giga instructions." +echo "It will take $(echo $GigaInsns \* 4 | bc) minutes." 
+echo "You can change the number of instructions to evaluate by modifying $BASH_SOURCE" +sed '115 s@".*"@"'"$GigaInsns"'G"@' -i cfg.xml + +perl enqueue.pl -t +cd result/001/sh/exec/ +for i in *.sh; do sh $i & PID="$PID $!"; done +wait $PID +cd ../../../../ +perl summary.pl +cd ../../../ diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_experiment_setup.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_experiment_setup.sh new file mode 100644 index 000000000..9f70db2ee --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_experiment_setup.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}" + +cd ${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}/Clockhands_Artifact_MICRO2023/ClockhandsPreliminaryExperiments/ + +sed '59,74d' -i onikiri2/tool/AutoRunTools/cfg.xml diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_plot.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_plot.sh new file mode 100644 index 000000000..cf0ee26fa --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/Preliminary_plot.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}" + +cd ${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}/Clockhands_Artifact_MICRO2023/ClockhandsPreliminaryExperiments/ + +echo "" +echo "Please go to $(pwd) and check ClockhandsPreliminaryExperiments*.xlsx ." +echo "The procedure of generating charts are described on them." diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/README.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/README.md new file mode 100644 index 000000000..787326bc8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/README.md @@ -0,0 +1,49 @@ +# CM script to run and reproduce experiments + +Archived artifact: https://zenodo.org/record/8218698 + +## Reusability using MLCommons CM automation language + +Install MLCommmons CM using [this guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md). 
+Note that you also need to run the following command to install the CM automation scripts:
+
+```bash
+cm pull repo mlcommons@ck
+```
+
+Install this repository with CM interface for reproduced experiments:
+
+```bash
+cm pull repo ctuning@cm4research
+```
+
+## Install deps
+
+To install dependencies, run:
+
+```bash
+cmr "reproduce paper micro-2023 clockhands _install_deps"
+```
+
+## Run
+
+```bash
+cmr "reproduce paper micro-2023 clockhands _build_compiler"
+cmr "reproduce paper micro-2023 clockhands _create_binary"
+cmr "reproduce paper micro-2023 clockhands _build_onikiri"
+cmr "reproduce paper micro-2023 clockhands _experiment_setup"
+cmr "reproduce paper micro-2023 clockhands _experiment"
+cmr "reproduce paper micro-2023 clockhands _Preliminary_build_onikiri"
+cmr "reproduce paper micro-2023 clockhands _Preliminary_create_binary"
+cmr "reproduce paper micro-2023 clockhands _Preliminary_experiment_setup"
+cmr "reproduce paper micro-2023 clockhands _Preliminary_experiment"
+```
+
+## Plot
+
+To plot the results of the real experiments, run:
+
+```bash
+cmr "reproduce paper micro-2023 clockhands _plot"
+cmr "reproduce paper micro-2023 clockhands _Preliminary_plot"
+```
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/_cm.yaml
new file mode 100644
index 000000000..869258b3e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/_cm.yaml
@@ -0,0 +1,55 @@
+alias: reproduce-ieee-acm-micro2023-paper-87
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: false
+category: Reproducibility and artifact evaluation
+deps:
+- tags: detect,os
+- names:
+  - python
+  - python3
+  tags: get,python
+- tags: download-and-extract,_extract,_url.https://zenodo.org/record/8218698/files/Clockhands_Artifact_MICRO2023.tar?download=1
+  env:
+    CM_DOWNLOAD_FINAL_ENV_NAME: CM_ARTIFACT_CLOCKHANDS
+    CM_EXTRACT_FINAL_ENV_NAME: CM_ARTIFACT_CLOCKHANDS_EXTRACTED
+# CM_DOWNLOAD_CHECKSUM:
+  force_cache: true
+  extra_cache_tags: reproduce,paper,artifact,micro,clockhands
+tags:
+- reproduce
+- project
+- paper
+- m
+- micro
+- micro-2023
+- '2023'
+- '87'
+- clockhands
+- Clockhands
+uid: bd56037bf32c4b71
+variations:
+  install_deps:
+    script_name: install_deps
+  build_compiler:
+    script_name: build_compiler
+  create_binary:
+    script_name: create_binary
+  build_onikiri:
+    script_name: build_onikiri
+  experiment_setup:
+    script_name: experiment_setup
+  experiment:
+    script_name: experiment
+  plot:
+    script_name: plot
+  Preliminary_build_onikiri:
+    script_name: Preliminary_build_onikiri
+  Preliminary_create_binary:
+    script_name: Preliminary_create_binary
+  Preliminary_experiment_setup:
+    script_name: Preliminary_experiment_setup
+  Preliminary_experiment:
+    script_name: Preliminary_experiment
+  Preliminary_plot:
+    script_name: Preliminary_plot
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/build_compiler.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/build_compiler.sh
new file mode 100644
index 000000000..4a43299a0
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/build_compiler.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+CUR_DIR=${PWD}
+
+echo ""
+echo "Current execution path: ${CUR_DIR}"
+echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}"
+
+echo "${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}"
+
+cd ${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}/Clockhands_Artifact_MICRO2023/ClockhandsEvaluation/
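+
+# The steps below build the three toolchains used by the artifact: a RISC-V GCC
+# cross-compiler (riscv-gnu-toolchain), a musl libc built with that GCC, and a
+# prebuilt LLVM/Clang 12.0.1 release downloaded from GitHub.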
+ + +cd A-riscv/ + +git clone https://github.com/riscv-collab/riscv-gnu-toolchain +cd riscv-gnu-toolchain/ +git checkout 2022.01.17 +CFLAGS="-O2 -static" ./configure --prefix=$(realpath ../riscv_gcc111) --with-arch=rv64g +make linux -j$(nproc) +make -j$(nproc) +cd ../ + +cd musl/ +CC=../riscv_gcc111/bin/riscv64-unknown-linux-gnu-gcc CROSS_COMPILE=../riscv_gcc111/bin/riscv64-unknown-linux-gnu- ./configure --prefix=$(realpath ../musl-gcc) --target=riscv64 +make -j$(nproc) +make install +cd ../../ + +wget https://github.com/llvm/llvm-project/releases/download/llvmorg-12.0.1/clang+llvm-12.0.1-x86_64-linux-gnu-ubuntu-16.04.tar.xz +tar xf clang+llvm-12.0.1-x86_64-linux-gnu-ubuntu-16.04.tar.xz +mv clang+llvm-12.0.1-x86_64-linux-gnu-ubuntu- clang+llvm-12.0.1-x86_64-linux-gnu-ubuntu-16.04 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/build_onikiri.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/build_onikiri.sh new file mode 100644 index 000000000..cb0de224b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/build_onikiri.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}" + +cd ${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}/Clockhands_Artifact_MICRO2023/ClockhandsEvaluation/ +cd onikiri2/project/gcc/ +make -j$(nproc) +cd ../../../ diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/create_binary.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/create_binary.sh new file mode 100644 index 000000000..aaf0ebb50 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/create_binary.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}" + +cd ${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}/Clockhands_Artifact_MICRO2023/ClockhandsEvaluation/ + +sed s@~@..@ -i A-riscv/stuff/make.inc +cd A-riscv/coremark/ +make +cd ../../ + +cd B-straight/toolchain/Test/coremark/ +make +cd ../../../../ + +cd C-clockhands/coremark/ +make +cd ../../ diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/experiment.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/experiment.sh new file mode 100644 index 000000000..669eaa0d2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/experiment.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}" + +cd ${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}/Clockhands_Artifact_MICRO2023/ClockhandsEvaluation/ + +cd evaluation/ +make -j$(nproc) diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/experiment_setup.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/experiment_setup.sh new file mode 100644 index 000000000..c112258a9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/experiment_setup.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}" + +cd 
${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}/Clockhands_Artifact_MICRO2023/ClockhandsEvaluation/ + +cp A-riscv/coremark/rvbin/coremark.rvbin evaluation/0.coremark +cp B-straight/toolchain/Test/coremark/stbin/coremark.stbin evaluation/0.coremark +cp C-clockhands/coremark/chbin/coremark.chbin evaluation/0.coremark +cp onikiri2/project/gcc/onikiri2/a.out evaluation/onikiri2 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/install_deps.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/install_deps.sh new file mode 100644 index 000000000..2a8c9c716 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/install_deps.sh @@ -0,0 +1,4 @@ +echo "Install dependencies to build riscv-gcc." +sudo apt install autoconf automake autotools-dev curl python3 python3-pip libmpc-dev libmpfr-dev libgmp-dev gawk build-essential bison flex texinfo gperf libtool patchutils bc zlib1g-dev libexpat-dev ninja-build git cmake libglib2.0-dev +echo "Install dependencies to make figures." +sudo apt install gnuplot diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/plot.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/plot.sh new file mode 100644 index 000000000..c6f2910a3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-87/plot.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}" + +cd ${CM_ARTIFACT_CLOCKHANDS_EXTRACTED}/Clockhands_Artifact_MICRO2023/ClockhandsEvaluation/ + +cd evaluation/ + +grep ExecutedCycles way*/*.xml | grep -v way[^v]*chbin | sort -V | sed -e 's/\(way[0-9]*\)-.*coremark./\1 /g' -e 's/bin.xml.*"\(.*\)"/ \1/' | awk 'NR==1{a=$3}NR%3==1{printf($1)}{printf(" "a/$3)}NR%3==0{print""}' > PerformanceImprovement.dat +echo 'set terminal png; set out "PerformanceImprovement.png"; set style histogram clustered; plot [] [0:2] "PerformanceImprovement.dat" using 2:xtic(1) with histogram title "R", "PerformanceImprovement.dat" using 3 with histogram title "S", "PerformanceImprovement.dat" using 4 with histogram title "C";' | gnuplot + +grep Retirer -B3 way8-*/*.xml | grep NumOpCode | grep -v way[^v]*chbin | sed 'y/",/ /' | awk 'NR==1{for(i=3;i<37;++i){a+=$(i)}}{for(i=3;i<37;++i){$(i)/=a}}{print (NR==1?"R":NR==2?"S":"C"),$4+$5,$9,$7,$10+$20,$11+$21,$14+$15,$16+$17,$22+$23+$24+$25+$26+$27+$28+$29,$13,$33,$30+$31}' > InstructionBreakdown.dat +echo 'set terminal png; set out "InstructionBreakdown.png"; set style histogram rowstacked; set key invert; plot "InstructionBreakdown.dat" using 2:xtic(1) with histogram title "Call+Ret", "InstructionBreakdown.dat" using 3 with histogram title "Jump", "InstructionBreakdown.dat" using 4 with histogram title "CondBr", "InstructionBreakdown.dat" using 5 with histogram title "Load", "InstructionBreakdown.dat" using 6 with histogram title "Store", "InstructionBreakdown.dat" using 7 with histogram title "ALU", "InstructionBreakdown.dat" using 8 with histogram title "Mul+Div", "InstructionBreakdown.dat" using 9 with histogram title "FLOPs", "InstructionBreakdown.dat" using 10 with histogram title "Move", "InstructionBreakdown.dat" using 11 with histogram title "NOP", "InstructionBreakdown.dat" using 12 with histogram title "Others";' | gnuplot + +cat <(grep SkippedInsns skip-result/*.chbin.xml) <(grep 'Register.*Frequency' skip-result/*.chbin.xml) | sed 'y/",/ 
/' | awk 'NR==1{insns=$2}NR!=1{for(s=t=u=v=i=0;i<16;++i){s+=$(2+i);t+=$(18+i);u+=$(34+i);v+=$(50+i)}print (NR==2?"Write":"Read"),s/insns,t/insns,u/insns,v/insns,(NR==2?(insns-s-t-u-v)/insns:0)}' > HandBreakdown.dat +echo 'set terminal png; set out "HandBreakdown.png"; set style histogram rowstacked; set key invert; plot "HandBreakdown.dat" using 2:xtic(1) with histogram title "s hand", "HandBreakdown.dat" using 3 with histogram title "t hand", "HandBreakdown.dat" using 4 with histogram title "u hand", "HandBreakdown.dat" using 5 with histogram title "v hand", "HandBreakdown.dat" using 6 with histogram title "no dst hand";' | gnuplot + +cat <(grep SkippedInsns skip-result/*.chbin.xml) <(grep LifetimeDistributionKey skip-result/*.chbin.xml) <(grep LifetimeDistributionCount skip-result/*.chbin.xml) | sed 'y/",/ /' | awk 'NR==1{insns=$2}NR==2{for(i=2;i<700;++i){a[i]=$(i)}}NR>2{sum=1e-300;for(i=699;i>1;--i){sum+=$(i);b[NR][i]=sum/insns}}END{for(i=2;i<700;++i){print a[i],b[3][i],b[4][i],b[5][i],b[6][i]}}' > LifetimeByHand.dat +echo 'set terminal png; set out "LifetimeByHand.png"; set logscale x; set logscale y; plot [1:1e6] [1e-6:1] "LifetimeByHand.dat" using 1:2 with line title "v", "LifetimeByHand.dat" using 1:3 with line title "u", "LifetimeByHand.dat" using 1:4 with line title "t", "LifetimeByHand.dat" using 1:5 with line title "s";' | gnuplot + +cat <(grep SkippedInsns skip-result/*.rvbin.xml) <(grep LifetimeDistributionKey skip-result/*.rvbin.xml) <(grep LifetimeDistributionCountAll skip-result/*.rvbin.xml) | sed 'y/",/ /' | awk 'NR==1{insns=$2}NR==2{for(i=2;i<700;++i){a[i]=$(i)}}NR==3{for(i=699;i>1;--i){sum+=$(i);print a[i],sum/insns}}' > Lifetime-RV.dat +echo 'set terminal png; set out "Lifetime-RV.png"; set logscale x; set logscale y; plot [1:1e6] [1e-6:1] "Lifetime-RV.dat" using 1:2 with line title "RV";' | gnuplot +cat <(grep SkippedInsns skip-result/*.stbin.xml) <(grep LifetimeDistributionKey skip-result/*.stbin.xml) <(grep LifetimeDistributionCountAll skip-result/*.stbin.xml) | sed 'y/",/ /' | awk 'NR==1{insns=$2}NR==2{for(i=2;i<700;++i){a[i]=$(i)}}NR==3{for(i=699;i>1;--i){sum+=$(i);print a[i],sum/insns}}' > Lifetime-ST.dat +echo 'set terminal png; set out "Lifetime-ST.png"; set logscale x; set logscale y; plot [1:1e6] [1e-6:1] "Lifetime-ST.dat" using 1:2 with line title "ST";' | gnuplot +cat <(grep SkippedInsns skip-result/*.chbin.xml) <(grep LifetimeDistributionKey skip-result/*.chbin.xml) <(grep LifetimeDistributionCountAll skip-result/*.chbin.xml) | sed 'y/",/ /' | awk 'NR==1{insns=$2}NR==2{for(i=2;i<700;++i){a[i]=$(i)}}NR==3{for(i=699;i>1;--i){sum+=$(i);print a[i],sum/insns}}' > Lifetime-CH.dat +echo 'set terminal png; set out "Lifetime-CH.png"; set logscale x; set logscale y; plot [1:1e6] [1e-6:1] "Lifetime-CH.dat" using 1:2 with line title "CH";' | gnuplot + +echo "see $(pwd)/*.png!" diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
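A note on the `plot.sh` above (paper 87): each figure is produced by the same two-step pattern — a `grep`/`sed`/`awk` pipeline condenses the simulator XML statistics into a whitespace-separated `.dat` table, and a one-line gnuplot program is then piped into `gnuplot`. Below is a minimal self-contained sketch of that pattern; the data values are made up for illustration and gnuplot is assumed to be installed (the real tables are generated from the Onikiri XML outputs as shown above).

```bash
#!/bin/bash
# Sketch of the plot.sh pattern: build a small .dat table, then pipe a
# one-line histogram command into gnuplot. The data here is hypothetical.

cat > PerformanceImprovement.dat << 'EOF'
way2 1.00 1.05 1.12
way4 1.00 1.08 1.21
way8 1.00 1.10 1.35
EOF

# Column 1 becomes the x-axis label; columns 2-4 become one clustered
# bar each, mirroring the "R"/"S"/"C" series in the real script.
echo 'set terminal png; set out "PerformanceImprovement.png"; set style histogram clustered; plot [] [0:2] "PerformanceImprovement.dat" using 2:xtic(1) with histogram title "R", "PerformanceImprovement.dat" using 3 with histogram title "S", "PerformanceImprovement.dat" using 4 with histogram title "C";' | gnuplot
```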
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/README.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/README.md
new file mode 100644
index 000000000..68c190378
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/README.md
@@ -0,0 +1,45 @@
+# CM script to run and reproduce experiments
+
+Original repository: https://github.com/CMU-SAFARI/Victima
+
+
+### Reusability using MLCommons CM automation language
+
+Install MLCommons CM using [this guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md).
+
+Install reusable MLCommons automations:
+
+```bash
+cm pull repo mlcommons@ck
+```
+
+Install this repository with the CM interface for the reproduced experiments:
+```bash
+cm pull repo ctuning@cm4research
+```
+
+### Run Victima via CM interface
+
+Perform the following steps to evaluate Victima with the MLCommons CM automation language:
+
+1) This command will install system dependencies for Docker and requires sudo (skip it if you have Docker installed):
+```bash
+cmr "reproduce project m 2023 victima _install_deps"
+```
+
+2) This command will prepare and run all experiments via Docker:
+
+```bash
+cmr "reproduce project m 2023 victima _run"
+```
+
+You can specify --job_manager and --container if needed:
+```bash
+cmr "reproduce project m 2023 victima _run" --job_manager=native|slurm --container=docker|podman
+```
+
+3) If the previous command succeeded, this command will generate plots to help you validate results from the article:
+
+```bash
+cmr "reproduce project m 2023 victima _plot"
+```
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/_cm.yaml
new file mode 100644
index 000000000..7daa9e663
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/_cm.yaml
@@ -0,0 +1,42 @@
+alias: reproduce-ieee-acm-micro2023-paper-96
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: false
+category: Reproducibility and artifact evaluation
+default_env:
+  CM_VICTIMA_JOB_MANAGER: native
+  CM_VICTIMA_CONTAINER: docker
+deps:
+- tags: detect,os
+- names:
+  - python
+  - python3
+  tags: get,python
+- tags: get,git,repo,_repo.https://github.com/CMU-SAFARI/Victima
+  env:
+    CM_GIT_ENV_KEY: 'CMU_SAFARI_VICTIMA'
+  extra_cache_tags: artifact,cmu,safari,victima
+input_mapping:
+  job_manager: CM_VICTIMA_JOB_MANAGER
+  container: CM_VICTIMA_CONTAINER
+script_name: run
+tags:
+- reproduce
+- project
+- paper
+- m
+- micro
+- micro-2023
+- '2023'
+- '96'
+- cmu
+- safari
+- victima
+uid: fc5bee3426174e7b
+variations:
+  install_deps:
+    script_name: install_deps
+  plot:
+    script_name: plot
+  run:
+    script_name: run
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/customize.py b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/customize.py
new file mode 100644
index 000000000..89236cec9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/customize.py
@@ -0,0 +1,35 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project
contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/install_deps.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/install_deps.sh new file mode 100644 index 000000000..3458dd15b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/install_deps.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to Victima repo: ${CM_GIT_REPO_CMU_SAFARI_VICTIMA_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_CMU_SAFARI_VICTIMA_CHECKOUT_PATH} + +if test -f "${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt"; then + echo "" + echo "Installing requirements.txt ..." + echo "" + + ${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + test $? -eq 0 || exit 1 +fi + +echo "" + +sh install_docker.sh +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/main.py b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/main.py new file mode 100644 index 000000000..caa499bf0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/main.py @@ -0,0 +1,10 @@ +import os + +if __name__ == "__main__": + + print('') + print('Main script:') + print('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT', ''))) + print('') + + exit(0) diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/plot.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/plot.sh new file mode 100644 index 000000000..50723da50 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/plot.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to Victima repo: ${CM_GIT_REPO_CMU_SAFARI_VICTIMA_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_CMU_SAFARI_VICTIMA_CHECKOUT_PATH} + +echo "" +sh ./scripts/produce_plots.sh ${CM_VICTIMA_CONTAINER} +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/run.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/run.sh new file mode 100644 index 000000000..541c72839 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ieee-acm-micro2023-paper-96/run.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CUR_DIR=${PWD} + +echo "" +echo "Current execution path: ${CUR_DIR}" +echo "Path to script: ${CM_TMP_CURRENT_SCRIPT_PATH}" + +echo "Changing to Victima repo: ${CM_GIT_REPO_CMU_SAFARI_VICTIMA_CHECKOUT_PATH}" +cd ${CM_GIT_REPO_CMU_SAFARI_VICTIMA_CHECKOUT_PATH} + +echo "" + +sh artifact.sh --${CM_VICTIMA_JOB_MANAGER} ${CM_VICTIMA_CONTAINER} +test $? 
-eq 0 || exit 1
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/README-extra.md
new file mode 100644
index 000000000..28afd8094
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/README-extra.md
@@ -0,0 +1,75 @@
+# CM-based reproducibility demo for IPOL journal
+
+This is part of the [open challenge](https://access.cknowledge.org/playground/?action=challenges&name=f284c08891c44058)
+to make it easier to reproduce experimental results from research papers
+using the [MLCommons CM scripting language](https://github.com/mlcommons/ck).
+
+Code and sample images are taken from https://ipolcore.ipol.im/demo/clientApp/demo.html?id=439 .
+
+The demo illustrates the method proposed by Daudt et al. (2019) for change detection on satellite images. It takes as input two color images in PNG format. Both images should be satellite images of the same area, and co-registered.
+The output image is a change map. For each pixel in the input images, the value of the change map is 1 if a change is detected and 0 otherwise.
+
+Pairs of images from the OSCD test set are already provided with the demo. For those images,
+the ground truth is available in the original dataset: https://ieee-dataport.org/open-access/oscd-onera-satellite-change-detection.
+
+## Authors
+
+* [Jose Hernandez](https://www.linkedin.com/in/jose-hernandez-a261182b)
+* [Grigori Fursin](https://cKnowledge.org/gfursin)
+
+## Initial discussion and materials
+
+* https://github.com/mlcommons/ck/issues/617
+* http://www.ipol.im/pub/art/2022/439/
+* https://access.cknowledge.org/playground/?action=challenges&name=reproduce-and-automate-ipol-paper
+
+## Implementation
+
+We implemented two CM scripts for this challenge:
+
+* [Download IPOL paper sources and cache them in CM](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ipol-src)
+* [Run the IPOL 2022 439 paper demo using the above script and PyTorch](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-ipol-paper-2022-439)
+
+## Reproducibility
+
+These CM scripts were implemented and demonstrated on Ubuntu and still need to be tested across different systems:
+
+1. Install the MLCommons CM (CK2) automation framework as described [here](https://github.com/mlcommons/ck/blob/master/docs/installation.md).
+
+2. Install the MLCommons repository with CM automation scripts:
+
+```bash
+cm pull repo mlcommons@cm4mlops --checkout=dev
+```
+
+3. Install the sources of the IPOL 2022 439 paper:
+```bash
+cm run script "get ipol src" --year=2022 --number=439
+
+cm show cache --tags=ipol,src
+```
+
+4.
Download the sample images and run the demo (CM will detect or install missing dependencies):
+```bash
+cm run script "download file _wget" --url=https://cKnowledge.org/ai/data/ipol-paper-2024-439-sample-image-1.png --verify=no --env.CM_DOWNLOAD_CHECKSUM=850639287ad23194576582680c2ecfc3
+cm run script "download file _wget" --url=https://cKnowledge.org/ai/data/ipol-paper-2024-439-sample-image-2.png --verify=no --env.CM_DOWNLOAD_CHECKSUM=31364c03d91873ed2d244cce6d664dd0
+cm run script "reproduce ipol 2022-439"
+cm run script "reproduce ipol 2022-439" --adr.torch.version=1.13.1 --adr.torchvision.version=0.14.1
+```
+
+This script will use two sample images from this paper
+and should produce *diff.png* in the current directory.
+
+## Usage with different images
+
+You can use two other images by specifying their full paths as follows:
+```bash
+cm run script "reproduce ipol 2022-439" \
+    --image1={full path to png image 1} \
+    --image2={full path to png image 2}
+```
+
+## Collaborative development
+
+Join the public [MLCommons Task Force on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+to participate in further collaborative developments.
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/README.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/README.md
new file mode 100644
index 000000000..2dffd2161
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Reproducibility-and-artifact-evaluation/reproduce-ipol-paper-2022-439](https://docs.mlcommons.org/cm4mlops/scripts/Reproducibility-and-artifact-evaluation/reproduce-ipol-paper-2022-439) for the documentation of this CM script.
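The IPOL demo above lends itself to a small wrapper script. The sketch below simply chains the documented commands: it fetches the two sample images (URLs and MD5 checksums are copied from the README above) and then runs the reproduction. The `fetch` helper is a hypothetical convenience, not part of the CM scripts themselves.

```bash
#!/bin/bash
# Hypothetical wrapper around the documented IPOL 2022-439 commands.
set -e

fetch() {
  # $1 = URL, $2 = expected MD5 checksum (both taken from the README above)
  cm run script "download file _wget" --url="$1" --verify=no --env.CM_DOWNLOAD_CHECKSUM="$2"
}

fetch https://cKnowledge.org/ai/data/ipol-paper-2024-439-sample-image-1.png 850639287ad23194576582680c2ecfc3
fetch https://cKnowledge.org/ai/data/ipol-paper-2024-439-sample-image-2.png 31364c03d91873ed2d244cce6d664dd0

# Run the demo on the downloaded samples; pass --image1/--image2 to use your own pair
cm run script "reproduce ipol 2022-439"

ls -l diff.png
```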
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/_cm.yaml
new file mode 100644
index 000000000..bd7c9e140
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/_cm.yaml
@@ -0,0 +1,40 @@
+alias: reproduce-ipol-paper-2022-439
+uid: f9b9e5bd65e34e4f
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: Reproducibility and artifact evaluation
+
+input_mapping:
+  image1: CM_INPUT_1
+  image2: CM_INPUT_2
+
+deps:
+- tags: detect,os
+- tags: get,python3
+  names:
+  - python
+  - python3
+- tags: get,ipol,src
+  names:
+  - ipol-src
+- tags: get,generic-python-lib,_torch
+  names:
+  - torch
+- tags: get,generic-python-lib,_torchvision
+  names:
+  - torchvision
+
+tags:
+- app
+- python
+- reproduce
+- project
+- paper
+- ipol
+- journal
+- repro
+- reproducibility
+- pytorch
+- 2022-439
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/customize.py b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/customize.py
new file mode 100644
index 000000000..10b385d3e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/customize.py
@@ -0,0 +1,51 @@
+#
+# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md
+# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md
+#
+# White paper: https://arxiv.org/abs/2406.16791
+# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md
+# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops
+#
+# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md
+#
+
+from cmind import utils
+import os
+
+
+def preprocess(i):
+    os_info = i['os_info']
+
+    env = i['env']
+
+    # Fall back to the default sample images if no input files are provided
+    input_file_1 = env.get('CM_INPUT_1', '')
+    if input_file_1 == '':
+        input_file_1 = 'ipol-paper-2024-439-sample-image-1.png'
+
+    if not os.path.isfile(input_file_1):
+        return {'return': 1,
+                'error': 'input file 1 "{}" not found'.format(input_file_1)}
+
+    env['CM_INPUT_1'] = os.path.abspath(input_file_1)
+
+    input_file_2 = env.get('CM_INPUT_2', '')
+    if input_file_2 == '':
+        input_file_2 = 'ipol-paper-2024-439-sample-image-2.png'
+
+    if not os.path.isfile(input_file_2):
+        return {'return': 1,
+                'error': 'input file 2 "{}" not found'.format(input_file_2)}
+
+    env['CM_INPUT_2'] = os.path.abspath(input_file_2)
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    print('')
+    print('Please check "diff.png"')
+    print('')
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/requirements.txt b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/requirements.txt
new file mode 100644
index 000000000..82a4d6034
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/requirements.txt
@@ -0,0 +1,5 @@
+jupyter
+numpy
+imageio
+IPython
+scikit-image
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/run.bat b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/run.bat
new file mode 100644
index 000000000..7aafa4a34
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/run.bat
@@ -0,0 +1,33 @@
+@echo off
+
+echo =======================================================
+
+set CUR_DIR=%cd%
+echo Current path in CM script: %CUR_DIR%
+
+echo.
+echo Installing extra requirements (latest versions) ...
+
+echo.
+%CM_PYTHON_BIN_WITH_PATH% -m pip install -r %CM_TMP_CURRENT_SCRIPT_PATH%\requirements.txt + +echo ======================================================= + +cd %CM_IPOL_PATH% + +echo Current path in CM cache: %cd% + +echo Running author's code ... + +del /F /Q cm.png +del /F /Q %CUR_DIR%\diff.png + +echo. +%CM_PYTHON_BIN_WITH_PATH% main.py --input_0=%CM_INPUT_1% --input_1=%CM_INPUT_2% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +rem Copy diff png to current path +copy /B cm.png %CUR_DIR%\diff.png +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo ======================================================= diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/run.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/run.sh new file mode 100644 index 000000000..99a474627 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-ipol-paper-2022-439/run.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +echo "=======================================================" + +CUR_DIR=${PWD} +echo "Current path in CM script: ${CUR_DIR}" + +echo "" +echo "Installing extra requirements (latest versions) ..." + +echo "" +${CM_PYTHON_BIN_WITH_PATH} -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt + +echo "=======================================================" + +cd ${CM_IPOL_PATH} + +echo "Current path in CM cache: ${PWD}" + +# Check default images +if [ "${CM_INPUT_1}" == "" ]; then + CM_INPUT_1=${CM_TMP_CURRENT_SCRIPT_PATH}/sample-images/1.png +fi + +if [ "${CM_INPUT_2}" == "" ]; then + CM_INPUT_2=${CM_TMP_CURRENT_SCRIPT_PATH}/sample-images/2.png +fi + +echo "Running author's code ..." + +rm -f cm.png +rm -f ${CUR_DIR}/diff.png + +echo "" +${CM_PYTHON_BIN_WITH_PATH} main.py --input_0=${CM_INPUT_1} --input_1=${CM_INPUT_2} +test $? -eq 0 || exit 1 + +# Copy diff png to current path +cp cm.png ${CUR_DIR}/diff.png +test $? -eq 0 || exit 1 + +echo "=======================================================" diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-inference-dummy/README.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-inference-dummy/README.md new file mode 100644 index 000000000..36f245ef7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-inference-dummy/README.md @@ -0,0 +1,381 @@ +
    +Click here to see the table of contents. + +* [About](#about) +* [Summary](#summary) +* [Reuse this script in your project](#reuse-this-script-in-your-project) + * [ Install CM automation language](#install-cm-automation-language) + * [ Check CM script flags](#check-cm-script-flags) + * [ Run this script from command line](#run-this-script-from-command-line) + * [ Run this script from Python](#run-this-script-from-python) + * [ Run this script via GUI](#run-this-script-via-gui) + * [ Run this script via Docker (beta)](#run-this-script-via-docker-(beta)) +* [Customization](#customization) + * [ Variations](#variations) + * [ Script flags mapped to environment](#script-flags-mapped-to-environment) + * [ Default environment](#default-environment) +* [Script workflow, dependencies and native scripts](#script-workflow-dependencies-and-native-scripts) +* [Script output](#script-output) +* [New environment keys (filter)](#new-environment-keys-(filter)) +* [New environment keys auto-detected from customize](#new-environment-keys-auto-detected-from-customize) +* [Maintainers](#maintainers) + +
    + +*Note that this README is automatically generated - don't edit!* + +### About + +#### Summary + +* Category: *Modular MLPerf benchmarks.* +* CM GitHub repository: *[mlcommons@cm4mlops](https://github.com/mlcommons/cm4mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* CM "database" tags to find this script: *reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy* +* Output cached? *False* +___ +### Reuse this script in your project + +#### Install CM automation language + +* [Installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) +* [CM intro](https://doi.org/10.5281/zenodo.8105339) + +#### Pull CM repository with this automation + +```cm pull repo mlcommons@ck``` + + +#### Run this script from command line + +1. `cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy[,variations] [--input_flags]` + +2. `cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy dummy-harness dummy[ variations]" [--input_flags]` + +* `variations` can be seen [here](#variations) + +* `input_flags` can be seen [here](#script-flags-mapped-to-environment) + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
    + + +#### Run this script via GUI + +```cmr "cm gui" --script="reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy,dummy-harness,dummy) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "reproduce mlcommons mlperf inference harness dummy-harness dummy dummy-harness dummy[ variations]" [--input_flags]` + +___ +### Customization + + +#### Variations + + * *Internal group (variations should not be selected manually)* +
    + Click here to expand this section. + + * `_bert_` + - Workflow: + * `_gptj_` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,ml-model,gptj + * CM names: `--adr.['gptj-model']...` + - CM script: [get-ml-model-gptj](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj) + * get,dataset,cnndm,_validation + - CM script: [get-dataset-cnndm](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cnndm) + * `_llama2-70b_` + - Workflow: + +
    + + + * *No group (any variation can be selected)* +
    + Click here to expand this section. + + * `_pytorch,cpu` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_torch + - CM script: [get-generic-python-lib](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib) + * `_pytorch,cuda` + - Workflow: + 1. ***Read "deps" on other CM scripts*** + * get,generic-python-lib,_torch_cuda + - CM script: [get-generic-python-lib](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib) + * `_singlestream,resnet50` + - Workflow: + * `_singlestream,retinanet` + - Workflow: + +
    + + + * Group "**backend**" +
    + Click here to expand this section. + + * **`_pytorch`** (default) + - Environment variables: + - *CM_MLPERF_BACKEND*: `pytorch` + - Workflow: + +
    + + + * Group "**batch-size**" +
    + Click here to expand this section. + + * `_bs.#` + - Workflow: + +
    + + + * Group "**device**" +
    + Click here to expand this section. + + * **`_cpu`** (default) + - Environment variables: + - *CM_MLPERF_DEVICE*: `cpu` + - Workflow: + * `_cuda` + - Environment variables: + - *CM_MLPERF_DEVICE*: `gpu` + - *CM_MLPERF_DEVICE_LIB_NAMESPEC*: `cudart` + - Workflow: + +
    + + + * Group "**loadgen-scenario**" +
    + Click here to expand this section. + + * `_multistream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `MultiStream` + - Workflow: + * `_offline` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Offline` + - Workflow: + * `_server` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `Server` + - Workflow: + * `_singlestream` + - Environment variables: + - *CM_MLPERF_LOADGEN_SCENARIO*: `SingleStream` + - Workflow: + +
    + + + * Group "**model**" +
    + Click here to expand this section. + + * `_bert-99` + - Environment variables: + - *CM_MODEL*: `bert-99` + - *CM_SQUAD_ACCURACY_DTYPE*: `float32` + - Workflow: + * `_bert-99.9` + - Environment variables: + - *CM_MODEL*: `bert-99.9` + - Workflow: + * `_gptj-99` + - Environment variables: + - *CM_MODEL*: `gptj-99` + - *CM_SQUAD_ACCURACY_DTYPE*: `float32` + - Workflow: + * `_gptj-99.9` + - Environment variables: + - *CM_MODEL*: `gptj-99.9` + - Workflow: + * `_llama2-70b-99` + - Environment variables: + - *CM_MODEL*: `llama2-70b-99` + - Workflow: + * `_llama2-70b-99.9` + - Environment variables: + - *CM_MODEL*: `llama2-70b-99.9` + - Workflow: + * **`_resnet50`** (default) + - Environment variables: + - *CM_MODEL*: `resnet50` + - Workflow: + * `_retinanet` + - Environment variables: + - *CM_MODEL*: `retinanet` + - Workflow: + +
    + + + * Group "**precision**" +
    + Click here to expand this section. + + * `_fp16` + - Environment variables: + - *CM_MLPERF_MODEL_PRECISION*: `float16` + - Workflow: + * **`_fp32`** (default) + - Environment variables: + - *CM_MLPERF_MODEL_PRECISION*: `float32` + - Workflow: + * `_uint8` + - Environment variables: + - *CM_MLPERF_MODEL_PRECISION*: `uint8` + - Workflow: + +
    + + +#### Default variations + +`_cpu,_fp32,_pytorch,_resnet50` + +#### Script flags mapped to environment +
+Click here to expand this section.
+
+* `--count=value`  →  `CM_MLPERF_LOADGEN_QUERY_COUNT=value`
+* `--max_batchsize=value`  →  `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value`
+* `--mlperf_conf=value`  →  `CM_MLPERF_CONF=value`
+* `--mode=value`  →  `CM_MLPERF_LOADGEN_MODE=value`
+* `--multistream_target_latency=value`  →  `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value`
+* `--offline_target_qps=value`  →  `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value`
+* `--output_dir=value`  →  `CM_MLPERF_OUTPUT_DIR=value`
+* `--performance_sample_count=value`  →  `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value`
+* `--rerun=value`  →  `CM_RERUN=value`
+* `--results_repo=value`  →  `CM_MLPERF_INFERENCE_RESULTS_REPO=value`
+* `--scenario=value`  →  `CM_MLPERF_LOADGEN_SCENARIO=value`
+* `--server_target_qps=value`  →  `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value`
+* `--singlestream_target_latency=value`  →  `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value`
+* `--skip_preprocess=value`  →  `CM_SKIP_PREPROCESS_DATASET=value`
+* `--skip_preprocessing=value`  →  `CM_SKIP_PREPROCESS_DATASET=value`
+* `--target_latency=value`  →  `CM_MLPERF_LOADGEN_TARGET_LATENCY=value`
+* `--target_qps=value`  →  `CM_MLPERF_LOADGEN_TARGET_QPS=value`
+* `--user_conf=value`  →  `CM_MLPERF_USER_CONF=value`
+
+**The above CLI flags can be used in the Python CM API as follows:**
+
+```python
+r=cm.access({..., "count":...})
+```
+
    + +#### Default environment + +
    +Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + +* CM_MLPERF_LOADGEN_SCENARIO: `Offline` +* CM_MLPERF_LOADGEN_MODE: `performance` +* CM_SKIP_PREPROCESS_DATASET: `no` +* CM_SKIP_MODEL_DOWNLOAD: `no` +* CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `dummy` +* CM_MLPERF_SKIP_RUN: `no` + +
    + +___ +### Script workflow, dependencies and native scripts + +
+Click here to expand this section.
+
+ 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy/_cm.yaml)***
+   * detect,os
+     - CM script: [detect-os](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os)
+   * detect,cpu
+     - CM script: [detect-cpu](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu)
+   * get,sys-utils-cm
+     - CM script: [get-sys-utils-cm](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm)
+   * get,mlcommons,inference,src
+     * CM names: `--adr.['inference-src']...`
+     - CM script: [get-mlperf-inference-src](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-src)
+   * get,mlcommons,inference,loadgen
+     * CM names: `--adr.['inference-loadgen']...`
+     - CM script: [get-mlperf-inference-loadgen](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-loadgen)
+   * generate,user-conf,mlperf,inference
+     * CM names: `--adr.['user-conf-generator']...`
+     - CM script: [generate-mlperf-inference-user-conf](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-inference-user-conf)
+   * get,generic-python-lib,_mlperf_logging
+     * CM names: `--adr.['mlperf-logging']...`
+     - CM script: [get-generic-python-lib](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib)
+   * get,git,repo
+     * CM names: `--adr.inference-results...`
+     - CM script: [get-git-repo](https://github.com/mlcommons/cm4mlops/tree/main/script/get-git-repo)
+ 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy/customize.py)***
+ 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy/_cm.yaml)
+ 1. ***Run native script if exists***
+   * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy/run.sh)
+ 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy/_cm.yaml)
+ 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy/customize.py)***
+ 1. ***Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-dummy/_cm.yaml)***
+   * benchmark-mlperf
+     * `if (CM_MLPERF_SKIP_RUN not in ['yes', True])`
+     * CM names: `--adr.['runner', 'mlperf-runner']...`
+     - CM script: [benchmark-program-mlperf](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program-mlperf)
+   * save,mlperf,inference,state
+     * CM names: `--adr.['save-mlperf-inference-state']...`
+     - CM script: [save-mlperf-inference-implementation-state](https://github.com/mlcommons/cm4mlops/tree/main/script/save-mlperf-inference-implementation-state)
+
+___
+### Script output
+`cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy dummy-harness dummy[,variations]" [--input_flags] -j`
+#### New environment keys (filter)
+
+* `CM_DATASET_*`
+* `CM_HW_NAME`
+* `CM_IMAGENET_ACCURACY_DTYPE`
+* `CM_MAX_EXAMPLES`
+* `CM_MLPERF_*`
+* `CM_ML_MODEL_*`
+* `CM_SQUAD_ACCURACY_DTYPE`
+#### New environment keys auto-detected from customize
+
+___
+### Maintainers
+
+* [Open MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
\ No newline at end of file
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/README-extra.md
new file mode 100644
index 000000000..ab78e4a31
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/README-extra.md
@@ -0,0 +1,13 @@
+This script reproduces the OctoML MLPerf TinyML submission from v1.0.
+## Install
+```bash
+cm run script --tags=reproduce,tiny,mlperf,octoml,_[VARIANT],_[MODEL]
+```
+where:
+* `[VARIANT]` is one of `cmsis_nn`, `native`
+* `[MODEL]` is one of `ad`, `ic`, `kws`, `vww`
+
+The generated binary can be located inside:
+```bash
+find `cm find cache --tags=reproduce,tiny,mlperf,octoml,_[VARIANT],_[MODEL]`
+```
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/README.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/README.md
new file mode 100644
index 000000000..75b929f34
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-octoml-tinyml-results](https://docs.mlcommons.org/cm4mlops/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-octoml-tinyml-results) for the documentation of this CM script.
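The README-extra above documents one variant/model combination per invocation. As a purely illustrative sketch (assuming each run succeeds and caches its binary), all documented combinations could be swept in one go:

```bash
#!/bin/bash
# Illustrative sweep over the variants and models listed in README-extra.md.
set -e

for variant in cmsis_nn native; do
  for model in ad ic kws vww; do
    cm run script --tags=reproduce,tiny,mlperf,octoml,_${variant},_${model}
    # Each generated ELF binary lands in the CM cache and can be located with:
    #   find `cm find cache --tags=reproduce,tiny,mlperf,octoml,_${variant},_${model}`
  done
done
```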
diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/_cm.yaml new file mode 100644 index 000000000..5dbee3b43 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/_cm.yaml @@ -0,0 +1,81 @@ +alias: reproduce-mlperf-octoml-tinyml-results +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Reproduce MLPerf benchmarks +default_version: r1.0 +deps: +- tags: detect,os +- tags: detect,cpu +- tags: get,sys-utils-cm +- names: + - python3 + - python + tags: get,python3 +- names: + - zephyr + tags: get,zephyr +- names: + - zephyr-sdk + tags: get,zephyr-sdk +- names: + - cmsis + tags: get,cmsis +- names: + - microtvm + tags: get,microtvm +- names: + - cmake + tags: get,cmake + version_min: 3.20.0 +- tags: get,gcc +input_mapping: + flash: CM_FLASH_BOARD + recreate_binary: CM_RECREATE_BINARY +local_env_keys: +- CM_* +new_env_keys: +- CM_TINY_* +post_deps: +- enable_if_env: + CM_FLASH_BOARD: + - 'True' + tags: flash,tiny,mlperf +tags: +- reproduce +- tiny +- results +- mlperf +- octoml +- mlcommons +uid: a63803a707d04332 +variations: + NRF: + env: + CM_TINY_BOARD: NRF5340DK + NUCLEO: + env: + CM_TINY_BOARD: NUCLEO_L4R5ZI + ad: + env: + CM_TINY_MODEL: ad + cmsis_nn: + env: + CM_MICROTVM_VARIANT: microtvm_cmsis_nn + ic: + env: + CM_TINY_MODEL: ic + kws: + env: + CM_TINY_MODEL: kws + native: + env: + CM_MICROTVM_VARIANT: microtvm_native + vww: + env: + CM_TINY_MODEL: vww +versions: + r1.0: + add_deps_recursive: + microtvm: + version: main diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/customize.py b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/customize.py new file mode 100644 index 000000000..033ff8281 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/customize.py @@ -0,0 +1,36 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if 'CM_MICROTVM_VARIANT' not in env: + env['CM_MICROTVM_VARIANT'] = 'microtvm_cmsis_nn' + if 'CM_TINY_MODEL' not in env: + env['CM_TINY_MODEL'] = 'ic' + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + env['+C_INCLUDE_PATH'] = [] + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/run.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/run.sh new file mode 100644 index 000000000..c8d2f077f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-octoml-tinyml-results/run.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +CUR_DIR=$PWD + +code=${CM_MICROTVM_SOURCE}/closed/OctoML/code +model=${CM_TINY_MODEL:-ad} +microtvm_variant=${CM_MICROTVM_VARIANT} +board=${CM_TINY_BOARD:-NUCLEO_L4R5ZI} +source=${code}/${microtvm_variant} + +path_suffix="${board}/${model}" 
+cmake_src=${source}/${path_suffix} +build_path=${CUR_DIR}/${path_suffix} +echo "CM_TINY_BUILD_DIR=${build_path}/build" > tmp-run-env.out +mkdir -p ${build_path} +cd ${build_path} +binary_path=${build_path}/build/zephyr/zephyr.elf +if [ -f "${binary_path}" ] && [ "${CM_RECREATE_BINARY}" != "True" ]; then + echo "ELF binary existing at ${binary_path}. Skipping regeneration." + cd build +else + rm -rf build + mkdir -p build + cd build + CM_MAKE_CORES=${CM_MAKE_CORES:-${CM_HOST_CPU_TOTAL_CORES:-2}} + cmake ${cmake_src} + test $? -eq 0 || exit 1 + make -j${CM_MAKE_CORES} + test $? -eq 0 || exit 1 + cd ../ + echo "ELF binary created at ${build_path}/build/zephyr/zephyr.elf" +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/README.md b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/README.md new file mode 100644 index 000000000..0ed47226e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-training-nvidia](https://docs.mlcommons.org/cm4mlops/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-training-nvidia) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/_cm.yaml new file mode 100644 index 000000000..a118ee3f7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/_cm.yaml @@ -0,0 +1,80 @@ +# Identification of this CM script +alias: reproduce-mlperf-training-nvidia +uid: f183628f292341e2 +cache: false + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "Reproduce MLPerf benchmarks" + + +# User-friendly tags to find this CM script +tags: + - reproduce + - mlcommons + - mlperf + - train + - training + - nvidia-training + - nvidia + + +# Map script inputs to environment variables +input_mapping: + system_conf_name: CM_MLPERF_NVIDIA_TRAINING_SYSTEM_CONF_NAME + results_dir: CM_MLPERF_RESULTS_DIR + +new_state_keys: + - mlperf-training-implementation + - CM_SUT_* + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,nvidia-docker + skip_if_env: + CM_SKIP_GET_NVIDIA_DOCKER: + - yes + + # Detect CUDA + - names: + - cuda + tags: get,cuda + version: 11.7.0 + +variations: + resnet: + group: benchmark + env: + CM_MLPERF_TRAINING_BENCHMARK: resnet + deps: + - tags: prepare,mlperf,training,resnet,_nvidia + names: + - prepare-training-data + - nvidia-training-data + - tags: get,nvidia,training,code + names: + - nvidia-training-code + +versions: + r2.1: + adr: + nvidia-training-code: + version: r2.1 + env: + resnet_benchmark_implementation: mxnet-22.04 + r3.0: + adr: + nvidia-training-code: + version: r3.0 + env: + resnet_benchmark_implementation: mxnet diff --git 
a/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/customize.py b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/customize.py new file mode 100644 index 000000000..0b603ffac --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/customize.py @@ -0,0 +1,46 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + env = i['env'] + + conf = env.get('CM_MLPERF_NVIDIA_TRAINING_SYSTEM_CONF_NAME', '') + if conf == "": + return {'return': 1, + 'error': 'Please provide --system_conf_name='} + + if not conf.endswith(".sh"): + conf = conf + ".sh" + + if env.get('CM_MLPERF_TRAINING_BENCHMARK', '') == "resnet": + i['run_script_input']['script_name'] = "run-resnet" + + env['CONFIG_FILE'] = conf +# print(env) + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/run-resnet.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/run-resnet.sh new file mode 100644 index 000000000..d64cf068c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/run-resnet.sh @@ -0,0 +1,16 @@ +#!/bin/bash +benchmark_implementation=${benchmark_implementation:-"mxnet-22.04"} +echo "cd ${CM_MLPERF_TRAINING_NVIDIA_CODE_PATH}/benchmarks/resnet/implementations/${benchmark_implementation}" +cd ${CM_MLPERF_TRAINING_NVIDIA_CODE_PATH}/benchmarks/resnet/implementations/${benchmark_implementation} +docker build --pull -t mlperf-nvidia:image_classification . +test $? -eq 0 || exit $? +echo "source ${CONFIG_FILE}" +source ${CONFIG_FILE} +test $? -eq 0 || exit $? + +DATADIR=${CM_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH} +echo "DATADIR=${CM_MLPERF_TRAINING_NVIDIA_RESNET_PREPROCESSED_PATH}" + +echo "CONT=mlperf-nvidia:image_classification DATADIR=${DATADIR} LOGDIR=${RESULTS_DIR} ./run_with_docker.sh" +CONT=mlperf-nvidia:image_classification DATADIR=${DATADIR} LOGDIR=${RESULTS_DIR} ./run_with_docker.sh +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/run.sh b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/run.sh new file mode 100644 index 000000000..ddcd0b550 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/reproduce-mlperf-training-nvidia/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash +if [[ ${CM_CALL_MLPERF_RUNNER} == "no" ]]; then + cd ${CM_RUN_DIR} + cmd=${CM_RUN_CMD} + echo "${cmd}" + eval "${cmd}" + test $? -eq 0 || exit $? +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. 
+ +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/README.md b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/README.md new file mode 100644 index 000000000..01f5427b1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/README.md @@ -0,0 +1,237 @@ +
    +Click here to see the table of contents. + +* [About](#about) +* [Summary](#summary) +* [Reuse this script in your project](#reuse-this-script-in-your-project) + * [ Install CM automation language](#install-cm-automation-language) + * [ Check CM script flags](#check-cm-script-flags) + * [ Run this script from command line](#run-this-script-from-command-line) + * [ Run this script from Python](#run-this-script-from-python) + * [ Run this script via GUI](#run-this-script-via-gui) + * [ Run this script via Docker (beta)](#run-this-script-via-docker-(beta)) +* [Customization](#customization) + * [ Variations](#variations) + * [ Default environment](#default-environment) +* [Script workflow, dependencies and native scripts](#script-workflow-dependencies-and-native-scripts) +* [Script output](#script-output) +* [New environment keys (filter)](#new-environment-keys-(filter)) +* [New environment keys auto-detected from customize](#new-environment-keys-auto-detected-from-customize) +* [Maintainers](#maintainers) + +
    + +*Note that this README is automatically generated - don't edit!* + +### About + +#### Summary + +* Category: *MLPerf benchmark support.* +* CM GitHub repository: *[mlcommons@cm4mlops](https://github.com/mlcommons/cm4mlops)* +* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models)* +* CM meta description for this script: *[_cm.yaml](_cm.yaml)* +* CM "database" tags to find this script: *run,natively,all,mlperf-models* +* Output cached? *False* +___ +### Reuse this script in your project + +#### Install CM automation language + +* [Installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md) +* [CM intro](https://doi.org/10.5281/zenodo.8105339) + +#### Pull CM repository with this automation + +```cm pull repo mlcommons@cm4mlops --checkout=dev``` + + +#### Run this script from command line + +1. `cm run script --tags=run,natively,all,mlperf-models[,variations] ` + +2. `cmr "run natively all mlperf-models[ variations]" ` + +* `variations` can be seen [here](#variations) + +#### Run this script from Python + +
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+                  'automation':'script',
+                  'tags':'run,natively,all,mlperf-models',
+                  'out':'con',
+                  ...
+                  (other input keys for this script)
+                  ...
+                 })
+
+if r['return']>0:
+    print (r['error'])
+
+```
+
    + + +#### Run this script via GUI + +```cmr "cm gui" --script="run,natively,all,mlperf-models"``` + +Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=run,natively,all,mlperf-models) to generate CM CMD. + +#### Run this script via Docker (beta) + +`cm docker script "run natively all mlperf-models[ variations]" ` + +___ +### Customization + + +#### Variations + + * *No group (any variation can be selected)* +
    + Click here to expand this section. + + * `_phoenix,reference` + - Workflow: + +
    + + + * Group "**implementation**" +
    + Click here to expand this section. + + * `_deepsparse` + - Environment variables: + - *DIVISION*: `open` + - *IMPLEMENTATION*: `deepsparse` + - Workflow: + * `_intel` + - Environment variables: + - *IMPLEMENTATION*: `intel` + - Workflow: + * `_mil` + - Environment variables: + - *IMPLEMENTATION*: `mil` + - Workflow: + * `_nvidia` + - Environment variables: + - *IMPLEMENTATION*: `nvidia` + - Workflow: + * `_qualcomm` + - Environment variables: + - *IMPLEMENTATION*: `qualcomm` + - Workflow: + * `_reference` + - Environment variables: + - *IMPLEMENTATION*: `reference` + - Workflow: + * `_tflite-cpp` + - Environment variables: + - *IMPLEMENTATION*: `tflite_cpp` + - Workflow: + +
    + + + * Group "**power**" +
    + Click here to expand this section. + + * **`_performance-only`** (default) + - Workflow: + * `_power` + - Environment variables: + - *POWER*: `True` + - Workflow: + +
    + + + * Group "**sut**" +
    + Click here to expand this section. + + * `_macbookpro-m1` + - Environment variables: + - *CATEGORY*: `edge` + - *DIVISION*: `closed` + - Workflow: + * `_orin.32g` + - Environment variables: + - *CATEGORY*: `edge` + - *DIVISION*: `closed` + - Workflow: + * `_phoenix` + - Environment variables: + - *CATEGORY*: `edge,datacenter` + - *DIVISION*: `closed` + - Workflow: + * `_sapphire-rapids.24c` + - Environment variables: + - *CATEGORY*: `edge,datacenter` + - *DIVISION*: `closed` + - Workflow: + +
    + + +#### Default variations + +`_performance-only` +#### Default environment + +
    +Click here to expand this section. + +These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. + + +
    + +___ +### Script workflow, dependencies and native scripts + +
+Click here to expand this section.
+
+ 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/_cm.yaml)
+ 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/customize.py)***
+ 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/_cm.yaml)
+ 1. ***Run native script if exists***
+   * [run-bert-macos.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-bert-macos.sh)
+   * [run-bert.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-bert.sh)
+   * [run-cpp-implementation.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-cpp-implementation.sh)
+   * [run-mobilenet-models.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-mobilenet-models.sh)
+   * [run-nvidia-4090.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-nvidia-4090.sh)
+   * [run-nvidia-a100.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-nvidia-a100.sh)
+   * [run-nvidia-t4.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-nvidia-t4.sh)
+   * [run-pruned-bert.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-pruned-bert.sh)
+   * [run-reference-models.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-reference-models.sh)
+   * [run-resnet50-macos.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-resnet50-macos.sh)
+   * [run-resnet50.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/run-resnet50.sh)
+ 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/_cm.yaml)
+ 1. ***Run "postprocess" function from [customize.py](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/customize.py)***
+ 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/cm4mlops/tree/main/script/run-all-mlperf-models/_cm.yaml)
    + +___ +### Script output +`cmr "run natively all mlperf-models[,variations]" -j` +#### New environment keys (filter) + +#### New environment keys auto-detected from customize + +___ +### Maintainers + +* [Open MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md) \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/_cm.yaml new file mode 100644 index 000000000..a53b73a19 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/_cm.yaml @@ -0,0 +1,130 @@ +uid: 8d3cd46f54464810 +alias: run-all-mlperf-models + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: false + +category: MLPerf benchmark support + +tags: +- run +- natively +- all +- mlperf-models + +variations: + + mil: + group: implementation + env: + IMPLEMENTATION: mil + default_env: + MODELS: resnet50,retinanet + BACKENDS: onnxruntime + DEVICES: cpu,cuda + + reference: + group: implementation + env: + IMPLEMENTATION: reference + default_env: + MODELS: resnet50,retinanet,bert-99,3d-unet-99,rnnt + CATEGORY: edge + + nvidia: + group: implementation + env: + IMPLEMENTATION: nvidia + default_env: + MODELS: resnet50,retinanet,bert-99,bert-99.9,3d-unet-99,rnnt,gptj-99,gptj-99.9,dlrmv2-99,dlrmv2-99.9 + BACKENDS: tensorrt + DEVICES: cuda + + qualcomm: + group: implementation + env: + IMPLEMENTATION: qualcomm + default_env: + MODELS: resnet50,retinanet,bert-99,bert-99.9 + DIVISION: closed + BACKENDS: glow + DEVICES: qaic + + intel: + group: implementation + env: + IMPLEMENTATION: intel + default_env: + MODELS: resnet50,retinanet,bert-99,3d-unet-99,rnnt + DIVISION: closed + BACKENDS: pytorch + DEVICES: cpu + + deepsparse: + group: implementation + env: + DIVISION: open + IMPLEMENTATION: deepsparse + default_env: + MODELS: bert-99 + BACKENDS: deepsparse + DEVICES: cpu + + tflite-cpp: + group: implementation + env: + IMPLEMENTATION: tflite_cpp + default_env: + MODELS: mobilenet,efficientnet + CATEGORY: edge + DIVISION: open + BACKENDS: tflite + DEVICES: cpu + + performance-only: + group: power + default: true + + power: + group: power + env: + POWER: yes + default_env: + POWER_SERVER_IP: 192.168.0.15 + POWER_SERVER_PORT: 4950 + + + phoenix: + group: sut + env: + CATEGORY: edge,datacenter + DIVISION: closed + state: + resnet50: + cpu: + onnxruntime: + offline_target_qps: 250 + + phoenix,reference: + default_env: + DEVICES: cpu,cuda + + orin.32g: + group: sut + env: + CATEGORY: edge + DIVISION: closed + + sapphire-rapids.24c: + group: sut + env: + CATEGORY: edge,datacenter + DIVISION: closed + + macbookpro-m1: + group: sut + env: + CATEGORY: edge + DIVISION: closed diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/customize.py b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/customize.py new file mode 100644 index 000000000..367a60d63 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/customize.py @@ -0,0 +1,123 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils 
+import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + state = i['state'] + meta = i['meta'] + script_path = i['run_script_input']['path'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + models = env['MODELS'].split(",") + + backends = env.get('BACKENDS') + if backends: + backends = backends.split(",") + + devices = env.get('DEVICES') + if devices: + devices = devices.split(",") + + print(backends) + implementation = env['IMPLEMENTATION'] + + power = env.get('POWER', '') + + if str(power).lower() in ["yes", "true"]: + POWER_STRING = " --power yes --adr.mlperf-power-client.power_server=" + \ + env.get('POWER_SERVER', '192.168.0.15') + " --adr.mlperf-power-client.port=" + \ + env.get('POWER_SERVER_PORT', '4950') + " " + else: + POWER_STRING = "" + + if not devices: + return { + 'return': 1, 'error': 'No device specified. Please set one or more (comma separated) of {cpu, qaic, cuda, rocm} for --env.DEVICES=<>'} + + for model in models: + env['MODEL'] = model + cmds = [] + run_script_content = '#!/bin/bash\nsource ' + \ + os.path.join(script_path, "run-template.sh") + + if not backends: + if implementation == "reference": + if model == "resnet50": + backends = "tf,onnxruntime" + elif model == "retinanet": + backends = "onnxruntime,pytorch" + elif "bert" in model: + backends = "tf,onnxruntime,pytorch" + elif "3d-unet" in model: + backends = "tf,onnxruntime,pytorch" + elif model == "rnnt": + backends = "pytorch" + elif "gptj" in model: + backends = "pytorch" + elif "stable-diffusion-xl" in model: + backends = "pytorch" + elif "llama2-70b" in model: + backends = "pytorch" + backends = backends.split(",") + + for backend in backends: + + for device in devices: + offline_target_qps = ( + ((state.get( + model, + {})).get( + device, + {})).get( + backend, + {})).get('offline_target_qps') + if offline_target_qps: + pass + else: # try to do a test run with reasonable number of samples to get and record the actual system performance + if device == "cpu": + if model == "resnet50": + test_query_count = 1000 + else: + test_query_count = 100 + else: + if model == "resnet50": + test_query_count = 10000 + else: + test_query_count = 1000 + cmd = f'run_test "{backend}" "{test_query_count}" "{implementation}" "{device}" "$find_performance_cmd"' + cmds.append(cmd) + # second argument is unused for submission_cmd + cmd = f'run_test "{backend}" "100" "{implementation}" "{device}" "$submission_cmd"' + cmds.append(cmd) + run_file_name = 'tmp-' + model + '-run' + run_script_content += "\n\n" + "\n\n".join(cmds) + with open(os.path.join(script_path, run_file_name + ".sh"), 'w') as f: + f.write(run_script_content) + print(cmds) + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-bert-macos.sh b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-bert-macos.sh new file mode 100644 index 000000000..5d46fd113 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-bert-macos.sh @@ -0,0 +1,75 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? 
+} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division="closed" +model="bert-99" +device="cpu" +category="edge" +rerun="$rerun" +function run_test() { + backend=$1 + test_query_count=$2 + implementation=$3 + device=$4 + run "$5" +} +power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' +power="" +#Add your run commands here... +find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' + +submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +# run "$CM_RUN_CMD" +run_test "onnxruntime" "100" "reference" "cpu" "$find_performance_cmd" +run_test "tf" "100" "reference" "cpu" "$find_performance_cmd" +run_test "pytorch" "200" "reference" "cpu" "$find_performance_cmd" + +scenario="SingleStream" +run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd_scenario" +run_test "tf" "100" "reference" "cpu" "$submission_cmd_scenario" +run_test "pytorch" "100" "reference" "cpu" "$submission_cmd_scenario" +scenario="Offline" +division="closed" +run_test "tf" "100" "reference" "cpu" "$submission_cmd_scenario" diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-bert.sh b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-bert.sh new file mode 100644 index 000000000..08cddadde --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-bert.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? 
+} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division="closed" +model="bert-99" +device="cpu" +category="edge" +rerun="$rerun" + +function run_test() { + backend=$1 + test_query_count=$2 + implementation=$3 + device=$4 + run "$5" +} +power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' + +#Add your run commands here... +find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' + +submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +# run "$CM_RUN_CMD" +run_test "onnxruntime" "20" "reference" "cpu" "$find_performance_cmd" +run_test "tf" "20" "reference" "cpu" "$find_performance_cmd" +run_test "pytorch" "200" "reference" "cpu" "$find_performance_cmd" +run_test "onnxruntime" "10000" "reference" "cuda" "$find_performance_cmd" +run_test "tf" "10000" "reference" "cuda" "$find_performance_cmd" +run_test "pytorch" "10000" "reference" "cuda" "$find_performance_cmd" + +run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd" +#run_test "tf" "100" "reference" "cpu" "$submission_cmd" +run_test "pytorch" "100" "reference" "cpu" "$submission_cmd" +run_test "onnxruntime" "100" "reference" "cuda" "$submission_cmd " +run_test "tf" "100" "reference" "cuda" "$submission_cmd" +run_test "pytorch" "100" "reference" "cuda" "$submission_cmd" + diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-cpp-implementation.sh b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-cpp-implementation.sh new file mode 100644 index 000000000..704abff2d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-cpp-implementation.sh @@ -0,0 +1,163 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + 
test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division="closed" +#Add your run commands here... +# run "$CM_RUN_CMD" + +POWER=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 " +POWER="" + +run "cm run script --tags=set,system,performance,mode" + +#cpp +run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \ +--adr.compiler.tags=gcc \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=2000 " + +run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \ +--adr.compiler.tags=gcc \ +--category=edge --division=open --scenario=Offline --quiet" + + +run "cm run script --tags=generate-run-cmds,inference,_submission \ +--model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \ +--scenario=Offline \ +--category=edge --division=$division --quiet \ +--adr.compiler.tags=gcc \ +--execution-mode=valid \ +--skip_submission_generation=yes \ +${POWER} \ +--results_dir=$HOME/results_dir" + +run "cm run script --tags=generate-run-cmds,inference,_submission \ +--model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \ +--scenario=Offline \ +--category=edge --division=$division --quiet \ +--adr.compiler.tags=gcc \ +--execution-mode=valid \ +--skip_submission_generation=yes \ +${POWER} \ +--results_dir=$HOME/results_dir" + +run "cm run script --tags=generate-run-cmds,inference,_submission \ +--model=resnet50 --implementation=cpp --device=cpu --backend=onnxruntime \ +--scenario=SingleStream \ +--category=edge --division=$division --quiet \ +--adr.compiler.tags=gcc \ +--execution-mode=valid \ +--skip_submission_generation=yes \ +${POWER} \ +--results_dir=$HOME/results_dir" + +run "cm run script --tags=generate-run-cmds,inference,_submission \ +--model=retinanet --implementation=cpp --device=cpu --backend=onnxruntime \ +--scenario=SingleStream \ +--category=edge --division=$division --quiet \ +--adr.compiler.tags=gcc \ +--execution-mode=valid \ +--skip_submission_generation=yes \ +${POWER} \ +--results_dir=$HOME/results_dir" + +# GPU + +run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ +--adr.compiler.tags=gcc \ +--test_query_count=20000 \ +--category=edge --division=open --scenario=Offline --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ +--adr.compiler.tags=gcc \ +--test_query_count=2000 \ +--category=edge --division=open --scenario=Offline --quiet" + + +run "cm run script --tags=generate-run-cmds,inference,_submission \ +--scenario=Offline \ +--model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \ +--category=edge --division=$division --quiet \ +--adr.compiler.tags=gcc \ +--execution-mode=valid \ +--skip_submission_generation=yes \ +${POWER} \ +--results_dir=$HOME/results_dir" + +run "cm run script --tags=generate-run-cmds,inference,_submission \ +--model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \ +--scenario=Offline \ +--category=edge --division=$division --quiet \ +--adr.compiler.tags=gcc \ +--execution-mode=valid \ 
+--skip_submission_generation=yes \
+${POWER} \
+--results_dir=$HOME/results_dir"
+
+
+run "cm run script --tags=generate-run-cmds,inference,_submission \
+--model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \
+--scenario=SingleStream \
+--category=edge --division=$division --quiet \
+--adr.compiler.tags=gcc \
+--execution-mode=valid \
+--skip_submission_generation=yes \
+${POWER} \
+--results_dir=$HOME/results_dir"
+
+run "cm run script --tags=generate-run-cmds,inference,_submission \
+--model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \
+--scenario=SingleStream \
+--category=edge --division=$division --quiet \
+--adr.compiler.tags=gcc \
+--execution-mode=valid \
+--skip_submission_generation=yes \
+${POWER} \
+--results_dir=$HOME/results_dir"
+
+#multistream
+run "cm run script --tags=generate-run-cmds,inference,_submission \
+--model=resnet50 --implementation=cpp --device=cuda --backend=onnxruntime \
+--scenario=MultiStream \
+--category=edge --division=$division --quiet \
+--adr.compiler.tags=gcc \
+--execution-mode=valid \
+--skip_submission_generation=yes \
+${POWER} \
+--results_dir=$HOME/results_dir"
+
+run "cm run script --tags=generate-run-cmds,inference,_submission \
+--model=retinanet --implementation=cpp --device=cuda --backend=onnxruntime \
+--scenario=MultiStream \
+--category=edge --division=$division --quiet \
+--adr.compiler.tags=gcc \
+--execution-mode=valid \
+--skip_submission_generation=yes \
+${POWER} \
+--results_dir=$HOME/results_dir"
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-mobilenet-models.sh b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-mobilenet-models.sh
new file mode 100644
index 000000000..41497d56d
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-mobilenet-models.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+#To export any variable
+#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
+
+#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+
+
+
+function exit_if_error() {
+  test $? -eq 0 || exit $?
+}
+
+function run() {
+  echo "Running: "
+  echo "$1"
+  echo ""
+  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+    eval "$1"
+    exit_if_error
+  fi
+}
+POWER=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4940 "
+POWER=""
+extra_option=""
+extra_tags=""
+#extra_option=" --adr.mlperf-inference-implementation.compressed_dataset=on"
+#extra_tags=",_only-fp32"
+
+
+#Add your run commands here...
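+# Note: the commands below run each benchmark in three passes
+# (accuracy-only, performance-only with optional ${POWER} measurement, and
+# _populate-readme), first for plain TFLite and then for the ArmNN/NEON variant.
+# A hypothetical fp32-only accuracy pass using the toggles above would be:
+#   extra_tags=",_only-fp32"
+#   cm run script --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags --adr.compiler.tags=gcc --results_dir=$HOME/results_dir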
+# run "$CM_RUN_CMD" +run "cm run script --tags=run,mobilenet-models,_tflite,_accuracy-only$extra_tags \ +--adr.compiler.tags=gcc \ +${extra_option} \ +--results_dir=$HOME/results_dir" + +run "cm run script --tags=run,mobilenet-models,_tflite,_performance-only$extra_tags \ +${POWER} \ +--adr.compiler.tags=gcc \ +${extra_option} \ +--results_dir=$HOME/results_dir" + +run "cm run script --tags=run,mobilenet-models,_tflite,_populate-readme$extra_tags \ +${POWER} \ +--adr.compiler.tags=gcc \ +${extra_option} \ +--results_dir=$HOME/results_dir" + +run "cm run script --tags=run,mobilenet-models,_tflite,_armnn,_neon,_accuracy-only$extra_tags \ +--adr.compiler.tags=gcc \ +${extra_option} \ +--results_dir=$HOME/results_dir" + +run "cm run script --tags=run,mobilenet-models,_tflite,_armnn,_neon,_performance-only$extra_tags \ +${POWER} \ +${extra_option} \ +--adr.compiler.tags=gcc \ +--results_dir=$HOME/results_dir" + +run "cm run script --tags=run,mobilenet-models,_tflite,_armnn,_neon,_populate-readme$extra_tags \ +${POWER} \ +${extra_option} \ +--adr.compiler.tags=gcc \ +--results_dir=$HOME/results_dir" diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-nvidia-4090.sh b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-nvidia-4090.sh new file mode 100644 index 000000000..033fa9d9e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-nvidia-4090.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +function run_model() { + model="$1" + test_query_count="$2" + run "$3" +} +division="open" +division="closed" +device="cuda" +backend="tensorrt" +implementation="nvidia-original" +category="datacenter-edge" +category="edge" +power="" +power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15" +#Add your run commands here... 
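+# Note: the command templates below are deliberately single-quoted, so $model,
+# $implementation, $device, $backend and $test_query_count expand only when
+# run()/run_model() eval the string; run_model sets "model" and
+# "test_query_count" per call before running the template.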
+# run "$CM_RUN_CMD" +find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count' + +#run "resnet50" "100000" "${find_performance_cmd}" +#run "retinanet" "10000" "${find_performance_cmd}" +#run "rnnt" "100000" "${find_performance_cmd}" +#run "bert-99" "20000" "${find_performance_cmd}" +#run "3d-unet" "30" "${find_performance_cmd}" + + +submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --execution-mode=valid --implementation=$implementation --device=$device --backend=$backend --results_dir=$HOME/results_dir \ +--category=$category --division=$division --skip_submission_generation=yes --quiet $power' + +#run_model "bert-99.9" "10" "${submission_cmd} --offline_target_qps=1680 --server_target_qps=1520" +run_model "resnet50" "10" "${submission_cmd} --offline_target_qps=45000 --server_target_qps=38000 --singlestream_target_latency=0.2 --multistream_target_latency=0.4" +run_model "rnnt" "10" "${submission_cmd} --offline_target_qps=15200 --server_target_qps=14150 --singlestream_target_latency=23" +run_model "retinanet" "10" "${submission_cmd} --offline_target_qps=620 --server_target_qps=590 --singlestream_target_latency=2 --multistream_target_latency=14" +run_model "bert-99" "10" "${submission_cmd} --offline_target_qps=4100 --server_target_qps=3950 --singlestream_target_latency=1" +run_model "3d-unet-99.9" "10" "${submission_cmd} --offline_target_qps=4 --singlestream_target_latency=433 --env.CM_MLPERF_USE_MAX_DURATION=no" diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-nvidia-a100.sh b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-nvidia-a100.sh new file mode 100644 index 000000000..4b5fb40fc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-nvidia-a100.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +function run_model() { + model="$1" + test_query_count="$2" + run "$3" +} +division="closed" +device="cuda" +backend="tensorrt" +implementation="nvidia-original" +category="edge" +power="" +connection_type="sxm" + +#Add your run commands here... 
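+# Note: as in the other run-nvidia-*.sh scripts, this is a two-stage flow:
+# _find-performance estimates the achievable Offline QPS per model in the open
+# division, then _submission,_all-scenarios produces valid closed-division runs;
+# $connection_type ("sxm" above) is passed to the NVIDIA harness as a variation tag.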
+# run "$CM_RUN_CMD"
+find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \
+--model=$model --implementation=$implementation --device=$device --backend=$backend \
+--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count'
+
+run_model "resnet50" "100000" "${find_performance_cmd}"
+run_model "retinanet" "10000" "${find_performance_cmd}"
+run_model "rnnt" "100000" "${find_performance_cmd}"
+run_model "bert-99" "20000" "${find_performance_cmd}"
+run_model "3d-unet-99.9" "30" "${find_performance_cmd}"
+
+
+submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \
+--model=$model --implementation=$implementation --device=$device --backend=$backend \
+--category=$category --division=$division --quiet --adr.nvidia-harness.tags=_${connection_type} $power'
+
+run_model "resnet50" "10" "${submission_cmd}"
+run_model "retinanet" "10" "${submission_cmd}"
+run_model "rnnt" "10" "${submission_cmd}"
+run_model "bert-99" "10" "${submission_cmd}"
+run_model "3d-unet-99.9" "10" "${submission_cmd} --env.CM_MLPERF_USE_MAX_DURATION='no'"
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-nvidia-t4.sh b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-nvidia-t4.sh
new file mode 100644
index 000000000..835c1adad
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-nvidia-t4.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+#To export any variable
+#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
+
+#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+
+
+
+function exit_if_error() {
+  test $? -eq 0 || exit $?
+}
+
+function run() {
+  echo "Running: "
+  echo "$1"
+  echo ""
+  if [[ ${CM_FAKE_RUN} != 'yes' ]]; then
+    eval "$1"
+    exit_if_error
+  fi
+}
+function run_model() {
+  model="$1"
+  test_query_count="$2"
+  run "$3"
+}
+division="closed"
+device="cuda"
+backend="tensorrt"
+implementation="nvidia-original"
+category="edge,datacenter"
+
+#Add your run commands here...
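+# Note: compared to the A100/RTX-4090 scripts above, the test query counts below
+# are scaled down for the slower T4, and category="edge,datacenter" makes the
+# generated submissions cover both system categories.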
+# run "$CM_RUN_CMD"
+find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \
+--model=$model --implementation=$implementation --device=$device --backend=$backend \
+--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count'
+
+run_model "resnet50" "30000" "${find_performance_cmd}"
+run_model "retinanet" "2000" "${find_performance_cmd}"
+run_model "rnnt" "20000" "${find_performance_cmd}"
+run_model "bert-99" "10000" "${find_performance_cmd}"
+run_model "bert-99.9" "5000" "${find_performance_cmd}"
+run_model "3d-unet" "10" "${find_performance_cmd}"
+
+
+submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \
+--model=$model --implementation=$implementation --device=$device --backend=$backend \
+--category=$category --division=$division --quiet'
+
+run_model "resnet50" "10" "${submission_cmd}"
+run_model "retinanet" "10" "${submission_cmd}"
+run_model "rnnt" "10" "${submission_cmd}"
+run_model "bert-99" "10" "${submission_cmd}"
+run_model "bert-99.9" "10" "${submission_cmd}"
+run_model "3d-unet" "10" "${submission_cmd}"
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-pruned-bert.sh b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-pruned-bert.sh
new file mode 100644
index 000000000..16444e0db
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-pruned-bert.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+
+#not working
+#"zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none" \
+#"zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none" \
+#zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90-none \
+#zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/base_quant-none \
+#"zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85-none" \
+#"zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/base_quant-none" \
+#"zoo:nlp/question_answering/oberta-medium/pytorch/huggingface/squad/base-none" \
+#"zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/base-none" \
+#"zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/base-none" \
+#"zoo:nlp/question_answering/roberta-large/pytorch/huggingface/squad/base-none" \
+#"zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned95-none" \
+#"zoo:nlp/question_answering/distilbert-none/pytorch/huggingface/squad/pruned90-none" \
+#"zoo:nlp/question_answering/oberta-small/pytorch/huggingface/squad/base-none" \
+#"zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/base_quant-none" \
+#"zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none" \
+
+zoo_stub_list=( \
+"zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni" \
+"zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni" \
+"zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none" \
+"zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none" \
+"zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni" \
+"zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none" \
+"zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none" \
+"zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none" \
+"zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni" \ +"zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni" \ +"zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none" \ +"zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none" \ +"zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none" \ +"zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none" \ +) + +rerun="" +power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --env.CM_MLPERF_SKIP_POWER_CHECKS=yes" +power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15" +power="" +max_batchsize=1 +max_batchsize=128 +scenario="SingleStream" +scenario="Offline" + +if [[ $scenario == "Offline" ]]; then +for stub in ${zoo_stub_list[@]}; do +cmd="cm run script --tags=run,mlperf,inference,generate-run-cmds,_find-performance \ + --adr.python.version_min=3.8 \ + --implementation=reference \ + --model=bert-99 \ + --precision=int8 \ + --backend=deepsparse \ + --device=cpu \ + --scenario=Offline \ + --test_query_count=15000 \ + --adr.mlperf-inference-implementation.max_batchsize=$max_batchsize \ + --env.CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB=$stub \ + ${rerun} \ + --quiet" + echo ${cmd} + eval ${cmd} +done +fi + +for stub in ${zoo_stub_list[@]}; do + cmd="cm run script --tags=run,mlperf,inference,generate-run-cmds \ + --adr.python.version_min=3.8 \ + --adr.compiler.tags=gcc \ + --implementation=reference \ + --model=bert-99 \ + --precision=int8 \ + --backend=deepsparse \ + --device=cpu \ + --scenario=$scenario \ + --execution_mode=valid \ + --adr.mlperf-inference-implementation.max_batchsize=$max_batchsize \ + ${power} \ + --env.CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB=$stub \ + --quiet" + echo ${cmd} + eval ${cmd} +done diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-reference-models.sh b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-reference-models.sh new file mode 100644 index 000000000..41898f145 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-reference-models.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division="closed" +#Add your run commands here... 
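+# Note: each model below is first profiled with _find-performance (open division,
+# Offline scenario) and then submitted with _submission,_all-scenarios in the
+# $division set above; resnet50 uses the onnxruntime backend while the other
+# models run on the pytorch backend.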
+# run "$CM_RUN_CMD" +run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=100" + +run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=rnnt --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=open --scenario=Offline --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=retinanet --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=open --scenario=Offline --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=bert-99 --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=open --scenario=Offline --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=3d-unet-99 --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=open --scenario=Offline --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=resnet50 --implementation=reference --device=cpu --backend=onnxruntime \ +--category=edge --division=$division --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=rnnt --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=$division --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=retinanet --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=$division --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=bert-99 --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=$division --quiet" + +run "cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=3d-unet-99 --implementation=reference --device=cpu --backend=pytorch \ +--category=edge --division=$division --quiet" + diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-resnet50-macos.sh b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-resnet50-macos.sh new file mode 100644 index 000000000..8d00ddc79 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-resnet50-macos.sh @@ -0,0 +1,70 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division="closed" +model="resnet50" +device="cpu" +category="edge" +rerun="$rerun" +function run_test() { + backend=$1 + test_query_count=$2 + implementation=$3 + device=$4 + run "$5" +} +power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' +power="" +#Add your run commands here... 
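+# Note: this macOS variant runs CPU backends only and keeps power measurement
+# disabled (power="" above); the --rerun flag appended to the _find-performance
+# calls below presumably forces a fresh measurement instead of reusing a cached
+# result (it maps to CM_RERUN in the run-mlperf-inference-app input mapping).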
+find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' + +submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +# run "$CM_RUN_CMD" +run_test "onnxruntime" "6000" "reference" "cpu" "$find_performance_cmd --rerun" +run_test "tf" "6000" "reference" "cpu" "$find_performance_cmd --rerun" + +run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd" +run_test "tf" "100" "reference" "cpu" "$submission_cmd" + diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-resnet50.sh b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-resnet50.sh new file mode 100644 index 000000000..df2789d8c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-resnet50.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division="closed" +model="resnet50" +device="cpu" +category="edge" +rerun="$rerun" +function run_test() { + backend=$1 + test_query_count=$2 + implementation=$3 + device=$4 + run "$5" +} +power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' + +#Add your run commands here... 
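+# Note: the runs below cover the reference implementation on cpu and cuda plus
+# the tflite-cpp implementation (SingleStream only, optionally with a compressed
+# dataset); the "scenario" variable is set immediately before each
+# $submission_cmd_scenario call because it expands only at eval time.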
+find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' + +submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +# run "$CM_RUN_CMD" +run_test "onnxruntime" "200" "reference" "cpu" "$find_performance_cmd" +run_test "tf" "200" "reference" "cpu" "$find_performance_cmd" +run_test "onnxruntime" "10000" "reference" "cuda" "$find_performance_cmd" +run_test "tf" "20000" "reference" "cuda" "$find_performance_cmd" + +run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd" +run_test "tf" "100" "reference" "cpu" "$submission_cmd" +scenario="SingleStream" +run_test "tflite" "100" "tflite-cpp" "cpu" "$submission_cmd_scenario --adr.compiler.tags=gcc" +run_test "tflite" "100" "tflite-cpp" "cpu" "$submission_cmd_scenario --adr.compiler.tags=gcc --adr.mlperf-inference-implementation.compressed_dataset=on" +run_test "onnxruntime" "100" "reference" "cuda" "$submission_cmd " +scenario="Offline" +run_test "tf" "100" "reference" "cuda" "$submission_cmd_scenario" +scenario="SingleStream" +run_test "tf" "100" "reference" "cuda" "$submission_cmd_scenario" + +run_test "onnxruntime" "100" "reference" "cpu" "$readme_cmd" +run_test "tf" "100" "reference" "cpu" "$readme_cmd" +run_test "tflite" "100" "tflite-cpp" "cpu" "$readme_cmd_single --adr.compiler.tags=gcc --scenario=SingleStream" +run_test "tflite" "100" "tflite-cpp" "cpu" "$readme_cmd_single --adr.compiler.tags=gcc --scenario=SingleStream --adr.mlperf-inference-implementation.compressed_dataset=on" +run_test "onnxruntime" "100" "reference" "cuda" "$readme_cmd --scenario=SingleStream" +run_test "tf" "100" "reference" "cuda" "$readme_cmd_single --scenario=SingleStream" +run_test "tf" "100" "reference" "cuda" "$readme_cmd_single --scenario=Offline" diff --git a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-retinanet-sh b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-retinanet-sh new file mode 100644 index 000000000..6f0bac9c5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/run-retinanet-sh @@ -0,0 +1,86 @@ 
+#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division="closed" +model="retinanet" +device="cpu" +category="edge" +rerun="$rerun" +function run_test() { + backend=$1 + test_query_count=$2 + implementation=$3 + device=$4 + run "$5" +} +power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' + +#Add your run commands here... +find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' + +submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +# run "$CM_RUN_CMD" +run_test "onnxruntime" "50" "reference" "cpu" "$find_performance_cmd" +run_test "pytorch" "100" "reference" "cpu" "$find_performance_cmd" +run_test "onnxruntime" "1000" "reference" "cuda" "$find_performance_cmd" +run_test "pytorch" "1000" "reference" "cuda" "$find_performance_cmd" + +scenario=SingleStream +run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd_scenario" +scenario=Offline +run_test "onnxruntime" "100" "reference" "cpu" "$submission_cmd_scenario" +scenario=SingleStream +run_test "pytorch" "100" "reference" "cpu" "$submission_cmd_scenario" +scenario=Offline +run_test "pytorch" "100" "reference" "cpu" "$submission_cmd_scenario" +scenario=SingleStream +run_test "onnxruntime" "100" "reference" "cuda" "$submission_cmd_scenario" +scenario=Offline +run_test "onnxruntime" "100" "reference" "cuda" "$submission_cmd_scenario" +scenario=SingleStream +run_test "pytorch" "100" "reference" "cuda" "$submission_cmd_scenario" +scenario=Offline +run_test "pytorch" "100" "reference" "cuda" "$submission_cmd_scenario" + diff --git 
a/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/template.sh b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/template.sh new file mode 100644 index 000000000..42ecda5ad --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-all-mlperf-models/template.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +division=$DIVISION +model=$MODEL +device=$DEVICE +category=$CATEGORY +rerun=$RERUN + +function run_test() { + backend=$1 + test_query_count=$2 + implementation=$3 + device=$4 + run "$5" +} + +#power=' --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --adr.mlperf-power-client.port=4950 ' +power=${POWER_STRING} + +#Add your run commands here... +find_performance_cmd='cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=edge --division=open --scenario=Offline --quiet --test_query_count=$test_query_count $rerun' + +submission_cmd='cm run script --tags=generate-run-cmds,inference,_submission,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +submission_cmd_scenario='cm run script --tags=generate-run-cmds,inference,_submission --scenario=$scenario \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd_single='cm run script --tags=generate-run-cmds,inference,_populate-readme \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + +readme_cmd='cm run script --tags=generate-run-cmds,inference,_populate-readme,_all-scenarios \ +--model=$model --implementation=$implementation --device=$device --backend=$backend \ +--category=$category --division=$division --quiet --results_dir=$HOME/results_dir \ +--skip_submission_generation=yes --execution-mode=valid $power' + diff --git a/cmx4mlops/cmx4mlops/repo/script/run-docker-container/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/run-docker-container/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-docker-container/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-docker-container/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/run-docker-container/README-extra.md
new file mode 100644
index 000000000..b930ef964
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-docker-container/README-extra.md
@@ -0,0 +1,15 @@
+This script runs a docker container and launches the given CM script inside it.
+If the container image does not exist, a corresponding build is initiated via CM dependencies.
+
+## How to Run
+```bash
+cm run script \
+--tags=run,docker,container
+```
+### Options
+1. `--script_tags="get,gcc"`: Script tags for the CM script to be run inside the docker container.
+   If this is not set, the CM command run inside the docker container is `cm version`.
+2. `--cm_repo=ctuning@mlcommons-ck`: To use a different repo for CM scripts, e.g. "ctuning@mlcommons-ck". Default: `mlcommons@cm4mlops`
+3. `--base="ubuntu:22.04"`: Specify the base image for the Dockerfile. Default: "ubuntu:20.04"
+4. `--recreate=yes`: To recreate the docker image even if it already exists. Default: "no"
+5. `--adr.build-docker-image.tags=_cache`: To use the build cache when building the docker image. Default: "" (`nocache`)
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-docker-container/README.md b/cmx4mlops/cmx4mlops/repo/script/run-docker-container/README.md
new file mode 100644
index 000000000..b1633be4e
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-docker-container/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Docker-automation/run-docker-container](https://docs.mlcommons.org/cm4mlops/scripts/Docker-automation/run-docker-container) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-docker-container/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/run-docker-container/_cm.yaml
new file mode 100644
index 000000000..5135070b9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-docker-container/_cm.yaml
@@ -0,0 +1,70 @@
+alias: run-docker-container
+uid: 1e0c884107514b46
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+tags:
+- run
+- docker
+- container
+
+cache: false
+
+category: Docker automation
+
+default_env:
+  CM_DOCKER_DETACHED_MODE: 'yes'
+  CM_DOCKER_REUSE_EXISTING_CONTAINER: 'no'
+  CM_DOCKER_PRIVILEGED_MODE: 'no'
+
+input_mapping:
+  all_gpus: CM_DOCKER_ADD_ALL_GPUS
+  num_gpus: CM_DOCKER_ADD_NUM_GPUS
+  base: CM_DOCKER_IMAGE_BASE
+  cache: CM_DOCKER_CACHE
+  cm_repo: CM_MLOPS_REPO
+  detached: CM_DOCKER_DETACHED_MODE
+  device: CM_DOCKER_ADD_DEVICE
+  docker_image_base: CM_DOCKER_IMAGE_BASE
+  docker_base_image: CM_DOCKER_IMAGE_BASE
+  keep_detached: CM_KEEP_DETACHED_CONTAINER
+  docker_os: CM_DOCKER_OS
+  docker_os_version: CM_DOCKER_OS_VERSION
+  extra_run_args: CM_DOCKER_EXTRA_RUN_ARGS
+  fake_run_option: CM_DOCKER_FAKE_RUN_OPTION
+  gh_token: CM_GH_TOKEN
+  image_name: CM_DOCKER_IMAGE_NAME
+  image_repo: CM_DOCKER_IMAGE_REPO
+  image_tag: CM_DOCKER_IMAGE_TAG
+  image_tag_extra: CM_DOCKER_IMAGE_TAG_EXTRA
+  interactive: CM_DOCKER_INTERACTIVE_MODE
+  it: CM_DOCKER_INTERACTIVE
+  mounts: CM_DOCKER_VOLUME_MOUNTS
+  pass_user_id: CM_DOCKER_PASS_USER_ID
+  pass_user_group: CM_DOCKER_PASS_USER_GROUP
+  port_maps: CM_DOCKER_PORT_MAPS
+  post_run_cmds: CM_DOCKER_POST_RUN_COMMANDS
+  pre_run_cmds: CM_DOCKER_PRE_RUN_COMMANDS
+  real_run: CM_REAL_RUN
+  recreate: CM_DOCKER_IMAGE_RECREATE
+  run_cmd: CM_DOCKER_RUN_CMD
+  run_cmd_extra: CM_DOCKER_RUN_CMD_EXTRA
+  save_script: CM_DOCKER_SAVE_SCRIPT
+  script_tags: CM_DOCKER_RUN_SCRIPT_TAGS
+  shm_size: CM_DOCKER_SHM_SIZE
+
+new_env_keys:
+ 
- 'CM_DOCKER_CONTAINER_ID' + +prehook_deps: +- names: + - build-docker-image + skip_if_any_env: + CM_DOCKER_IMAGE_EXISTS: + - 'yes' + CM_DOCKER_SKIP_BUILD: + - 'yes' + CM_DOCKER_CONTAINER_ID: + - on + tags: build,docker,image diff --git a/cmx4mlops/cmx4mlops/repo/script/run-docker-container/customize.py b/cmx4mlops/cmx4mlops/repo/script/run-docker-container/customize.py new file mode 100644 index 000000000..6a0ce7ce5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-docker-container/customize.py @@ -0,0 +1,390 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import cmind as cm +import os +import subprocess +from os.path import exists + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + interactive = env.get('CM_DOCKER_INTERACTIVE_MODE', '') + + if str(interactive).lower() in ['yes', 'true', '1']: + env['CM_DOCKER_DETACHED_MODE'] = 'no' + + if 'CM_DOCKER_RUN_SCRIPT_TAGS' not in env: + env['CM_DOCKER_RUN_SCRIPT_TAGS'] = "run,docker,container" + CM_RUN_CMD = "cm version" + else: + CM_RUN_CMD = "cm run script --tags=" + \ + env['CM_DOCKER_RUN_SCRIPT_TAGS'] + ' --quiet' + + r = cm.access({'action': 'search', + 'automation': 'script', + 'tags': env['CM_DOCKER_RUN_SCRIPT_TAGS']}) + if len(r['list']) < 1: + raise Exception( + 'CM script with tags ' + + env['CM_DOCKER_RUN_SCRIPT_TAGS'] + + ' not found!') + + PATH = r['list'][0].path + os.chdir(PATH) + + env['CM_DOCKER_RUN_CMD'] = CM_RUN_CMD + + # Updating Docker info + update_docker_info(env) + + docker_image_repo = env['CM_DOCKER_IMAGE_REPO'] + docker_image_base = env['CM_DOCKER_IMAGE_BASE'] + docker_image_name = env['CM_DOCKER_IMAGE_NAME'] + docker_image_tag = env['CM_DOCKER_IMAGE_TAG'] + + DOCKER_CONTAINER = docker_image_repo + "/" + \ + docker_image_name + ":" + docker_image_tag + + print('') + print('Checking existing Docker container:') + print('') + CMD = f"""docker ps --filter "ancestor={DOCKER_CONTAINER}" """ + if os_info['platform'] == 'windows': + CMD += " 2> nul" + else: + CMD += " 2> /dev/null" + print(' ' + CMD) + print('') + + try: + docker_container = subprocess.check_output( + CMD, shell=True).decode("utf-8") + except Exception as e: + return { + 'return': 1, 'error': 'Docker is either not installed or not started:\n{}'.format(e)} + + output_split = docker_container.split("\n") + if len(output_split) > 1 and str(env.get('CM_DOCKER_REUSE_EXISTING_CONTAINER', + '')).lower() in ["1", "true", "yes"]: # container exists + out = output_split[1].split(" ") + existing_container_id = out[0] + print(f"Reusing existing container {existing_container_id}") + env['CM_DOCKER_CONTAINER_ID'] = existing_container_id + + else: + if env.get('CM_DOCKER_CONTAINER_ID', '') != '': + del (env['CM_DOCKER_CONTAINER_ID']) # not valid ID + + CMD = "docker images -q " + DOCKER_CONTAINER + + if os_info['platform'] == 'windows': + CMD += " 2> nul" + else: + CMD += " 2> /dev/null" + + print('') + print('Checking Docker images:') + print('') + print(' ' + CMD) + print('') + + try: + docker_image = subprocess.check_output( + CMD, shell=True).decode("utf-8") + except Exception as e: + return { + 'return': 1, 
'error': 'Docker is either not installed or not started:\n{}'.format(e)} + + recreate_image = env.get('CM_DOCKER_IMAGE_RECREATE', '') + + if recreate_image != 'yes': + if docker_image: + print("Docker image exists with ID: " + docker_image) + env['CM_DOCKER_IMAGE_EXISTS'] = "yes" + + # elif recreate_image == "yes": + # env['CM_DOCKER_IMAGE_RECREATE'] = "no" + + return {'return': 0} + + +def postprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + # Updating Docker info + update_docker_info(env) + + docker_image_repo = env['CM_DOCKER_IMAGE_REPO'] + docker_image_base = env['CM_DOCKER_IMAGE_BASE'] + docker_image_name = env['CM_DOCKER_IMAGE_NAME'] + docker_image_tag = env['CM_DOCKER_IMAGE_TAG'] + + run_cmds = [] + mount_cmds = [] + port_map_cmds = [] + run_opts = '' + + # not completed as su command breaks the execution sequence + # + # if env.get('CM_DOCKER_PASS_USER_ID', '') != '': + # run_opts += " --user 0 " + # run_cmds.append(f"(usermod -u {os.getuid()} cmuser || echo pass)") + # run_cmds.append(f"(chown -R {os.getuid()}:{os.getuid()} /home/cmuser || echo pass)") + # run_cmds.append(" ( su cmuser )") + # run_cmds.append('export PATH="/home/cmuser/venv/cm/bin:$PATH"') + + if env.get('CM_DOCKER_PRE_RUN_COMMANDS', []): + for pre_run_cmd in env['CM_DOCKER_PRE_RUN_COMMANDS']: + run_cmds.append(pre_run_cmd) + + if env.get('CM_DOCKER_VOLUME_MOUNTS', []): + for mounts in env['CM_DOCKER_VOLUME_MOUNTS']: + mount_cmds.append(mounts) + + if env.get('CM_DOCKER_PASS_USER_GROUP', '') != '': + run_opts += " --group-add $(id -g $USER) " + + if env.get('CM_DOCKER_ADD_DEVICE', '') != '': + run_opts += " --device=" + env['CM_DOCKER_ADD_DEVICE'] + + if env.get('CM_DOCKER_PRIVILEGED_MODE', '') == 'yes': + run_opts += " --privileged " + + if env.get('CM_DOCKER_ADD_NUM_GPUS', '') != '': + run_opts += " --gpus={}".format(env['CM_DOCKER_ADD_NUM_GPUS']) + elif env.get('CM_DOCKER_ADD_ALL_GPUS', '') != '': + run_opts += " --gpus=all" + + if env.get('CM_DOCKER_SHM_SIZE', '') != '': + run_opts += " --shm-size={}".format(env['CM_DOCKER_SHM_SIZE']) + + if env.get('CM_DOCKER_EXTRA_RUN_ARGS', '') != '': + run_opts += env['CM_DOCKER_EXTRA_RUN_ARGS'] + + if env.get('CM_DOCKER_PORT_MAPS', []): + for ports in env['CM_DOCKER_PORT_MAPS']: + port_map_cmds.append(ports) + + run_cmd = env['CM_DOCKER_RUN_CMD'] + " " + \ + env.get('CM_DOCKER_RUN_CMD_EXTRA', '').replace(":", "=") + run_cmds.append(run_cmd) + if 'CM_DOCKER_POST_RUN_COMMANDS' in env: + for post_run_cmd in env['CM_DOCKER_POST_RUN_COMMANDS']: + run_cmds.append(post_run_cmd) + + run_cmd = " && ".join(run_cmds) + run_cmd = run_cmd.replace("--docker_run_deps", "") + + if mount_cmds: + for i, mount_cmd in enumerate(mount_cmds): + + # Since windows may have 2 :, we search from the right + j = mount_cmd.rfind(':') + if j > 0: + mount_parts = [mount_cmd[:j], mount_cmd[j + 1:]] + else: + return {'return': 1, 'error': 'Can\'t find separator : in a mount string: {}'.format( + mount_cmd)} + +# mount_parts = mount_cmd.split(":") +# if len(mount_parts) != 2: +# return {'return': 1, 'error': 'Invalid mount {} +# specified'.format(mount_parts)} + + host_mount = mount_parts[0] + + if not os.path.exists(host_mount): + os.makedirs(host_mount) + + abs_host_mount = os.path.abspath(mount_parts[0]) + + if abs_host_mount != host_mount or " " in abs_host_mount and not host_mount.startswith( + '"'): + mount_cmds[i] = f"\"{abs_host_mount}\":{mount_parts[1]}" + + mount_cmd_string = " -v " + " -v ".join(mount_cmds) + else: + mount_cmd_string = '' + run_opts += mount_cmd_string + + if 
port_map_cmds: + port_map_cmd_string = " -p " + "-p ".join(port_map_cmds) + else: + port_map_cmd_string = '' + + run_opts += port_map_cmd_string + + # Currently have problem running Docker in detached mode on Windows: + detached = str( + env.get( + 'CM_DOCKER_DETACHED_MODE', + '')).lower() in [ + 'yes', + 'true', + "1"] +# if detached and os_info['platform'] != 'windows': + if detached: + if os_info['platform'] == 'windows': + return { + 'return': 1, 'error': 'Currently we don\'t support running Docker containers in detached mode on Windows - TBD'} + + existing_container_id = env.get('CM_DOCKER_CONTAINER_ID', '') + if existing_container_id: + CMD = f"ID={existing_container_id} && docker exec $ID bash -c '" + run_cmd + "'" + else: + CONTAINER = f"docker run -dt {run_opts} --rm {docker_image_repo}/{docker_image_name}:{docker_image_tag} bash" + CMD = f"ID=`{CONTAINER}` && docker exec $ID bash -c '{run_cmd}'" + + if False and str(env.get('CM_KEEP_DETACHED_CONTAINER', '')).lower() not in [ + 'yes', "1", 'true']: + CMD += " && docker kill $ID >/dev/null" + + CMD += ' && echo "ID=$ID"' + + print('=========================') + print("Container launch command:") + print('') + print(CMD) + print('') + print("Running " + run_cmd + " inside docker container") + + record_script({'cmd': CMD, 'env': env}) + + print('') + # Execute the command + try: + result = subprocess.run( + CMD, + shell=True, + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True) + print("Command Output:", result.stdout) + except subprocess.CalledProcessError as e: + print("Error Occurred!") + print(f"Command: {e.cmd}") + print(f"Return Code: {e.returncode}") + print(f"Error Output: {e.stderr}") + return {'return': 1, 'error': e.stderr} + + docker_out = result.stdout + # if docker_out != 0: + # return {'return': docker_out, 'error': 'docker run failed'} + + lines = docker_out.split("\n") + + for line in lines: + if line.startswith("ID="): + ID = line[3:] + env['CM_DOCKER_CONTAINER_ID'] = ID + + print(docker_out) + + else: + x = "'" + if os_info['platform'] == 'windows': + x = '"' + + x1 = '' + x2 = '' + run_cmd_prefix = "" + if env.get('CM_DOCKER_INTERACTIVE_MODE', '') in ['yes', 'True', True]: + run_cmd_prefix = "(" + x1 = '-it' + x2 = " && bash ) || bash" + + CONTAINER = "docker run " + x1 + " --entrypoint " + x + x + " " + run_opts + \ + " " + docker_image_repo + "/" + docker_image_name + ":" + docker_image_tag + CMD = CONTAINER + " bash -c " + x + run_cmd_prefix + run_cmd + x2 + x + + print('') + print("Container launch command:") + print('') + print(CMD) + + record_script({'cmd': CMD, 'env': env}) + + print('') + docker_out = os.system(CMD) + if docker_out != 0: + return {'return': docker_out, 'error': 'docker run failed'} + + return {'return': 0} + + +def record_script(i): + + cmd = i['cmd'] + env = i['env'] + + files = [] + + dockerfile_path = env.get('CM_DOCKERFILE_WITH_PATH', '') + if dockerfile_path != '' and os.path.isfile(dockerfile_path): + files.append(dockerfile_path + '.run.bat') + files.append(dockerfile_path + '.run.sh') + + save_script = env.get('CM_DOCKER_SAVE_SCRIPT', '') + if save_script != '': + if save_script.endswith('.bat') or save_script.endswith('.sh'): + files.append(save_script) + else: + files.append(save_script + '.bat') + files.append(save_script + '.sh') + + for filename in files: + with open(filename, 'w') as f: + f.write(cmd + '\n') + + return {'return': 0} + + +def update_docker_info(env): + + # Updating Docker info + docker_image_repo = env.get('CM_DOCKER_IMAGE_REPO', 
'local') + env['CM_DOCKER_IMAGE_REPO'] = docker_image_repo + + docker_image_base = env.get('CM_DOCKER_IMAGE_BASE') + if not docker_image_base: + if env.get("CM_DOCKER_OS", '') != '': + docker_image_base = env["CM_DOCKER_OS"] + \ + ":" + env["CM_DOCKER_OS_VERSION"] + else: + docker_image_base = "ubuntu:22.04" + + env['CM_DOCKER_IMAGE_BASE'] = docker_image_base + + if env.get('CM_DOCKER_IMAGE_NAME', '') != '': + docker_image_name = env['CM_DOCKER_IMAGE_NAME'] + else: + docker_image_name = 'cm-script-' + \ + env['CM_DOCKER_RUN_SCRIPT_TAGS'].replace( + ',', '-').replace('_', '-').replace('+', 'plus') + env['CM_DOCKER_IMAGE_NAME'] = docker_image_name + + docker_image_tag_extra = env.get('CM_DOCKER_IMAGE_TAG_EXTRA', '-latest') + + docker_image_tag = env.get('CM_DOCKER_IMAGE_TAG', docker_image_base.replace( + ':', '-').replace('_', '').replace("/", "-") + docker_image_tag_extra) + env['CM_DOCKER_IMAGE_TAG'] = docker_image_tag + + return diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/README-extra.md new file mode 100644 index 000000000..b91bf8e31 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/README-extra.md @@ -0,0 +1,21 @@ +# About + +This is a universal CM interface to run and customize all MLPerf inference benchmarks. +It is composed of the [portable automation recipes (CM scripts)](https://access.cknowledge.org/playground/?action=scripts). + +Check [this documentation](https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference) +and the [CM GUI](https://access.cknowledge.org/playground/?action=howtorun&bench_uid=39877bb63fb54725) +to learn how to run MLPerf benchmarks via CM. + + + +# Authors + +* [Grigori Fursin](https://cKnowledge.org/gfursin) +* [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh) + + +# Acknowledgments + +We thank [the community](../../../CONTRIBUTING.md) for their suggestions and contributions! + diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/README.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/README.md new file mode 100644 index 000000000..31070423b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/run-mlperf-inference-app](https://docs.mlcommons.org/cm4mlops/scripts/Modular-MLPerf-inference-benchmark-pipeline/run-mlperf-inference-app) for the documentation of this CM script.
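The mount handling in postprocess() above splits each mount string on the right-most colon because Windows host paths contain a drive colon. A minimal standalone sketch of that rule, where `split_mount` is a hypothetical helper (the script inlines this logic with `rfind`):

```python
# Hypothetical helper mirroring the right-most-colon rule in postprocess()
# above: "C:\host\dir:/container/dir" must be split at the LAST ':' so the
# Windows drive colon stays inside the host path.
def split_mount(mount_cmd: str):
    j = mount_cmd.rfind(':')
    if j <= 0:
        raise ValueError(f"Can't find separator : in a mount string: {mount_cmd}")
    return mount_cmd[:j], mount_cmd[j + 1:]

print(split_mount(r'C:\Users\cm\data:/data'))  # ('C:\\Users\\cm\\data', '/data')
print(split_mount('/home/cm/data:/data'))      # ('/home/cm/data', '/data')
```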
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/_cm.yaml new file mode 100644 index 000000000..cf390bc3a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/_cm.yaml @@ -0,0 +1,617 @@ +alias: run-mlperf-inference-app +uid: 4a5d5b13fd7e4ac8 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: Modular MLPerf inference benchmark pipeline + +developers: "[Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin)" + +gui: + title: CM GUI to run MLPerf inference benchmarks and prepare submissions + +clean_output_files: +- open.tar.gz +- summary.csv +- summary.json + +tags: +- run +- common +- generate-run-cmds +- run-mlperf +- run-mlperf-inference +- vision +- mlcommons +- mlperf +- inference +- reference + +tags_help: "run-mlperf,inference" + +default_env: + CM_MLPERF_IMPLEMENTATION: reference + CM_MLPERF_MODEL: resnet50 + CM_MLPERF_RUN_STYLE: test + CM_MLPERF_SKIP_SUBMISSION_GENERATION: no + CM_DOCKER_PRIVILEGED_MODE: yes + +input_mapping: + api_server: CM_MLPERF_INFERENCE_API_SERVER + backend: CM_MLPERF_BACKEND + batch_size: CM_MLPERF_LOADGEN_MAX_BATCHSIZE + beam_size: GPTJ_BEAM_SIZE + category: CM_MLPERF_SUBMISSION_SYSTEM_TYPE + clean: CM_MLPERF_CLEAN_ALL + compliance: CM_MLPERF_LOADGEN_COMPLIANCE + custom_system_nvidia: CM_CUSTOM_SYSTEM_NVIDIA + dashboard_wb_project: CM_MLPERF_DASHBOARD_WANDB_PROJECT + dashboard_wb_user: CM_MLPERF_DASHBOARD_WANDB_USER + debug: CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM + device: CM_MLPERF_DEVICE + division: CM_MLPERF_SUBMISSION_DIVISION + dlrm_data_path: DLRM_DATA_PATH + docker: CM_MLPERF_USE_DOCKER + dump_version_info: CM_DUMP_VERSION_INFO + save_console_log: CM_SAVE_CONSOLE_LOG + execution_mode: CM_MLPERF_RUN_STYLE + find_performance: CM_MLPERF_FIND_PERFORMANCE_MODE + framework: CM_MLPERF_BACKEND + docker_keep_alive: CM_DOCKER_CONTAINER_KEEP_ALIVE + get_platform_details: CM_GET_PLATFORM_DETAILS + gpu_name: CM_NVIDIA_GPU_NAME + hw_name: CM_HW_NAME + pip_loadgen: CM_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP + hw_notes_extra: CM_MLPERF_SUT_SW_NOTES_EXTRA + imagenet_path: IMAGENET_PATH + implementation: CM_MLPERF_IMPLEMENTATION + lang: CM_MLPERF_IMPLEMENTATION + min_query_count: CM_MLPERF_INFERENCE_MIN_QUERY_COUNT + max_query_count: CM_MLPERF_INFERENCE_MAX_QUERY_COUNT + mode: CM_MLPERF_LOADGEN_MODE + model: CM_MLPERF_MODEL + multistream_target_latency: CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY + network: CM_NETWORK_LOADGEN + nvidia_system_name: CM_NVIDIA_SYSTEM_NAME + offline_target_qps: CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS + output_dir: OUTPUT_BASE_DIR + output_summary: MLPERF_INFERENCE_SUBMISSION_SUMMARY + output_tar: MLPERF_INFERENCE_SUBMISSION_TAR_FILE + performance_sample_count: CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT + power: CM_SYSTEM_POWER + precision: CM_MLPERF_MODEL_PRECISION + preprocess_submission: CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR + push_to_github: CM_MLPERF_RESULT_PUSH_TO_GITHUB + pull_changes: CM_MLPERF_INFERENCE_PULL_CODE_CHANGES + pull_inference_changes: CM_MLPERF_INFERENCE_PULL_SRC_CHANGES + readme: CM_MLPERF_README + regenerate_accuracy_file: CM_MLPERF_REGENERATE_ACCURACY_FILE + regenerate_files: CM_REGENERATE_MEASURE_FILES + rerun: CM_RERUN + results_dir: OUTPUT_BASE_DIR + results_git_url: CM_MLPERF_RESULTS_GIT_REPO_URL + run_checker: CM_RUN_SUBMISSION_CHECKER + run_style: CM_MLPERF_RUN_STYLE + scenario: CM_MLPERF_LOADGEN_SCENARIO + server_target_qps: 
CM_MLPERF_LOADGEN_SERVER_TARGET_QPS + singlestream_target_latency: CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY + skip_submission_generation: CM_MLPERF_SKIP_SUBMISSION_GENERATION + skip_truncation: CM_SKIP_TRUNCATE_ACCURACY + submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR + submitter: CM_MLPERF_SUBMITTER + sut_servers: CM_NETWORK_LOADGEN_SUT_SERVERS + sw_notes_extra: CM_MLPERF_SUT_SW_NOTES_EXTRA + system_type: CM_MLPERF_SUBMISSION_SYSTEM_TYPE + target_latency: CM_MLPERF_LOADGEN_TARGET_LATENCY + target_qps: CM_MLPERF_LOADGEN_TARGET_QPS + test_query_count: CM_TEST_QUERY_COUNT + threads: CM_NUM_THREADS + sut: CM_MLPERF_INFERENCE_SUT_VARIATION + nvidia_llama2_dataset_file_path: CM_NVIDIA_LLAMA_DATASET_FILE_PATH + tp_size: CM_NVIDIA_TP_SIZE + vllm_model_name: CM_VLLM_SERVER_MODEL_NAME + num_workers: CM_MLPERF_INFERENCE_NUM_WORKERS + max_test_duration: CM_MLPERF_MAX_DURATION_TEST + all_models: CM_MLPERF_ALL_MODELS + criteo_day23_raw_data_path: CM_CRITEO_DAY23_RAW_DATA_PATH + use_dataset_from_host: CM_USE_DATASET_FROM_HOST + use_model_from_host: CM_USE_MODEL_FROM_HOST + rgat_checkpoint_path: RGAT_CHECKPOINT_PATH + +new_state_keys: +- app_mlperf_inference_* +- cm-mlperf-inference-results* + +deps: +- tags: detect,os + skip_if_env: + CM_MLPERF_USE_DOCKER: [ on ] +- tags: detect,cpu + skip_if_env: + CM_MLPERF_USE_DOCKER: [ on ] +- names: + - python + - python3 + tags: get,python3 + skip_if_env: + CM_MLPERF_USE_DOCKER: [ on ] +- names: + - inference-src + tags: get,mlcommons,inference,src +- tags: get,sut,description + skip_if_env: + CM_MLPERF_USE_DOCKER: [ on ] + +- tags: get,mlperf,inference,results,dir + names: + - get-mlperf-inference-results-dir + enable_if_env: + CM_MLPERF_USE_DOCKER: [ off ] + skip_if_env: + OUTPUT_BASE_DIR: [ on ] +- tags: install,pip-package,for-cmind-python,_package.tabulate +- tags: get,mlperf,inference,utils + +#We use this script as a command generator to run docker via app-mlperf-inference script +docker_off: + mounts: + - ${{ INSTALL_DATA_PATH }}:/install_data + - ${{ DATA_PATH }}:/data + - ${{ CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }}:${{ CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH }} + - ${{ GPTJ_CHECKPOINT_PATH }}:${{ GPTJ_CHECKPOINT_PATH }} + skip_run_cmd: 'no' + shm_size: '32gb' + extra_run_args: ' --ulimit memlock=-1 --cap-add SYS_ADMIN --cap-add SYS_TIME --security-opt apparmor=unconfined --security-opt seccomp=unconfined' + os: ubuntu + real_run: false + run: true + interactive: true + docker_input_mapping: + imagenet_path: IMAGENET_PATH + gptj_checkpoint_path: GPTJ_CHECKPOINT_PATH + criteo_preprocessed_path: CRITEO_PREPROCESSED_PATH + results_dir: RESULTS_DIR + submission_dir: SUBMISSION_DIR + dlrm_data_path: DLRM_DATA_PATH + intel_gptj_int8_model_path: CM_MLPERF_INFERENCE_INTEL_GPTJ_INT8_MODEL_PATH + +variations: + + accuracy-only: + default_variations: + submission-generation-style: full + env: + CM_MLPERF_LOADGEN_MODE: accuracy + CM_MLPERF_SUBMISSION_RUN: 'yes' + CM_RUN_MLPERF_ACCURACY: 'on' + CM_RUN_SUBMISSION_CHECKER: 'no' + group: submission-generation + + all-modes: + env: + CM_MLPERF_LOADGEN_ALL_MODES: 'yes' + group: mode + + all-scenarios: + env: + CM_MLPERF_LOADGEN_ALL_SCENARIOS: 'yes' + + compliance: + env: + CM_MLPERF_LOADGEN_COMPLIANCE: 'yes' + + dashboard: + default_gui: false + env: + CM_MLPERF_DASHBOARD: 'on' + + find-performance: + env: + CM_MLPERF_FIND_PERFORMANCE_MODE: 'yes' + CM_MLPERF_LOADGEN_ALL_MODES: 'no' + CM_MLPERF_LOADGEN_MODE: performance + CM_MLPERF_RESULT_PUSH_TO_GITHUB: false + group: submission-generation + + 
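To make the `input_mapping` section above concrete: each CLI flag passed to this script is copied into the corresponding `CM_*` environment variable before the workflow runs. A minimal sketch of that translation under this assumption; `apply_input_mapping` is hypothetical (the real resolver lives in the CM script automation), and only a subset of the mapping is shown:

```python
# Hypothetical sketch of how input_mapping turns CLI flags into CM_* env vars.
input_mapping = {
    'device': 'CM_MLPERF_DEVICE',
    'model': 'CM_MLPERF_MODEL',
    'backend': 'CM_MLPERF_BACKEND',
    'scenario': 'CM_MLPERF_LOADGEN_SCENARIO',
}

def apply_input_mapping(flags: dict, mapping: dict) -> dict:
    env = {}
    for flag, value in flags.items():
        if flag in mapping:
            env[mapping[flag]] = value  # e.g. --device=cuda -> CM_MLPERF_DEVICE=cuda
    return env

# e.g. cm run script --tags=run-mlperf,inference --device=cuda --model=resnet50
print(apply_input_mapping({'device': 'cuda', 'model': 'resnet50'}, input_mapping))
# -> {'CM_MLPERF_DEVICE': 'cuda', 'CM_MLPERF_MODEL': 'resnet50'}
```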
full: + add_deps_recursive: + coco2014-original: + tags: _full + coco2014-preprocessed: + tags: _full + imagenet-original: + tags: _full + imagenet-preprocessed: + tags: _full + openimages-original: + tags: _full + openimages-preprocessed: + tags: _full + openorca-original: + tags: _full + openorca-preprocessed: + tags: _full + coco2014-dataset: + tags: _full + igbh-dataset: + tags: _full + env: + CM_MLPERF_SUBMISSION_GENERATION_STYLE: full + group: submission-generation-style + + performance-only: + default_variations: + submission-generation-style: full + env: + CM_MLPERF_LOADGEN_MODE: performance + CM_MLPERF_SUBMISSION_RUN: 'yes' + CM_RUN_SUBMISSION_CHECKER: 'no' + group: submission-generation + + populate-readme: + base: + - all-modes + default_variations: + submission-generation-style: full + env: + CM_MLPERF_README: 'yes' + CM_MLPERF_SUBMISSION_RUN: 'yes' + CM_RUN_SUBMISSION_CHECKER: 'no' + group: submission-generation + + scc24-base: + base: + - short + env: + CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX4: scc24-base + CM_DOCKER_IMAGE_NAME: scc24 + CM_MLPERF_INFERENCE_MIN_QUERY_COUNT: 50 + adr: + coco2014-preprocessed: + tags: _size.50,_with-sample-ids + coco2014-dataset: + tags: _size.50,_with-sample-ids + nvidia-preprocess-data: + extra_cache_tags: "scc24-base" + deps: + - tags: clean,nvidia,scratch,_sdxl,_downloaded-data + extra_cache_rm_tags: scc24-main + + scc24-main: + base: + - short + adr: + coco2014-preprocessed: + tags: _size.500,_with-sample-ids + coco2014-dataset: + tags: _size.500,_with-sample-ids + nvidia-preprocess-data: + extra_cache_tags: "scc24-main" + env: + CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX4: scc24-main + CM_DOCKER_IMAGE_NAME: scc24 + CM_MLPERF_INFERENCE_MIN_QUERY_COUNT: 500 + deps: + - tags: clean,nvidia,scratch,_sdxl,_downloaded-data + extra_cache_rm_tags: scc24-base + + r2.1: + env: + CM_MLPERF_INFERENCE_VERSION: '2.1' + CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r2.1_default + group: benchmark-version + + r3.0: + env: + CM_MLPERF_INFERENCE_VERSION: '3.0' + CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r3.0_default + group: benchmark-version + + r3.1: + env: + CM_MLPERF_INFERENCE_VERSION: '3.1' + CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r3.1_default + group: benchmark-version + + r4.0-dev: + env: + CM_MLPERF_INFERENCE_VERSION: '4.0-dev' + CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r4.0-dev_default + group: benchmark-version + + r4.0: + env: + CM_MLPERF_INFERENCE_VERSION: '4.0' + CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r4.0_default + group: benchmark-version + adr: + get-mlperf-inference-results-dir: + tags: _version.r4_0-dev + get-mlperf-inference-submission-dir: + tags: _version.r4_0-dev + mlperf-inference-nvidia-scratch-space: + tags: _version.r4_0-dev + + r4.1-dev: + default: true + env: + CM_MLPERF_INFERENCE_VERSION: '4.1-dev' + CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r4.1-dev_default + group: benchmark-version + adr: + get-mlperf-inference-results-dir: + tags: _version.r4_1-dev + get-mlperf-inference-submission-dir: + tags: _version.r4_1-dev + mlperf-inference-nvidia-scratch-space: + tags: _version.r4_1-dev + + r4.1: + env: + CM_MLPERF_INFERENCE_VERSION: '4.1' + CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r4.1_default + adr: + get-mlperf-inference-results-dir: + tags: _version.r4_1 + get-mlperf-inference-submission-dir: + tags: _version.r4_1 + mlperf-inference-nvidia-scratch-space: + tags: _version.r4_1 + group: benchmark-version + + short: + add_deps_recursive: + submission-checker: + tags: _short-run + default: 'true' + env: + CM_MLPERF_SUBMISSION_DIVISION: open + 
CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR: off + CM_MLPERF_SUBMISSION_GENERATION_STYLE: short + group: submission-generation-style + + performance-and-accuracy: + default: true + base: + - all-modes + default_variations: + submission-generation-style: full + group: submission-generation + + submission: + base: + - all-modes + default_gui: true + default_variations: + submission-generation-style: full + env: + CM_MLPERF_LOADGEN_COMPLIANCE: 'yes' + CM_MLPERF_SUBMISSION_RUN: 'yes' + CM_RUN_MLPERF_ACCURACY: 'on' + CM_RUN_SUBMISSION_CHECKER: 'yes' + CM_TAR_SUBMISSION_DIR: 'yes' + group: submission-generation + post_deps: + - names: + - submission-generator + skip_if_env: + CM_MLPERF_SKIP_SUBMISSION_GENERATION: + - 'yes' + tags: generate,mlperf,inference,submission + +versions: + master: {} + r2.1: {} + +input_description: + division: + choices: + - 'open' + - 'closed' + default: 'open' + desc: MLPerf division + sort: 50 + category: + choices: + - 'edge' + - 'datacenter' + - 'network' + default: 'edge' + desc: MLPerf category + sort: 60 + device: + choices: + - cpu + - cuda + - rocm + - qaic + default: cpu + desc: MLPerf device + sort: 100 + model: + choices: + - resnet50 + - retinanet + - bert-99 + - bert-99.9 + - 3d-unet-99 + - 3d-unet-99.9 + - rnnt + - dlrm-v2-99 + - dlrm-v2-99.9 + - gptj-99 + - gptj-99.9 + - sdxl + - llama2-70b-99 + - llama2-70b-99.9 + - mixtral-8x7b + - mobilenet + - efficientnet + - rgat + default: resnet50 + desc: MLPerf model + sort: 200 + precision: + choices: + - float32 + - float16 + - bfloat16 + - int8 + - uint8 + default: '' + desc: MLPerf model precision + sort: 250 + implementation: + choices: + - mlcommons-python + - mlcommons-cpp + - nvidia + - intel + - qualcomm + - ctuning-cpp-tflite + default: mlcommons-python + desc: MLPerf implementation + sort: 300 + backend: + choices: + - onnxruntime + - tf + - pytorch + - deepsparse + - tensorrt + - glow + - tvm-onnx + default: onnxruntime + desc: MLPerf framework (backend) + sort: 400 + scenario: + choices: + - Offline + - Server + - SingleStream + - MultiStream + default: Offline + desc: MLPerf scenario + sort: 500 + mode: + choices: + - '' + - accuracy + - performance + default: '' + desc: MLPerf benchmark mode + sort: 600 + execution_mode: + choices: + - test + - fast + - valid + default: test + desc: MLPerf execution mode + sort: 700 + sut: + default: '' + desc: SUT configuration (if known) + sort: 750 + submitter: + default: CTuning + desc: Submitter name (without space) + sort: 800 + results_dir: + desc: Folder path to store results (defaults to the current working directory) + default: '' + sort: 900 + submission_dir: + desc: Folder path to store MLPerf submission tree + default: '' + sort: 1000 + + adr.compiler.tags: + desc: Compiler for loadgen and any C/C++ part of implementation + adr.inference-src-loadgen.env.CM_GIT_URL: + default: '' + desc: Git URL for MLPerf inference sources to build LoadGen (to enable non-reference + implementations) + adr.inference-src.env.CM_GIT_URL: + default: '' + desc: Git URL for MLPerf inference sources to run benchmarks (to enable non-reference + implementations) + adr.mlperf-inference-implementation.max_batchsize: + desc: Maximum batchsize to be used + adr.mlperf-inference-implementation.num_threads: + desc: Number of threads (reference & C++ implementation only) + adr.python.name: + desc: Python virtual environment name (optional) + adr.python.version: + desc: Force Python version (must have all system deps) + adr.python.version_min: + default: '3.8' + desc: Minimal Python 
version + power: + choices: + - 'yes' + - 'no' + default: 'no' + desc: Measure power + sort: 5000 + adr.mlperf-power-client.power_server: + default: '192.168.0.15' + desc: MLPerf Power server IP address + sort: 5005 + adr.mlperf-power-client.port: + default: 4950 + desc: MLPerf Power server port + sort: 5010 + clean: + boolean: true + default: false + desc: Clean run + compliance: + choices: + - 'yes' + - 'no' + default: 'no' + desc: Whether to run compliance tests (applicable only for closed division) + dashboard_wb_project: + desc: W&B dashboard project + default: cm-mlperf-dse-testing + dashboard_wb_user: + desc: W&B dashboard user + default: cmind + hw_name: + desc: MLPerf hardware name (for example "gcp.c3_standard_8", "nvidia_orin", "lenovo_p14s_gen_4_windows_11", "macbook_pro_m1_2", "thundercomm_rb6" ...) + multistream_target_latency: + desc: Set MultiStream target latency + offline_target_qps: + desc: Set LoadGen Offline target QPS + quiet: + boolean: true + default: true + desc: Quiet run (select default values for all questions) + server_target_qps: + desc: Set Server target QPS + singlestream_target_latency: + desc: Set SingleStream target latency + target_latency: + desc: Set Target latency + target_qps: + desc: Set LoadGen target QPS + j: + desc: Print results dictionary to console at the end of the run + boolean: true + default: false + repro: + desc: Record input/output/state/info files to make it easier to reproduce results + boolean: true + default: false + time: + desc: Print script execution time at the end of the run + boolean: true + default: true + debug: + desc: Debug this script + boolean: true + default: false + +#repo_to_report_errors: https://github.com/mlcommons/inference/issues diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/customize.py b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/customize.py new file mode 100644 index 000000000..e620df68b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/customize.py @@ -0,0 +1,1030 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import json +import shutil +import subprocess +import cmind as cm +import copy +import mlperf_utils + +summary_ext = ['.csv', '.json', '.xlsx'] + +########################################################################## + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + const = i.get('const', {}) + + inp = i['input'] + state = i['state'] + script_path = i['run_script_input']['path'] + + if env.get('CM_RUN_DOCKER_CONTAINER', '') == "yes": + return {'return': 0} + + if env.get('CM_DOCKER_IMAGE_NAME', '') == 'scc24': + if env.get("CM_MLPERF_IMPLEMENTATION", "reference") == "reference": + env['CM_DOCKER_IMAGE_NAME'] = "scc24-reference" + elif "nvidia" in env.get("CM_MLPERF_IMPLEMENTATION", "reference"): + env['CM_DOCKER_IMAGE_NAME'] = "scc24-nvidia" + + dump_version_info = env.get('CM_DUMP_VERSION_INFO', True) + + system_meta = state.get('CM_SUT_META', {}) + if system_meta: + env['CM_SUT_META_EXISTS'] = "yes" + + env['CM_MODEL'] = env['CM_MLPERF_MODEL'] + + # Clean MLPerf 
inference output tar file if non-standard + x = env.get('MLPERF_INFERENCE_SUBMISSION_TAR_FILE', '') + if x != '' and os.path.isfile(x): + os.remove(x) + + # Clean MLPerf inference submission summary files + x = env.get('MLPERF_INFERENCE_SUBMISSION_SUMMARY', '') + if x != '': + for y in summary_ext: + z = x + y + if os.path.isfile(z): + os.remove(z) + + if env.get('CM_MLPERF_SUBMISSION_SYSTEM_TYPE', '') != '': + system_type = env['CM_MLPERF_SUBMISSION_SYSTEM_TYPE'] + system_meta['system_type'] = system_type + + if env.get('CM_MLPERF_SUBMISSION_DIVISION', '') != '': + division = env['CM_MLPERF_SUBMISSION_DIVISION'] + system_meta['division'] = division + + if system_meta.get('division', '') != "closed": + # no compliance runs needed for open division + env["CM_MLPERF_LOADGEN_COMPLIANCE"] = "no" + + clean = False + + if 'CM_MLPERF_CLEAN_ALL' in env: + clean = True + if 'CM_MLPERF_CLEAN_SUBMISSION_DIR' not in env: + env['CM_MLPERF_CLEAN_SUBMISSION_DIR'] = "yes" + if 'CM_RERUN' not in env: + env['CM_RERUN'] = "yes" + + if str(env.get('CM_SYSTEM_POWER', 'no')).lower( + ) != "no" or env.get('CM_MLPERF_POWER', '') == "yes": + power_variation = ",_power" + env['CM_MLPERF_POWER'] = "yes" + else: + power_variation = "" + + if env.get('CM_RUN_STYLE', + '') == "valid" and 'CM_RUN_MLPERF_ACCURACY' not in env: + env['CM_RUN_MLPERF_ACCURACY'] = "on" + + print("Using MLCommons Inference source from " + + env['CM_MLPERF_INFERENCE_SOURCE']) + + if 'CM_MLPERF_LOADGEN_EXTRA_OPTIONS' not in env: + env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] = "" + + if 'CM_MLPERF_LOADGEN_MODES' not in env: + if 'CM_MLPERF_LOADGEN_MODE' not in env: + env['CM_MLPERF_LOADGEN_MODE'] = "performance" + + if 'CM_MLPERF_LOADGEN_SCENARIOS' not in env: + if 'CM_MLPERF_LOADGEN_SCENARIO' not in env: + env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline" + + if env.get('CM_MLPERF_LOADGEN_ALL_SCENARIOS', '') == "yes": + env['CM_MLPERF_LOADGEN_SCENARIOS'] = get_valid_scenarios( + env['CM_MODEL'], + system_meta.get( + 'system_type', + 'edge'), + env['CM_MLPERF_LAST_RELEASE'], + env['CM_MLPERF_INFERENCE_SOURCE']) + else: + system_meta = {} + env['CM_MLPERF_LOADGEN_SCENARIOS'] = [ + env['CM_MLPERF_LOADGEN_SCENARIO']] + + if env.get('CM_MLPERF_LOADGEN_ALL_MODES', '') == "yes": + env['CM_MLPERF_LOADGEN_MODES'] = ["performance", "accuracy"] + else: + env['CM_MLPERF_LOADGEN_MODES'] = [env['CM_MLPERF_LOADGEN_MODE']] + + if env.get('OUTPUT_BASE_DIR', '') == '': + env['OUTPUT_BASE_DIR'] = env.get( + 'CM_MLPERF_INFERENCE_RESULTS_DIR', os.getcwd()) + + test_list = ["TEST01"] + if env['CM_MODEL'] in ["resnet50", "sdxl"]: + test_list.append("TEST04") + if "gpt" in env['CM_MODEL'] or "llama2-70b" in env['CM_MODEL'] or "mixtral-8x7b" in env['CM_MODEL']: + test_list.remove("TEST01") + # test_list.remove("TEST05") + + if "llama2" in env['CM_MODEL'].lower( + ) or "mixtral-8x7b" in env['CM_MODEL']: + test_list.append("TEST06") + + variation_implementation = "_" + \ + env.get("CM_MLPERF_IMPLEMENTATION", "reference") + variation_model = ",_" + env["CM_MLPERF_MODEL"] + variation_backend = ",_" + \ + env["CM_MLPERF_BACKEND"] if env.get( + "CM_MLPERF_BACKEND", "") != "" else "" + variation_device = ",_" + \ + env["CM_MLPERF_DEVICE"] if env.get( + "CM_MLPERF_DEVICE", "") != "" else "" + variation_run_style = ",_" + env.get("CM_MLPERF_RUN_STYLE", "test") + variation_reproducibility = ",_" + env["CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS"] if env.get( + "CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS", "") != "" else "" + variation_all_models = ",_all-models" if env.get( + "CM_MLPERF_ALL_MODELS", 
"") == "yes" else "" + + if env.get("CM_MLPERF_MODEL_PRECISION", '') != '': + variation_quantization_string = ",_" + env["CM_MLPERF_MODEL_PRECISION"] + else: + variation_quantization_string = "" + + tags = "app,mlperf,inference,generic," + variation_implementation + variation_model + variation_backend + variation_device + \ + variation_run_style + variation_reproducibility + \ + variation_quantization_string + power_variation + variation_all_models + verbose = inp.get('v', False) + print_env = inp.get('print_env', False) + print_deps = inp.get('print_deps', False) + add_deps_recursive = inp.get('add_deps_recursive', {}) + add_deps = inp.get('add_deps', {}) + ad = inp.get('ad', {}) + adr = inp.get('adr', {}) + docker_it = inp.get('docker_it', '') + docker_dt = inp.get('docker_dt', '') + adr_from_meta = i['run_script_input'].get('add_deps_recursive') + + for key in adr_from_meta: + add_deps_recursive[key] = adr_from_meta[key] + + if env.get('CM_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '': + if not add_deps_recursive.get('mlperf-inference-implementation', {}): + add_deps_recursive['mlperf-inference-implementation'] = {} + if add_deps_recursive['mlperf-inference-implementation'].get( + 'tags', '') == '': + add_deps_recursive['mlperf-inference-implementation']['tags'] = '' + else: + add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' + add_deps_recursive['mlperf-inference-implementation']['tags'] += "_batch_size." + \ + env['CM_MLPERF_LOADGEN_MAX_BATCHSIZE'] + + if env.get('CM_MLPERF_INFERENCE_SUT_VARIATION', '') != '': + if not add_deps_recursive.get('mlperf-inference-implementation', {}): + add_deps_recursive['mlperf-inference-implementation'] = {} + if add_deps_recursive['mlperf-inference-implementation'].get( + 'tags', '') == '': + add_deps_recursive['mlperf-inference-implementation']['tags'] = '' + else: + add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' + add_deps_recursive['mlperf-inference-implementation']['tags'] += "_" + \ + env['CM_MLPERF_INFERENCE_SUT_VARIATION'] + + if env.get('CM_NETWORK_LOADGEN', '') != '': + if not add_deps_recursive.get('mlperf-inference-implementation', {}): + add_deps_recursive['mlperf-inference-implementation'] = {} + network_variation_tag = f"_network-{env['CM_NETWORK_LOADGEN']}" + if add_deps_recursive['mlperf-inference-implementation'].get( + 'tags', '') == '': + add_deps_recursive['mlperf-inference-implementation']['tags'] = '' + else: + add_deps_recursive['mlperf-inference-implementation']['tags'] += ',' + add_deps_recursive['mlperf-inference-implementation']['tags'] += network_variation_tag + + if env.get('CM_OUTPUT_FOLDER_NAME', '') == '': + env['CM_OUTPUT_FOLDER_NAME'] = env['CM_MLPERF_RUN_STYLE'] + "_results" + + output_dir = os.path.join( + env['OUTPUT_BASE_DIR'], + env['CM_OUTPUT_FOLDER_NAME']) + if clean: + path_to_clean = output_dir + + print('=========================================================') + print('Cleaning results in {}'.format(path_to_clean)) + if os.path.exists(path_to_clean): + shutil.rmtree(path_to_clean) + + print('=========================================================') + + if str(env.get('CM_MLPERF_USE_DOCKER', '') + ).lower() in ["1", "true", "yes"]: + action = "docker" + # del(env['OUTPUT_BASE_DIR']) + state = {} + docker_extra_input = {} + + # if env.get('CM_HW_NAME'): + # del(env['CM_HW_NAME']) + + for k in inp: + if k.startswith("docker_"): + docker_extra_input[k] = inp[k] + inp = {} + if str(docker_dt).lower() in ["yes", "true", "1"]: + # turning it off for the first run and after 
that we turn it on + env['CM_DOCKER_REUSE_EXISTING_CONTAINER'] = 'no' + env['CM_DOCKER_DETACHED_MODE'] = 'yes' + + if env.get('CM_DOCKER_IMAGE_NAME', '') != '': + docker_extra_input['docker_image_name'] = env['CM_DOCKER_IMAGE_NAME'] + else: + action = "run" + + # local_keys = [ 'CM_MLPERF_SKIP_RUN', 'CM_MLPERF_LOADGEN_QUERY_COUNT', 'CM_MLPERF_LOADGEN_TARGET_QPS', 'CM_MLPERF_LOADGEN_TARGET_LATENCY' ] + + for scenario in env['CM_MLPERF_LOADGEN_SCENARIOS']: + scenario_tags = tags + ",_" + scenario.lower() + env['CM_MLPERF_LOADGEN_SCENARIO'] = scenario + + if scenario == "Offline": + if env.get('CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS'): + env['CM_MLPERF_LOADGEN_TARGET_QPS'] = env['CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS'] + elif scenario == "Server": + if env.get('CM_MLPERF_LOADGEN_SERVER_TARGET_QPS'): + env['CM_MLPERF_LOADGEN_TARGET_QPS'] = env['CM_MLPERF_LOADGEN_SERVER_TARGET_QPS'] + elif scenario == "SingleStream": + if env.get('CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY'): + env['CM_MLPERF_LOADGEN_TARGET_LATENCY'] = env['CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY'] + elif scenario == "MultiStream": + if env.get('CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY'): + env['CM_MLPERF_LOADGEN_TARGET_LATENCY'] = env['CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY'] + + for mode in env['CM_MLPERF_LOADGEN_MODES']: + env['CM_MLPERF_LOADGEN_MODE'] = mode + + env_copy = copy.deepcopy(env) + const_copy = copy.deepcopy(const) + print(f"\nRunning loadgen scenario: {scenario} and mode: {mode}") + ii = {'action': action, 'automation': 'script', 'tags': scenario_tags, 'quiet': 'true', + 'env': env_copy, 'const': const_copy, 'input': inp, 'state': state, 'add_deps': copy.deepcopy(add_deps), 'add_deps_recursive': + copy.deepcopy(add_deps_recursive), 'ad': ad, 'adr': copy.deepcopy(adr), 'v': verbose, 'print_env': print_env, 'print_deps': print_deps, 'dump_version_info': dump_version_info} + + if action == "docker": + for k in docker_extra_input: + ii[k] = docker_extra_input[k] + + r = cm.access(ii) + if r['return'] > 0: + return r + + if env_copy.get('CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR', '') != '': + env['CM_MLPERF_INFERENCE_RESULTS_DIR_'] = env_copy['CM_MLPERF_INFERENCE_FINAL_RESULTS_DIR'] + else: + env['CM_MLPERF_INFERENCE_RESULTS_DIR_'] = os.path.join( + env['OUTPUT_BASE_DIR'], f"{env['CM_MLPERF_RUN_STYLE']}_results") + + if action == "docker": + if str(docker_dt).lower() not in ["yes", "true", "1"]: + print( + f"\nStop Running loadgen scenario: {scenario} and mode: {mode}") + # We run commands interactively inside the docker container + return {'return': 0} + else: + env['CM_DOCKER_REUSE_EXISTING_CONTAINER'] = 'yes' + container_id = env_copy['CM_DOCKER_CONTAINER_ID'] + env['CM_DOCKER_CONTAINER_ID'] = container_id + if state.get('docker', {}): + del (state['docker']) + + if env.get("CM_MLPERF_LOADGEN_COMPLIANCE", "") == "yes": + for test in test_list: + env['CM_MLPERF_LOADGEN_COMPLIANCE_TEST'] = test + env['CM_MLPERF_LOADGEN_MODE'] = "compliance" + ii = {'action': action, 'automation': 'script', 'tags': scenario_tags, 'quiet': 'true', + 'env': copy.deepcopy(env), 'const': copy.deepcopy(const), 'input': inp, 'state': state, 'add_deps': copy.deepcopy(add_deps), 'add_deps_recursive': + copy.deepcopy(add_deps_recursive), 'adr': copy.deepcopy(adr), 'ad': ad, 'v': verbose, 'print_env': print_env, 'print_deps': print_deps, 'dump_version_info': dump_version_info} + if action == "docker": + for k in docker_extra_input: + ii[k] = docker_extra_input[k] + r = cm.access(ii) + if r['return'] > 0: + return r + if 
state.get('docker', {}): + del (state['docker']) + + if env.get('CM_DOCKER_CONTAINER_ID', '') != '' and str(env.get( + 'CM_DOCKER_CONTAINER_KEEP_ALIVE', '')).lower() not in ["yes", "1", "true"]: + container_id = env['CM_DOCKER_CONTAINER_ID'] + CMD = f"docker kill {container_id}" + docker_out = subprocess.check_output(CMD, shell=True).decode("utf-8") + + if state.get("cm-mlperf-inference-results"): + # print(state["cm-mlperf-inference-results"]) + for sut in state["cm-mlperf-inference-results"]: # only one sut will be there + # Better to do this in a stand alone CM script with proper deps but + # currently we manage this by modifying the sys path of the python + # executing CM + from tabulate import tabulate # noqa + + print(sut) + result_table, headers = mlperf_utils.get_result_table( + state["cm-mlperf-inference-results"][sut]) + print(tabulate(result_table, headers=headers, tablefmt="pretty")) + + print( + f"\nThe MLPerf inference results are stored at {output_dir}\n") + + return {'return': 0} + + +def get_valid_scenarios(model, category, mlperf_version, mlperf_path): + + import sys + + submission_checker_dir = os.path.join(mlperf_path, "tools", "submission") + + sys.path.append(submission_checker_dir) + if not os.path.exists(os.path.join( + submission_checker_dir, "submission_checker.py")): + shutil.copy(os.path.join(submission_checker_dir, "submission-checker.py"), os.path.join(submission_checker_dir, + "submission_checker.py")) + + import submission_checker as checker + + if "dlrm-99" in model: + model = model.replace("dlrm-99", "dlrm-v2-99") + if "sdxl" in model: + model = "stable-diffusion-xl" + + config = checker.MODEL_CONFIG + + internal_model_name = config[mlperf_version]["model_mapping"].get( + model, model) + + valid_scenarios = config[mlperf_version]["required-scenarios-" + + category.replace(",", "-")][internal_model_name] + + print( + "Valid Scenarios for " + + model + + " in " + + category + + " category are :" + + str(valid_scenarios)) + + return valid_scenarios + +########################################################################## + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + if env.get('CM_MLPERF_IMPLEMENTATION', '') == 'reference': + x1 = env.get('CM_MLPERF_INFERENCE_SOURCE', '') + x2 = env.get('CM_MLPERF_INFERENCE_CONF_PATH', '') + + if x1 != '' and x2 != '': + print('') + print( + 'Path to the MLPerf inference benchmark reference sources: {}'.format(x1)) + print( + 'Path to the MLPerf inference reference configuration file: {}'.format(x2)) + print('') + + return {'return': 0} + +########################################################################## + + +def load_md(path, path2, name): + + fn = os.path.join(path, path2, name + '.md') + + s = '' + + if os.path.isfile(fn): + r = utils.load_txt(fn) + if r['return'] > 0: + return r + + s = r['string'] + + return {'return': 0, 'string': s} + +########################################################################## + + +def get_url(url, path, path2, name, text): + + name_md = name + '.md' + fn = os.path.join(path, path2, name_md) + + urlx = '' + url_online = '' + if os.path.isfile(fn): + if not url.endswith('/'): + url += '/' + urlx = url + path2 + '/' + name_md + + url_online = '[{}]({})'.format(text, urlx) + + return {'return': 0, 'url_online': url_online} + +########################################################################## + + +def gui(i): + + params = i['params'] + st = i['st'] + + script_meta = i['meta'] + + misc = i['misc_module'] + + script_path = i['script_path'] + 
script_url = i.get('script_url', '') + script_tags = i.get('script_tags', '') + + compute_meta = i.get('compute_meta', {}) + compute_tags = compute_meta.get('tags', []) + bench_meta = i.get('bench_meta', {}) + + compute_uid = compute_meta.get('uid', '') + bench_uid = bench_meta.get('uid', '') + + st_inputs_custom = {} + + bench_input = bench_meta.get('bench_input', {}) + + end_html = '' + + extra = {} + add_to_st_inputs = {} + + inp = script_meta['input_description'] + + # Here we can update params + v = compute_meta.get('mlperf_inference_device') + if v is not None and v != '': + inp['device']['force'] = v + + if v in ['tpu', 'gaudi']: + st.markdown('----') + st.markdown( + '**WARNING: unified CM workflow support for this hardware is pending - please [feel free to help](https://discord.gg/JjWNWXKxwT)!**') + return {'return': 0, 'skip': True, 'end_html': end_html} + + elif 'orin' in compute_tags: + st.markdown('----') + st.markdown( + '**WARNING: we need to encode CM knowledge from [this Orin setup](https://github.com/mlcommons/ck/blob/master/docs/mlperf/setup/setup-nvidia-jetson-orin.md) into this GUI!**') + return {'return': 0, 'skip': True, 'end_html': end_html} + + st.markdown('---') + st.markdown('**How would you like to run the MLPerf inference benchmark?**') + + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_device', + 'desc': inp['device']}) + device = r.get('value2') + inp['device']['force'] = device + + if device == 'cpu': + inp['implementation']['choices'] = ['mlcommons-python', + 'mlcommons-cpp', 'intel', 'ctuning-cpp-tflite'] + if 'intel' in compute_tags: + inp['implementation']['default'] = 'intel' + else: + inp['implementation']['default'] = 'mlcommons-python' + inp['backend']['choices'] = [ + 'onnxruntime', 'deepsparse', 'pytorch', 'tf', 'tvm-onnx'] + inp['backend']['default'] = 'onnxruntime' + elif device == 'rocm': + inp['implementation']['force'] = 'mlcommons-python' + inp['precision']['force'] = '' + inp['backend']['force'] = 'onnxruntime' + st.markdown( + '*WARNING: the CM-MLPerf inference workflow was not tested thoroughly for AMD GPU - please feel free to test and improve!*') + elif device == 'qaic': + inp['implementation']['force'] = 'qualcomm' + inp['precision']['force'] = '' + inp['backend']['force'] = 'glow' + + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_division', + 'desc': inp['division']}) + division = r.get('value2') + inp['division']['force'] = division + + y = 'compliance' + if division == 'closed': + inp[y]['default'] = 'yes' + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_compliance', + 'desc': inp[y]}) + compliance = r.get('value2') + inp[y]['force'] = compliance + + if compliance == 'yes': + st.markdown( + '*:red[See [online table with required compliance tests](https://github.com/mlcommons/policies/blob/master/submission_rules.adoc#5132-inference)].*') + + else: + inp[y]['force'] = 'no' + + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_category', + 'desc': inp['category']}) + category = r.get('value2') + inp['category']['force'] = category + + ########################################################################## + # Implementation + v = bench_input.get('mlperf_inference_implementation') + if v is not None and v != '': + inp['implementation']['force'] = v + else: + if device == 'cuda':
+ inp['implementation']['choices'] = [ + 'nvidia', 'mlcommons-python', 'mlcommons-cpp'] + inp['implementation']['default'] = 'nvidia' + inp['backend']['choices'] = ['tensorrt', 'onnxruntime', 'pytorch'] + inp['backend']['default'] = 'tensorrt' + + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_implementation', + 'desc': inp['implementation']}) + implementation = r.get('value2') + inp['implementation']['force'] = implementation + + implementation_setup = '' + r = load_md(script_path, 'setup', 'i-' + implementation) + if r['return'] == 0: + implementation_setup = r['string'] + + url_faq_implementation = '' + r = get_url(script_url, script_path, 'faq', implementation, 'FAQ online') + if r['return'] == 0: + url_faq_implementation = r['url_online'] + + can_have_docker_flag = False + + if implementation == 'mlcommons-cpp': + # inp['backend']['choices'] = ['onnxruntime'] + inp['precision']['force'] = 'float32' + inp['backend']['force'] = 'onnxruntime' + inp['model']['choices'] = ['resnet50', 'retinanet'] + st.markdown( + '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-cpp)]*') + elif implementation == 'mlcommons-python': + inp['precision']['force'] = 'float32' + if device == 'cuda': + inp['backend']['choices'] = ['onnxruntime', 'pytorch', 'tf'] + inp['backend']['default'] = 'onnxruntime' + st.markdown( + '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-python)]*') + elif implementation == 'ctuning-cpp-tflite': + inp['precision']['force'] = 'float32' + inp['model']['force'] = 'resnet50' + st.markdown( + '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-ctuning-cpp-tflite)]*') + elif implementation == 'nvidia': + inp['backend']['force'] = 'tensorrt' + extra['skip_script_docker_func'] = True + can_have_docker_flag = True + st.markdown( + '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-nvidia)]*') + elif implementation == 'intel': + inp['model']['choices'] = ['bert-99', 'gptj-99'] + inp['model']['default'] = 'bert-99' + inp['precision']['choices'] = ['int8', 'int4'] + inp['precision']['default'] = 'int8' + inp['category']['force'] = 'datacenter' + inp['backend']['force'] = 'pytorch' + inp['sut']['default'] = 'sapphire-rapids.112c' + can_have_docker_flag = True + extra['skip_script_docker_func'] = True +# st.markdown('*:red[Note: Intel implementation require extra CM command to build and run Docker container - you will run CM commands to run MLPerf benchmarks there!]*') + st.markdown( + '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-intel)]*') + elif implementation == 'qualcomm': + inp['model']['choices'] = ['resnet50', 'retinanet', 'bert-99'] + inp['model']['default'] = 'bert-99' + inp['precision']['default'] = 'float16' + extra['skip_script_docker_func'] = True + st.markdown( + '*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-inference-qualcomm)]*') + + ########################################################################## + # Backend + + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 
'key': 'mlperf_inference_backend', + 'desc': inp['backend']}) + backend = r.get('value2') + inp['backend']['force'] = backend + + backend_setup = '' + r = load_md(script_path, 'setup', 'b-' + backend) + if r['return'] == 0: + backend_setup = r['string'] + + if backend == 'deepsparse': + inp['model']['choices'] = [ + 'resnet50', 'retinanet', 'bert-99', 'bert-99.9'] + inp['model']['default'] = 'bert-99' + inp['precision']['choices'] = ['float32', 'int8'] + inp['precision']['default'] = 'int8' + if 'force' in inp['precision']: + del (inp['precision']['force']) + + ########################################################################## + # Model + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_model', + 'desc': inp['model']}) + model = r.get('value2') + inp['model']['force'] = model + + github_doc_model = '' + + if model == 'retinanet': + x = '50' + if implementation == 'mlcommons-python': + x = '200' + st.markdown( + ':red[This model requires ~{}GB of free disk space for preprocessed dataset in a full/submission run!]\n'.format(x)) + + elif model.startswith('bert-'): + github_doc_model = 'bert' + + elif model.startswith('3d-unet-'): + github_doc_model = '3d-unet' + + elif model == 'rnnt': + github_doc_model = 'rnnt' + + elif model.startswith('dlrm-v2-'): + github_doc_model = 'dlrm_v2' + + elif model.startswith('gptj-'): + github_doc_model = 'gpt-j' + + elif model == 'sdxl': + github_doc_model = 'stable-diffusion-xl' + + elif model.startswith('llama2-'): + github_doc_model = 'llama2-70b' + + elif model.startswith('mixtral-'): + github_doc_model = 'mixtral-8x7b' + + if github_doc_model == '': + github_doc_model = model + + model_cm_url = 'https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference/{}'.format( + github_doc_model) + extra_notes_online = '[Extra notes online]({})\n'.format(model_cm_url) + + st.markdown( + '*[CM-MLPerf GitHub docs for this model]({})*'.format(model_cm_url)) + + ########################################################################## + # Precision + if implementation == 'intel': + if model == 'bert-99': + inp['precision']['force'] = 'int8' + elif model == 'gptj-99': + inp['precision']['force'] = 'int4' + elif implementation == 'qualcomm': + if model == 'resnet50': + inp['precision']['print'] = 'int8' + elif model == 'retinanet': + inp['precision']['print'] = 'int8' + elif model == 'bert-99': + inp['precision']['print'] = 'int8/float16' + + if inp['precision'].get('force', '') == '': + x = inp['precision'].get('print', '') + if x != '': + st.markdown('**{}**: {}'.format(inp['precision']['desc'], x)) + else: + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_precision', + 'desc': inp['precision']}) + precision = r.get('value2') + inp['precision']['force'] = precision + + ########################################################################## + # Benchmark version + + script_meta_variations = script_meta['variations'] + + choices = [''] + [ + k for k in script_meta_variations if script_meta_variations[k].get( + 'group', '') == 'benchmark-version'] + desc = { + 'choices': choices, + 'default': choices[0], + 'desc': 'Force specific benchmark version?'} + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_version', + 'desc': desc}) + benchmark_version = r.get('value2') + + if benchmark_version != '': + params['~~benchmark-version'] = [benchmark_version] + + 
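The benchmark-version selector just above derives its choice list by filtering the script's variations for the `benchmark-version` group declared in `_cm.yaml`. A minimal sketch of that filter with a subset of the variations defined earlier in this patch:

```python
# Sketch of the group filter used above, with a subset of the _cm.yaml
# variations; the list comprehension mirrors the one in gui().
script_meta_variations = {
    'r4.0':     {'group': 'benchmark-version'},
    'r4.1-dev': {'group': 'benchmark-version', 'default': True},
    'r4.1':     {'group': 'benchmark-version'},
    'short':    {'group': 'submission-generation-style'},
}

choices = [''] + [k for k in script_meta_variations
                  if script_meta_variations[k].get('group', '') == 'benchmark-version']
print(choices)  # ['', 'r4.0', 'r4.1-dev', 'r4.1']
```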
########################################################################## + # Run via Docker container + if can_have_docker_flag: + + default_choice = 'yes - run in container' + + choices = [default_choice, 'no - run natively'] + desc = { + 'choices': choices, + 'default': choices[0], + 'desc': 'Should the CM script prepare and run a Docker container in interactive mode to run MLPerf? You can then copy/paste the CM commands generated by this GUI to benchmark different models.'} + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_docker', + 'desc': desc}) + benchmark_docker = r.get('value2') + + if benchmark_docker == 'yes - run in container': + add_to_st_inputs['@docker'] = True + add_to_st_inputs['@docker_cache'] = 'no' + + ########################################################################## + # Prepare submission + st.markdown('---') + + submission = st.toggle( + 'Would you like to prepare an official submission?', + value=False) + if submission: + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_hw_name', + 'desc': inp['hw_name']}) + inp['hw_name']['force'] = r.get('value2') + + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_submitter', + 'desc': inp['submitter']}) + submitter = r.get('value2') + inp['submitter']['force'] = submitter + + params['~~submission-generation'] = ['submission'] + params['~all-scenarios'] = ['true'] + inp['scenario']['force'] = '' + inp['clean']['default'] = False + inp['repro']['force'] = True + + x = '*:red[Use the following command to find the local directory with the submission tree and results:]*\n```bash\ncm find cache --tags=submission,dir\n```\n' + + x += '*:red[You will also find the results in the `mlperf-inference-submission.tar.gz` file that you can submit to MLPerf!]*\n\n' + + x += '*:red[Note that if some results are INVALID because the run was too short, you can rerun the same CM command; it will keep increasing the benchmark length until you get a valid result!]*\n' + + st.markdown(x) + + st.markdown('---') + + else: + inp['submitter']['force'] = '' + inp['clean']['default'] = True + params['~submission'] = ['false'] + + choices = [ + 'Performance', + 'Accuracy', + 'Find Performance from a short run', + 'Performance and Accuracy'] + desc = { + 'choices': choices, + 'default': choices[0], + 'desc': 'What to measure?'} + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_measure', + 'desc': desc}) + measure = r.get('value2') + + x = '' + if measure == 'Performance': + x = 'performance-only' + elif measure == 'Accuracy': + x = 'accuracy-only' + elif measure == 'Find Performance from a short run': + x = 'find-performance' + elif measure == 'Performance and Accuracy': + x = 'submission' + + params['~~submission-generation'] = [x] + + ####################################################################### + # Prepare scenario + + xall = 'All applicable' + choices = ['Offline', 'Server', 'SingleStream', 'MultiStream', xall] + desc = { + 'choices': choices, + 'default': choices[0], + 'desc': 'Which scenario(s)?'} + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_scenario', + 'desc': desc}) + scenario = r.get('value2') + + if scenario == xall: + params['~all-scenarios'] = ['true'] + inp['scenario']['force'] = '' + else: + inp['scenario']['force'] = scenario + 
+ ########################################################################## + # Short or full run + + x = ['Full run', 'Short run'] + if submission: + choices = [x[0], x[1]] + else: + choices = [x[1], x[0]] + + desc = { + 'choices': choices, + 'default': choices[0], + 'desc': 'Short (test) or full (valid) run?'} + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_how', + 'desc': desc}) + how = r.get('value2') + + if how == x[0]: + params['~~submission-generation-style'] = ['full'] + inp['execution_mode']['force'] = 'valid' + else: + params['~~submission-generation-style'] = ['short'] + inp['execution_mode']['force'] = 'test' + + ########################################################################## + # Power + +# desc = {'boolean':True, 'default':False, 'desc':'Measure power?'} +# r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_power', 'desc':desc}) +# power = r.get('value2', False) + + power = st.toggle('Measure power consumption?', value=False) + + if power: + inp['power']['force'] = 'yes' + + y = 'adr.mlperf-power-client.power_server' + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_power_server', + 'desc': inp[y]}) + inp[y]['force'] = r.get('value2') + + y = 'adr.mlperf-power-client.port' + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_power_port', + 'desc': inp[y]}) + inp[y]['force'] = r.get('value2') + + st.markdown( + '*:red[See [online notes](https://github.com/mlcommons/ck/blob/master/docs/tutorials/mlperf-inference-power-measurement.md)] to setup power meter and server.*') + + else: + inp['power']['force'] = 'no' + inp['adr.mlperf-power-client.power_server']['force'] = '' + inp['adr.mlperf-power-client.port']['force'] = '' + + ########################################################################## + # Dashboard + +# desc = {'boolean':True, 'default':False, 'desc':'Output results to W&B dashboard?'} +# r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_dashboard', 'desc':desc}) +# dashboard = r.get('value2', False) + + dashboard = st.toggle('Output results to W&B dashboard?', value=False) + + if dashboard: + params['~dashboard'] = ['true'] + + y = 'dashboard_wb_project' + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_power_wb_project', + 'desc': inp[y]}) + inp[y]['force'] = r.get('value2') + + y = 'dashboard_wb_user' + r = misc.make_selector({'st': st, + 'st_inputs': st_inputs_custom, + 'params': params, + 'key': 'mlperf_inference_power_wb_user', + 'desc': inp[y]}) + inp[y]['force'] = r.get('value2') + + else: + params['~dashboard'] = ['false'] + inp['dashboard_wb_project']['force'] = '' + inp['dashboard_wb_user']['force'] = '' + + # Hide customization by default + params['hide_script_customization'] = True + + x = implementation_setup + if backend_setup != '': + if x != '': + x += '\n\n' + x += backend_setup + + extra['extra_notes_online'] = extra_notes_online + extra['extra_faq_online'] = url_faq_implementation + extra['extra_setup'] = x + + ########################################################################## + value_reproduce = inp.get('repro', {}).get('force', False) + reproduce = st.toggle( + 'Record extra info for reproducibility?', + value=value_reproduce) + + explore = st.toggle( + 
'Explore/tune benchmark (batch size, threads, etc)?', + value=False) + + if reproduce or explore: + add_to_st_inputs.update({ + "@repro_extra.run-mlperf-inference-app.bench_uid": bench_uid, + "@repro_extra.run-mlperf-inference-app.compute_uid": compute_uid, + '@results_dir': '{{CM_EXPERIMENT_PATH3}}', + '@submission_dir': '{{CM_EXPERIMENT_PATH3}}' + }) + + inp['repro']['force'] = True + extra['use_experiment'] = True + + if explore: + add_to_st_inputs['@batch_size'] = '{{CM_EXPLORE_BATCH_SIZE{[1,2,4,8]}}}' + + ########################################################################## + debug = st.toggle( + 'Debug and run MLPerf benchmark natively from command line after CM auto-generates CMD?', + value=False) + if debug: + inp['debug']['force'] = True + + extra['add_to_st_inputs'] = add_to_st_inputs + + return {'return': 0, 'end_html': end_html, 'extra': extra} diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/ctuning-cpp-tflite.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/ctuning-cpp-tflite.md new file mode 100644 index 000000000..920b6243b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/ctuning-cpp-tflite.md @@ -0,0 +1 @@ +# cTuning TFLite C++ implementation of MLPerf inference diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/deepsparse.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/deepsparse.md new file mode 100644 index 000000000..63eb72491 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/deepsparse.md @@ -0,0 +1 @@ +# FAQ: MLPerf inference with DeepSparse backend diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/intel.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/intel.md new file mode 100644 index 000000000..79f6aa979 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/intel.md @@ -0,0 +1 @@ +# FAQ: Intel implementation of MLPerf inference diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/mlcommons-cpp.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/mlcommons-cpp.md new file mode 100644 index 000000000..48700eead --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/mlcommons-cpp.md @@ -0,0 +1 @@ +# FAQ: MLCommons C++ implementation of MLPerf inference diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/mlcommons-python.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/mlcommons-python.md new file mode 100644 index 000000000..d8ed888f6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/mlcommons-python.md @@ -0,0 +1 @@ +# MLCommons reference implementation of MLPerf inference diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/nvidia.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/nvidia.md new file mode 100644 index 000000000..c873bf89e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/nvidia.md @@ -0,0 +1,2 @@ +# FAQ: Nvidia implementation of MLPerf inference + diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/qualcomm.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/qualcomm.md new file mode 100644 index 000000000..92af081ca --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/faq/qualcomm.md @@ -0,0 +1 @@ +# FAQ: Qualcomm implementation of MLPerf inference diff --git 
a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/README.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/README.md new file mode 100644 index 000000000..fdbe0e28a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/README.md @@ -0,0 +1,30 @@ +***Outdated*** + +# About + +Prototyping modular and customizable CM containers for MLPerf. + +# Build + +```bash +./build.sh +``` + +# Run + +```bash +./run.sh + +cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_dashboard \ + --adr.python.version_min=3.8 \ + --submitter="modular-cm-mlperf-container" \ + --lang=python \ + --hw_name=default \ + --model=resnet50 \ + --backend=onnxruntime \ + --device=cpu \ + --scenario=Offline \ + --test_query_count=500 \ + --quiet \ + --clean +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/_common.bat b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/_common.bat new file mode 100644 index 000000000..7f9d3aab3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/_common.bat @@ -0,0 +1,7 @@ +rem set CM_CACHE=--no-cache + +set CM_DOCKER_ORG=modularcm +set CM_DOCKER_NAME=mlperf-inference +set CM_OS_NAME=ubuntu +set CM_HW_TARGET=cpu +set CM_OS_VERSION=22.04 diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/_common.sh b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/_common.sh new file mode 100644 index 000000000..4d2f18aac --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/_common.sh @@ -0,0 +1,10 @@ +#! /bin/bash + +#export CM_CACHE="--no-cache" + +export CM_DOCKER_ORG=modularcm +export CM_DOCKER_NAME="mlperf-inference" +export CM_OS_NAME="ubuntu" +export CM_HW_TARGET="cpu" +export CM_OS_VERSION="22.04" + diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/build.bat b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/build.bat new file mode 100644 index 000000000..d7c097811 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/build.bat @@ -0,0 +1,25 @@ +call _common.bat + +docker build -f %CM_DOCKER_NAME%--%CM_OS_NAME%-%CM_HW_TARGET%.Dockerfile ^ + -t %CM_DOCKER_ORG%/%CM_DOCKER_NAME%:%CM_OS_NAME%-%CM_OS_VERSION% ^ + --build-arg cm_os_name=%CM_OS_NAME% ^ + --build-arg cm_hw_target=%CM_HW_TARGET% ^ + --build-arg cm_os_version=%CM_OS_VERSION% ^ + --build-arg cm_version="" ^ + --build-arg cm_automation_repo="mlcommons@ck" ^ + --build-arg cm_automation_checkout="" ^ + --build-arg cm_python_version="3.10.8" ^ + --build-arg cm_mlperf_inference_loadgen_version="" ^ + --build-arg cm_mlperf_inference_src_tags="" ^ + --build-arg cm_mlperf_inference_src_version="" ^ + --build-arg CM_MLPERF_CHOICE_SCRIPT="" ^ + --build-arg CM_MLPERF_CHOICE_SUBMITTER="Container" ^ + --build-arg CM_MLPERF_CHOICE_IMPLEMENTATION="python" ^ + --build-arg CM_MLPERF_CHOICE_HW_NAME="default" ^ + --build-arg CM_MLPERF_CHOICE_MODEL="resnet50" ^ + --build-arg CM_MLPERF_CHOICE_BACKEND="onnxruntime" ^ + --build-arg CM_MLPERF_CHOICE_DEVICE=%CM_HW_TARGET% ^ + --build-arg CM_MLPERF_CHOICE_SCENARIO="Offline" ^ + --build-arg CM_MLPERF_CHOICE_MODE="accuracy" ^ + --build-arg CM_MLPERF_CHOICE_QUERY_COUNT="5" ^ + %CM_CACHE% . 
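The variables set in `_common.bat`/`_common.sh` above compose the Dockerfile name and image tag used by `build.bat` above and `build.sh` below. A minimal sketch of just that naming composition (the real scripts additionally pass many `--build-arg` values, omitted here):

```python
# Sketch: compose the docker build coordinates from the _common.* variables.
import shlex

cfg = {
    'CM_DOCKER_ORG': 'modularcm',
    'CM_DOCKER_NAME': 'mlperf-inference',
    'CM_OS_NAME': 'ubuntu',
    'CM_HW_TARGET': 'cpu',
    'CM_OS_VERSION': '22.04',
}

dockerfile = f"{cfg['CM_DOCKER_NAME']}--{cfg['CM_OS_NAME']}-{cfg['CM_HW_TARGET']}.Dockerfile"
image = f"{cfg['CM_DOCKER_ORG']}/{cfg['CM_DOCKER_NAME']}:{cfg['CM_OS_NAME']}-{cfg['CM_OS_VERSION']}"

print(shlex.join(['docker', 'build', '-f', dockerfile, '-t', image, '.']))
# docker build -f mlperf-inference--ubuntu-cpu.Dockerfile -t modularcm/mlperf-inference:ubuntu-22.04 .
```

The resulting Dockerfile name matches `mlperf-inference--ubuntu-cpu.Dockerfile` added later in this patch.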
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/build.sh b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/build.sh new file mode 100644 index 000000000..082f00d4b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/build.sh @@ -0,0 +1,27 @@ +#! /bin/bash + +. ./_common.sh + +time docker build -f ${CM_DOCKER_NAME}--${CM_OS_NAME}-${CM_HW_TARGET}.Dockerfile \ + -t ${CM_DOCKER_ORG}/${CM_DOCKER_NAME}:${CM_OS_NAME}-${CM_OS_VERSION} \ + --build-arg cm_os_name=${CM_OS_NAME} \ + --build-arg cm_hw_target=${CM_HW_TARGET} \ + --build-arg cm_os_version=${CM_OS_VERSION} \ + --build-arg cm_version="" \ + --build-arg cm_automation_repo="mlcommons@ck" \ + --build-arg cm_automation_checkout="" \ + --build-arg cm_python_version="3.10.8" \ + --build-arg cm_mlperf_inference_loadgen_version="" \ + --build-arg cm_mlperf_inference_src_tags="" \ + --build-arg cm_mlperf_inference_src_version="" \ + --build-arg CM_MLPERF_CHOICE_SCRIPT=",_short,_submission,_dashboard" \ + --build-arg CM_MLPERF_CHOICE_SUBMITTER="Container" \ + --build-arg CM_MLPERF_CHOICE_IMPLEMENTATION="python" \ + --build-arg CM_MLPERF_CHOICE_HW_NAME="default" \ + --build-arg CM_MLPERF_CHOICE_MODEL="resnet50" \ + --build-arg CM_MLPERF_CHOICE_BACKEND="onnxruntime" \ + --build-arg CM_MLPERF_CHOICE_DEVICE=${CM_HW_TARGET} \ + --build-arg CM_MLPERF_CHOICE_SCENARIO="Offline" \ + --build-arg CM_MLPERF_CHOICE_MODE="accuracy" \ + --build-arg CM_MLPERF_CHOICE_QUERY_COUNT="500" \ + ${CM_CACHE} . diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/mlperf-inference--ubuntu-cpu.Dockerfile b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/mlperf-inference--ubuntu-cpu.Dockerfile new file mode 100644 index 000000000..25f9d7777 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/mlperf-inference--ubuntu-cpu.Dockerfile @@ -0,0 +1,118 @@ +# Modular MLPerf container with the MLCommons CM automation meta-framework + +# Preparing OS +ARG cm_os_name="ubuntu" +ARG cm_os_version="22.04" + +FROM ${cm_os_name}:${cm_os_version} + +# Maintained by the MLCommons taskforce on automation and reproducibility +LABEL github="https://github.com/mlcommons/ck" +LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce" + +# Customization +ARG CM_GH_TOKEN + +# Prepare shell and entry point +SHELL ["/bin/bash", "-c"] +ENTRYPOINT ["/bin/bash", "-c"] + +# Install system dependencies +# Notes: https://runnable.com/blog/9-common-dockerfile-mistakes +RUN apt-get update -y +RUN apt-get install -y lsb-release +RUN apt-get install -y python3 python3-pip git wget sudo + +# Extra python deps +RUN python3 -m pip install requests + +# CM version +ARG cm_version="" +ENV CM_VERSION="${cm_version}" +RUN if [ "${CM_VERSION}" != "" ] ; then \ + python3 -m pip install cmind==${CM_VERSION} ; \ + else \ + python3 -m pip install cmind ; \ + fi + +# Setup docker environment +ENTRYPOINT ["/bin/bash", "-c"] +ENV TZ=US/Pacific +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ >/etc/timezone + +# Setup docker user +# See example in https://github.com/mlcommons/GaNDLF/blob/master/Dockerfile-CPU +RUN groupadd --gid 10001 cm +RUN useradd --uid 10000 -g cm --create-home --shell /bin/bash cmuser +RUN echo "cmuser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + +USER cmuser:cm +WORKDIR /home/cmuser + +# Check CM installation +RUN lsb_release -a > sys-version-os.log +RUN 
uname -a > sys-version-kernel.log
+RUN python3 --version > sys-version-python3.log
+RUN cm version > sys-version-cm.log
+
+################################################################################
+# Get CM automation repository
+ARG cm_automation_repo="mlcommons@ck"
+ARG cm_automation_checkout=""
+ENV CM_AUTOMATION_REPO=${cm_automation_repo}
+ENV CM_AUTOMATION_REPO_CHECKOUT=${cm_automation_checkout}
+RUN echo ${CM_AUTOMATION_REPO}
+RUN cm pull repo ${CM_AUTOMATION_REPO} --checkout=${CM_AUTOMATION_REPO_CHECKOUT}
+
+################################################################################
+# Install CM system dependencies
+RUN cm run script "get sys-utils-cm" --quiet
+
+# Detect/install python
+ARG cm_python_version=""
+RUN cm run script "get python3" --version=${cm_python_version}
+
+################################################################################
+# Build MLPerf loadgen (official, with the correct seed for submission)
+ARG cm_mlperf_inference_loadgen_version=""
+RUN cm run script "get mlperf loadgen" --adr.compiler.tags=gcc --version=${cm_mlperf_inference_loadgen_version} --adr.inference-src-loadgen.version=${cm_mlperf_inference_loadgen_version} -v
+
+# Install MLPerf inference source (can be a private development branch)
+ARG cm_mlperf_inference_src_tags=""
+ARG cm_mlperf_inference_src_version=""
+RUN cm run script "get mlperf inference src ${cm_mlperf_inference_src_tags}" --version=${cm_mlperf_inference_src_version} -v
+
+################################################################################
+# Run CM automation workflow for MLPerf
+# https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-app
+
+ARG CM_MLPERF_CHOICE_SCRIPT=""
+ARG CM_MLPERF_CHOICE_SUBMITTER="Container"
+ARG CM_MLPERF_CHOICE_IMPLEMENTATION="python"
+ARG CM_MLPERF_CHOICE_HW_NAME="default"
+ARG CM_MLPERF_CHOICE_MODEL="resnet50"
+ARG CM_MLPERF_CHOICE_BACKEND="onnxruntime"
+ARG CM_MLPERF_CHOICE_DEVICE="cpu"
+ARG CM_MLPERF_CHOICE_SCENARIO="Offline"
+ARG CM_MLPERF_CHOICE_MODE="performance"
+ARG CM_MLPERF_CHOICE_QUERY_COUNT="10"
+
+RUN cm run script --tags=run,mlperf,inference,generate-run-cmds,${CM_MLPERF_CHOICE_SCRIPT} \
+    --adr.compiler.tags=gcc \
+    --adr.python.version_min=3.8 \
+    --submitter="${CM_MLPERF_CHOICE_SUBMITTER}" \
+    --lang=${CM_MLPERF_CHOICE_IMPLEMENTATION} \
+    --hw_name=${CM_MLPERF_CHOICE_HW_NAME} \
+    --model=${CM_MLPERF_CHOICE_MODEL} \
+    --backend=${CM_MLPERF_CHOICE_BACKEND} \
+    --device=${CM_MLPERF_CHOICE_DEVICE} \
+    --scenario=${CM_MLPERF_CHOICE_SCENARIO} \
+    --mode=${CM_MLPERF_CHOICE_MODE} \
+    --test_query_count=${CM_MLPERF_CHOICE_QUERY_COUNT} \
+    --quiet \
+    --clean
+
+################################################################################
+# CMD entry point
+CMD /bin/bash
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/run.bat b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/run.bat
new file mode 100644
index 000000000..53b13dcb9
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/run.bat
@@ -0,0 +1,3 @@
+call _common.bat
+
+docker run -it %CM_DOCKER_ORG%/%CM_DOCKER_NAME%:%CM_OS_NAME%-%CM_OS_VERSION%
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/run.sh b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/run.sh
new file mode 100644
index 000000000..3473716c7
--- /dev/null
+++ 
b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/modular-cm-containers/run.sh @@ -0,0 +1,3 @@ +. ./_common.sh + +docker run -it ${CM_DOCKER_ORG}/${CM_DOCKER_NAME}:${CM_OS_NAME}-${CM_OS_VERSION} diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/run_mobilenet.py b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/run_mobilenet.py new file mode 100644 index 000000000..63ad8986e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/run_mobilenet.py @@ -0,0 +1,103 @@ +import cmind +import os +import sys + +models = { + "mobilenet": { + "v1": { + "multiplier": ["multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.25"], + "resolution": ["resolution-224", "resolution-192", "resolution-160", "resolution-128"], + "kind": [""] + }, + "v2": { + "multiplier": ["multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.35"], + "resolution": ["resolution-224", "resolution-192", "resolution-160", "resolution-128"], + "kind": [""] + }, + "v3": { + "multiplier": [""], + "resolution": [""], + "kind": ["large", "large-minimalistic", "small", "small-minimalistic"] + } + }, + "efficientnet": { + "": { + "multiplier": [""], + "resolution": [""], + "kind": ["lite0", "lite1", "lite2", "lite3", "lite4"] + } + } +} +variation_strings = {} +for t1 in models: + variation_strings[t1] = [] + variation_list = [] + variation_list.append(t1) + for version in models[t1]: + variation_list = [] + if version.strip(): + variation_list.append("_" + version) + variation_list_saved = variation_list.copy() + for k1 in models[t1][version]["multiplier"]: + variation_list = variation_list_saved.copy() + if k1.strip(): + variation_list.append("_" + k1) + variation_list_saved_2 = variation_list.copy() + for k2 in models[t1][version]["resolution"]: + variation_list = variation_list_saved_2.copy() + if k2.strip(): + variation_list.append("_" + k2) + variation_list_saved_3 = variation_list.copy() + for k3 in models[t1][version]["kind"]: + variation_list = variation_list_saved_3.copy() + if k3.strip(): + variation_list.append("_" + k3) + variation_strings[t1].append(",".join(variation_list)) +args = sys.argv + +opt = None +if len(args) > 1: + opt = args[1] +if opt == "submission": + var = "_submission" + execution_mode = "valid" +else: + var = "_find-performance" + execution_mode = "test" + +precisions = ["fp32", "uint8"] +for model in variation_strings: + for v in variation_strings[model]: + for precision in precisions: + if "small-minimalistic" in v and precision == "uint8": + continue + if model == "efficientnet" and precision == "uint8": + precision = "int8" + cm_input = { + 'action': 'run', + 'automation': 'script', + 'tags': f'generate-run-cmds,mlperf,inference,{var}', + 'quiet': True, + 'implementation': 'tflite-cpp', + 'precision': precision, + 'model': model, + 'scenario': 'SingleStream', + 'execution_mode': execution_mode, + 'test_query_count': '50', + 'adr': { + 'tflite-model': { + 'tags': v + }, + 'compiler': { + 'tags': 'gcc' + }, + 'mlperf-inference-implementation': { + 'tags': '_armnn,_use-neon' + } + } + } + print(cm_input) + r = cmind.access(cm_input) + if r['return'] > 0: + print(r) + # exit(1) diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/b-deepsparse.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/b-deepsparse.md new file mode 100644 index 000000000..30957027e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/b-deepsparse.md @@ -0,0 +1 @@ +DeepSparse 
backend
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/i-intel.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/i-intel.md
new file mode 100644
index 000000000..a7079b7bc
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/i-intel.md
@@ -0,0 +1 @@
+CM can run Intel's MLPerf inference benchmark implementation either natively or inside a container.
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/i-nvidia.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/i-nvidia.md
new file mode 100644
index 000000000..bfa50410c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/i-nvidia.md
@@ -0,0 +1,3 @@
+* The container will require around 60 GB of free disk space.
+* The Docker cache plus running all models (without DLRM) will require ~600 GB of free disk space.
+* Once you are in interactive Docker mode, you can copy/paste the CM commands generated by this GUI to benchmark different models.
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/i-qualcomm.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/i-qualcomm.md
new file mode 100644
index 000000000..c0aef5187
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-app/setup/i-qualcomm.md
@@ -0,0 +1,6 @@
+* CM runs Qualcomm's MLPerf inference benchmark implementation natively.
+* The [QAIC SDK](https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100-cc) must be installed.
+* If you run CM-MLPerf for Qualcomm in the cloud, you may need to update/change the VM image to one with an SDK version compatible with Qualcomm's MLPerf implementation.
+  Please check [cTuning's MLPerf inference results](https://mlcommons.org/benchmarks/inference-datacenter/) to see the working QAIC SDK versions.
+
+
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/README-about.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/README-about.md
new file mode 100644
index 000000000..beaa467a8
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/README-about.md
@@ -0,0 +1,107 @@
+## Set up
+
+We need the full ImageNet dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL, please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register it in CM.
+
+### Docker Setup (Optional)
+
+CM commands are expected to run natively, but if you prefer not to modify the host system, you can run the command below to set up a Docker container.
+
+```
+cm docker script --tags=run,mobilenet-models,_tflite,_accuracy-only \
+--adr.compiler.tags=gcc \
+--docker_cm_repo=mlcommons@cm4mlops \
+--imagenet_path=$HOME/imagenet-2012-val \
+--results_dir=$HOME/mobilenet_results \
+--submission_dir=$HOME/inference_submission_3.1 \
+--docker_skip_run_cmd
+```
+
+This command will build a Docker container and give you an interactive shell from which you can execute the CM run commands below.
+* `results_dir`, `submission_dir` and `imagenet_path` are mounted from the host system.
+* `results_dir` and `submission_dir` are expected to be empty directories that will be populated by the Docker container.
+* `imagenet_path` should point to the ImageNet folder containing the 50000 validation images.
+
+
+## Run Commands
+
+Since the runs can take many hours, you can install `screen` as follows in case you are running remotely (you may omit `screen` from the commands if you are running directly on the host system):
+```
+cmr "get generic-sys-util _screen"
+```
+### Default tflite
+
+
+#### Do a full accuracy run for all the models (can take almost a day)
+
+```
+screen cmr "run mobilenet-models _tflite _accuracy-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+#### Do a full performance run for all the models (can take almost a day)
+```
+screen cmr "run mobilenet-models _tflite _performance-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+#### Generate README files for all the runs
+```
+cmr "run mobilenet-models _tflite _populate-readme" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+#### Generate the actual submission tree
+
+We should use the master branch of the MLCommons inference repo for the submission checker. You can use the `--hw_notes_extra` option to add your name to the notes.
+```
+cmr "generate inference submission" \
+--results_dir=$HOME/mobilenet_results/valid_results \
+--submission_dir=$HOME/mobilenet_submission_tree \
+--clean \
+--infer_scenario_results=yes \
+--adr.compiler.tags=gcc --adr.inference-src.version=master \
+--run-checker \
+--submitter=cTuning \
+--hw_notes_extra="Result taken by NAME"
+```
+* Use `--hw_name="My system name"` to give a meaningful system name. Examples can be seen [here](https://github.com/mlcommons/inference_results_v3.0/tree/main/open/cTuning/systems).
+
+#### Push the results to a GitHub repo
+
+First, create a fork of [this repo](https://github.com/ctuning/mlperf_inference_submissions_v3.1/). Then run the following command after replacing `--repo_url` with your fork URL.
+```
+cmr "push github mlperf inference submission" \
+--submission_dir=$HOME/mobilenet_submission_tree \
+--repo_url=https://github.com/ctuning/mlperf_inference_submissions_v3.1/ \
+--commit_message="Mobilenet results added"
+```
+
+Finally, create a PR to the [cTuning repo](https://github.com/ctuning/mlperf_inference_submissions_v3.1/).
+
+### Using ARMNN with NEON
+
+Follow the same procedure as above, but for the first three experiment runs add `_armnn,_neon` to the tags. For example:
+```
+cmr "run mobilenet-models _tflite _armnn _neon _accuracy-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+`results_dir` and `submission_dir` can be the same as before, as the results will go to different subfolders.
+
+### Using ARMNN with OpenCL
+Follow the same procedure as above, but for the first three experiment runs add `_armnn,_opencl` to the tags. For example:
+```
+cmr "run mobilenet-models _tflite _armnn _opencl _accuracy-only" \
+--adr.compiler.tags=gcc \
+--results_dir=$HOME/mobilenet_results
+```
+
+`results_dir` and `submission_dir` can be the same as before, as the results will go to different subfolders.
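Editor's note: because the ARMNN variants differ from the default TFLite runs only in their variation tags, the three accuracy passes can also be chained in one go. A sketch (untested; it reuses the directories above, and you can wrap the whole loop in `screen` for remote runs):

```bash
# Sketch: accuracy runs for default TFLite, ARMNN+NEON and ARMNN+OpenCL back to back
for extra in "" "_armnn _neon" "_armnn _opencl"; do
    cmr "run mobilenet-models _tflite ${extra} _accuracy-only" \
        --adr.compiler.tags=gcc \
        --results_dir=$HOME/mobilenet_results
done
```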
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/README.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/README.md new file mode 100644 index 000000000..c575f0d94 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/run-mlperf-inference-mobilenet-models](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/run-mlperf-inference-mobilenet-models) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/_cm.yaml new file mode 100644 index 000000000..fb28250a9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/_cm.yaml @@ -0,0 +1,134 @@ +alias: run-mlperf-inference-mobilenet-models +automation_alias: script +automation_uid: 5b4e0237da074764 +category: MLPerf benchmark support +default_env: + CM_MLPERF_NO_RERUN: 'no' + CM_MLPERF_RUN_EFFICIENTNETS: 'no' + CM_MLPERF_RUN_FP32: 'yes' + CM_MLPERF_RUN_INT8: 'yes' + CM_MLPERF_RUN_MOBILENETS: 'no' +deps: +- tags: get,sys-utils-cm +docker: + docker_input_mapping: + imagenet_path: IMAGENET_PATH + results_dir: RESULTS_DIR + submission_dir: SUBMISSION_DIR + docker_run_final_cmds: + - cm run script --tags=run,mlperf,inference,mobilenet-models,_find-performance --adr.mlperf-inference-implementation.fake_run=True + --adr.compiler.tags=gcc + fake_run_deps: false + mounts: + - ${{ IMAGENET_PATH }}:${{ IMAGENET_PATH }} + - ${{ RESULTS_DIR }}:/home/cmuser/mobilenet_results + - ${{ SUBMISSION_DIR }}:/home/cmuser/inference_submission_3.1 + run: true +input_mapping: + find-performance: CM_MLPERF_FIND_PERFORMANCE_MODE + imagenet_path: IMAGENET_PATH + no-rerun: CM_MLPERF_NO_RERUN + power: CM_MLPERF_POWER + results_dir: CM_MLPERF_INFERENCE_RESULTS_DIR + submission: CM_MLPERF_SUBMISSION_MODE + submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR +tags: +- run +- mobilenet +- models +- image-classification +- mobilenet-models +- mlperf +- inference +uid: f21cc993a8b14a58 +variations: + accuracy-only: + env: + CM_MLPERF_ACCURACY_MODE: 'yes' + CM_MLPERF_FIND_PERFORMANCE_MODE: 'no' + CM_MLPERF_SUBMISSION_MODE: 'no' + group: run-mode + all-models: + default: true + env: + CM_MLPERF_RUN_EFFICIENTNETS: 'yes' + CM_MLPERF_RUN_MOBILENETS: 'yes' + group: model-selection + armnn: + env: + CM_MLPERF_USE_ARMNN_LIBRARY: 'yes' + efficientnet: + env: + CM_MLPERF_RUN_EFFICIENTNETS: 'yes' + group: model-selection + find-performance: + env: + CM_MLPERF_FIND_PERFORMANCE_MODE: 'yes' + CM_MLPERF_SUBMISSION_MODE: 'no' + group: run-mode + mobilenet: + env: + CM_MLPERF_RUN_MOBILENETS: 'yes' + group: model-selection + mobilenet-v1: + env: + CM_MLPERF_RUN_MOBILENET_V1: 'yes' + group: model-selection + mobilenet-v2: + env: + CM_MLPERF_RUN_MOBILENET_V2: 'yes' + group: model-selection + mobilenet-v3: + env: + CM_MLPERF_RUN_MOBILENET_V3: 'yes' + group: model-selection + neon: + env: + CM_MLPERF_USE_NEON: 'yes' + only-fp32: + env: + CM_MLPERF_RUN_INT8: 'no' + only-int8: + env: + CM_MLPERF_RUN_FP32: 'no' + opencl: + env: + CM_MLPERF_USE_OPENCL: 'yes' + performance-and-accuracy: + default: 'true' + env: + CM_MLPERF_ACCURACY_MODE: 'yes' + CM_MLPERF_FIND_PERFORMANCE_MODE: 'no' + CM_MLPERF_PERFORMANCE_MODE: 'yes' + CM_MLPERF_SUBMISSION_MODE: 'no' + group: run-mode + performance-only: + env: + 
CM_MLPERF_FIND_PERFORMANCE_MODE: 'no' + CM_MLPERF_PERFORMANCE_MODE: 'yes' + CM_MLPERF_SUBMISSION_MODE: 'no' + group: run-mode + submission: + env: + CM_MLPERF_FIND_PERFORMANCE_MODE: 'no' + CM_MLPERF_SUBMISSION_MODE: 'yes' + group: run-mode + tflite: + default: true + group: base-framework + tflite,armnn: + env: + CM_MLPERF_TFLITE_ARMNN: 'yes' + tflite,armnn,neon: + env: + CM_MLPERF_TFLITE_ARMNN_NEON: 'yes' + tflite,armnn,opencl: + env: + CM_MLPERF_TFLITE_ARMNN_OPENCL: 'yes' + tflite-default: + default: true + env: + CM_MLPERF_TFLITE_DEFAULT_MODE: 'yes' + group: optimization + use-neon: + alias: neon diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/customize.py b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/customize.py new file mode 100644 index 000000000..d36c188b2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/customize.py @@ -0,0 +1,224 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import cmind +import sys + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + add_deps_recursive = i['input'].get('add_deps_recursive') + + adr = i['input'].get('adr') + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + verbose = (env.get('CM_VERBOSE', False) == 'yes') + + models_all = { + "mobilenet": { + "v1": { + "multiplier": ["multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.25"], + "resolution": ["resolution-224", "resolution-192", "resolution-160", "resolution-128"], + "kind": [""] + }, + "v2": { + "multiplier": ["multiplier-1.0", "multiplier-0.75", "multiplier-0.5", "multiplier-0.35"], + "resolution": ["resolution-224", "resolution-192", "resolution-160", "resolution-128"], + "kind": [""] + }, + "v3": { + "multiplier": [""], + "resolution": [""], + "kind": ["large", "large-minimalistic", "small", "small-minimalistic"] + } + }, + "efficientnet": { + "": { + "multiplier": [""], + "resolution": [""], + "kind": ["lite0", "lite1", "lite2", "lite3", "lite4"] + } + } + } + + models = {} + if env.get('CM_MLPERF_RUN_MOBILENET_V1', '') == "yes": + models['mobilenet'] = {} + models['mobilenet']['v1'] = models_all['mobilenet']['v1'] + elif env.get('CM_MLPERF_RUN_MOBILENET_V2', '') == "yes": + models['mobilenet'] = {} + models['mobilenet']['v2'] = models_all['mobilenet']['v2'] + elif env.get('CM_MLPERF_RUN_MOBILENET_V3', '') == "yes": + models['mobilenet'] = {} + models['mobilenet']['v3'] = models_all['mobilenet']['v3'] + elif env.get('CM_MLPERF_RUN_MOBILENETS', '') == "yes": + models['mobilenet'] = models_all['mobilenet'] + elif env.get('CM_MLPERF_RUN_EFFICIENTNETS', '') == "yes": + models['efficientnet'] = models_all['efficientnet'] + + variation_strings = {} + for t1 in models: + variation_strings[t1] = [] + variation_list = [] + variation_list.append(t1) + for version in models[t1]: + variation_list = [] + if version.strip(): + variation_list.append("_" + version) + variation_list_saved = variation_list.copy() + for k1 in models[t1][version]["multiplier"]: + 
variation_list = variation_list_saved.copy() + if k1.strip(): + variation_list.append("_" + k1) + variation_list_saved_2 = variation_list.copy() + for k2 in models[t1][version]["resolution"]: + variation_list = variation_list_saved_2.copy() + if k2.strip(): + variation_list.append("_" + k2) + variation_list_saved_3 = variation_list.copy() + for k3 in models[t1][version]["kind"]: + variation_list = variation_list_saved_3.copy() + if k3.strip(): + variation_list.append("_" + k3) + variation_strings[t1].append(",".join(variation_list)) + + if env.get('CM_MLPERF_SUBMISSION_MODE', '') == "yes": + var = "_submission" + execution_mode = "valid" + elif env.get('CM_MLPERF_ACCURACY_MODE', '') == "yes" and env.get('CM_MLPERF_PERFORMANCE_MODE', '') == "yes": + var = "_full,_performance-and-accuracy" + execution_mode = "valid" + elif env.get('CM_MLPERF_ACCURACY_MODE', '') == "yes": + var = "_full,_accuracy-only" + execution_mode = "valid" + elif env.get('CM_MLPERF_PERFORMANCE_MODE', '') == "yes": + var = "_full,_performance-only" + execution_mode = "valid" + else: + var = "_find-performance" + execution_mode = "test" + + precisions = [] + if env.get('CM_MLPERF_RUN_FP32', '') == "yes": + precisions.append("fp32") + if env.get('CM_MLPERF_RUN_INT8', '') == "yes": + precisions.append("uint8") + + implementation_tags = [] + if env.get('CM_MLPERF_USE_ARMNN_LIBRARY', '') == "yes": + implementation_tags.append("_armnn") + if env.get('CM_MLPERF_TFLITE_ARMNN_NEON', '') == "yes": + implementation_tags.append("_use-neon") + if env.get('CM_MLPERF_TFLITE_ARMNN_OPENCL', '') == "yes": + implementation_tags.append("_use-opencl") + implementation_tags_string = ",".join(implementation_tags) + + inp = i['input'] + + for model in variation_strings: + for v in variation_strings[model]: + for precision in precisions: + + if "small-minimalistic" in v and precision == "uint8": + continue + + if model == "efficientnet" and precision == "uint8": + precision = "int8" + + cm_input = { + 'action': 'run', + 'automation': 'script', + 'tags': f'generate-run-cmds,mlperf,inference,{var}', + 'quiet': True, + 'env': env, + 'input': inp, + 'v': verbose, + 'implementation': 'tflite-cpp', + 'precision': precision, + 'model': model, + 'scenario': 'SingleStream', + 'execution_mode': execution_mode, + 'test_query_count': '100', + 'adr': { + 'tflite-model': { + 'tags': v + }, + 'mlperf-inference-implementation': { + 'tags': implementation_tags_string + } + } + } + if add_deps_recursive: + # script automation will merge adr and add_deps_recursive + cm_input['add_deps_recursive'] = add_deps_recursive + + if adr: + utils.merge_dicts( + {'dict1': cm_input['adr'], 'dict2': adr, 'append_lists': True, 'append_unique': True}) + + if env.get('CM_MLPERF_INFERENCE_RESULTS_DIR', '') != '': + cm_input['results_dir'] = env['CM_MLPERF_INFERENCE_RESULTS_DIR'] + + if env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '') != '': + cm_input['submission_dir'] = env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'] + + if env.get('CM_MLPERF_FIND_PERFORMANCE_MODE', '') == "yes" and env.get( + 'CM_MLPERF_NO_RERUN', '') != 'yes': + cm_input['rerun'] = True + + if env.get('CM_MLPERF_POWER', '') == "yes": + cm_input['power'] = 'yes' + + if env.get('CM_MLPERF_ACCURACY_MODE', '') == "yes": + cm_input['mode'] = 'accuracy' + print(cm_input) + r = cmind.access(cm_input) + if r['return'] > 0: + return r + + if env.get('CM_MLPERF_PERFORMANCE_MODE', '') == "yes": + cm_input['mode'] = 'performance' + + print(cm_input) + r = cmind.access(cm_input) + if r['return'] > 0: + return r + + if 
env.get('CM_TEST_ONE_RUN', '') == "yes": + return {'return': 0} + + clean_input = { + 'action': 'rm', + 'automation': 'cache', + 'tags': 'get,preprocessed,dataset,_for.mobilenet', + 'quiet': True, + 'v': verbose, + 'f': 'True' + } + r = cmind.access(clean_input) + # if r['return'] > 0: + # return r + return {'return': 0} + + +def postprocess(i): + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/run.sh b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/run.sh new file mode 100644 index 000000000..a9bf588e2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-mobilenet-models/run.sh @@ -0,0 +1 @@ +#!/bin/bash diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/README-extra.md new file mode 100644 index 000000000..80c280055 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/README-extra.md @@ -0,0 +1,10 @@ +# Run MLPerf Inference Submission Checker +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) runs the [MLPerf Inference submission checker](https://github.com/mlcommons/inference/blob/master/tools/submission/submission-checker.py) on a given submission folder. + +## How To +```bash +cm run script --tags=run,mlperf,inference,submission,checker --submitter=[SUBMITTER_NAME] --submission_dir=[SUBMISSION_FOLDER] +``` + +### Additional Options +* `[--skip_compliance]:` Skips the compliance tests diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/README.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/README.md new file mode 100644 index 000000000..68981e322 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/run-mlperf-inference-submission-checker](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/run-mlperf-inference-submission-checker) for the documentation of this CM script. 
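Editor's note: the README-extra above shows the basic invocation; for completeness, here is a sketch combining several of the optional inputs exposed by the `input_mapping` of the `_cm.yaml` below (the submitter name and paths are placeholders):

```bash
# Sketch: run the checker on a submission tree, skip the compliance tests,
# and tar the checked submission directory afterwards
cm run script --tags=run,mlperf,inference,submission,checker \
    --submitter=MyOrg \
    --submission_dir=$HOME/mobilenet_submission_tree \
    --skip_compliance \
    --tar=yes
```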
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/_cm.yaml new file mode 100644 index 000000000..84e712a40 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/_cm.yaml @@ -0,0 +1,105 @@ +alias: run-mlperf-inference-submission-checker +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: MLPerf benchmark support +clean_files: [] +default_env: + CM_MLPERF_SHORT_RUN: 'no' +default_version: master +deps: +- names: + - python + - python3 + tags: get,python3 +- names: + - inference-src + - submission-checker-src + tags: get,mlcommons,inference,src +- tags: get,generic-python-lib,_xlsxwriter +- names: + - pyarrow + tags: get,generic-python-lib,_package.pyarrow +- names: + - pandas + tags: get,generic-python-lib,_pandas + version_min: 1.0.0 +- names: + - get-mlperf-submission-dir + skip_if_env: + CM_MLPERF_INFERENCE_SUBMISSION_DIR: + - 'on' + tags: get,mlperf,submission,dir +- enable_if_env: + CM_TMP_MLPERF_INFERENCE_PREPROCESS_SUBMISSION: + - 'on' + tags: preprocess,mlperf,inference,submission +input_mapping: + extra_args: CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS + extra_model_benchmark_map: CM_MLPERF_EXTRA_MODEL_MAPPING + input: CM_MLPERF_INFERENCE_SUBMISSION_DIR + power: CM_MLPERF_POWER + preprocess: CM_TMP_MLPERF_INFERENCE_PREPROCESS_SUBMISSION + preprocess_submission: CM_TMP_MLPERF_INFERENCE_PREPROCESS_SUBMISSION + push_to_github: CM_MLPERF_RESULT_PUSH_TO_GITHUB + repo_branch: CM_MLPERF_RESULTS_GIT_REPO_BRANCH + repo_name: CM_MLPERF_RESULTS_GIT_REPO_NAME + repo_owner: CM_MLPERF_RESULTS_GIT_REPO_OWNER + skip_compliance: CM_MLPERF_SKIP_COMPLIANCE + skip_power_check: CM_MLPERF_SKIP_POWER_CHECK + src_version: CM_MLPERF_SUBMISSION_CHECKER_VERSION + submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR + submitter: CM_MLPERF_SUBMITTER + tar: CM_TAR_SUBMISSION_DIR +post_deps: +- enable_if_env: + CM_MLPERF_DASHBOARD: + - 'on' + tags: publish-results,dashboard +- enable_if_env: + CM_MLPERF_RESULT_PUSH_TO_GITHUB: + - 'on' + names: + - push-to-github + tags: publish-results,github +- enable_if_env: + CM_TAR_SUBMISSION_DIR: + - 'yes' + tags: run,tar +tags: +- run +- mlc +- mlcommons +- mlperf +- inference +- mlperf-inference +- submission +- checker +- submission-checker +- mlc-submission-checker +uid: 15d03ec2c1af4297 +variations: + short-run: + env: + CM_MLPERF_SHORT_RUN: 'yes' +versions: + master: + adr: + submission-checker-src: + version: master + r3.0: + adr: + submission-checker-src: + version: r3.0 + r3.1: + adr: + submission-checker-src: + version: r3.1 + r4.0: + adr: + submission-checker-src: + version: r4.0 + r4.1: + adr: + submission-checker-src: + version: r4.1 diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/code.py b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/code.py new file mode 100644 index 000000000..85fce01bc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/code.py @@ -0,0 +1,29 @@ +# Developer: Grigori Fursin + +import os +import pandas + + +def main(): + print('=========================================================') + + print('Searching for summary.csv ...') + + if os.path.isfile('summary.csv'): + print('Converting to json ...') + + import pandas + + df = pandas.read_csv('summary.csv').T + + print('') + print(df) + print('') + + df.to_json('summary.json', orient='columns', indent=4) + + 
print('=========================================================') + + +if __name__ == '__main__': + main() diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/customize.py b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/customize.py new file mode 100644 index 000000000..e7746c1ea --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/customize.py @@ -0,0 +1,135 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import cmind as cm +import os +import subprocess + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + q = '"' if os_info['platform'] == 'windows' else "'" + + submission_dir = env.get("CM_MLPERF_INFERENCE_SUBMISSION_DIR", "") + + version = env.get('CM_MLPERF_SUBMISSION_CHECKER_VERSION', '') + + if submission_dir == "": + return {'return': 1, + 'error': 'Please set --env.CM_MLPERF_INFERENCE_SUBMISSION_DIR'} + + submitter = env.get("CM_MLPERF_SUBMITTER", "") # "default") + if ' ' in submitter: + return { + 'return': 1, 'error': 'CM_MLPERF_SUBMITTER cannot contain a space. Please provide a name without space using --submitter input. Given value: {}'.format(submitter)} + + if 'CM_MLPERF_SKIP_COMPLIANCE' in env: + skip_compliance = " --skip_compliance" + else: + skip_compliance = "" + + submission_checker_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission", + "submission_checker.py") + + if env['CM_MLPERF_SHORT_RUN'] == "yes": + import shutil + new_submission_checker_file = os.path.join( + os.path.dirname(submission_checker_file), + "submission_checker1.py") + with open(submission_checker_file, 'r') as file: + data = file.read() + data = data.replace("OFFLINE_MIN_SPQ = 24576", "OFFLINE_MIN_SPQ = 100") + data = data.replace( + "return is_valid, res, inferred", + "return True, res, inferred") + with open(new_submission_checker_file, 'w') as file: + file.write(data) + submission_checker_file = new_submission_checker_file + + if env.get('CM_MLPERF_EXTRA_MODEL_MAPPING', '') != '': + extra_map = ' --extra_model_benchmark_map "' + \ + env['CM_MLPERF_EXTRA_MODEL_MAPPING'] + '"' + else: + extra_map = "" + + if env.get('CM_MLPERF_SKIP_POWER_CHECK', 'no') == "yes": + power_check = " --skip-power-check" + else: + power_check = "" + + extra_args = ' ' + env.get('CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS', '') + + x_submitter = ' --submitter ' + q + submitter + q if submitter != '' else '' + + x_version = ' --version ' + version + ' ' if version != '' else '' + + CMD = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + q + submission_checker_file + q + ' --input ' + q + submission_dir + q + \ + x_submitter + \ + x_version + \ + skip_compliance + extra_map + power_check + extra_args + + x_version = ' --version ' + version[1:] + ' ' if version != '' else '' + + x_submission_repo_name = '' + x_submission_repo_owner = '' + x_submission_repo_branch = '' + + if env.get('CM_MLPERF_RESULTS_GIT_REPO_NAME', '') != '': + x_submission_repo_name = f""" --repository {env['CM_MLPERF_RESULTS_GIT_REPO_NAME']}""" + if env.get('CM_MLPERF_RESULTS_GIT_REPO_OWNER', '') 
!= '':
+        x_submission_repo_owner = f""" --repository-owner {env['CM_MLPERF_RESULTS_GIT_REPO_OWNER']}"""
+    if env.get('CM_MLPERF_RESULTS_GIT_REPO_BRANCH', '') != '':
+        x_submission_repo_branch = f""" --repository-branch {env['CM_MLPERF_RESULTS_GIT_REPO_BRANCH']}"""
+
+    report_generator_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission",
+                                         "generate_final_report.py")
+    env['CM_RUN_CMD'] = CMD
+    print(CMD)
+    env['CM_POST_RUN_CMD'] = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + q + report_generator_file + q + ' --input summary.csv ' + \
+        x_version + \
+        x_submission_repo_name + \
+        x_submission_repo_owner + \
+        x_submission_repo_branch
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+    if env.get('CM_TAR_SUBMISSION_DIR', ''):
+        env['CM_TAR_INPUT_DIR'] = env['CM_MLPERF_INFERENCE_SUBMISSION_DIR']
+
+    x = env.get('MLPERF_INFERENCE_SUBMISSION_TAR_FILE', '')
+    if x != '':
+        env['CM_TAR_OUTFILE'] = x
+
+    if env.get('CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR', '') != '':
+        env['CM_TAR_OUTPUT_DIR'] = env['CM_MLPERF_INFERENCE_SUBMISSION_BASE_DIR']
+
+    x = env.get('MLPERF_INFERENCE_SUBMISSION_SUMMARY', '')
+    if x != '':
+        for y in ['.csv', '.json', '.xlsx']:
+
+            z0 = 'summary' + y
+
+            if os.path.isfile(z0):
+                z1 = x + y
+
+                if os.path.isfile(z1):
+                    os.remove(z1)
+
+                os.rename(z0, z1)
+
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/run.bat b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/run.bat
new file mode 100644
index 000000000..5cbc264a2
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/run.bat
@@ -0,0 +1,6 @@
+echo "%CM_RUN_CMD%"
+%CM_RUN_CMD%
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/run.sh b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/run.sh
new file mode 100644
index 000000000..82434a83b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-inference-submission-checker/run.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+cmd=${CM_RUN_CMD}
+echo "${cmd}"
+eval "${cmd}"
+test $? -eq 0 || exit $?
+
+cmd=${CM_POST_RUN_CMD}
+echo "${cmd}"
+eval "${cmd}"
+test $? -eq 0 || exit $?
+
+${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py
+test $? -eq 0 || exit $?
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/README-extra.md
new file mode 100644
index 000000000..d13278d9b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/README-extra.md
@@ -0,0 +1,15 @@
+# Run MLPerf Power Client Script
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) runs the [MLPerf Power Client script](https://github.com/mlcommons/power-dev/tree/master/ptd_client_server).
+ +## How To +```bash +cm run script --tags=run,mlperf,power,client [--log_dir=<> --power_server=<> \ +--loadgen_logs_dir=<> --ntp_server=<> --run_cmd=<>] +``` + +### Default Values +1. `log_dir`: `logs` +2. `power_server`: `localhost` +3. `loadgen_logs_dir`: `loadgen_logs`, +4. `ntp_server`: `time.google.com` +5. `run_cmd`: `dummy.sh` diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/README.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/README.md new file mode 100644 index 000000000..8ddf14b30 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/run-mlperf-power-client](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/run-mlperf-power-client) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/_cm.yaml new file mode 100644 index 000000000..40604e1e3 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/_cm.yaml @@ -0,0 +1,41 @@ +alias: run-mlperf-power-client +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: MLPerf benchmark support +clean_files: [] +default_env: + CM_MLPERF_POWER_LOG_DIR: logs + CM_MLPERF_POWER_NTP_SERVER: time.google.com + CM_MLPERF_POWER_SERVER_ADDRESS: localhost + CM_MLPERF_RUN_CMD: '' +deps: +- names: + - python + - python3 + tags: get,python3 +- names: + - power-src + tags: get,mlperf,power,src +- tags: get,generic-sys-util,_ntpdate +input_mapping: + loadgen_logs_dir: CM_MLPERF_LOADGEN_LOGS_DIR + log_dir: CM_MLPERF_POWER_LOG_DIR + max_amps: CM_MLPERF_POWER_MAX_AMPS + max_volts: CM_MLPERF_POWER_MAX_VOLTS + ntp_server: CM_MLPERF_POWER_NTP_SERVER + port: CM_MLPERF_POWER_SERVER_PORT + power_server: CM_MLPERF_POWER_SERVER_ADDRESS + run_cmd: CM_MLPERF_RUN_CMD + server: CM_MLPERF_POWER_SERVER_ADDRESS + server_port: CM_MLPERF_POWER_SERVER_PORT + timestamp: CM_MLPERF_POWER_TIMESTAMP +tags: +- run +- mlc +- mlcommons +- mlperf +- power +- client +- power-client +uid: bf6a6d0cc97b48ae diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/customize.py b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/customize.py new file mode 100644 index 000000000..889b3c272 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/customize.py @@ -0,0 +1,58 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import cmind as cm +import os +import configparser + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + if not env['CM_MLPERF_RUN_CMD']: + env['CM_MLPERF_RUN_CMD'] = os.path.join( + i['run_script_input']['path'], "dummy.sh") + + if 'CM_MLPERF_POWER_TIMESTAMP' in env: + timestamp = "" + else: + timestamp = " --no-timestamp-path" + + if 'CM_MLPERF_LOADGEN_LOGS_DIR' not in env: + env['CM_MLPERF_LOADGEN_LOGS_DIR'] = os.path.join( + os.getcwd(), "loadgen_logs") + + run_cmd = env['CM_MLPERF_RUN_CMD'].replace("'", '"') + run_cmd = 
run_cmd.replace('"', '\\"')
+    cmd = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' +\
+        os.path.join(env['CM_MLPERF_POWER_SOURCE'], 'ptd_client_server', 'client.py') + \
+        " -a " + env['CM_MLPERF_POWER_SERVER_ADDRESS'] + \
+        " -p " + env.get('CM_MLPERF_POWER_SERVER_PORT', "4950") + \
+        " -w '" + run_cmd + \
+        "' -L " + env['CM_MLPERF_LOADGEN_LOGS_DIR'] + \
+        " -o " + env['CM_MLPERF_POWER_LOG_DIR'] + \
+        " -n " + env['CM_MLPERF_POWER_NTP_SERVER'] + \
+        timestamp
+
+    if 'CM_MLPERF_POWER_MAX_AMPS' in env and 'CM_MLPERF_POWER_MAX_VOLTS' in env:
+        cmd = cmd + " --max-amps " + env['CM_MLPERF_POWER_MAX_AMPS'] + \
+            " --max-volts " + env['CM_MLPERF_POWER_MAX_VOLTS']
+
+    env['CM_MLPERF_POWER_RUN_CMD'] = cmd
+
+    return {'return': 0}
+
+
+def postprocess(i):
+    return {'return': 0}
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/dummy.sh b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/dummy.sh
new file mode 100644
index 000000000..a796ab609
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/dummy.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+mkdir -p loadgen_logs
+
+# Create mock files with the same names that loadgen produces
+
+echo power_begin $(date --utc +"%m-%d-%Y %T.%3N") | tee loadgen_logs/mlperf_log_detail.txt
+touch loadgen_logs/mlperf_log_accuracy.json
+touch loadgen_logs/mlperf_log_summary.txt
+touch loadgen_logs/mlperf_log_trace.json
+sleep 25
+echo power_end $(date --utc +"%m-%d-%Y %T.%3N") | tee -a loadgen_logs/mlperf_log_detail.txt
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/run.sh b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/run.sh
new file mode 100644
index 000000000..19805cb5b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-client/run.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+if [[ -n ${CM_RUN_DIR} ]]; then
+  cur_dir=${CM_RUN_DIR};
+  cd $cur_dir
+else
+  cur_dir=`pwd`
+fi
+echo "Running power client from $cur_dir"
+
+cmd="${CM_MLPERF_POWER_RUN_CMD}"
+echo $cmd
+eval $cmd
+test $? -eq 0 || exit $?
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/README-extra.md
new file mode 100644
index 000000000..78b0457f7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/README-extra.md
@@ -0,0 +1,17 @@
+# Run MLPerf Power Server Script
+This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) runs the [MLPerf Power Server script](https://github.com/mlcommons/power-dev/tree/master/ptd_client_server).
+
+## How To
+```bash
+cm run script --tags=run,mlperf,power,server [--interface_flag=<> \
+--device_port=<> --outdir=<> --logfile=<> --device_type=<>]
+```
+
+### Default Values
+1. `ntp_server`: `time.google.com`
+2. `interface_flag`: ""
+3. `device_port`: `/dev/usbtmc0`
+4. `device_type`: `49`
+5. `outdir`: `~/mlperf_power_logs`
+6. 
`logfile`: `logs_ptdaemon.txt` + diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/README.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/README.md new file mode 100644 index 000000000..7e2aa68fd --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/run-mlperf-power-server](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/run-mlperf-power-server) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/_cm.yaml new file mode 100644 index 000000000..c4c4546b1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/_cm.yaml @@ -0,0 +1,57 @@ +alias: run-mlperf-power-server +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: MLPerf benchmark support +clean_files: [] +default_env: + CM_MLPERF_POWER_DEVICE_PORT: /dev/usbtmc0 + CM_MLPERF_POWER_DEVICE_TYPE: '49' + CM_MLPERF_POWER_INTERFACE_FLAG: '' + CM_MLPERF_POWER_NTP_SERVER: time.google.com + CM_MLPERF_POWER_SERVER_ADDRESS: 0.0.0.0 + CM_MLPERF_POWER_SERVER_PORT: '4950' + CM_MLPERF_POWER_SERVER_USE_SCREEN: 'no' +deps: +- names: + - python + - python3 + tags: get,python3 +- tags: detect,os +- names: + - power-src + tags: get,mlperf,power,src +- names: + - power-damenon + tags: get,mlperf,power,daemon +- names: + - screen + skip_if_env: + CM_HOST_OS_TYPE: windows + tags: get,generic,sys-util,_screen +- enable_if_env: + CM_HOST_OS_TYPE: windows + names: + - win32 + tags: get,generic-python-lib,_package.pypiwin32 +docker: + device: /dev/usbtmc0 + port_maps: + - 4950:4950 +input_mapping: + device_port: CM_MLPERF_POWER_DEVICE_PORT + device_type: CM_MLPERF_POWER_DEVICE_TYPE + interface_flag: CM_MLPERF_POWER_INTERFACE_FLAG + ntp_server: CM_MLPERF_POWER_NTP_SERVER + conf_file: CM_MLPERF_POWER_SERVER_CONF_FILE + screen: CM_MLPERF_POWER_SERVER_USE_SCREEN + num_analyzers: CM_MLPERF_POWER_NUM_ANALYZERS +tags: +- run +- mlc +- mlcommons +- mlperf +- power +- server +- power-server +uid: 5bc68aaf389a40bd diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/customize.py b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/customize.py new file mode 100644 index 000000000..a9c674a32 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/customize.py @@ -0,0 +1,103 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import cmind as cm +import os +import configparser + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + + # Initialize ConfigParser + config = configparser.ConfigParser() + + if env.get('CM_MLPERF_POWER_SERVER_CONF_FILE', '') != '': + server_config_file = env['CM_MLPERF_POWER_SERVER_CONF_FILE'] + else: + server_config_file = os.path.join( + env.get('CM_MLPERF_POWER_SOURCE', ''), + 'ptd_client_server', + 'server.template.conf' + ) + + # Read the configuration file with error handling + if not 
os.path.exists(server_config_file): + raise FileNotFoundError( + f"Server config file not found: {server_config_file}") + + config.read(server_config_file) + # Update the server section + try: + config['server']['ntpServer'] = env['CM_MLPERF_POWER_NTP_SERVER'] + config['server']['listen'] = f"{env['CM_MLPERF_POWER_SERVER_ADDRESS']} {env['CM_MLPERF_POWER_SERVER_PORT']}" + except KeyError as e: + raise KeyError(f"Missing required environment variable: {e}") + + # Define number of analyzers and network port start + num_analyzers = int(env.get('CM_MLPERF_POWER_NUM_ANALYZERS', 1)) + network_port_start = int( + env.get( + 'CM_MLPERF_POWER_NETWORK_PORT_START', + 8888)) + + # Ensure 'ptd' section exists + if 'ptd' not in config: + config.add_section('ptd') + + config['ptd']['ptd'] = str(env.get('CM_MLPERF_PTD_PATH', '')) + config['ptd']['analyzercount'] = str(num_analyzers) + + # Add analyzers to the configuration + for aid in range(1, num_analyzers + 1): + analyzer_section = f'analyzer{aid}' + if analyzer_section not in config: + config.add_section(analyzer_section) + + # Add the analyzer subsection as keys under the 'ptd' section + config[f'{analyzer_section}']['interfaceFlag'] = str( + env.get('CM_MLPERF_POWER_INTERFACE_FLAG', '')) + config[f'{analyzer_section}']['deviceType'] = str( + env.get('CM_MLPERF_POWER_DEVICE_TYPE', '')) + config[f'{analyzer_section}']['devicePort'] = str( + env.get('CM_MLPERF_POWER_DEVICE_PORT', '')) + config[f'{analyzer_section}']['networkPort'] = str( + network_port_start + aid - 1) + + with open('tmp-power-server.conf', 'w') as configfile: + config.write(configfile) + + print({section: dict(config[section]) for section in config.sections()}) + + if env['CM_HOST_OS_TYPE'] == "windows": + cmd_prefix = "" + else: + cmd_prefix = "sudo " + + cmd = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + os.path.join( + env['CM_MLPERF_POWER_SOURCE'], + 'ptd_client_server', + 'server.py') + ' -c tmp-power-server.conf' + if env.get('CM_MLPERF_POWER_SERVER_USE_SCREEN', 'no') == 'yes': + cmd = cmd_prefix + ' screen -d -m ' + cmd + ' ' + else: + cmd = cmd_prefix + cmd + + env['RUN_CMD'] = cmd + + return {'return': 0} + + +def postprocess(i): + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/run.bat b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/run.bat new file mode 100644 index 000000000..d23f0addf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/run.bat @@ -0,0 +1,7 @@ +@echo off + +echo %RUN_CMD% + +%RUN_CMD% + +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/run.sh b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/run.sh new file mode 100644 index 000000000..1c5f07f33 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-power-server/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash +cmd=${RUN_CMD} +echo $cmd +eval $cmd +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
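Editor's note on the power-server script above: its `customize.py` writes one `analyzerN` section per analyzer into `tmp-power-server.conf` and assigns network ports sequentially starting from `CM_MLPERF_POWER_NETWORK_PORT_START` (8888 by default). A sketch of a two-analyzer launch (untested; the flags follow the `input_mapping` in the power-server `_cm.yaml` above, and the values are illustrative):

```bash
# Sketch: start the MLPerf power server with two analyzers inside screen;
# analyzer1/analyzer2 will be assigned network ports 8888 and 8889
cm run script --tags=run,mlperf,power,server \
    --device_type=49 \
    --device_port=/dev/usbtmc0 \
    --num_analyzers=2 \
    --screen=yes
```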
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/README.md b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/README.md new file mode 100644 index 000000000..baf8822c9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/run-mlperf-training-submission-checker](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/run-mlperf-training-submission-checker) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/_cm.yaml new file mode 100644 index 000000000..661e1ed17 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/_cm.yaml @@ -0,0 +1,71 @@ +alias: run-mlperf-training-submission-checker +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: MLPerf benchmark support +clean_files: [] +default_env: + CM_MLPERF_SHORT_RUN: 'no' +default_version: master +deps: +- names: + - python + - python3 + tags: get,python3 +- names: + - inference-src + - submission-checker-src + tags: get,mlcommons,inference,src +- tags: install,mlperf,logging,from.src +input_mapping: + extra_args: CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS + input: CM_MLPERF_SUBMISSION_DIR + power: CM_MLPERF_POWER + push_to_github: CM_MLPERF_RESULT_PUSH_TO_GITHUB + skip_compliance: CM_MLPERF_SKIP_COMPLIANCE + skip_power_check: CM_MLPERF_SKIP_POWER_CHECK + src_version: CM_MLPERF_SUBMISSION_CHECKER_VERSION + submission_dir: CM_MLPERF_SUBMISSION_DIR + submitter: CM_MLPERF_SUBMITTER + tar: CM_TAR_SUBMISSION_DIR +post_deps: +- enable_if_env: + CM_MLPERF_RESULT_PUSH_TO_GITHUB: + - 'on' + names: + - push-to-github + tags: publish-results,github +- enable_if_env: + CM_TAR_SUBMISSION_DIR: + - 'yes' + tags: run,tar +tags: +- run +- mlc +- mlcommons +- mlperf +- training +- train +- mlperf-training +- submission +- checker +- submission-checker +- mlc-submission-checker +uid: cb5cb60ac9a74d09 +variations: + short-run: + env: + CM_MLPERF_SHORT_RUN: 'yes' +versions: + master: + adr: + submission-checker-src: + version: master + r3.0: + adr: + submission-checker-src: + version: r3.0 + r3.1: + adr: + submission-checker-src: + version: r3.1 diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/customize.py b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/customize.py new file mode 100644 index 000000000..18ee01350 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/customize.py @@ -0,0 +1,54 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import cmind as cm +import os +import subprocess + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + submission_dir = env.get("CM_MLPERF_SUBMISSION_DIR", "") + + version = env.get('CM_MLPERF_SUBMISSION_CHECKER_VERSION', 'v3.1') + + if submission_dir == 
"": + return {'return': 1, 'error': 'Please set CM_MLPERF_SUBMISSION_DIR'} + + submitter = env.get("CM_MLPERF_SUBMITTER", "") # "default") + if ' ' in submitter: + return { + 'return': 1, 'error': 'CM_MLPERF_SUBMITTER cannot contain a space. Please provide a name without space using --submitter input. Given value: {}'.format(submitter)} + + submission_checker_file = os.path.join( + env['CM_MLPERF_LOGGING_REPO_PATH'], + "scripts", + "verify_for_" + version + "_training.sh") + + extra_args = ' ' + env.get('CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS', '') + + CMD = submission_checker_file + " " + submission_dir + + env['CM_RUN_CMD'] = CMD + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + if env.get('CM_TAR_SUBMISSION_DIR'): + env['CM_TAR_INPUT_DIR'] = env.get('CM_MLPERF_SUBMISSION_DIR', '$HOME') + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/run.sh b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/run.sh new file mode 100644 index 000000000..8784f3504 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-mlperf-training-submission-checker/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash +cmd=${CM_RUN_CMD} +echo "${cmd}" +eval "${cmd}" +test $? -eq 0 || exit $? + +cmd=${CM_POST_RUN_CMD} +echo "${cmd}" +eval "${cmd}" +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/run-python/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/run-python/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-python/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/run-python/README.md b/cmx4mlops/cmx4mlops/repo/script/run-python/README.md new file mode 100644 index 000000000..4d66bdffc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-python/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/run-python](https://docs.mlcommons.org/cm4mlops/scripts/Tests/run-python) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/run-python/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/run-python/_cm.yaml new file mode 100644 index 000000000..7901f3de8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-python/_cm.yaml @@ -0,0 +1,16 @@ +alias: run-python +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Tests +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python3 +input_mapping: + command: CM_RUN_PYTHON_CMD +tags: +- run +- python +uid: 75a46d84ee6f49b0 diff --git a/cmx4mlops/cmx4mlops/repo/script/run-python/run.bat b/cmx4mlops/cmx4mlops/repo/script/run-python/run.bat new file mode 100644 index 000000000..95d32d577 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-python/run.bat @@ -0,0 +1,2 @@ +%CM_PYTHON_BIN_WITH_PATH% %CM_RUN_PYTHON_CMD% +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/run-python/run.sh b/cmx4mlops/cmx4mlops/repo/script/run-python/run.sh new file mode 100644 index 000000000..641095ae8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-python/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_RUN_PYTHON_CMD} +test $? -eq 0 || exit $? 
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-terraform/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/run-terraform/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-terraform/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/run-terraform/README-about.md b/cmx4mlops/cmx4mlops/repo/script/run-terraform/README-about.md new file mode 100644 index 000000000..f890c6170 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-terraform/README-about.md @@ -0,0 +1,12 @@ +## Setup for Google Cloud Instances +``` +sudo snap install google-cloud-cli --classic +gcloud auth application-default login +``` + +The above two commands install google-cloud-cli and authorize the user to access it. Once done, you can create a GCP instance using a CM command like the one below. To destroy an instance, repeat the same command with the `--destroy` option. + +``` +cm run script --tags=run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit +``` +Here, `mlperf-inference-tests` is the name of the Google Cloud project created in the [Google Cloud console](https://console.cloud.google.com/apis/dashboard) diff --git a/cmx4mlops/cmx4mlops/repo/script/run-terraform/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/run-terraform/README-extra.md new file mode 100644 index 000000000..47c1f4f30 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-terraform/README-extra.md @@ -0,0 +1 @@ +Please copy aws/credentials.example to aws/credentials.sh after adding your AWS credentials. diff --git a/cmx4mlops/cmx4mlops/repo/script/run-terraform/README.md b/cmx4mlops/cmx4mlops/repo/script/run-terraform/README.md new file mode 100644 index 000000000..419b488f9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-terraform/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Cloud-automation/run-terraform](https://docs.mlcommons.org/cm4mlops/scripts/Cloud-automation/run-terraform) for the documentation of this CM script.
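The README-about above notes that destroying an instance is the same command plus `--destroy`. A sketch of the full round trip through the cmind API, reusing the GCP project name from the README (replace it with your own):

```
import cmind as cm

# Sketch only: provision, then tear down, a GCP instance via run-terraform.
base = {'action': 'run',
        'automation': 'script',
        'tags': 'run,terraform,_gcp,_gcp_project.mlperf-inference-tests',
        'cminit': True}                      # maps to CM_TERRAFORM_CM_INIT

r = cm.access(dict(base))                    # terraform init/plan/apply
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'terraform apply failed'))

r = cm.access(dict(base, destroy=True))      # maps to CM_DESTROY_TERRAFORM
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'terraform destroy failed'))
```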
diff --git a/cmx4mlops/cmx4mlops/repo/script/run-terraform/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/run-terraform/_cm.yaml new file mode 100644 index 000000000..6e00051e0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-terraform/_cm.yaml @@ -0,0 +1,320 @@ +alias: run-terraform +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Cloud automation +clean_files: [] +default_env: + TF_VAR_CPU_COUNT: '1' + TF_VAR_SECURITY_GROUP_ID: sg-0783752c97d2e011d +deps: +- tags: get,terraform +input_mapping: + cminit: CM_TERRAFORM_CM_INIT + destroy: CM_DESTROY_TERRAFORM + gcp_credentials_json_file: CM_GCP_CREDENTIALS_JSON_PATH + key_file: CM_SSH_KEY_FILE + run_cmds: CM_TERRAFORM_RUN_COMMANDS + ssh_key_file: CM_SSH_KEY_FILE +new_env_keys: +- CM_TERRAFORM_RUN_DIR +- CM_TERRAFORM_CONFIG_DIR +new_state_keys: +- CM_TF_NEW_INSTANCES_STATE +post_deps: +- dynamic: true + enable_if_env: + CM_DESTROY_TERRAFORM: + - 'on' + names: + - destroy-cmd + tags: destroy,terraform +tags: +- run +- terraform +uid: ec344bd44af144d7 +variations: + a1.2xlarge: + base: + - aws + - arm64 + env: + TF_VAR_INSTANCE_TYPE: a1.2xlarge + group: aws-instance-type + a1.metal: + base: + - aws + - arm64 + env: + TF_VAR_INSTANCE_TYPE: a1.metal + group: aws-instance-type + a1.xlarge: + base: + - aws + - arm64 + env: + TF_VAR_INSTANCE_TYPE: a1.xlarge + group: aws-instance-type + amazon-linux-2-kernel.#: + env: + TF_VAR_INSTANCE_IMAGE_OS: amazon-linux-2-kernel.# + amazon-linux-2-kernel.510,arm64,us-west-2: + env: + TF_VAR_INSTANCE_IMAGE: ami-0f1a5f5ada0e7da53 + group: aws-instance-image + arm64: + env: + CM_INSTANCE_PLATFORM: arm64 + group: platform + aws: + default: true + default_variations: + aws-instance-type: t2.micro + region: us-west-2 + env: + CM_TERRAFORM_CONFIG_DIR_NAME: aws + group: cloud-provider + aws_instance_image.#: + env: + TF_VAR_INSTANCE_IMAGE: '#' + group: aws-instance-image + aws_instance_image.ami-0735c191cf914754d: + env: + TF_VAR_INSTANCE_IMAGE: ami-0735c191cf914754d + group: aws-instance-image + aws_instance_image.ami-0a0d8589b597d65b3: + env: + TF_VAR_INSTANCE_IMAGE: ami-0a0d8589b597d65b3 + group: aws-instance-image + aws_instance_type.#: + env: + TF_VAR_INSTANCE_TYPE: '#' + group: aws-instance-type + c5.12xlarge: + base: + - aws + env: + TF_VAR_INSTANCE_TYPE: c5.12xlarge + group: aws-instance-type + c5.4xlarge: + base: + - aws + env: + TF_VAR_INSTANCE_TYPE: c5.4xlarge + group: aws-instance-type + c5d.9xlarge: + base: + - aws + env: + TF_VAR_INSTANCE_TYPE: c5d.9xlarge + group: aws-instance-type + debian-cloud/debian-11: + env: + TF_VAR_INSTANCE_IMAGE: debian-cloud/debian-11 + group: gcp-instance-image + f1-micro: + base: + - gcp + env: + TF_VAR_INSTANCE_TYPE: f1-micro + group: gcp-instance-type + g4dn.xlarge: + base: + - aws + env: + TF_VAR_INSTANCE_TYPE: g4dn.xlarge + group: aws-instance-type + gcp: + default_env: + TF_VAR_SSH_PUB_KEY_FILE: $HOME/.ssh/id_rsa.pub + TF_VAR_SSH_USER: asuresh + default_variations: + gcp-instance-image: ubuntu-2204-jammy-v20230114 + gcp-instance-type: f1-micro + instance-name: instance_name.microubuntu2204 + region: region.us-west1 + storage-size: storage_size.120 + zone: zone.us-west1-a + env: + CM_TERRAFORM_CONFIG_DIR_NAME: gcp + group: cloud-provider + gcp_instance_image.#: + env: + TF_VAR_INSTANCE_IMAGE: '#' + group: gcp-instance-image + gcp_instance_type.#: + env: + TF_VAR_INSTANCE_TYPE: '#' + group: gcp-instance-type + gcp_project.#: + env: + TF_VAR_GCP_PROJECT: '#' + group: gcp-project + graviton: + default_variations: + platform: arm64 + env: 
+ CM_TERRAFORM_AWS_GRAVITON_INSTANCE: 'yes' + inf1.2xlarge: + base: + - aws + - inferentia + env: + TF_VAR_INSTANCE_TYPE: inf1.2xlarge + group: aws-instance-type + inf1.xlarge: + base: + - aws + - inferentia + env: + TF_VAR_INSTANCE_TYPE: inf1.xlarge + group: aws-instance-type + inf2.8xlarge: + base: + - aws + - inferentia + env: + TF_VAR_INSTANCE_TYPE: inf2.8xlarge + group: aws-instance-type + inf2.xlarge: + base: + - aws + - inferentia + env: + TF_VAR_INSTANCE_TYPE: inf2.xlarge + group: aws-instance-type + inferentia: + default_variations: + platform: arm64 + env: + CM_TERRAFORM_AWS_INFERENTIA_INSTANCE: 'yes' + inferentia,amazon-linux-2-kernel.510: + default_variations: + aws-instance-image: amazon-linux-2-kernel.510,arm64,us-west-2 + instance_name.#: + env: + TF_VAR_INSTANCE_NAME: '#' + group: instance-name + m7g.2xlarge: + base: + - aws + - arm64 + - graviton + env: + TF_VAR_INSTANCE_TYPE: m7g.2xlarge + group: aws-instance-type + m7g.xlarge: + base: + - aws + - arm64 + - graviton + env: + TF_VAR_INSTANCE_TYPE: m7g.xlarge + group: aws-instance-type + n1-highmem.#: + base: + - gcp + env: + TF_VAR_INSTANCE_TYPE: n1-highmem-# + group: gcp-instance-type + n1-standard.#: + base: + - gcp + env: + TF_VAR_INSTANCE_TYPE: n1-standard-# + group: gcp-instance-type + region.#: + env: + TF_VAR_INSTANCE_REGION: '#' + group: region + rhel.#: + env: + TF_VAR_INSTANCE_IMAGE_OS: rhel.# + rhel.9,x86,us-west-2: + env: + TF_VAR_INSTANCE_IMAGE: ami-0dda7e535b65b6469 + group: aws-instance-image + storage_size.#: + env: + TF_VAR_DISK_GBS: '#' + group: storage-size + storage_size.8: + env: + TF_VAR_DISK_GBS: '8' + group: storage-size + t2.#: + base: + - aws + env: + TF_VAR_INSTANCE_TYPE: t2.# + group: aws-instance-type + t2.2xlarge: + base: + - aws + env: + TF_VAR_INSTANCE_TYPE: t2.2xlarge + group: aws-instance-type + t2.large: + base: + - aws + env: + TF_VAR_INSTANCE_TYPE: t2.large + group: aws-instance-type + t2.medium: + base: + - aws + env: + TF_VAR_INSTANCE_TYPE: t2.medium + group: aws-instance-type + t2.micro: + base: + - aws + env: + TF_VAR_INSTANCE_TYPE: t2.micro + group: aws-instance-type + t2.nano: + base: + - aws + env: + TF_VAR_INSTANCE_TYPE: t2.nano + group: aws-instance-type + t2.small: + base: + - aws + env: + TF_VAR_INSTANCE_TYPE: t2.small + group: aws-instance-type + t2.xlarge: + base: + - aws + env: + TF_VAR_INSTANCE_TYPE: t2.xlarge + group: aws-instance-type + ubuntu-2204-jammy-v20230114: + env: + TF_VAR_INSTANCE_IMAGE: ubuntu-2204-jammy-v20230114 + group: gcp-instance-image + ubuntu.#: + env: + TF_VAR_INSTANCE_IMAGE_OS: ubuntu.# + ubuntu.2204,arm64,us-west-2: + env: + TF_VAR_INSTANCE_IMAGE: ami-079f51a7bcca65b92 + group: aws-instance-image + ubuntu.2204,x86,us-west-2: + env: + TF_VAR_INSTANCE_IMAGE: ami-0735c191cf914754d + group: aws-instance-image + us-west-2: + env: + TF_VAR_INSTANCE_REGION: us-west-2 + group: region + x86: + default: true + env: + CM_INSTANCE_PLATFORM: x86 + group: platform + zone.#: + env: + TF_VAR_INSTANCE_ZONE: '#' + group: zone diff --git a/cmx4mlops/cmx4mlops/repo/script/run-terraform/aws/apply_credentials.sh b/cmx4mlops/cmx4mlops/repo/script/run-terraform/aws/apply_credentials.sh new file mode 100644 index 000000000..ff649c594 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-terraform/aws/apply_credentials.sh @@ -0,0 +1,3 @@ +export TF_VAR_ACCESS_KEY=$AWS_ACCESS_KEY_ID +export TF_VAR_SECRET_KEY=$AWS_SECRET_ACCESS_KEY +export TF_VAR_TOKEN=$AWS_SESSION_TOKEN diff --git a/cmx4mlops/cmx4mlops/repo/script/run-terraform/aws/credentials.example 
b/cmx4mlops/cmx4mlops/repo/script/run-terraform/aws/credentials.example new file mode 100644 index 000000000..dff61bd91 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-terraform/aws/credentials.example @@ -0,0 +1,3 @@ +export AWS_ACCESS_KEY_ID="" +export AWS_SECRET_ACCESS_KEY="" +export AWS_SESSION_TOKEN="IQoJb3JpZ2luX2VjEJH//////////wEafyhbXdlc3QtMiJGMEQCIBulUe3NOGrPDkxmCHXnwBxddgbbaj2rH94tpgMfwmKdD50CR6eKjh5+iea2w+9EUWaZEW4G0CN06JWFEIX2v0k2SLkXODVoG7MAWLMFWuBsssrWfLIRoVc9AODQodivdsGUOYWzqFRQ8HKbDjm+DSX05GjObwuucohkvWZZ3LmkspXQ1sOoaC62GcIcJuhp3pR/ajZ8iASLTIBFhcP8nuif+aDMO6HVRgQ9D60BJtqlPCzQH3bY/KKK9iQHQstcVsw17ne5bcnBgfdVOxFKxbuivkQ1CitxHko86z9gsaalEVxspcAwbRnMQuldi3win09ny8qUtWYqA+wEtc2n5ZPFS4UhF0RyE0IYy7dfrCnWdmv0elBRcfAZ0wQNNbt8iUoUYI1+stQexwHYJOtzLqvzoLLWwqrxwPaIEUZFposB2tJAV+krBq7ueIw50AJa8XKgjn5dTvFTaX4rYWh9ck7i7Q4xiq2CcQRArMRUkJRTCs8f+aBjqnAT7n+GhH7alJzRSjXSJ8Ln3t2KlzkPrraspoy5xqH61+yf5tRE2p511PiK9tl94/r0OCzYo9E0SYYket1TU" diff --git a/cmx4mlops/cmx4mlops/repo/script/run-terraform/aws/main.tf b/cmx4mlops/cmx4mlops/repo/script/run-terraform/aws/main.tf new file mode 100644 index 000000000..dd281310d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-terraform/aws/main.tf @@ -0,0 +1,67 @@ +variable ACCESS_KEY { + type = string + description = "AWS access key" +} +variable SECRET_KEY { + type = string + description = "AWS secret key" +} +variable TOKEN { + type = string + description = "AWS Token" +} +variable INSTANCE_TYPE { + type = string + description = "AWS instance type" +} +variable INSTANCE_REGION { + type = string + description = "AWS instance region" +} +variable INSTANCE_IMAGE { + type = string + description = "AWS instance image" +} +variable SECURITY_GROUP_ID { + type = string + description = "AWS instance security group id" +} +variable CPU_COUNT { + default = 1 + description = "AWS CPU count" +} +variable DISK_GBS { + default = 8 + description = "AWS Disk space in GBs" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 4.0" + } + } +} +# Configure the AWS Provider +provider "aws" { + region = var.INSTANCE_REGION + access_key=var.ACCESS_KEY + secret_key=var.SECRET_KEY + token=var.TOKEN +} + +resource "aws_instance" "cm" { + ami = var.INSTANCE_IMAGE + instance_initiated_shutdown_behavior = "terminate" + instance_type = var.INSTANCE_TYPE + key_name = "cmuser" + vpc_security_group_ids = [ + var.SECURITY_GROUP_ID + ] + root_block_device { + delete_on_termination = true + volume_size = var.DISK_GBS + } +} + diff --git a/cmx4mlops/cmx4mlops/repo/script/run-terraform/customize.py b/cmx4mlops/cmx4mlops/repo/script/run-terraform/customize.py new file mode 100644 index 000000000..60df105e7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-terraform/customize.py @@ -0,0 +1,103 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import cmind as cm +import os +import shutil +import json + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + script_dir = i['run_script_input']['path'] + config_dir = os.path.join( + script_dir, env.get( + 'CM_TERRAFORM_CONFIG_DIR_NAME', '')) + 
env['CM_TERRAFORM_CONFIG_DIR'] = config_dir + cache_dir = os.getcwd() + + print(f"Running terraform from {cache_dir}") + + shutil.copy(os.path.join(config_dir, "main.tf"), cache_dir) + env['CM_TERRAFORM_RUN_DIR'] = cache_dir + + return {'return': 0} + + +def postprocess(i): + env = i['env'] + if env.get('CM_DESTROY_TERRAFORM'): + return {'return': 0} + state = i['state'] + with open("terraform.tfstate") as f: + tfstate = json.load(f) +# print(tfstate) + resources = tfstate['resources'] + aws_resource = next( + (r for r in resources if r['type'] == 'aws_instance'), None) + if aws_resource is None: # e.g. GCP runs expose no aws_instance resources + return {'return': 0} + instances_state = aws_resource['instances'] + state['CM_TF_NEW_INSTANCES_STATE'] = [] + ssh_key_file = env.get('CM_SSH_KEY_FILE') + user = 'ubuntu' + for instance_state in instances_state: + instance_attributes = instance_state['attributes'] + state['CM_TF_NEW_INSTANCES_STATE'].append(instance_attributes) + public_ip = instance_attributes['public_ip'] + if env.get('CM_TERRAFORM_CM_INIT'): + run_input = { + 'automation': 'script', + 'action': 'run', + 'tags': 'remote,run,ssh', + 'env': { + }, + 'host': public_ip, + 'user': user, + 'skip_host_verify': True, + 'ssh_key_file': ssh_key_file, + 'quiet': True, + 'silent': True, + 'run_cmds': [ + "sudo apt-get update", + "sudo apt-get -y upgrade", + "sudo apt-get install -y python3-pip", + "python3 -m pip install cmind", + "source ~/.profile", + "cm pull repo ctuning@mlcommons-ck", + "cm run script --tags=get,sys-utils-cm" + ] + } + if env.get('CM_TERRAFORM_RUN_COMMANDS'): + run_cmds = env.get('CM_TERRAFORM_RUN_COMMANDS') + for cmd in run_cmds: + cmd = cmd.replace(":", "=") + cmd = cmd.replace(";;", ",") + run_input['run_cmds'].append(cmd) + r = cm.access(run_input) + if r['return'] > 0: + return r + # print(r) + print_attr(instance_attributes, "id") + print_attr(instance_attributes, "instance_type") + print_attr(instance_attributes, "public_ip") + print_attr(instance_attributes, "public_dns") + print_attr(instance_attributes, "security_groups") + + return {'return': 0} + + +def print_attr(instance_attributes, key): + if key in instance_attributes: + print(key.upper() + ": " + str(instance_attributes[key])) diff --git a/cmx4mlops/cmx4mlops/repo/script/run-terraform/gcp/apply_credentials.sh b/cmx4mlops/cmx4mlops/repo/script/run-terraform/gcp/apply_credentials.sh new file mode 100644 index 000000000..e69de29bb diff --git a/cmx4mlops/cmx4mlops/repo/script/run-terraform/gcp/main.tf b/cmx4mlops/cmx4mlops/repo/script/run-terraform/gcp/main.tf new file mode 100644 index 000000000..abf47034e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-terraform/gcp/main.tf @@ -0,0 +1,80 @@ +variable INSTANCE_TYPE { + type = string + description = "GCP instance type" +} +variable INSTANCE_NAME { + type = string + description = "GCP instance name" +} +variable INSTANCE_IMAGE { + type = string + description = "GCP instance OS image" +} +variable GCP_PROJECT { + type = string + description = "GCP project ID" +} +variable SECURITY_GROUP_ID { + type = string + description = "GCP instance security group id" +} +variable CPU_COUNT { + default = 1 + description = "GCP CPU count" +} +variable DISK_GBS { + default = 8 + description = "GCP Disk space in GBs" +} +variable SSH_PUB_KEY_FILE { + type = string + description = "Path to SSH public key" +} +variable SSH_USER { + type = string + description = "SSH username" +} + +variable INSTANCE_REGION { + type = string + description = "GCP region" +} + +variable INSTANCE_ZONE { + type = string + description = "GCP zone" +} + + +resource 
"google_compute_instance" "cm" { + name = var.INSTANCE_NAME + machine_type = var.INSTANCE_TYPE + zone = var.INSTANCE_ZONE + project = var.GCP_PROJECT + tags = ["cm"] + + boot_disk { + initialize_params { + image = var.INSTANCE_IMAGE + labels = { + my_label = "value" + } + } + } + + network_interface { + network = "default" + + access_config { + // Ephemeral public IP + } + } + + metadata = { + ssh-keys = "${var.SSH_USER}:${file(var.SSH_PUB_KEY_FILE)}" + } + + metadata_startup_script = "echo hi > /test.txt" + + +} diff --git a/cmx4mlops/cmx4mlops/repo/script/run-terraform/run.sh b/cmx4mlops/cmx4mlops/repo/script/run-terraform/run.sh new file mode 100644 index 000000000..094cffcd9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-terraform/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash +if [[ ${CM_TERRAFORM_CONFIG_DIR} == "aws" ]]; then + source ${CM_TERRAFORM_CONFIG_DIR}/credentials.sh + source ${CM_TERRAFORM_CONFIG_DIR}/apply_credentials.sh +fi + + +if [[ -z $CM_DESTROY_TERRAFORM ]]; then + terraform init -input=false + terraform plan -out=tfplan -input=false + terraform apply -input=false tfplan + test $? -eq 0 || exit $? + sleep 20 +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/run-vllm-server/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/run-vllm-server/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-vllm-server/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/run-vllm-server/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/run-vllm-server/_cm.yaml new file mode 100644 index 000000000..f75a3d9fe --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-vllm-server/_cm.yaml @@ -0,0 +1,143 @@ +uid: c3eff27c791048aa +alias: run-vllm-server + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: false + +category: DevOps automation + +tags: +- run +- server +- vllm +- vllm-server + +input_mapping: + model: CM_VLLM_SERVER_MODEL_NAME + tp_size: CM_VLLM_SERVER_TP_SIZE + pp_size: CM_VLLM_SERVER_PP_SIZE + distributed-executor-backend: CM_VLLM_SERVER_DIST_EXEC_BACKEND + api_key: CM_VLLM_SERVER_API_KEY + skip_docker_model_download: CM_VLLM_SKIP_DOCKER_MODEL_DOWNLOAD + host: CM_VLLM_SERVER_HOST + port: CM_VLLM_SERVER_PORT + uvicorn_log_level: CM_VLLM_SERVER_UVICORN_LOG_LEVEL + allow_credentials: CM_VLLM_SERVER_ALLOW_CREDENTIALS + allowed_origins: CM_VLLM_SERVER_ALLOWED_ORIGINS + allowed_methods: CM_VLLM_SERVER_ALLOWED_METHODS + allowed_headers: CM_VLLM_SERVER_ALLOWED_HEADERS + lora_modules: CM_VLLM_SERVER_LORA_MODULES + prompt_adapters: CM_VLLM_SERVER_PROMPT_ADAPTERS + chat_template: CM_VLLM_SERVER_CHAT_TEMPLATE + response_role: CM_VLLM_SERVER_RESPONSE_ROLE + ssl_keyfile: CM_VLLM_SERVER_SSL_KEYFILE + ssl_certfile: CM_VLLM_SERVER_SSL_CERTFILE + ssl_ca_certs: CM_VLLM_SERVER_SSL_CA_CERTS + ssl_cert_reqs: CM_VLLM_SERVER_SSL_CERT_REQS + root_path: CM_VLLM_SERVER_ROOT_PATH + middleware: CM_VLLM_SERVER_MIDDLEWARE + tokenizer: CM_VLLM_SERVER_TOKENIZER + skip_tokenizer_init: CM_VLLM_SERVER_SKIP_TOKENIZER_INIT + revision: CM_VLLM_SERVER_REVISION + code_revision: CM_VLLM_SERVER_CODE_REVISION + tokenizer_revision: CM_VLLM_SERVER_TOKENIZER_REVISION + tokenizer_mode: CM_VLLM_SERVER_TOKENIZER_MODE + trust_remote_code: CM_VLLM_SERVER_TRUST_REMOTE_CODE + download_dir: CM_VLLM_SERVER_DOWNLOAD_DIR + load_format: CM_VLLM_SERVER_LOAD_FORMAT + dtype: 
CM_VLLM_SERVER_DTYPE + kv_cache_dtype: CM_VLLM_SERVER_KV_CACHE_DTYPE + quantization_param_path: CM_VLLM_SERVER_QUANTIZATION_PARAM_PATH + max_model_len: CM_VLLM_SERVER_MAX_MODEL_LEN + guided_decoding_backend: CM_VLLM_SERVER_GUIDED_DECODING_BACKEND + worker_use_ray: CM_VLLM_SERVER_WORKER_USE_RAY + pipeline_parallel_size: CM_VLLM_SERVER_PIPELINE_PARALLEL_SIZE + max_parallel_loading_workers: CM_VLLM_SERVER_MAX_PARALLEL_LOADING_WORKERS + ray_workers_use_nsight: CM_VLLM_SERVER_RAY_WORKERS_USE_NSIGHT + block_size: CM_VLLM_SERVER_BLOCK_SIZE + enable_prefix_caching: CM_VLLM_SERVER_ENABLE_PREFIX_CACHING + disable_sliding_window: CM_VLLM_SERVER_DISABLE_SLIDING_WINDOW + use_v2_block_manager: CM_VLLM_SERVER_USE_V2_BLOCK_MANAGER + num_lookahead_slots: CM_VLLM_SERVER_NUM_LOOKAHEAD_SLOTS + seed: CM_VLLM_SERVER_SEED + swap_space: CM_VLLM_SERVER_SWAP_SPACE + gpu_memory_utilization: CM_VLLM_SERVER_GPU_MEMORY_UTILIZATION + num_gpu_blocks_override: CM_VLLM_SERVER_NUM_GPU_BLOCKS_OVERRIDE + max_num_batched_tokens: CM_VLLM_SERVER_MAX_NUM_BATCHED_TOKENS + max_num_seqs: CM_VLLM_SERVER_MAX_NUM_SEQS + max_logprobs: CM_VLLM_SERVER_MAX_LOGPROBS + disable_log_stats: CM_VLLM_SERVER_DISABLE_LOG_STATS + quantization: CM_VLLM_SERVER_QUANTIZATION + rope_scaling: CM_VLLM_SERVER_ROPE_SCALING + rope_theta: CM_VLLM_SERVER_ROPE_THETA + enforce_eager: CM_VLLM_SERVER_ENFORCE_EAGER + max_context_len_to_capture: CM_VLLM_SERVER_MAX_CONTEXT_LEN_TO_CAPTURE + max_seq_len_to_capture: CM_VLLM_SERVER_MAX_SEQ_LEN_TO_CAPTURE + disable_custom_all_reduce: CM_VLLM_SERVER_DISABLE_CUSTOM_ALL_REDUCE + tokenizer_pool_size: CM_VLLM_SERVER_TOKENIZER_POOL_SIZE + tokenizer_pool_type: CM_VLLM_SERVER_TOKENIZER_POOL_TYPE + tokenizer_pool_extra_config: CM_VLLM_SERVER_TOKENIZER_POOL_EXTRA_CONFIG + enable_lora: CM_VLLM_SERVER_ENABLE_LORA + max_loras: CM_VLLM_SERVER_MAX_LORAS + max_lora_rank: CM_VLLM_SERVER_MAX_LORA_RANK + lora_extra_vocab_size: CM_VLLM_SERVER_LORA_EXTRA_VOCAB_SIZE + lora_dtype: CM_VLLM_SERVER_LORA_DTYPE + long_lora_scaling_factors: CM_VLLM_SERVER_LONG_LORA_SCALING_FACTORS + max_cpu_loras: CM_VLLM_SERVER_MAX_CPU_LORAS + fully_sharded_loras: CM_VLLM_SERVER_FULLY_SHARDED_LORAS + enable_prompt_adapter: CM_VLLM_SERVER_ENABLE_PROMPT_ADAPTER + max_prompt_adapters: CM_VLLM_SERVER_MAX_PROMPT_ADAPTERS + max_prompt_adapter_token: CM_VLLM_SERVER_MAX_PROMPT_ADAPTER_TOKEN + device: CM_VLLM_SERVER_DEVICE + scheduler_delay_factor: CM_VLLM_SERVER_SCHEDULER_DELAY_FACTOR + enable_chunked_prefill: CM_VLLM_SERVER_ENABLE_CHUNKED_PREFILL + speculative_model: CM_VLLM_SERVER_SPECULATIVE_MODEL + num_speculative_tokens: CM_VLLM_SERVER_NUM_SPECULATIVE_TOKENS + speculative_draft_tensor_parallel_size: CM_VLLM_SERVER_SPECULATIVE_DRAFT_TENSOR_PARALLEL_SIZE + speculative_max_model_len: CM_VLLM_SERVER_SPECULATIVE_MAX_MODEL_LEN + speculative_disable_by_batch_size: CM_VLLM_SERVER_SPECULATIVE_DISABLE_BY_BATCH_SIZE + ngram_prompt_lookup_max: CM_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MAX + ngram_prompt_lookup_min: CM_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MIN + spec_decoding_acceptance_method: CM_VLLM_SERVER_SPEC_DECODING_ACCEPTANCE_METHOD + typical_acceptance_sampler_posterior_threshold: CM_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_THRESHOLD + typical_acceptance_sampler_posterior_alpha: CM_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_ALPHA + model_loader_extra_config: CM_VLLM_SERVER_MODEL_LOADER_EXTRA_CONFIG + preemption_mode: CM_VLLM_SERVER_PREEMPTION_MODE + served_model_name: CM_VLLM_SERVER_SERVED_MODEL_NAME + qlora_adapter_name_or_path: CM_VLLM_SERVER_QLORA_ADAPTER_NAME_OR_PATH + 
otlp_traces_endpoint: CM_VLLM_SERVER_OTLP_TRACES_ENDPOINT + engine_use_ray: CM_VLLM_SERVER_ENGINE_USE_RAY + disable_log_requests: CM_VLLM_SERVER_DISABLE_LOG_REQUESTS + max_log_len: CM_VLLM_SERVER_MAX_LOG_LEN + +deps: + - tags: get,python3,get-python3 + version_max: "3.11.999" + version_max_usable: "3.11.0" + + - tags: get,cuda,_cudnn + names: + - cuda + + - tags: get,ml-model,huggingface,zoo,_clone-repo + update_tags_from_env_with_prefix: + _model-stub.: + - CM_VLLM_SERVER_MODEL_NAME + enable_if_env: + CM_VLLM_SERVER_MODEL_NAME: [ on ] + skip_if_env: + CM_VLLM_SKIP_DOCKER_MODEL_DOWNLOAD: [ on ] + + - tags: get,generic-python-lib,_package.vllm + +docker: + port_maps: + - "8000:8000" + base_image: nvcr.io/nvidia/pytorch:24.06-py3 + interactive: True + extra_run_args: ' --ulimit memlock=-1' + all_gpus: 'yes' + os: "ubuntu" + os_version: "22.04" diff --git a/cmx4mlops/cmx4mlops/repo/script/run-vllm-server/customize.py b/cmx4mlops/cmx4mlops/repo/script/run-vllm-server/customize.py new file mode 100644 index 000000000..9e945c85f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-vllm-server/customize.py @@ -0,0 +1,453 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import subprocess + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + cmd_args = "" + + model_name = env.get("CM_VLLM_SERVER_MODEL_NAME", False) + if not model_name: + return {'return': 1, 'error': 'Model name not specified'} + else: + cmd_args += f" --model {env['CM_ML_MODEL_PATH']} --served-model-name {model_name}" + + tp_size = env.get("CM_VLLM_SERVER_TP_SIZE", False) + if tp_size: + cmd_args += f" --tensor-parallel-size {tp_size}" + + pp_size = env.get("CM_VLLM_SERVER_PP_SIZE", False) + if pp_size: + cmd_args += f" --pipeline-parallel-size {pp_size}" + + api_key = env.get("CM_VLLM_SERVER_API_KEY", "root") + if api_key: + cmd_args += f" --api-key {api_key}" + + distributed_executor_backend = env.get( + "CM_VLLM_SERVER_DIST_EXEC_BACKEND", False) + if distributed_executor_backend: + cmd_args += f" --distributed-executor-backend {distributed_executor_backend}" + + host = env.get("CM_VLLM_SERVER_HOST", False) + if host: + cmd_args += f" --host {host}" + + port = env.get("CM_VLLM_SERVER_PORT", False) + if port: + cmd_args += f" --port {port}" + + uvicorn_log_level = env.get("CM_VLLM_SERVER_UVICORN_LOG_LEVEL", False) + if uvicorn_log_level: + cmd_args += f" --uvicorn-log-level {uvicorn_log_level}" + + allow_credentials = env.get("CM_VLLM_SERVER_ALLOW_CREDENTIALS", False) + if allow_credentials: + cmd_args += f" --allow-credentials" + + allowed_origins = env.get("CM_VLLM_SERVER_ALLOWED_ORIGINS", False) + if allowed_origins: + cmd_args += f" --allowed-origins {allowed_origins}" + + allowed_methods = env.get("CM_VLLM_SERVER_ALLOWED_METHODS", False) + if allowed_methods: + cmd_args += f" --allowed-methods {allowed_methods}" + + allowed_headers = env.get("CM_VLLM_SERVER_ALLOWED_HEADERS", False) + if allowed_headers: + cmd_args += f" --allowed-headers {allowed_headers}" + + lora_modules = 
env.get("CM_VLLM_SERVER_LORA_MODULES", False) + if lora_modules: + cmd_args += f" --lora-modules {lora_modules}" + + prompt_adapters = env.get("CM_VLLM_SERVER_PROMPT_ADAPTERS", False) + if prompt_adapters: + cmd_args += f" --prompt-adapters {prompt_adapters}" + + chat_template = env.get("CM_VLLM_SERVER_CHAT_TEMPLATE", False) + if chat_template: + cmd_args += f" --chat-template {chat_template}" + + response_role = env.get("CM_VLLM_SERVER_RESPONSE_ROLE", False) + if response_role: + cmd_args += f" --response-role {response_role}" + + ssl_keyfile = env.get("CM_VLLM_SERVER_SSL_KEYFILE", False) + if ssl_keyfile: + cmd_args += f" --ssl-keyfile {ssl_keyfile}" + + ssl_certfile = env.get("CM_VLLM_SERVER_SSL_CERTFILE", False) + if ssl_certfile: + cmd_args += f" --ssl-certfile {ssl_certfile}" + + ssl_ca_certs = env.get("CM_VLLM_SERVER_SSL_CA_CERTS", False) + if ssl_ca_certs: + cmd_args += f" --ssl-ca-certs {ssl_ca_certs}" + + ssl_cert_reqs = env.get("CM_VLLM_SERVER_SSL_CERT_REQS", False) + if ssl_cert_reqs: + cmd_args += f" --ssl-cert-reqs {ssl_cert_reqs}" + + root_path = env.get("CM_VLLM_SERVER_ROOT_PATH", False) + if root_path: + cmd_args += f" --root-path {root_path}" + + middleware = env.get("CM_VLLM_SERVER_MIDDLEWARE", False) + if middleware: + cmd_args += f" --middleware {middleware}" + + tokenizer = env.get("CM_VLLM_SERVER_TOKENIZER", False) + if tokenizer: + cmd_args += f" --tokenizer {tokenizer}" + + skip_tokenizer_init = env.get("CM_VLLM_SERVER_SKIP_TOKENIZER_INIT", False) + if skip_tokenizer_init: + cmd_args += f" --skip-tokenizer-init" + + revision = env.get("CM_VLLM_SERVER_REVISION", False) + if revision: + cmd_args += f" --revision {revision}" + + code_revision = env.get("CM_VLLM_SERVER_CODE_REVISION", False) + if code_revision: + cmd_args += f" --code-revision {code_revision}" + + tokenizer_revision = env.get("CM_VLLM_SERVER_TOKENIZER_REVISION", False) + if tokenizer_revision: + cmd_args += f" --tokenizer-revision {tokenizer_revision}" + + tokenizer_mode = env.get("CM_VLLM_SERVER_TOKENIZER_MODE", False) + if tokenizer_mode: + cmd_args += f" --tokenizer-mode {tokenizer_mode}" + + trust_remote_code = env.get("CM_VLLM_SERVER_TRUST_REMOTE_CODE", False) + if trust_remote_code: + cmd_args += f" --trust-remote-code" + + download_dir = env.get("CM_VLLM_SERVER_DOWNLOAD_DIR", False) + if download_dir: + cmd_args += f" --download-dir {download_dir}" + + load_format = env.get("CM_VLLM_SERVER_LOAD_FORMAT", False) + if load_format: + cmd_args += f" --load-format {load_format}" + + dtype = env.get("CM_VLLM_SERVER_DTYPE", False) + if dtype: + cmd_args += f" --dtype {dtype}" + + kv_cache_dtype = env.get("CM_VLLM_SERVER_KV_CACHE_DTYPE", False) + if kv_cache_dtype: + cmd_args += f" --kv-cache-dtype {kv_cache_dtype}" + + quantization_param_path = env.get( + "CM_VLLM_SERVER_QUANTIZATION_PARAM_PATH", False) + if quantization_param_path: + cmd_args += f" --quantization-param-path {quantization_param_path}" + + max_model_len = env.get("CM_VLLM_SERVER_MAX_MODEL_LEN", False) + if max_model_len: + cmd_args += f" --max-model-len {max_model_len}" + + guided_decoding_backend = env.get( + "CM_VLLM_SERVER_GUIDED_DECODING_BACKEND", False) + if guided_decoding_backend: + cmd_args += f" --guided-decoding-backend {guided_decoding_backend}" + + worker_use_ray = env.get("CM_VLLM_SERVER_WORKER_USE_RAY", False) + if worker_use_ray: + cmd_args += f" --worker-use-ray" + + max_parallel_loading_workers = env.get( + "CM_VLLM_SERVER_MAX_PARALLEL_LOADING_WORKERS", False) + if max_parallel_loading_workers: + cmd_args += f" 
--max-parallel-loading-workers {max_parallel_loading_workers}" + + ray_workers_use_nsight = env.get( + "CM_VLLM_SERVER_RAY_WORKERS_USE_NSIGHT", False) + if ray_workers_use_nsight: + cmd_args += f" --ray-workers-use-nsight" + + block_size = env.get("CM_VLLM_SERVER_BLOCK_SIZE", False) + if block_size: + cmd_args += f" --block-size {block_size}" + + enable_prefix_caching = env.get( + "CM_VLLM_SERVER_ENABLE_PREFIX_CACHING", False) + if enable_prefix_caching: + cmd_args += f" --enable-prefix-caching" + + disable_sliding_window = env.get( + "CM_VLLM_SERVER_DISABLE_SLIDING_WINDOW", False) + if disable_sliding_window: + cmd_args += f" --disable-sliding-window" + + use_v2_block_manager = env.get( + "CM_VLLM_SERVER_USE_V2_BLOCK_MANAGER", False) + if use_v2_block_manager: + cmd_args += f" --use-v2-block-manager" + + num_lookahead_slots = env.get("CM_VLLM_SERVER_NUM_LOOKAHEAD_SLOTS", False) + if num_lookahead_slots: + cmd_args += f" --num-lookahead-slots {num_lookahead_slots}" + + seed = env.get("CM_VLLM_SERVER_SEED", False) + if seed: + cmd_args += f" --seed {seed}" + + swap_space = env.get("CM_VLLM_SERVER_SWAP_SPACE", False) + if swap_space: + cmd_args += f" --swap-space {swap_space}" + + gpu_memory_utilization = env.get( + "CM_VLLM_SERVER_GPU_MEMORY_UTILIZATION", False) + if gpu_memory_utilization: + cmd_args += f" --gpu-memory-utilization {gpu_memory_utilization}" + + num_gpu_blocks_override = env.get( + "CM_VLLM_SERVER_NUM_GPU_BLOCKS_OVERRIDE", False) + if num_gpu_blocks_override: + cmd_args += f" --num-gpu-blocks-override {num_gpu_blocks_override}" + + max_num_batched_tokens = env.get( + "CM_VLLM_SERVER_MAX_NUM_BATCHED_TOKENS", False) + if max_num_batched_tokens: + cmd_args += f" --max-num-batched-tokens {max_num_batched_tokens}" + + max_num_seqs = env.get("CM_VLLM_SERVER_MAX_NUM_SEQS", False) + if max_num_seqs: + cmd_args += f" --max-num-seqs {max_num_seqs}" + + max_logprobs = env.get("CM_VLLM_SERVER_MAX_LOGPROBS", False) + if max_logprobs: + cmd_args += f" --max-logprobs {max_logprobs}" + + disable_log_stats = env.get("CM_VLLM_SERVER_DISABLE_LOG_STATS", False) + if disable_log_stats: + cmd_args += f" --disable-log-stats" + + quantization = env.get("CM_VLLM_SERVER_QUANTIZATION", False) + if quantization: + cmd_args += f" --quantization {quantization}" + + rope_scaling = env.get("CM_VLLM_SERVER_ROPE_SCALING", False) + if rope_scaling: + cmd_args += f" --rope-scaling {rope_scaling}" + + rope_theta = env.get("CM_VLLM_SERVER_ROPE_THETA", False) + if rope_theta: + cmd_args += f" --rope-theta {rope_theta}" + + enforce_eager = env.get("CM_VLLM_SERVER_ENFORCE_EAGER", False) + if enforce_eager: + cmd_args += f" --enforce-eager" + + max_context_len_to_capture = env.get( + "CM_VLLM_SERVER_MAX_CONTEXT_LEN_TO_CAPTURE", False) + if max_context_len_to_capture: + cmd_args += f" --max-context-len-to-capture {max_context_len_to_capture}" + + max_seq_len_to_capture = env.get( + "CM_VLLM_SERVER_MAX_SEQ_LEN_TO_CAPTURE", False) + if max_seq_len_to_capture: + cmd_args += f" --max-seq-len-to-capture {max_seq_len_to_capture}" + + disable_custom_all_reduce = env.get( + "CM_VLLM_SERVER_DISABLE_CUSTOM_ALL_REDUCE", False) + if disable_custom_all_reduce: + cmd_args += f" --disable-custom-all-reduce" + + tokenizer_pool_size = env.get("CM_VLLM_SERVER_TOKENIZER_POOL_SIZE", False) + if tokenizer_pool_size: + cmd_args += f" --tokenizer-pool-size {tokenizer_pool_size}" + + tokenizer_pool_type = env.get("CM_VLLM_SERVER_TOKENIZER_POOL_TYPE", False) + if tokenizer_pool_type: + cmd_args += f" --tokenizer-pool-type 
{tokenizer_pool_type}" + + tokenizer_pool_extra_config = env.get( + "CM_VLLM_SERVER_TOKENIZER_POOL_EXTRA_CONFIG", False) + if tokenizer_pool_extra_config: + cmd_args += f" --tokenizer-pool-extra-config {tokenizer_pool_extra_config}" + + enable_lora = env.get("CM_VLLM_SERVER_ENABLE_LORA", False) + if enable_lora: + cmd_args += f" --enable-lora" + + max_loras = env.get("CM_VLLM_SERVER_MAX_LORAS", False) + if max_loras: + cmd_args += f" --max-loras {max_loras}" + + max_lora_rank = env.get("CM_VLLM_SERVER_MAX_LORA_RANK", False) + if max_lora_rank: + cmd_args += f" --max-lora-rank {max_lora_rank}" + + lora_extra_vocab_size = env.get( + "CM_VLLM_SERVER_LORA_EXTRA_VOCAB_SIZE", False) + if lora_extra_vocab_size: + cmd_args += f" --lora-extra-vocab-size {lora_extra_vocab_size}" + + lora_dtype = env.get("CM_VLLM_SERVER_LORA_DTYPE", False) + if lora_dtype: + cmd_args += f" --lora-dtype {lora_dtype}" + + long_lora_scaling_factors = env.get( + "CM_VLLM_SERVER_LONG_LORA_SCALING_FACTORS", False) + if long_lora_scaling_factors: + cmd_args += f" --long-lora-scaling-factors {long_lora_scaling_factors}" + + max_cpu_loras = env.get("CM_VLLM_SERVER_MAX_CPU_LORAS", False) + if max_cpu_loras: + cmd_args += f" --max-cpu-loras {max_cpu_loras}" + + fully_sharded_loras = env.get("CM_VLLM_SERVER_FULLY_SHARDED_LORAS", False) + if fully_sharded_loras: + cmd_args += f" --fully-sharded-loras" + + enable_prompt_adapter = env.get( + "CM_VLLM_SERVER_ENABLE_PROMPT_ADAPTER", False) + if enable_prompt_adapter: + cmd_args += f" --enable-prompt-adapter" + + max_prompt_adapters = env.get("CM_VLLM_SERVER_MAX_PROMPT_ADAPTERS", False) + if max_prompt_adapters: + cmd_args += f" --max-prompt-adapters {max_prompt_adapters}" + + max_prompt_adapter_token = env.get( + "CM_VLLM_SERVER_MAX_PROMPT_ADAPTER_TOKEN", False) + if max_prompt_adapter_token: + cmd_args += f" --max-prompt-adapter-token {max_prompt_adapter_token}" + + device = env.get("CM_VLLM_SERVER_DEVICE", False) + if device: + cmd_args += f" --device {device}" + + scheduler_delay_factor = env.get( + "CM_VLLM_SERVER_SCHEDULER_DELAY_FACTOR", False) + if scheduler_delay_factor: + cmd_args += f" --scheduler-delay-factor {scheduler_delay_factor}" + + enable_chunked_prefill = env.get( + "CM_VLLM_SERVER_ENABLE_CHUNKED_PREFILL", False) + if enable_chunked_prefill: + cmd_args += f" --enable-chunked-prefill" + + speculative_model = env.get("CM_VLLM_SERVER_SPECULATIVE_MODEL", False) + if speculative_model: + cmd_args += f" --speculative-model {speculative_model}" + + num_speculative_tokens = env.get( + "CM_VLLM_SERVER_NUM_SPECULATIVE_TOKENS", False) + if num_speculative_tokens: + cmd_args += f" --num-speculative-tokens {num_speculative_tokens}" + + speculative_draft_tensor_parallel_size = env.get( + "CM_VLLM_SERVER_SPECULATIVE_DRAFT_TENSOR_PARALLEL_SIZE", False) + if speculative_draft_tensor_parallel_size: + cmd_args += f" --speculative-draft-tensor-parallel-size {speculative_draft_tensor_parallel_size}" + + speculative_max_model_len = env.get( + "CM_VLLM_SERVER_SPECULATIVE_MAX_MODEL_LEN", False) + if speculative_max_model_len: + cmd_args += f" --speculative-max-model-len {speculative_max_model_len}" + + speculative_disable_by_batch_size = env.get( + "CM_VLLM_SERVER_SPECULATIVE_DISABLE_BY_BATCH_SIZE", False) + if speculative_disable_by_batch_size: + cmd_args += f" --speculative-disable-by-batch-size {speculative_disable_by_batch_size}" + + ngram_prompt_lookup_max = env.get( + "CM_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MAX", False) + if ngram_prompt_lookup_max: + cmd_args += f" 
--ngram-prompt-lookup-max {ngram_prompt_lookup_max}" + + ngram_prompt_lookup_min = env.get( + "CM_VLLM_SERVER_NGRAM_PROMPT_LOOKUP_MIN", False) + if ngram_prompt_lookup_min: + cmd_args += f" --ngram-prompt-lookup-min {ngram_prompt_lookup_min}" + + spec_decoding_acceptance_method = env.get( + "CM_VLLM_SERVER_SPEC_DECODING_ACCEPTANCE_METHOD", False) + if spec_decoding_acceptance_method: + cmd_args += f" --spec-decoding-acceptance-method {spec_decoding_acceptance_method}" + + typical_acceptance_sampler_posterior_threshold = env.get( + "CM_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_THRESHOLD", False) + if typical_acceptance_sampler_posterior_threshold: + cmd_args += f" --typical-acceptance-sampler-posterior-threshold {typical_acceptance_sampler_posterior_threshold}" + + typical_acceptance_sampler_posterior_alpha = env.get( + "CM_VLLM_SERVER_TYPICAL_ACCEPTANCE_SAMPLER_POSTERIOR_ALPHA", False) + if typical_acceptance_sampler_posterior_alpha: + cmd_args += f" --typical-acceptance-sampler-posterior-alpha {typical_acceptance_sampler_posterior_alpha}" + + model_loader_extra_config = env.get( + "CM_VLLM_SERVER_MODEL_LOADER_EXTRA_CONFIG", False) + if model_loader_extra_config: + cmd_args += f" --model-loader-extra-config {model_loader_extra_config}" + + preemption_mode = env.get("CM_VLLM_SERVER_PREEMPTION_MODE", False) + if preemption_mode: + cmd_args += f" --preemption-mode {preemption_mode}" + + served_model_name = env.get("CM_VLLM_SERVER_SERVED_MODEL_NAME", False) + if served_model_name: + cmd_args += f" --served-model-name {served_model_name}" + + qlora_adapter_name_or_path = env.get( + "CM_VLLM_SERVER_QLORA_ADAPTER_NAME_OR_PATH", False) + if qlora_adapter_name_or_path: + cmd_args += f" --qlora-adapter-name-or-path {qlora_adapter_name_or_path}" + + otlp_traces_endpoint = env.get( + "CM_VLLM_SERVER_OTLP_TRACES_ENDPOINT", False) + if otlp_traces_endpoint: + cmd_args += f" --otlp-traces-endpoint {otlp_traces_endpoint}" + + engine_use_ray = env.get("CM_VLLM_SERVER_ENGINE_USE_RAY", False) + if engine_use_ray: + cmd_args += f" --engine-use-ray" + + disable_log_requests = env.get( + "CM_VLLM_SERVER_DISABLE_LOG_REQUESTS", False) + if disable_log_requests: + cmd_args += f" --disable-log-requests" + + max_log_len = env.get("CM_VLLM_SERVER_MAX_LOG_LEN", False) + if max_log_len: + cmd_args += f" --max-log-len {max_log_len}" + + cmd = f"{env['CM_PYTHON_BIN_WITH_PATH']} -m vllm.entrypoints.openai.api_server {cmd_args}" + print(cmd) + + env['CM_VLLM_RUN_CMD'] = cmd + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/run-vllm-server/run.sh b/cmx4mlops/cmx4mlops/repo/script/run-vllm-server/run.sh new file mode 100644 index 000000000..176c323c5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/run-vllm-server/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +echo ${CM_VLLM_RUN_CMD} + +${CM_VLLM_RUN_CMD} +test $? -eq 0 || exit 1 diff --git a/cmx4mlops/cmx4mlops/repo/script/runtime-system-infos/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/runtime-system-infos/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/runtime-system-infos/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
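As a usage illustration for the run-vllm-server script above: the inputs mirror its input_mapping (model, tp_size, api_key, ...), which customize.py translates into `vllm.entrypoints.openai.api_server` flags. The model id below is a placeholder; any HuggingFace repo id accepted by the `get,ml-model,huggingface` dependency should work:

```
import cmind as cm

# Sketch only: starts the OpenAI-compatible vLLM server through the script above.
r = cm.access({'action': 'run',
               'automation': 'script',
               'tags': 'run,vllm-server',
               'model': 'NousResearch/Meta-Llama-3-8B-Instruct',  # placeholder model stub
               'tp_size': '2',                                    # --tensor-parallel-size
               'api_key': 'root',                                 # script default
               'quiet': True})
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'vLLM server failed to start'))
```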
diff --git a/cmx4mlops/cmx4mlops/repo/script/runtime-system-infos/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/runtime-system-infos/_cm.yaml new file mode 100644 index 000000000..4bf8b8b17 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/runtime-system-infos/_cm.yaml @@ -0,0 +1,51 @@ +# Identification of this CM script +alias: runtime-system-infos +uid: 755cf27627784001 +cache: false +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: "MLPerf benchmark support" +docker: + real_run: False + +# User-friendly tags to find this CM script +tags: + - runtime + - system + - utilisation + - infos + +input_mapping: + log_dir: CM_LOGS_DIR + interval: CM_SYSTEM_INFO_MEASUREMENT_INTERVAL + + +# Dependencies on other CM scripts + +deps: + + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Install system dependencies on a given host + - tags: get,sys-utils-cm + + # Detect python3 + - tags: get,python3 + names: + - python + - python3 + +variations: + all: # TODO: get both cpu and gpu measurements + group: devices + + cpu: # TODO: get the cpu measurements + group: devices + + cuda: # TODO: get the gpu(cuda) measurements + group: devices diff --git a/cmx4mlops/cmx4mlops/repo/script/runtime-system-infos/customize.py b/cmx4mlops/cmx4mlops/repo/script/runtime-system-infos/customize.py new file mode 100644 index 000000000..7f77fd534 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/runtime-system-infos/customize.py @@ -0,0 +1,106 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import shutil +# used to measure the system infos (not yet tested for obtaining GPU info) +import psutil +import csv # used to write the measurements in CSV format to a txt file +from datetime import datetime, timezone +import time +import signal +import sys + +# format of time measurement in mlperf logs +# :::MLLOG {"key": "power_begin", "value": "07-20-2024 17:54:38.800", "time_ms": 1580.314812, "namespace": "mlperf::logging", "event_type": "POINT_IN_TIME", "metadata": {"is_error": false, "is_warning": false, "file": "loadgen.cc", "line_no": 564, "pid": 9473, "tid": 9473}} +# :::MLLOG {"key": "power_end", "value": "07-20-2024 17:54:39.111", "time_ms": 1580.314812, "namespace": "mlperf::logging", "event_type": "POINT_IN_TIME", "metadata": {"is_error": false, "is_warning": false, "file": "loadgen.cc", "line_no": 566, "pid": 9473, "tid": 9473}} + +# in order to safely close when receiving an interrupt signal +# argument sig: signal number +# argument frame: current stack frame + + +def signal_handler(sig, frame): + print("Signal received, closing the system information file safely.") + f.close() + sys.exit(0) + + +# Register signal handlers for SIGTERM +signal.signal(signal.SIGTERM, signal_handler) + + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + if env.get("CM_RUN_DIR", "") == "": + env['CM_RUN_DIR'] = os.getcwd() + + logs_dir = env.get('CM_LOGS_DIR', env['CM_RUN_DIR']) + 
log_json_file_path = os.path.join(logs_dir, 'sys_utilisation_info.txt') + + interval = int(env.get('CM_SYSTEM_INFO_MEASUREMENT_INTERVAL', '2')) + + print(f"The system info dumps are written to the folder: {logs_dir}") + + print("WARNING: This script is still under development. Only memory measurements are supported as of now!") + + print("Started measuring system info!") + + csv_headers = [ + 'timestamp', + 'cpu_utilisation', + 'total_memory_gb', + 'used_memory_gb'] + + # made available globally so that signal_handler can close the file on kill signals + # as of now only SIGTERM is handled + global f + while True: + with open(log_json_file_path, 'a', newline='') as f: + writer = csv.DictWriter(f, fieldnames=csv_headers) + # If the file is empty, write headers + if f.tell() == 0: + writer.writeheader() + + memory = psutil.virtual_memory() + cpu_util = psutil.cpu_percent(interval=0) + total_memory_gb = memory.total / (1024 ** 3) + used_memory_gb = memory.used / (1024 ** 3) + + data = { + 'timestamp': datetime.now(timezone.utc).isoformat(), + 'cpu_utilisation': cpu_util, + 'total_memory_gb': total_memory_gb, + 'used_memory_gb': used_memory_gb + } + + # Write data as a row to CSV file + writer.writerow(data) + time.sleep(interval) + f.close() + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/README.md b/cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/README.md new file mode 100644 index 000000000..7f179a306 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts//save-mlperf-inference-implementation-state](https://docs.mlcommons.org/cm4mlops/scripts//save-mlperf-inference-implementation-state) for the documentation of this CM script. 
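Since the runtime-system-infos monitor above appends plain CSV rows, post-processing is straightforward. A sketch that reads `sys_utilisation_info.txt` back (column names match the `csv_headers` in customize.py) and reports peak memory and average CPU utilisation:

```
import csv

# Sketch only: summarize the CSV rows written by the runtime-system-infos monitor.
with open('sys_utilisation_info.txt') as f:
    rows = list(csv.DictReader(f))

peak_mem = max(float(row['used_memory_gb']) for row in rows)
avg_cpu = sum(float(row['cpu_utilisation']) for row in rows) / len(rows)
print(f"peak used memory: {peak_mem:.2f} GB, average CPU utilisation: {avg_cpu:.1f}%")
```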
diff --git a/cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/_cm.yaml new file mode 100644 index 000000000..4f1deee8e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/_cm.yaml @@ -0,0 +1,13 @@ +alias: save-mlperf-inference-implementation-state +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +new_state_keys: + - mlperf-inference-implementation +tags: +- save +- mlperf +- inference +- implementation +- state +uid: b14b813229c444f8 diff --git a/cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/customize.py b/cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/customize.py new file mode 100644 index 000000000..6ce5426bf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/save-mlperf-inference-implementation-state/customize.py @@ -0,0 +1,79 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + state = i['state'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if not state.get( + 'mlperf-inference-implementation'): # No state information. Just returning + return {'return': 0} + + if env.get('CM_MLPERF_README', "") == "yes": + import cmind as cm + inp = i['input'] + + script_tags = state['mlperf-inference-implementation'].get( + 'script_tags', '') + script_adr = state['mlperf-inference-implementation'].get( + 'script_adr', {}) + + if script_tags != '': + cm_input = {'action': 'run', + 'automation': 'script', + 'tags': script_tags, + 'adr': script_adr, + 'env': env, + 'print_deps': True, + 'quiet': True, + 'silent': True, + 'fake_run': True + } + + r = cm.access(cm_input) + if r['return'] > 0: + return r + + state['mlperf-inference-implementation']['print_deps'] = r['new_state']['print_deps'] + + if env.get('CM_DUMP_VERSION_INFO', True): + + if state['mlperf-inference-implementation'].get('script_id', '') == '': + state['mlperf-inference-implementation']['script_id'] = '' + + script_id = state['mlperf-inference-implementation']['script_id'] + run_state = i['input']['run_state'] + version_info = {} + version_info[script_id] = run_state['version_info'] + + state['mlperf-inference-implementation']['version_info'] = version_info + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
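A sketch of the fake-run pattern that the save-mlperf-inference-implementation-state customize above relies on: `fake_run=True` makes the script automation resolve dependencies without executing the benchmark, and `print_deps=True` records them in the returned state (the tags below are placeholders for an actual implementation's `script_tags`):

```
import cmind as cm

# Sketch only: collect an implementation's dependency list without running it.
r = cm.access({'action': 'run',
               'automation': 'script',
               'tags': 'app,mlperf,inference',   # placeholder implementation tags
               'print_deps': True,
               'fake_run': True,
               'quiet': True,
               'silent': True})
if r['return'] > 0:
    raise RuntimeError(r.get('error', 'fake run failed'))
print(r['new_state']['print_deps'])
```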
diff --git a/cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/README.md b/cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/README.md new file mode 100644 index 000000000..b50f6ccdb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/set-device-settings-qaic](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/set-device-settings-qaic) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/_cm.yaml new file mode 100644 index 000000000..ad88ba330 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/_cm.yaml @@ -0,0 +1,39 @@ +alias: set-device-settings-qaic +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: DevOps automation +default_env: + CM_QAIC_DEVICES: '0' +deps: +- tags: detect-os +- tags: get,qaic,platform,sdk +docker_input_mapping: {} +input_description: {} +input_mapping: {} +new_env_keys: +- CM_QAIC_DEVICE_* +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- set +- device +- qaic +- ai100 +- cloud +- performance +- power +- setting +- mode +- vc +- ecc +uid: 408a1a1563b44780 +variations: + ecc: + env: + CM_QAIC_ECC: 'yes' + vc.#: + env: + CM_QAIC_VC: '#' diff --git a/cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/customize.py b/cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/customize.py new file mode 100644 index 000000000..5aa6aaf3e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/customize.py @@ -0,0 +1,55 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_QAIC_ECC', '') == 'yes': + import json + for device in env['CM_QAIC_DEVICES'].split(","): + ecc_template = {} + ecc_template['request'] = [] + ecc_template['request'].append({}) + ecc_template['request'][0]['qid'] = device + ecc_template['request'][0]['dev_config'] = {} + ecc_template['request'][0]['dev_config']['update_ras_ecc_config_request'] = { + } + ecc_template['request'][0]['dev_config']['update_ras_ecc_config_request']['ras_ecc'] = [ + ] + ecc_template['request'][0]['dev_config']['update_ras_ecc_config_request']['ras_ecc'].append( + "RAS_DDR_ECC") + with open("request_" + device + ".json", "w") as f: + f.write(json.dumps(ecc_template)) + + if env.get('CM_QAIC_VC', '') != '': + env['CM_QAIC_VC_HEX'] = hex(int(env['CM_QAIC_VC'])) + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/run.sh b/cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/run.sh new file mode 100644 index 000000000..cdc11ac73 --- /dev/null +++ 
b/cmx4mlops/cmx4mlops/repo/script/set-device-settings-qaic/run.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" +IFS="," read -r -a devices <<< "$CM_QAIC_DEVICES" + +if [[ -n ${CM_QAIC_VC} ]]; then + for device in "${devices[@]}" + do + run "sudo ${CM_QAIC_TOOLS_PATH}/qaic-diag -d $device -m 0x4B 0x66 0x05 0x1 ${CM_QAIC_VC_HEX}" + done +fi + +if [[ ${CM_QAIC_ECC} == "yes" ]]; then + for device in "${devices[@]}" + do + run "sudo ${CM_QAIC_TOOLS_PATH}/qaic-monitor-json -i request_$device.json" + run "rm request_$device.json" + done +fi + diff --git a/cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/README.md b/cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/README.md new file mode 100644 index 000000000..c061fc680 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/set-echo-off-win](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/set-echo-off-win) for the documentation of this CM script.
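The per-device request files consumed by `run.sh` above are generated by `customize.py` of the same script; their shape is easier to see as a standalone sketch (the device id is illustrative):

```python
import json

device = '0'  # one entry of CM_QAIC_DEVICES; the id is illustrative

ecc_template = {
    'request': [{
        'qid': device,
        'dev_config': {
            'update_ras_ecc_config_request': {
                'ras_ecc': ['RAS_DDR_ECC']
            }
        }
    }]
}

# Same file name that run.sh later passes to qaic-monitor-json
with open('request_' + device + '.json', 'w') as f:
    f.write(json.dumps(ecc_template))
```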
diff --git a/cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/_cm.yaml new file mode 100644 index 000000000..ebb9d59cc --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/_cm.yaml @@ -0,0 +1,14 @@ +alias: set-echo-off-win +automation_alias: script +automation_uid: 5b4e0237da074764 +category: DevOps automation +new_state_keys: +- script_prefix +tags: +- set +- echo +- 'off' +- win +- echo-off-win +- echo-off +uid: 49d94b57524f4fcf diff --git a/cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/customize.py b/cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/customize.py new file mode 100644 index 000000000..0472d1791 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-echo-off-win/customize.py @@ -0,0 +1,37 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + state = i['state'] + + # If windows, download here otherwise use run.sh + if os_info['platform'] == 'windows': + + script_prefix = state.get('script_prefix', []) + + s = '@echo off' + if s not in script_prefix: + script_prefix.insert(0, s) + + state['script_prefix'] = script_prefix + + # Test to skip next dependency + # env = i['env'] + # env['CM_SKIP_SYS_UTILS'] = 'YES' + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/README.md b/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/README.md new file mode 100644 index 000000000..cf8702783 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/set-performance-mode](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/set-performance-mode) for the documentation of this CM script. 
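The `set-echo-off-win` script above works purely through the shared CM state: it prepends `@echo off` to the `script_prefix` list that the script automation emits at the top of generated batch files. A sketch of the idempotent update, wrapped in a hypothetical helper for clarity:

```python
state = {}  # shared CM state dictionary


def add_script_prefix(state, line='@echo off'):
    # Hypothetical helper mirroring the logic in customize.py above
    script_prefix = state.get('script_prefix', [])
    if line not in script_prefix:  # idempotent: a repeated run changes nothing
        script_prefix.insert(0, line)
    state['script_prefix'] = script_prefix


add_script_prefix(state)
add_script_prefix(state)
assert state['script_prefix'] == ['@echo off']
```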
diff --git a/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/_cm.yaml new file mode 100644 index 000000000..6a820e286 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/_cm.yaml @@ -0,0 +1,42 @@ +alias: set-performance-mode +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: DevOps automation +deps: +- tags: detect-os +- tags: detect-cpu +docker_input_mapping: {} +input_description: {} +input_mapping: {} +new_env_keys: +- OMP_* +new_state_keys: [] +post_deps: [] +posthook_deps: [] +prehook_deps: [] +tags: +- set +- system +- performance +- power +- mode +uid: 2c0ab7b64692443d +variations: + cpu: + default: 'true' + env: + CM_SET_PERFORMANCE_MODE_OF: cpu + group: device + performance: + default: true + env: + CM_SET_PERFORMANCE_MODE: performance + group: performance-mode + power: + env: + CM_SET_PERFORMANCE_MODE: power + group: power + reproducibility: + env: + CM_SET_OS_PERFORMANCE_REPRODUCIBILITY_MODE: 'yes' diff --git a/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/customize.py b/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/customize.py new file mode 100644 index 000000000..5509e702f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/customize.py @@ -0,0 +1,36 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + env['OMP_PROC_BIND'] = 'true' + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/run-ubuntu.sh b/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/run-ubuntu.sh new file mode 100644 index 000000000..fcec44246 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/run-ubuntu.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} +CM_SUDO="sudo" +#Add your run commands here... 
+# run "$CM_RUN_CMD" +run "${CM_SUDO} apt-get install -y linux-tools-common linux-tools-generic linux-tools-`uname -r`" +run "${CM_SUDO} cpupower frequency-set -g performance" +if [[ ${CM_SET_OS_PERFORMANCE_REPRODUCIBILITY_MODE} != "no" ]]; then + run "${CM_SUDO} sysctl -w vm.dirty_ratio=8" + run "${CM_SUDO} sysctl -w vm.swappiness=1" + run "${CM_SUDO} sysctl -w vm.zone_reclaim_mode=1" + run "${CM_SUDO} sync; ${CM_SUDO} sysctl -w vm.drop_caches=3" + run "${CM_SUDO} sysctl -w kernel.randomize_va_space=0" +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/run.bat b/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/run.bat new file mode 100644 index 000000000..648302ca7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/run.sh b/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/run.sh new file mode 100644 index 000000000..3a584c10c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-performance-mode/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... +# run "$CM_RUN_CMD" diff --git a/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/README.md b/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/README.md new file mode 100644 index 000000000..8b3b4bfc0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/set-sqlite-dir](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/set-sqlite-dir) for the documentation of this CM script.
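A hedged example of invoking the script above through the CM Python API, selecting the default `performance` mode for the `cpu` device group via variation tags (the tag set follows `_cm.yaml` above; the exact variations chosen here are illustrative):

```python
import cmind as cm

# Select the 'performance' mode for the 'cpu' device group via variation
# tags, as defined in _cm.yaml above.
r = cm.access({'action': 'run',
               'automation': 'script',
               'tags': 'set,system,performance,power,mode,_performance,_cpu',
               'quiet': 'yes'})
if r['return'] > 0:
    raise Exception(r.get('error', 'failed to set performance mode'))
```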
diff --git a/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/_cm.yaml new file mode 100644 index 000000000..37f348259 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/_cm.yaml @@ -0,0 +1,22 @@ +alias: set-sqlite-dir +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: DevOps automation +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python3 +env: {} +input_mapping: + path: CM_SQLITE_PATH +new_env_keys: +- CM_SQLITE_PATH +tags: +- set +- sqlite +- dir +- sqlite-dir +uid: 05904966355a43ac diff --git a/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/code.py b/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/code.py new file mode 100644 index 000000000..dcff6e4a8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/code.py @@ -0,0 +1 @@ +import sqlite3 diff --git a/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/customize.py b/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/customize.py new file mode 100644 index 000000000..d4e27fe7b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/customize.py @@ -0,0 +1,21 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +import os + + +def postprocess(i): + + env = i['env'] + + env['CM_SQLITE_PATH'] = os.getcwd() + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/run.bat b/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/run.bat new file mode 100644 index 000000000..37f249b0f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/run.bat @@ -0,0 +1,2 @@ +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\code.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/run.sh b/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/run.sh new file mode 100644 index 000000000..9b94917d9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-sqlite-dir/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/code.py +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/set-user-limits/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/set-user-limits/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-user-limits/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
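`set-sqlite-dir` only verifies that `sqlite3` is importable (`code.py`) and then exports its cached working directory. A hedged usage sketch via the CM Python API, assuming the run result exposes `new_env` as declared by `new_env_keys` above:

```python
import cmind as cm

r = cm.access({'action': 'run',
               'automation': 'script',
               'tags': 'set,sqlite,dir',
               'quiet': 'yes'})
if r['return'] > 0:
    raise Exception(r.get('error', 'set-sqlite-dir failed'))

# CM_SQLITE_PATH is exported through new_env_keys in _cm.yaml above
print(r.get('new_env', {}).get('CM_SQLITE_PATH'))
```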
diff --git a/cmx4mlops/cmx4mlops/repo/script/set-user-limits/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/set-user-limits/_cm.yaml new file mode 100644 index 000000000..6097298c2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-user-limits/_cm.yaml @@ -0,0 +1,14 @@ +alias: set-user-limits +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +tags: +- set +- user +- limits +- limit +uid: 49dd1856b37342ac +variations: + large-nofile: + env: + CM_ULIMIT_NOFILE: 9999 diff --git a/cmx4mlops/cmx4mlops/repo/script/set-user-limits/customize.py b/cmx4mlops/cmx4mlops/repo/script/set-user-limits/customize.py new file mode 100644 index 000000000..84f9b1f22 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-user-limits/customize.py @@ -0,0 +1,42 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + cmds = [] + + if env.get('CM_ULIMIT_NOFILE', '') != '': + cmds.append(f"ulimit -n {env['CM_ULIMIT_NOFILE']}") + + env['CM_RUN_CMD'] = " && ".join(cmds) + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/set-user-limits/run.sh b/cmx4mlops/cmx4mlops/repo/script/set-user-limits/run.sh new file mode 100644 index 000000000..4c23c380e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-user-limits/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + +echo "Running: " +echo "${CM_RUN_CMD}" +echo "" + +if [[ ${CM_FAKE_RUN} != "yes" ]]; then + eval "${CM_RUN_CMD}" + test $? -eq 0 || exit 1 +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/set-venv/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/set-venv/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-venv/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
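Note that `ulimit` only affects the shell in which it runs, so the `large-nofile` variation raises the limit for the `run.sh` process tree rather than for the calling shell. A standalone sketch of how `CM_RUN_CMD` is assembled by the customize step above:

```python
env = {'CM_ULIMIT_NOFILE': '9999'}  # set by the large-nofile variation

cmds = []
if env.get('CM_ULIMIT_NOFILE', '') != '':
    cmds.append(f"ulimit -n {env['CM_ULIMIT_NOFILE']}")

# Joined into a single command line that run.sh evaluates
env['CM_RUN_CMD'] = " && ".join(cmds)
assert env['CM_RUN_CMD'] == 'ulimit -n 9999'
```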
diff --git a/cmx4mlops/cmx4mlops/repo/script/set-venv/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/set-venv/README-extra.md new file mode 100644 index 000000000..987ad1f67 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-venv/README-extra.md @@ -0,0 +1,6 @@ +# Examples + +```bash +cmr "set venv" mlperf-test +cmr "set venv" mlperf-test2 --python=/usr/bin/python3 +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/set-venv/README.md b/cmx4mlops/cmx4mlops/repo/script/set-venv/README.md new file mode 100644 index 000000000..b08493ba2 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-venv/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts//set-venv](https://docs.mlcommons.org/cm4mlops/scripts//set-venv) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/set-venv/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/set-venv/_cm.yaml new file mode 100644 index 000000000..40b08b9f1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-venv/_cm.yaml @@ -0,0 +1,14 @@ +alias: set-venv +uid: 07163dd7d6cd4026 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +input_mapping: + python: CM_SET_VENV_PYTHON + +cache: false + +tags: +- set +- venv diff --git a/cmx4mlops/cmx4mlops/repo/script/set-venv/customize.py b/cmx4mlops/cmx4mlops/repo/script/set-venv/customize.py new file mode 100644 index 000000000..cafdac977 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/set-venv/customize.py @@ -0,0 +1,112 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + ############################################################ + cur_dir = os.getcwd() + + name = env.get('CM_NAME', '') + if name == '': + artifacts = i.get('input', {}).get('artifacts', []) + if len(artifacts) > 0: + name = artifacts[0] + if name == '': + name = 'default' + + if os_info['platform'] == 'windows': + activate_script = os.path.join('Scripts', 'activate.bat') + else: + activate_script = os.path.join('bin', 'activate') + + activate_script2 = os.path.join(name, activate_script) + + if not os.path.isfile(activate_script2): + force_python_path = env.get('CM_SET_VENV_PYTHON', '') + + if force_python_path != '' and not os.path.isfile(force_python_path): + return {'return': 1, 'error': 'python executable not found: {}'.format( + force_python_path)} + + if os_info['platform'] == 'windows': + python_path = 'python.exe' if force_python_path == '' else force_python_path + create_dir = ' & md {}\\work' + else: + python_path = 'python3' if force_python_path == '' else force_python_path + create_dir = ' ; mkdir {}/work' + + cmd = python_path + ' -m venv ' + name + create_dir.format(name) + + print('====================================================================') + + print('Creating venv: "{}" ...'.format(cmd)) + os.system(cmd) + + if os.path.isfile(activate_script2): + script_file = 'venv-' + name + if os_info['platform'] == 'windows': + script_file += 
'.bat' + xcmd = script_file + else: + script_file += '.sh' + xcmd = 'source ' + script_file + + if not os.path.isfile(script_file): + + work_dir = os.path.join(name, 'work') + if not os.path.isdir(work_dir): + os.makedirs(work_dir) + + if os_info['platform'] == 'windows': + shell = os.environ.get('CM_SET_VENV_SHELL', '') + if shell == '': + shell = env.get('CM_SET_VENV_SHELL', '') + if shell != '': + shell = shell.replace('CM_SET_VENV_WORK', 'work') + if shell == '': + shell = 'cmd' + cmd = 'cd {} & call {} & set CM_REPOS=%CD%\\{}\\CM & {}\n'.format( + name, activate_script, name, shell) + else: + cmd = '#!/bin/bash\n\ncd {} ; source {} ; export CM_REPOS=$PWD/CM ; cd work\n'.format( + name, activate_script) + + with open(script_file, 'w') as f: + f.write(cmd) + + print('====================================================================') + print('Please run the following command:') + print('') + print(xcmd) + print('====================================================================') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/tar-my-folder/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/tar-my-folder/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/tar-my-folder/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/tar-my-folder/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/tar-my-folder/README-extra.md new file mode 100644 index 000000000..8c9b52508 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/tar-my-folder/README-extra.md @@ -0,0 +1,12 @@ +# Compress using tar +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) compresses a given folder and generates a `.tar.gz` file. + +## How To +```bash +cm run script --tags=run,tar --input_dir=[DIR_PATH] +``` + + +### Additional Options +* `--output_dir`: Directory in which to generate the output file. Default: current working directory. +* `--outfile`: Output file name. Default: `<input folder name>.tar.gz` diff --git a/cmx4mlops/cmx4mlops/repo/script/tar-my-folder/README.md b/cmx4mlops/cmx4mlops/repo/script/tar-my-folder/README.md new file mode 100644 index 000000000..d6a20206c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/tar-my-folder/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/tar-my-folder](https://docs.mlcommons.org/cm4mlops/scripts/DevOps-automation/tar-my-folder) for the documentation of this CM script.
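On Linux, `set-venv` generates a small `venv-<name>.sh` wrapper that activates the environment and pins `CM_REPOS` inside it. A sketch of the file it writes, using the `mlperf-test` name from `README-extra.md` (the values are illustrative):

```python
import os

name = 'mlperf-test'  # example virtual environment name from README-extra.md
activate_script = os.path.join('bin', 'activate')  # relative to the venv dir

# Content of the generated venv-<name>.sh wrapper (see customize.py above)
wrapper = '#!/bin/bash\n\ncd {} ; source {} ; export CM_REPOS=$PWD/CM ; cd work\n'.format(
    name, activate_script)

with open('venv-' + name + '.sh', 'w') as f:
    f.write(wrapper)
```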
diff --git a/cmx4mlops/cmx4mlops/repo/script/tar-my-folder/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/tar-my-folder/_cm.yaml new file mode 100644 index 000000000..100e27eb7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/tar-my-folder/_cm.yaml @@ -0,0 +1,15 @@ +alias: tar-my-folder +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: DevOps automation +clean_files: [] +deps: [] +input_mapping: + input_dir: CM_TAR_INPUT_DIR + outfile: CM_TAR_OUTFILE + output_dir: CM_TAR_OUTPUT_DIR +tags: +- run +- tar +uid: 3784212e986c456b diff --git a/cmx4mlops/cmx4mlops/repo/script/tar-my-folder/customize.py b/cmx4mlops/cmx4mlops/repo/script/tar-my-folder/customize.py new file mode 100644 index 000000000..1bada957f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/tar-my-folder/customize.py @@ -0,0 +1,43 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import cmind as cm +import os +import subprocess +from os.path import exists + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + input_dir = env.get("CM_TAR_INPUT_DIR", "") + if input_dir == "": + return {'return': 1, 'error': 'Please set CM_TAR_INPUT_DIR'} + output_dir = env.get("CM_TAR_OUTPUT_DIR", "") + if output_dir == "": + output_dir = os.getcwd() + output_file = env.get("CM_TAR_OUTFILE", "") + input_dirname = os.path.basename(input_dir) + if output_file == "": + output_file = input_dirname + ".tar.gz" + from pathlib import Path + input_path = Path(input_dir) + cd_dir = input_path.parent.absolute() + CMD = 'tar --directory ' + \ + str(cd_dir) + ' -czf ' + os.path.join(output_dir, + output_file) + ' ' + input_dirname + print(CMD) + ret = os.system(CMD) + print("Tar file " + os.path.join(output_dir, output_file) + " created") + + return {'return': ret} diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/README-extra.md new file mode 100644 index 000000000..582991f6d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/README-extra.md @@ -0,0 +1 @@ +# CM script diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/README.md b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/README.md new file mode 100644 index 000000000..e2626fc25 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/test-cm-core](https://docs.mlcommons.org/cm4mlops/scripts/Tests/test-cm-core) for the documentation of this CM script. 
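The tar command assembled by the customize step above archives the input folder relative to its parent directory, so the tarball unpacks to a single top-level folder. A standalone sketch with an illustrative path:

```python
import os

input_dir = '/home/user/results'   # illustrative CM_TAR_INPUT_DIR
output_dir = os.getcwd()           # default CM_TAR_OUTPUT_DIR
input_dirname = os.path.basename(input_dir)
output_file = input_dirname + '.tar.gz'  # default CM_TAR_OUTFILE

cmd = 'tar --directory {} -czf {} {}'.format(
    os.path.dirname(input_dir),
    os.path.join(output_dir, output_file),
    input_dirname)

print(cmd)  # tar --directory /home/user -czf <cwd>/results.tar.gz results
```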
diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/_cm.yaml new file mode 100644 index 000000000..420d119f8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/_cm.yaml @@ -0,0 +1,14 @@ +alias: test-cm-core +uid: 2c2fb9d20dc64cf3 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: Tests + +tags: +- test +- cm +- core + +cache: false diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/customize.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/customize.py new file mode 100644 index 000000000..d6049c17a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/customize.py @@ -0,0 +1,28 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/run.bat b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/run.bat new file mode 100644 index 000000000..271224ef9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/run.bat @@ -0,0 +1,3 @@ +rem native script + +echo "TBD" diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/run.sh b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/run.sh new file mode 100644 index 000000000..997c0c33e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + +echo "TBD" + +#Add your run commands here... 
+# run "$CM_RUN_CMD" diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/check.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/check.py new file mode 100644 index 000000000..4883116c0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/check.py @@ -0,0 +1,16 @@ +def check_return(r): + if 'return' not in r: + raise Exception( + 'CM access function should always return key \'return\'!') + if 'error' in r: + raise Exception(r['error']) + + +def check_list(r, string, found=True): + check_return(r) + if 'list' not in r: + raise Exception('CM search should return a list!') + if len(r['list']) < 1 and found: + raise Exception('CM search returned an empty list for ' + string) + if len(r['list']) > 0 and not found: + raise Exception('CM search returned at least one entry for ' + string) diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/process_dockerfile.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/process_dockerfile.py new file mode 100644 index 000000000..1d23ec601 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/process_dockerfile.py @@ -0,0 +1,33 @@ +import sys +import os +import cmind as cm +import check as checks +import json +import yaml + +files = sys.argv[1:] + +for file in files: + if not os.path.isfile(file): + continue + if not file.endswith("_cm.json") and not file.endswith("_cm.yaml"): + continue + if not file.startswith(os.path.join("script")): + continue + + script_path = os.path.dirname(file) + + f = open(file) + + if file.endswith(".json"): + data = json.load(f) + elif file.endswith(".yaml"): + data = yaml.safe_load(f) + + uid = data['uid'] + + r = cm.access({'action': 'dockerfile', + 'automation': 'script', + 'artifact': uid, + 'quiet': 'yes'}) + checks.check_return(r) diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/process_readme.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/process_readme.py new file mode 100644 index 000000000..03daec793 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/process_readme.py @@ -0,0 +1,27 @@ +import sys +import os +import cmind as cm +import check as checks +import json +import yaml + +files = sys.argv[1:] + +for file in files: + if not os.path.isfile(file): + continue + if not file.endswith("_cm.json") and not file.endswith("_cm.yaml"): + continue + if not file.startswith(os.path.join("script")): + continue + script_path = os.path.dirname(file) + f = open(file) + if file.endswith(".json"): + data = json.load(f) + elif file.endswith(".yaml"): + data = yaml.safe_load(f) + uid = data['uid'] + + r = cm.access({'action': 'doc', 'automation': 'script', + 'artifact': uid, 'quiet': 'yes'}) + checks.check_return(r) diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_deps.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_deps.py new file mode 100644 index 000000000..37d75fd4c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_deps.py @@ -0,0 +1,25 @@ +# This test covers version, variation, compilation from src, add_deps, +# add_deps_recursive, deps, post_deps + +import cmind as cm +import check as checks + +# MLPerf v3.0 inference is now very outdated and we are testing inference +# in separate tests + +# r = cm.access({'action':'run', 'automation':'script', 'tags': 'generate-run-cmds,mlperf', 'adr': +# {'loadgen': {'version': 'r3.0'}, 'compiler': {'tags': "gcc"}}, 'env': {'CM_MODEL': 'resnet50', +# 'CM_DEVICE': 
'cpu', 'CM_BACKEND': 'onnxruntime'}, 'quiet': 'yes'}) +# checks.check_return(r) +# +# r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'loadgen,version-r3.0,deps-python-non-virtual'}) +# checks.check_list(r, "loadgen,version-r3.0,deps-python-non-virtual") +# +# r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'inference,src,version-r3.0'}) +# checks.check_list(r, "inference,src,version-r3.0") +# +# r = cm.access({'action':'run', 'automation':'script', 'tags': 'app,mlperf,inference,generic,_python,_resnet50,_onnxruntime,_cpu,_r3.0_default', 'adr': {'mlperf-implementation': { 'version': 'master'}}, 'quiet': 'yes'}) +# checks.check_return(r) +# +# r = cm.access({'action':'run', 'automation':'script', 'tags': 'app,mlperf,inference,generic,_python,_resnet50,_tf,_cpu,_r3.0_default', 'adr': {'mlperf-implementation': { 'version': 'master'}}, 'quiet': 'yes'}) +# checks.check_return(r) diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_docker.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_docker.py new file mode 100644 index 000000000..ad867a2a1 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_docker.py @@ -0,0 +1,39 @@ +# This test covers version, variation, compilation from src, +# add_deps_recursive, post_deps + +import cmind as cm +import check as checks + +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'run,docker,container', + 'add_deps_recursive': { + 'compiler': {'tags': "gcc"} + }, + 'docker_cm_repo': 'mlcommons@cm4mlops', + 'image_name': 'cm-script-app-image-classification-onnx-py', + 'env': { + 'CM_DOCKER_RUN_SCRIPT_TAGS': 'app,image-classification,onnx,python', + 'CM_DOCKER_IMAGE_BASE': 'ubuntu:22.04', + 'CM_DOCKER_IMAGE_REPO': 'cknowledge' + }, + 'quiet': 'yes' + }) +checks.check_return(r) + +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'run,docker,container', + 'add_deps_recursive': { + 'compiler': {'tags': "gcc"} + }, + 'docker_cm_repo': 'mlcommons@cm4mlops', + 'image_name': 'cm-script-app-image-classification-onnx-py', + 'env': { + 'CM_DOCKER_RUN_SCRIPT_TAGS': 'app,image-classification,onnx,python', + 'CM_DOCKER_IMAGE_BASE': 'ubuntu:24.04', + 'CM_DOCKER_IMAGE_REPO': 'local' + }, + 'quiet': 'yes' + }) +checks.check_return(r) diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_features.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_features.py new file mode 100644 index 000000000..fa8ccb706 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_features.py @@ -0,0 +1,55 @@ +# This test covers +# 1. python-virtual-env and update_deps inside customize.py +# 2. 
cache search using "-" prefix + +import cmind as cm +import check as checks + +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'install,python-venv', + 'name': 'test', + 'quiet': 'yes'}) +checks.check_return(r) + +r = cm.access({'action': 'search', 'automation': 'cache', + 'tags': 'get,python,virtual,name-test'}) +checks.check_list(r, "get,python-venv") + +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'get,dataset,preprocessed,imagenet,_NHWC', + 'quiet': 'yes'}) +checks.check_return(r) + +r = cm.access({'action': 'search', 'automation': 'cache', + 'tags': 'get,dataset,preprocessed,imagenet,-_NCHW'}) +checks.check_list(r, "_NHWC") + +r = cm.access({'action': 'search', 'automation': 'cache', + 'tags': 'get,dataset,preprocessed,imagenet,-_NHWC'}) +checks.check_list(r, "_NHWC", False) + +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'get,generic-python-lib,_package.scipy', + 'version': '1.9.3', + 'quiet': 'yes'}) + +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'get,generic-python-lib,_package.scipy', + 'version': '1.9.2', + 'quiet': 'yes'}) + +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'get,generic-python-lib,_package.scipy', + 'version': '1.9.3', + 'only_execute_from_cache': True, + 'quiet': 'yes'}) + +# r should return error +if 'error' not in r: + print(r) + raise Exception('Invalidated cache entry for scipy==1.9.3 found in cache') diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_install.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_install.py new file mode 100644 index 000000000..d4fb93ec7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/script/test_install.py @@ -0,0 +1,15 @@ +# This test covers script installation, version, shared library install + +import cmind as cm +import check as checks + +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'python,src,install,_shared', + 'version': '3.9.10', + 'quiet': 'true'}) +checks.check_return(r) + +r = cm.access({'action': 'search', 'automation': 'cache', + 'tags': 'python,src,install,_shared,version-3.9.10'}) +checks.check_list(r, "python,src,install,_shared,version-3.9.10") diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/test_cm.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/test_cm.py new file mode 100644 index 000000000..821e1571d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/test_cm.py @@ -0,0 +1,17 @@ +try: + import cmind as cm + + r = cm.access(['test', 'script']) + if 'return' not in r: + raise Exception( + 'CM access function should always return key \'return\'!') + exit(0) + +except ImportError as e: + from sys import stderr + from subprocess import call + print( + 'WARNING: CM module for python is not installed & jupyter notebooks will not be supported', + file=stderr) + retcode = call(['cm', 'test', 'script']) + exit(retcode) diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/test_search_speed.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/test_search_speed.py new file mode 100644 index 000000000..577c4f0b8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/test_search_speed.py @@ -0,0 +1,26 @@ +import cmind as cm +import time + +times = [] + +steps = 10 + +print('Running search with tags {} times ...'.format(steps)) + +for step in range(steps): + + start = time.time() + r = cm.access({'action': 'search', + 'automation': 'script', + 'tags': 'detect,os'}) + timer 
= time.time() - start + + if r['return'] > 0: + cm.error(r) + + times.append(timer) + +step = 0 +for t in times: + step += 1 + print("{}) {:0.3f} sec.".format(step, t)) diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_retinanet.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_retinanet.py new file mode 100644 index 000000000..0b96f17f5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_retinanet.py @@ -0,0 +1,37 @@ +# This test covers version, variation, compilation from src, add_deps, +# add_deps_recursive, deps, post_deps + +import cmind as cm +from pathlib import Path +import sys +import os + +sys.path.insert( + 1, + os.path.join( + Path(__file__).parent.parent.resolve(), + "script")) +import check as checks # noqa + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu', 'adr': + {'python': {'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'scenario': 'Offline', + 'mode': 'accuracy', 'test_query_count': '10', 'rerun': 'true', 'quiet': 'yes'}) +checks.check_return(r) + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu', 'adr': + {'python': {'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'scenario': 'Offline', + 'mode': 'performance', 'test_query_count': '10', 'rerun': 'true', 'quiet': 'yes'}) +checks.check_return(r) + +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'install,python-venv', + 'version': '3.10.8', + 'name': 'mlperf'}) +checks.check_return(r) + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short,_dashboard', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'submitter': 'Community', + 'implementation': 'cpp', 'hw_name': 'default', 'model': 'retinanet', 'backend': 'onnxruntime', 'device': 'cpu', 'scenario': 'Offline', + 'test_query_count': '10', 'clean': 'true', 'quiet': 'yes'}) +checks.check_return(r) diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_tvm.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_tvm.py new file mode 100644 index 000000000..4b621ebe8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_tvm.py @@ -0,0 +1,28 @@ +# This test covers version, variation, compilation from src, add_deps, +# add_deps_recursive, deps, post_deps + +import cmind as cm + +from pathlib import Path +import sys +import os + +sys.path.insert( + 1, + os.path.join( + Path(__file__).parent.parent.resolve(), + "script")) +import check as checks # noqa + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}}, 'submitter': 'Community', + 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', + 'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'}) +checks.check_return(r) + + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_dashboard', 'adr': + {'python': {'name': 'mlperf', 
'version_min': '3.8'}}, 'submitter': 'Community', + 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', + 'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'}) +checks.check_return(r) diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_ge.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_ge.py new file mode 100644 index 000000000..692ddeb83 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_ge.py @@ -0,0 +1,26 @@ +import cmind as cm + +from pathlib import Path +import sys +import os + +sys.path.insert( + 1, + os.path.join( + Path(__file__).parent.parent.resolve(), + "script")) +import check as checks # noqa + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': { + 'tags': '_pip-install'}, 'tvm-model': {'tags': '_graph_executor'}}, + 'submitter': 'Community', 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', + 'device': 'cpu', 'scenario': 'Offline', 'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'}) +checks.check_return(r) + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short,_dashboard', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': { + 'tags': '_pip-install'}, 'tvm-model': {'tags': '_graph_executor'}}, + 'submitter': 'Community', 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', + 'device': 'cpu', 'scenario': 'Offline', 'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'}) +checks.check_return(r) diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_vm.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_vm.py new file mode 100644 index 000000000..5758ad08f --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-core/src/tutorials/test_tutorial_tvm_pip_vm.py @@ -0,0 +1,27 @@ +# This test covers version, variation, compilation from src, add_deps, +# add_deps_recursive, deps, post_deps + +import cmind as cm + +from pathlib import Path +import sys +import os + +sys.path.insert( + 1, + os.path.join( + Path(__file__).parent.parent.resolve(), + "script")) +import check as checks # noqa + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}}, 'submitter': 'Community', + 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', + 'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'}) +checks.check_return(r) + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short,_dashboard', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}}, 'submitter': 'Community', + 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', + 'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'}) +checks.check_return(r) diff --git 
a/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/README-extra.md new file mode 100644 index 000000000..480bea4f4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/README-extra.md @@ -0,0 +1,8 @@ +# CM script + +This script prints the internal pipeline of a CM script run: + +```bash +cmr "test cm-script pipeline" +``` + diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/README.md b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/README.md new file mode 100644 index 000000000..1c6e7b38a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/test-cm-script-pipeline](https://docs.mlcommons.org/cm4mlops/scripts/Tests/test-cm-script-pipeline) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/_cm.yaml new file mode 100644 index 000000000..9a2327ae6 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/_cm.yaml @@ -0,0 +1,37 @@ +alias: test-cm-script-pipeline +uid: ebe50aa281be4458 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: Tests + +developers: "Grigori Fursin" + +tags: +- test +- cm-script +- pipeline + +cache: false + +deps: +- tags: print,any-text + env: + CM_PRINT_ANY_TEXT: "_cm.yaml: deps" + +prehook_deps: +- tags: print,any-text + env: + CM_PRINT_ANY_TEXT: "_cm.yaml: prehook_deps" + +posthook_deps: +- tags: print,any-text + env: + CM_PRINT_ANY_TEXT: "_cm.yaml: posthook_deps" + +post_deps: +- tags: print,any-text + env: + CM_PRINT_ANY_TEXT: "_cm.yaml: post_deps" + CM_PRINT_ANY_CM_ENV_KEYS: 'CM_TMP_CURRENT_SCRIPT_PATH,CM_TMP_CURRENT_PATH,CM_QUIET' diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/customize.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/customize.py new file mode 100644 index 000000000..2b49c32ed --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/customize.py @@ -0,0 +1,51 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +# Developers: Grigori Fursin + +from cmind import utils +import os + + +def preprocess(i): + + print('') + print('customize.py: preprocess') + print('') + + return {'return': 0} + + +def postprocess(i): + + automation = i['automation'] + run_script_input = i['run_script_input'] + env = i['env'] + + print('') + print('customize.py: postprocess') + print('') + + r = automation.run_native_script( + {'run_script_input': run_script_input, 
'env': env, 'script_name': 'run2'}) + if r['return'] > 0: + return r + + return {'return': 0} + + +def detect_version(i): + + print('') + print('customize.py: detect_version') + print('') + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run.bat b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run.bat new file mode 100644 index 000000000..3103960b7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run.bat @@ -0,0 +1,5 @@ +rem native script + +echo. +echo run.bat +echo. diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run.sh b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run.sh new file mode 100644 index 000000000..51936abac --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +echo "" +echo "run.sh" +echo "" diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run2.bat b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run2.bat new file mode 100644 index 000000000..120bd24a8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run2.bat @@ -0,0 +1,5 @@ +rem native script + +echo. +echo run2.bat +echo. diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run2.sh b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run2.sh new file mode 100644 index 000000000..21664817b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-script-pipeline/run2.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +echo "" +echo "run2.sh" +echo "" diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
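Together, `_cm.yaml`, `customize.py` and the `run*`/`run2*` scripts above make the script pipeline visible: each dependency hook prints a marker, and `postprocess` chains a second native script via `run_native_script`. A hedged way to watch the sequence from Python:

```python
import cmind as cm

# Prints the markers from deps, prehook_deps, posthook_deps and post_deps
# as well as from run.sh and run2.sh (the latter chained from postprocess).
r = cm.access({'action': 'run',
               'automation': 'script',
               'tags': 'test,cm-script,pipeline',
               'out': 'con'})
if r['return'] > 0:
    raise Exception(r.get('error', 'pipeline test failed'))
```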
diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/_cm.yaml new file mode 100644 index 000000000..b8b7a9c79 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/_cm.yaml @@ -0,0 +1,31 @@ +alias: test-cm-scripts +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +tags: +- test +- test-scripts +- cm-test +env: + CM_VAR1: orig +const: + CM_CVAR1: orig +new_env_keys: + - CM_VAR* + - CM_CVAR* + +uid: 6fbe3884575c4e51 +variations: + v1: + env: + CM_VAR1: v1 + v2: + env: + CM_VAR1: v2 + CM_VAR2: v2 + const: + CM_VAR2: constv2 + v1,v2: + env: + CM_VAR1: combv1v2 + CM_VAR2: combv1v2 diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/customize.py b/cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/customize.py new file mode 100644 index 000000000..89236cec9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/customize.py @@ -0,0 +1,35 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/run.bat b/cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/run.bat new file mode 100644 index 000000000..648302ca7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/run.sh b/cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/run.sh new file mode 100644 index 000000000..4c23c380e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-cm-scripts/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + +echo "Running: " +echo "${CM_RUN_CMD}" +echo "" + +if [[ ${CM_FAKE_RUN} != "yes" ]]; then + eval "${CM_RUN_CMD}" + test $? -eq 0 || exit 1 +fi diff --git a/cmx4mlops/cmx4mlops/repo/script/test-debug/.vscode/launch.json b/cmx4mlops/cmx4mlops/repo/script/test-debug/.vscode/launch.json new file mode 100644 index 000000000..49c4e19f0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-debug/.vscode/launch.json @@ -0,0 +1,23 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. 
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Python Debugger: Remote Attach", + "type": "debugpy", + "request": "attach", + "connect": { + "host": "localhost", + "port": 5678 + }, + "pathMappings": [ + { + "localRoot": "${workspaceFolder}", + "remoteRoot": "${workspaceFolder}" + } + ] + } + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/script/test-debug/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/test-debug/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-debug/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/test-debug/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/test-debug/README-extra.md new file mode 100644 index 000000000..06a41746c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-debug/README-extra.md @@ -0,0 +1,20 @@ +Demo of debugging CM scripts and wrapped apps with Visual Studio Code. + + +Debug customize.py using remote debugging and follow the instructions on the command line: + +```bash +cmr "test cm-debug" --debug_uid=8d96cd9fa4734204 +``` + +Debug a Python application or tool wrapped by the CM script (see [python/main.py](python/main.py)): + +```bash +cmr "test cm-debug" --debug_uid=45a7c3a500d24a63 +``` + + + +Debug CM internals using standard Python debugging: +* Open _demo.py and start debugging using the "Python File" default configuration. + diff --git a/cmx4mlops/cmx4mlops/repo/script/test-debug/README.md b/cmx4mlops/cmx4mlops/repo/script/test-debug/README.md new file mode 100644 index 000000000..7854c9e1d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-debug/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/CM-interface-prototyping/test-debug](https://docs.mlcommons.org/cm4mlops/scripts/CM-interface-prototyping/test-debug) for the documentation of this CM script.
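A hedged Python equivalent of the `cmr` commands in `README-extra.md` above, assuming the `debug_uid` CLI flag maps to the input key of the same name:

```python
import cmind as cm

# The UID selects which embedded breakpoint (customize.py or python/main.py)
# is armed; a run without debug_uid is unaffected.
r = cm.access({'action': 'run',
               'automation': 'script',
               'tags': 'test,cm-debug',
               'debug_uid': '8d96cd9fa4734204',
               'out': 'con'})
if r['return'] > 0:
    raise Exception(r.get('error', 'debug test failed'))
```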
diff --git a/cmx4mlops/cmx4mlops/repo/script/test-debug/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/test-debug/_cm.yaml new file mode 100644 index 000000000..8a4ab37aa --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-debug/_cm.yaml @@ -0,0 +1,29 @@ +alias: test-debug +uid: 5ccd3d701a9144f9 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: CM interface prototyping + +deps: + # Detect host OS features + - tags: detect,os + + # Detect host CPU features + - tags: detect,cpu + + # Get Python + - tags: get,python3 + names: + - python + - python3 + + # May need to use CM in automation recipes + - tags: get,generic-python-lib,_package.cmind + names: + - python-cmind + +tags: +- test +- cm-debug diff --git a/cmx4mlops/cmx4mlops/repo/script/test-debug/_demo.py b/cmx4mlops/cmx4mlops/repo/script/test-debug/_demo.py new file mode 100644 index 000000000..878249a74 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-debug/_demo.py @@ -0,0 +1,9 @@ +# Developer: Grigori Fursin + +import cmind +import sys + +print(sys.executable) + +r = cmind.access('run script "test cm-debug"') +print(r) diff --git a/cmx4mlops/cmx4mlops/repo/script/test-debug/customize.py b/cmx4mlops/cmx4mlops/repo/script/test-debug/customize.py new file mode 100644 index 000000000..acd7dc6e4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-debug/customize.py @@ -0,0 +1,51 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +# Developer(s): Grigori Fursin + +import os + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + meta = i['meta'] + + print("********************************************************") + print('- Importing CM library ...') + import cmind + print(' SUCCESS!') + + cmind.utils.debug_here( + __file__, + port=5678, + text='Debugging customize.py!', + env=env, + env_debug_uid='8d96cd9fa4734204').breakpoint() + + print('') + print('- List CM repos ...') + print('') + r = cmind.access({'action': 'show', 'automation': 'repo', 'out': 'con'}) + print('') + print(' SUCCESS!') + print("********************************************************") + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/test-debug/python/.vscode/launch.json b/cmx4mlops/cmx4mlops/repo/script/test-debug/python/.vscode/launch.json new file mode 100644 index 000000000..c2dc39156 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-debug/python/.vscode/launch.json @@ -0,0 +1,23 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. 
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Python Debugger: Remote Attach", + "type": "debugpy", + "request": "attach", + "connect": { + "host": "localhost", + "port": 5678 + }, + "pathMappings": [ + { + "localRoot": "${workspaceFolder}", + "remoteRoot": "${workspaceFolder}" + } + ] + } + ] +} \ No newline at end of file diff --git a/cmx4mlops/cmx4mlops/repo/script/test-debug/python/main.py b/cmx4mlops/cmx4mlops/repo/script/test-debug/python/main.py new file mode 100644 index 000000000..a00bf6b34 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-debug/python/main.py @@ -0,0 +1,26 @@ +""" +Testing CM debugging + +# Developer(s): Grigori Fursin +""" + +import cmind.utils +import os +import json + +print("Hello World 1") + +env = os.environ + +print('') +print(json.dumps(dict(env), indent=2)) + +# Import cmind to test break points +if os.environ.get('CM_TMP_DEBUG_UID', '') == '45a7c3a500d24a63': + cmind.utils.debug_here( + __file__, + port=5678, + text='Debugging main.py!').breakpoint() + +print('') +print("Hello World 2") diff --git a/cmx4mlops/cmx4mlops/repo/script/test-debug/run.bat b/cmx4mlops/cmx4mlops/repo/script/test-debug/run.bat new file mode 100644 index 000000000..d45522c1d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-debug/run.bat @@ -0,0 +1,6 @@ +echo ======================================================== + +%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\python\main.py +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% + +echo ======================================================== diff --git a/cmx4mlops/cmx4mlops/repo/script/test-debug/run.sh b/cmx4mlops/cmx4mlops/repo/script/test-debug/run.sh new file mode 100644 index 000000000..ee43341da --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-debug/run.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +echo "========================================================" + +${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/python/main.py +test $? -eq 0 || exit 1 + +echo ======================================================== diff --git a/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/README-extra.md new file mode 100644 index 000000000..bde1360a8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/README-extra.md @@ -0,0 +1,6 @@ +```bash +cmr "test deps conditions" +cmr "test deps conditions" --test1 +cmr "test deps conditions" --test1 --test2 +cmr "test deps conditions" --test1 --test2 --test3 +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/README.md b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/README.md new file mode 100644 index 000000000..d8b65269a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/test-deps-conditions](https://docs.mlcommons.org/cm4mlops/scripts/Tests/test-deps-conditions) for the documentation of this CM script. 
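The `_cm.yaml` in the next diff maps `--test1/--test2/--test3` to `CM_ENV1..3` and attaches `skip_if_env`, `skip_if_any_env`, `enable_if_env` and `enable_if_any_env` conditions to the deps. A sketch of driving the same test through the Python API, assuming that setting the `CM_ENV*` variables via `env` is equivalent to the input-mapped CLI flags above:

```python
# Sketch: Python equivalent of `cmr "test deps conditions" --test1 --test2`,
# assuming env entries behave like the input-mapped CLI flags.
import cmind

r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'test,deps,conditions',
                  'env': {'CM_ENV1': 'yes', 'CM_ENV2': 'yes'},
                  'quiet': 'yes'})
if r['return'] > 0:
    raise Exception(r.get('error', 'CM run failed'))
```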
diff --git a/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/_cm.yaml
new file mode 100644
index 000000000..bc74a5871
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions/_cm.yaml
@@ -0,0 +1,54 @@
+alias: test-deps-conditions
+uid: 5cb82aee472640df
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+category: Tests
+
+developers: "Grigori Fursin"
+
+deps:
+  - tags: print,native,hello-world,_skip_print_env
+  - tags: print,native,hello-world,_skip_print_env,_text.SKIP_IF_ALL_ENV
+    skip_if_env:
+      CM_ENV1:
+        - True
+      CM_ENV2:
+        - True
+      CM_ENV3:
+        - True
+  - tags: print,native,hello-world,_skip_print_env,_text.SKIP_IF_ANY_ENV
+    skip_if_any_env:
+      CM_ENV1:
+        - True
+      CM_ENV2:
+        - True
+      CM_ENV3:
+        - True
+  - tags: print,native,hello-world,_skip_print_env,_text.ENABLE_IF_ALL_ENV
+    enable_if_env:
+      CM_ENV1:
+        - True
+      CM_ENV2:
+        - True
+      CM_ENV3:
+        - True
+  - tags: print,native,hello-world,_skip_print_env,_text.ENABLE_IF_ANY_ENV
+    enable_if_any_env:
+      CM_ENV1:
+        - True
+      CM_ENV2:
+        - True
+      CM_ENV3:
+        - True
+
+input_mapping:
+  test1: CM_ENV1
+  test2: CM_ENV2
+  test3: CM_ENV3
+
+tags:
+- test
+- deps
+- conditions
diff --git a/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/README-extra.md
new file mode 100644
index 000000000..d2b1f3033
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/README-extra.md
@@ -0,0 +1,20 @@
+Checking conditions that turn deps on or off:
+
+```bash
+cmr "test deps conditions2" -s
+cmr "test deps conditions2" -s --test
+cmr "test deps conditions2" -s --test=xyz
+```
+
+Note that the last two commands run with the following dep enabled;
+`True` matches not only an explicitly enabled flag
+but also any non-empty value:
+```yaml
+  - tags: print,any-text,_text.RUN_IF_ENV_IS_SET_TO_TRUE
+    enable_if_env:
+      TEST:
+        - True
+
+```
+
+This is useful, for example, to check whether a flag enables output or redirects it to a specific file.
diff --git a/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/README.md b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/README.md
new file mode 100644
index 000000000..1f3d0611a
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/test-deps-conditions2](https://docs.mlcommons.org/cm4mlops/scripts/Tests/test-deps-conditions2) for the documentation of this CM script.
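To make the truthiness rule above concrete, here is an illustrative sketch of how an `enable_if_env` entry containing `True` can be interpreted. This is not the actual implementation in the script automation, only the documented behaviour:

```python
# Illustrative sketch only: approximates the documented semantics where a
# `True` entry matches both an enabled flag and any non-empty value.
def env_condition_met(env, condition):
    for key, allowed in condition.items():
        value = str(env.get(key, ''))
        ok = False
        for a in allowed:
            if a is True or str(a).lower() == 'true':
                ok = value != ''   # any non-empty value counts as "true"
            else:
                ok = value == str(a)
            if ok:
                break
        if not ok:
            return False
    return True

# Matches the README behaviour of --test and --test=xyz:
print(env_condition_met({'TEST': 'xyz'}, {'TEST': [True]}))  # True
print(env_condition_met({}, {'TEST': [True]}))               # False
```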
diff --git a/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/_cm.yaml new file mode 100644 index 000000000..08eb5ffb8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-deps-conditions2/_cm.yaml @@ -0,0 +1,28 @@ +alias: test-deps-conditions2 +uid: 7a81ef941b3c4c6c + +automation_alias: script +automation_uid: 5b4e0237da074764 + +category: Tests + +developers: "Grigori Fursin" + +deps: + - tags: print,any-text,_cm_env.TEST + - tags: print,any-text,_text.RUN_IF_ENV_IS_SET_TO_TRUE + enable_if_env: + TEST: + - True + - tags: print,any-text,_text.RUN_IF_ENV_IS_NOT_SET_TO_TRUE + skip_if_env: + TEST: + - True + +input_mapping: + test: TEST + +tags: +- test +- deps +- conditions2 diff --git a/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/README-extra.md new file mode 100644 index 000000000..582991f6d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/README-extra.md @@ -0,0 +1 @@ +# CM script diff --git a/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/README.md b/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/README.md new file mode 100644 index 000000000..3a88769c5 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/test-download-and-extract-artifacts](https://docs.mlcommons.org/cm4mlops/scripts/Tests/test-download-and-extract-artifacts) for the documentation of this CM script. 
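The script added in the next diff chains the generic download/extract dependencies and renames their outputs through `CM_DOWNLOAD_FINAL_ENV_NAME` and `CM_EXTRACT_FINAL_ENV_NAME`. A sketch of exercising it and reading the exported keys, assuming the `new_env_keys` pattern (`CM_REPRODUCE_PAPER_XYZ*`) surfaces in `new_env`:

```python
# Sketch: run the download-and-extract test and inspect the exported env keys.
import cmind

r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'test,download-and-extract-artifacts',
                  'quiet': 'yes'})
if r['return'] > 0:
    raise Exception(r.get('error', 'CM run failed'))

for key, value in r.get('new_env', {}).items():
    if key.startswith('CM_REPRODUCE_PAPER_XYZ'):
        print(key, '=', value)
```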
diff --git a/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/_cm.yaml new file mode 100644 index 000000000..c1961ba30 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/_cm.yaml @@ -0,0 +1,31 @@ +alias: test-download-and-extract-artifacts +uid: 51dde7580b404b27 + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: false + +category: Tests + +deps: +- tags: download,file,_url.https://zenodo.org/record/4735647/files/resnet50_v1.onnx + env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_REPRODUCE_PAPER_XYZ + CM_DOWNLOAD_CHECKSUM: + force_cache: true + extra_cache_tags: reproduce,paper,artifact,zenodo,xyz +- tags: download-and-extract,_extract,_url.https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.tf.zip?download=1 + env: + CM_DOWNLOAD_FINAL_ENV_NAME: CM_REPRODUCE_PAPER_XYZ2 + CM_EXTRACT_FINAL_ENV_NAME: CM_REPRODUCE_PAPER_XYZ2_EXTRACTED +# CM_DOWNLOAD_CHECKSUM: + force_cache: true + extra_cache_tags: reproduce,paper,artifact,zenodo,xyz2 + +new_env_keys: + - CM_REPRODUCE_PAPER_XYZ* + +tags: +- test +- download-and-extract-artifacts diff --git a/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/customize.py b/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/customize.py new file mode 100644 index 000000000..89236cec9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/customize.py @@ -0,0 +1,35 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/run.bat b/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/run.bat new file mode 100644 index 000000000..648302ca7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/run.sh b/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/run.sh new file mode 100644 index 000000000..3a584c10c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-download-and-extract-artifacts/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + + + +function exit_if_error() { + test $? -eq 0 || exit $? +} + +function run() { + echo "Running: " + echo "$1" + echo "" + if [[ ${CM_FAKE_RUN} != 'yes' ]]; then + eval "$1" + exit_if_error + fi +} + +#Add your run commands here... 
+# run "$CM_RUN_CMD" diff --git a/cmx4mlops/cmx4mlops/repo/script/test-dummy/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/test-dummy/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-dummy/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/test-dummy/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/test-dummy/README-extra.md new file mode 100644 index 000000000..582991f6d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-dummy/README-extra.md @@ -0,0 +1 @@ +# CM script diff --git a/cmx4mlops/cmx4mlops/repo/script/test-dummy/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/test-dummy/_cm.yaml new file mode 100644 index 000000000..09ba01f92 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-dummy/_cm.yaml @@ -0,0 +1,11 @@ +alias: test-dummy +uid: 3ef5d69f929349bc + +automation_alias: script +automation_uid: 5b4e0237da074764 + +cache: false + +tags: +- test +- dummy diff --git a/cmx4mlops/cmx4mlops/repo/script/test-dummy/customize.py b/cmx4mlops/cmx4mlops/repo/script/test-dummy/customize.py new file mode 100644 index 000000000..89236cec9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-dummy/customize.py @@ -0,0 +1,35 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/test-dummy/run.bat b/cmx4mlops/cmx4mlops/repo/script/test-dummy/run.bat new file mode 100644 index 000000000..648302ca7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-dummy/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/cmx4mlops/cmx4mlops/repo/script/test-dummy/run.sh b/cmx4mlops/cmx4mlops/repo/script/test-dummy/run.sh new file mode 100644 index 000000000..05a7907cf --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-dummy/run.sh @@ -0,0 +1,2 @@ +#!/bin/bash + diff --git a/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. 
diff --git a/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/README.md b/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/README.md new file mode 100644 index 000000000..79f9d851b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/CM-interface-prototyping/test-mlperf-inference-retinanet](https://docs.mlcommons.org/cm4mlops/scripts/CM-interface-prototyping/test-mlperf-inference-retinanet) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/_cm.yaml new file mode 100644 index 000000000..fd6a8607c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/_cm.yaml @@ -0,0 +1,28 @@ +alias: test-mlperf-inference-retinanet +automation_alias: script +automation_uid: 5b4e0237da074764 +category: CM interface prototyping +deps: +- tags: get,sys-utils-cm +- names: + - python + - python3 + tags: get,python3 +- tags: get,generic-python-lib,_requests +- names: + - loadgen + - mlperf-inference-loadgen + tags: get,loadgen +- force_env_keys: + - CM_GIT_* + names: + - inference-src + tags: mlperf,inference,source +- tags: get,dataset,open-images,original +- tags: get,raw,ml-model,retinanet +tags: +- test +- mlperf-inference-win +- retinanet +- windows +uid: 1cedbc3b642a403a diff --git a/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/customize.py b/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/customize.py new file mode 100644 index 000000000..2b5c08e6b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/customize.py @@ -0,0 +1,30 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + meta = i['meta'] + + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + state = i['state'] + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/run.bat b/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/run.bat new file mode 100644 index 000000000..38970bc0e --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/run.bat @@ -0,0 +1,8 @@ +echo. 
+
+set CUR_DIR=%cd%
+set SCRIPT_DIR=%CM_TMP_CURRENT_SCRIPT_PATH%
+
+cd %CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH%
+
+%CM_PYTHON_BIN_WITH_PATH% python/main.py --profile retinanet-onnxruntime --scenario Offline --model %CM_ML_MODEL_FILE_WITH_PATH% --dataset-path %CM_DATASET_PATH_ROOT%\validation\data --accuracy
diff --git a/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/run.sh b/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/run.sh
new file mode 100644
index 000000000..b43737407
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/test-mlperf-inference-retinanet/run.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+echo ""
+
+cd ${CM_MLPERF_INFERENCE_CLASSIFICATION_AND_DETECTION_PATH}
+
+ln -sf ${CM_DATASET_PATH_ROOT}/annotations ${CM_DATASET_PATH_ROOT}/validation/data/annotations
+
+${CM_PYTHON_BIN_WITH_PATH} python/main.py --profile retinanet-onnxruntime --scenario Offline --model ${CM_ML_MODEL_FILE_WITH_PATH} --dataset-path ${CM_DATASET_PATH_ROOT}/validation/data --accuracy
diff --git a/cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/README.md b/cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/README.md
new file mode 100644
index 000000000..3fd22bc6c
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/test-set-sys-user-cm](https://docs.mlcommons.org/cm4mlops/scripts/Tests/test-set-sys-user-cm) for the documentation of this CM script.
diff --git a/cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/_cm.yaml
new file mode 100644
index 000000000..742309e1b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/_cm.yaml
@@ -0,0 +1,14 @@
+alias: test-set-sys-user-cm
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: true
+category: Tests
+default_env:
+  CM_SUDO: sudo
+tags:
+- demo
+- set
+- sys-user
+- cm
+- sys-user-cm
+uid: 25fdfcf0fe434af2
diff --git a/cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/run.sh b/cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/run.sh
new file mode 100644
index 000000000..c0d513db7
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/test-set-sys-user-cm/run.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+${CM_SUDO} groupadd -g 1111 ckuser
+${CM_SUDO} useradd -u 2222 -g ckuser --create-home --shell /bin/bash ckuser
+echo "ckuser:ckuser" | ${CM_SUDO} chpasswd
+${CM_SUDO} adduser ckuser sudo
+echo "ckuser ALL=(ALL) NOPASSWD:ALL" | ${CM_SUDO} tee -a /etc/sudoers
diff --git a/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/README-extra.md new file mode 100644 index 000000000..71b498f99 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/README-extra.md @@ -0,0 +1,7 @@ +# MLPerf Inference Accuracy Log Truncator +This [CM script](https://github.com/mlcommons/ck/blob/master/cm/docs/specs/script.md) runs the [MLPerf Inference accuracy log truncator](https://github.com/mlcommons/inference/blob/master/tools/submission/truncate_accuracy_log.py) on a given submission folder. + +## How To +```bash +cm run script --tags=run,mlperf,inference,accuracy,truncator --submitter=[SUBMITTER_NAME] --submission_dir=[SUBMISSION_FOLDER] +``` diff --git a/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/README.md b/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/README.md new file mode 100644 index 000000000..fa1675bfe --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/truncate-mlperf-inference-accuracy-log](https://docs.mlcommons.org/cm4mlops/scripts/MLPerf-benchmark-support/truncate-mlperf-inference-accuracy-log) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/_cm.yaml new file mode 100644 index 000000000..6e5887266 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/_cm.yaml @@ -0,0 +1,41 @@ +alias: truncate-mlperf-inference-accuracy-log +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false +category: MLPerf benchmark support +clean_files: [] +deps: +- names: + - python + - python3 + tags: get,python3 +- names: + - inference-src + tags: get,mlcommons,inference,src +- names: + - get-mlperf-submission-dir + skip_if_env: + CM_MLPERF_INFERENCE_SUBMISSION_DIR: + - 'on' + tags: get,mlperf,submission,dir +input_mapping: + input: CM_MLPERF_INFERENCE_SUBMISSION_DIR + submission_dir: CM_MLPERF_INFERENCE_SUBMISSION_DIR + submitter: CM_MLPERF_SUBMITTER +tags: +- run +- mlc +- mlcommons +- mlperf +- inference +- mlperf-inference +- truncation +- truncator +- truncate +- accuracy +- accuracy-log +- accuracy-log-trancation +- accuracy-log-truncator +- mlc-accuracy-log-trancation +- mlc-accuracy-log-truncator +uid: 9d5ec20434084d14 diff --git a/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/customize.py b/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/customize.py new file mode 100644 index 000000000..d4bb2d66a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/customize.py @@ -0,0 +1,37 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import cmind as cm +import os +import subprocess +from os.path import exists + + 
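+# preprocess() below validates CM_MLPERF_INFERENCE_SUBMISSION_DIR, removes any
+# stale "<submission_dir>_logs" backup and assembles the truncate_accuracy_log.py
+# command line in CM_RUN_CMD, which run.sh then executes.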
+def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + submission_dir = env.get("CM_MLPERF_INFERENCE_SUBMISSION_DIR", "") + + if submission_dir == "": + print("Please set CM_MLPERF_INFERENCE_SUBMISSION_DIR") + return {'return': 1, 'error': 'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not specified in env in run-mlperf-accuracy-log-truncator'} + + submitter = env.get("CM_MLPERF_SUBMITTER", "CTuning") + + os.system("rm -rf " + submission_dir + "_logs") + + CMD = env['CM_PYTHON_BIN'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission", + "truncate_accuracy_log.py") + "' --input '" + submission_dir + "' --submitter '" + submitter + "' --backup '" + submission_dir + "_logs'" + env['CM_RUN_CMD'] = CMD + + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/run.sh b/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/run.sh new file mode 100644 index 000000000..1b3c5c3c0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/truncate-mlperf-inference-accuracy-log/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash +cmd=${CM_RUN_CMD} +echo "${cmd}" +eval "${cmd}" +test $? -eq 0 || exit $? diff --git a/cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/COPYRIGHT.md new file mode 100644 index 000000000..696f82922 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/COPYRIGHT.md @@ -0,0 +1,3 @@ +© 2022-2025 MLCommons. All Rights Reserved. + +Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone. diff --git a/cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/README.md b/cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/README.md new file mode 100644 index 000000000..e27a3c661 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/README.md @@ -0,0 +1 @@ +Please see [https://docs.mlcommons.org/cm4mlops/scripts/Tests/upgrade-python-pip](https://docs.mlcommons.org/cm4mlops/scripts/Tests/upgrade-python-pip) for the documentation of this CM script. diff --git a/cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/_cm.yaml new file mode 100644 index 000000000..1a00fc99b --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/_cm.yaml @@ -0,0 +1,16 @@ +alias: upgrade-python-pip +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Tests +deps: +- tags: detect,os +- names: + - python + - python3 + tags: get,python3 +tags: +- upgrade +- python +- pip +- python-pip +uid: 4343ed2d9a974923 diff --git a/cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/run.bat b/cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/run.bat new file mode 100644 index 000000000..b6cc1b374 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/run.bat @@ -0,0 +1,2 @@ +%CM_PYTHON_BIN_WITH_PATH% -m pip install --upgrade pip +IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/run.sh b/cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/run.sh new file mode 100644 index 000000000..389a212e4 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/upgrade-python-pip/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +${CM_PYTHON_BIN_WITH_PATH} -m pip install --upgrade pip +test $? -eq 0 || exit $? 
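The truncator can also be driven through the Python API instead of the CLI shown in `README-extra.md`; a sketch, assuming the `input_mapping` keys (`submission_dir`, `submitter`) are accepted as top-level inputs and using placeholder values:

```python
# Sketch: Python equivalent of
#   cm run script --tags=run,mlperf,inference,accuracy,truncator \
#       --submitter=[SUBMITTER_NAME] --submission_dir=[SUBMISSION_FOLDER]
import cmind

r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'run,mlperf,inference,accuracy,truncator',
                  'submission_dir': '/path/to/submission',  # placeholder
                  'submitter': 'MySubmitter',               # placeholder
                  'quiet': 'yes'})
if r['return'] > 0:
    raise Exception(r.get('error', 'truncation failed'))
```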
diff --git a/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/COPYRIGHT.md b/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/COPYRIGHT.md
new file mode 100644
index 000000000..696f82922
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/COPYRIGHT.md
@@ -0,0 +1,3 @@
+© 2022-2025 MLCommons. All Rights Reserved.
+
+Grigori Fursin, the cTuning foundation and OctoML donated the CK and CM projects to MLCommons to benefit everyone.
diff --git a/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md b/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md
new file mode 100644
index 000000000..836b025dd
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md
@@ -0,0 +1,17 @@
+This is a wrapper script around [Reproduce MLPerf OctoML TinyML Results](https://github.com/octoml/ck/tree/master/cm-mlops/script/reproduce-mlperf-octoml-tinyml-results),
+running it for the two microTVM variants and their supported models.
+
+## Install
+```bash
+cm run script --tags=generate,tiny,octoml,submission
+```
+
+The above command should produce five ELF binaries, located inside the respective cache entries listed by the command below:
+```bash
+cm show cache --tags=reproduce,tiny,octoml,mlperf
+```
+
+## Install and Flash
+```bash
+cm run script --tags=generate,tiny,octoml,submission --flash
+```
diff --git a/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/README.md b/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/README.md
new file mode 100644
index 000000000..7564850a6
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/README.md
@@ -0,0 +1 @@
+Please see [https://docs.mlcommons.org/cm4mlops/scripts/Reproduce-MLPerf-benchmarks/wrapper-reproduce-octoml-tinyml-submission](https://docs.mlcommons.org/cm4mlops/scripts/Reproduce-MLPerf-benchmarks/wrapper-reproduce-octoml-tinyml-submission) for the documentation of this CM script.
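The wrapper's `customize.py` in the diffs that follow loops over the NUCLEO/NRF boards and microTVM variants; a sketch of the equivalent top-level call, assuming `flash` maps to `CM_FLASH_BOARD` via `input_mapping` as declared in `_cm.yaml`:

```python
# Sketch: Python equivalent of
#   cm run script --tags=generate,tiny,octoml,submission --flash
import cmind

r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'generate,tiny,octoml,submission',
                  'flash': True,  # assumed to map to CM_FLASH_BOARD
                  'quiet': 'yes'})
if r['return'] > 0:
    raise Exception(r.get('error', 'wrapper run failed'))
```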
diff --git a/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/_cm.yaml b/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/_cm.yaml new file mode 100644 index 000000000..f8f24d444 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/_cm.yaml @@ -0,0 +1,28 @@ +alias: wrapper-reproduce-octoml-tinyml-submission +automation_alias: script +automation_uid: 5b4e0237da074764 +category: Reproduce MLPerf benchmarks +default_version: r1.0 +deps: +- names: + - python + - python3 + tags: get,python3 +env: {} +input_mapping: + flash: CM_FLASH_BOARD + recreate_binary: CM_RECREATE_BINARY +tags: +- run +- generate-tiny +- generate +- submission +- tiny +- generate-tiny-submission +- results +- mlcommons +- mlperf +- octoml +uid: b946001e289c4480 +versions: + r1.0: {} diff --git a/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/customize.py b/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/customize.py new file mode 100644 index 000000000..e687af6e9 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/customize.py @@ -0,0 +1,54 @@ +# +# Copyright: https://github.com/mlcommons/ck/blob/master/cm-mlops/COPYRIGHT.md +# License: https://github.com/mlcommons/ck/blob/master/cm-mlops/LICENSE.md +# +# White paper: https://arxiv.org/abs/2406.16791 +# History: https://github.com/mlcommons/ck/blob/master/HISTORY.CM.md +# Original repository: https://github.com/mlcommons/ck/tree/master/cm-mlops +# +# CK and CM project contributors: https://github.com/mlcommons/ck/blob/master/CONTRIBUTING.md +# + +from cmind import utils +import os +import cmind as cm + + +def preprocess(i): + + os_info = i['os_info'] + env = i['env'] + state = i['state'] + inp = i['input'] + if 'CM_FLASH_BOARD' in env: + script_tags = "flash,tiny" + else: + script_tags = "reproduce,tiny,mlperf,octoml" + boards = ["NUCLEO", "NRF"] + microtvm_variants = { + "cmsis_nn": [ + "ad", "ic", "vww", "kws"], "native": [ + "ic", "ad", "vww", "kws"]} + for board in boards: + for microtvm_variant in microtvm_variants: + if board == "NRF" and microtvm_variant == "native": + continue + for model in microtvm_variants[microtvm_variant]: + variation_tags_string = "_" + board + ",_" + microtvm_variant + ",_" + model + tags = script_tags + "," + variation_tags_string + if 'CM_RECREATE_BINARY' in env: + r = cm.access( + {'action': 'rm', 'automation': 'cache', 'tags': tags, 'force': 'true'}) + if r['return'] > 0: + return r + r = cm.access({'action': 'run', 'automation': 'script', 'tags': tags, 'quiet': 'true', 'env': env, + 'input': inp, 'state': state, 'add_deps': inp.get('add_deps', {}), 'add_deps_recursive': + inp.get('add_deps_recursive', {})}) + if r['return'] > 0: + return r + + return {'return': 0} + + +def postprocess(i): + return {'return': 0} diff --git a/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/run.sh b/cmx4mlops/cmx4mlops/repo/script/wrapper-reproduce-octoml-tinyml-submission/run.sh new file mode 100644 index 000000000..e69de29bb diff --git a/cmx4mlops/cmx4mlops/repo/tests/script/check.py b/cmx4mlops/cmx4mlops/repo/tests/script/check.py new file mode 100644 index 000000000..dd030c3bb --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/tests/script/check.py @@ -0,0 +1,27 @@ +def check_return(r): + if 'return' not in r: + raise Exception( + 'CM access function should always return key \'return\'!') + if 'error' in r: + raise Exception(r['error']) + + 
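+# check_list() below asserts that a CM search returned at least one entry when
+# found=True, and no entries when found=False.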
+def check_list(r, string, found=True):
+    check_return(r)
+    if 'list' not in r:
+        raise Exception('CM search should return a list!')
+    if len(r['list']) < 1 and found:
+        raise Exception('CM search returned an empty list for ' + string)
+    if len(r['list']) > 0 and not found:
+        raise Exception('CM search returned at least one entry for ' + string)
+
+
+def check_key_value(d, key, value, absent_ok=False):
+    if not d.get(key):
+        if absent_ok:
+            return True
+        else:
+            raise Exception(f"{key} is missing. Current values are {d}")
+    elif d[key] != value:
+        raise Exception(
+            f"{key} does not have the expected value {value}. Current value is {d[key]}")
diff --git a/cmx4mlops/cmx4mlops/repo/tests/script/process_dockerfile.py b/cmx4mlops/cmx4mlops/repo/tests/script/process_dockerfile.py
new file mode 100644
index 000000000..d9abd2abe
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/tests/script/process_dockerfile.py
@@ -0,0 +1,33 @@
+import sys
+import os
+import cmind as cm
+import check as checks
+import json
+import yaml
+
+files = sys.argv[1:]
+
+for file in files:
+    if not os.path.isfile(file):
+        continue
+    if not file.endswith("_cm.json") and not file.endswith("_cm.yaml"):
+        continue
+    if not file.startswith(os.path.join("cm-mlops", "script")):
+        continue
+
+    script_path = os.path.dirname(file)
+
+    f = open(file)
+
+    if file.endswith(".json"):
+        data = json.load(f)
+    elif file.endswith(".yaml"):
+        data = yaml.safe_load(f)
+
+    uid = data['uid']
+
+    r = cm.access({'action': 'dockerfile',
+                   'automation': 'script',
+                   'artifact': uid,
+                   'quiet': 'yes'})
+    checks.check_return(r)
diff --git a/cmx4mlops/cmx4mlops/repo/tests/script/process_readme.py b/cmx4mlops/cmx4mlops/repo/tests/script/process_readme.py
new file mode 100644
index 000000000..de7e04033
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/tests/script/process_readme.py
@@ -0,0 +1,27 @@
+import sys
+import os
+import cmind as cm
+import check as checks
+import json
+import yaml
+
+files = sys.argv[1:]
+
+for file in files:
+    if not os.path.isfile(file):
+        continue
+    if not file.endswith("_cm.json") and not file.endswith("_cm.yaml"):
+        continue
+    if not file.startswith(os.path.join("cm-mlops", "script")):
+        continue
+    script_path = os.path.dirname(file)
+    f = open(file)
+    if file.endswith(".json"):
+        data = json.load(f)
+    elif file.endswith(".yaml"):
+        data = yaml.safe_load(f)
+    uid = data['uid']
+
+    r = cm.access({'action': 'doc', 'automation': 'script',
+                   'artifact': uid, 'quiet': 'yes'})
+    checks.check_return(r)
diff --git a/cmx4mlops/cmx4mlops/repo/tests/script/process_tests.py b/cmx4mlops/cmx4mlops/repo/tests/script/process_tests.py
new file mode 100644
index 000000000..8012d097b
--- /dev/null
+++ b/cmx4mlops/cmx4mlops/repo/tests/script/process_tests.py
@@ -0,0 +1,38 @@
+import sys
+import os
+import cmind as cm
+import check as checks
+import json
+import yaml
+
+files = sys.argv[1:]
+
+for file in files:
+    print(file)
+    if not os.path.isfile(file) or not "script" in file:
+        continue
+    if not file.endswith("_cm.json") and not file.endswith("_cm.yaml"):
+        continue
+    script_path = os.path.dirname(file)
+    f = open(file)
+    if file.endswith(".json"):
+        data = json.load(f)
+    elif file.endswith(".yaml"):
+        data = yaml.safe_load(f)
+    if data.get('uid', '') == '':
+        continue  # not a CM script meta
+    uid = data['uid']
+
+    ii = {
+        'action': 'test', 'automation': 'script', 'artifact': uid, 'quiet': 'yes', 'out': 'con'
+    }
+    if os.environ.get('DOCKER_CM_REPO', '') != '':
+        ii['docker_cm_repo'] = os.environ['DOCKER_CM_REPO']
+    if
os.environ.get('DOCKER_CM_REPO_BRANCH', '') != '': + ii['docker_cm_repo_branch'] = os.environ['DOCKER_CM_REPO_BRANCH'] + if os.environ.get('TEST_INPUT_INDEX', '') != '': + ii['test_input_index'] = os.environ['TEST_INPUT_INDEX'] + print(ii) + r = cm.access(ii) + + checks.check_return(r) diff --git a/cmx4mlops/cmx4mlops/repo/tests/script/test_deps.py b/cmx4mlops/cmx4mlops/repo/tests/script/test_deps.py new file mode 100644 index 000000000..37d75fd4c --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/tests/script/test_deps.py @@ -0,0 +1,25 @@ +# This test covers version, variation, compilation from src, add_deps, +# add_deps_recursive, deps, post_deps + +import cmind as cm +import check as checks + +# MLPerf v3.0 inference is now very outdated and we are testing inference +# in separate tests + +# r = cm.access({'action':'run', 'automation':'script', 'tags': 'generate-run-cmds,mlperf', 'adr': +# {'loadgen': {'version': 'r3.0'}, 'compiler': {'tags': "gcc"}}, 'env': {'CM_MODEL': 'resnet50', +# 'CM_DEVICE': 'cpu', 'CM_BACKEND': 'onnxruntime'}, 'quiet': 'yes'}) +# checks.check_return(r) +# +# r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'loadgen,version-r3.0,deps-python-non-virtual'}) +# checks.check_list(r, "loadgen,version-r3.0,deps-python-non-virtual") +# +# r = cm.access({'action':'search', 'automation': 'cache', 'tags': 'inference,src,version-r3.0'}) +# checks.check_list(r, "inference,src,version-r3.0") +# +# r = cm.access({'action':'run', 'automation':'script', 'tags': 'app,mlperf,inference,generic,_python,_resnet50,_onnxruntime,_cpu,_r3.0_default', 'adr': {'mlperf-implementation': { 'version': 'master'}}, 'quiet': 'yes'}) +# checks.check_return(r) +# +# r = cm.access({'action':'run', 'automation':'script', 'tags': 'app,mlperf,inference,generic,_python,_resnet50,_tf,_cpu,_r3.0_default', 'adr': {'mlperf-implementation': { 'version': 'master'}}, 'quiet': 'yes'}) +# checks.check_return(r) diff --git a/cmx4mlops/cmx4mlops/repo/tests/script/test_docker.py b/cmx4mlops/cmx4mlops/repo/tests/script/test_docker.py new file mode 100644 index 000000000..991ef0403 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/tests/script/test_docker.py @@ -0,0 +1,23 @@ +# This test covers version, variation, compilation from src, +# add_deps_recursive, post_deps + +import cmind as cm +import check as checks + +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'run,docker,container', + 'add_deps_recursive': { + 'compiler': {'tags': "gcc"} + }, + 'image_name': 'cm-script-app-image-classification-onnx-py', + 'env': { + 'CM_DOCKER_RUN_SCRIPT_TAGS': 'app,image-classification,onnx,python', + 'CM_MLOPS_REPO': 'mlcommons@cm4mlops', + 'CM_MLOPS_REPO_BRANCH': 'mlperf-inference', + 'CM_DOCKER_IMAGE_BASE': 'ubuntu:22.04' + }, + 'quiet': 'yes' + }) + +checks.check_return(r) diff --git a/cmx4mlops/cmx4mlops/repo/tests/script/test_features.py b/cmx4mlops/cmx4mlops/repo/tests/script/test_features.py new file mode 100644 index 000000000..b29ee6a7a --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/tests/script/test_features.py @@ -0,0 +1,38 @@ +# This test covers +# 1. python-virtual-env and update_deps inside customize.py +# 2. 
cache search using "-" prefix + +import cmind as cm +import check as checks + +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'install,python-venv', + 'name': 'test', + 'quiet': 'yes'}) +checks.check_return(r) + +r = cm.access({'action': 'search', 'automation': 'cache', + 'tags': 'get,python,virtual,name-test'}) +checks.check_list(r, "get,python-venv") + +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'get,dataset,preprocessed,imagenet,_NHWC', + 'quiet': 'yes'}) +checks.check_return(r) + +r = cm.access({'action': 'search', 'automation': 'cache', + 'tags': 'get,dataset,preprocessed,imagenet,-_NCHW'}) +checks.check_list(r, "_NHWC") + +r = cm.access({'action': 'search', 'automation': 'cache', + 'tags': 'get,dataset,preprocessed,imagenet,-_NHWC'}) +# checks.check_list(r, "-_NHWC", False) + + +r = cm.access({'action': 'run', 'automation': 'script', + 'tags': 'test-scripts,_v1,_v2'}) +new_env = r['new_env'] +checks.check_key_value(new_env, "CM_VAR1", "combv1v2") +checks.check_key_value(new_env, "CM_VAR2", "constv2") diff --git a/cmx4mlops/cmx4mlops/repo/tests/script/test_install.py b/cmx4mlops/cmx4mlops/repo/tests/script/test_install.py new file mode 100644 index 000000000..d4fb93ec7 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/tests/script/test_install.py @@ -0,0 +1,15 @@ +# This test covers script installation, version, shared library install + +import cmind as cm +import check as checks + +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'python,src,install,_shared', + 'version': '3.9.10', + 'quiet': 'true'}) +checks.check_return(r) + +r = cm.access({'action': 'search', 'automation': 'cache', + 'tags': 'python,src,install,_shared,version-3.9.10'}) +checks.check_list(r, "python,src,install,_shared,version-3.9.10") diff --git a/cmx4mlops/cmx4mlops/repo/tests/test_cm.py b/cmx4mlops/cmx4mlops/repo/tests/test_cm.py new file mode 100644 index 000000000..821e1571d --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/tests/test_cm.py @@ -0,0 +1,17 @@ +try: + import cmind as cm + + r = cm.access(['test', 'script']) + if 'return' not in r: + raise Exception( + 'CM access function should always return key \'return\'!') + exit(0) + +except ImportError as e: + from sys import stderr + from subprocess import call + print( + 'WARNING: CM module for python is not installed & jupyter notebooks will not be supported', + file=stderr) + retcode = call(['cm', 'test', 'script']) + exit(retcode) diff --git a/cmx4mlops/cmx4mlops/repo/tests/test_search_speed.py b/cmx4mlops/cmx4mlops/repo/tests/test_search_speed.py new file mode 100644 index 000000000..577c4f0b8 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/tests/test_search_speed.py @@ -0,0 +1,26 @@ +import cmind as cm +import time + +times = [] + +steps = 10 + +print('Running search with tags {} times ...'.format(steps)) + +for step in range(steps): + + start = time.time() + r = cm.access({'action': 'search', + 'automation': 'script', + 'tags': 'detect,os'}) + timer = time.time() - start + + if r['return'] > 0: + cm.error(r) + + times.append(timer) + +step = 0 +for t in times: + step += 1 + print("{}) {:0.3f} sec.".format(step, t)) diff --git a/cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_retinanet.py b/cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_retinanet.py new file mode 100644 index 000000000..dcca78f20 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_retinanet.py @@ -0,0 +1,37 @@ +# This test covers version, variation, compilation from src, add_deps, +# 
add_deps_recursive, deps, post_deps + +import check as checks +import cmind as cm +from pathlib import Path +import sys +import os + +sys.path.insert( + 1, + os.path.join( + Path(__file__).parent.parent.resolve(), + "script")) + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu', 'adr': + {'python': {'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'scenario': 'Offline', + 'mode': 'accuracy', 'test_query_count': '10', 'rerun': 'true', 'quiet': 'yes'}) +checks.check_return(r) + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'app,mlperf,inference,generic,_cpp,_retinanet,_onnxruntime,_cpu', 'adr': + {'python': {'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'scenario': 'Offline', + 'mode': 'performance', 'test_query_count': '10', 'rerun': 'true', 'quiet': 'yes'}) +checks.check_return(r) + +r = cm.access({'action': 'run', + 'automation': 'script', + 'tags': 'install,python-venv', + 'version': '3.10.8', + 'name': 'mlperf'}) +checks.check_return(r) + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'compiler': {'tags': "gcc"}, 'openimages-preprocessed': {'tags': '_50'}}, 'submitter': 'Community', + 'implementation': 'cpp', 'hw_name': 'default', 'model': 'retinanet', 'backend': 'onnxruntime', 'device': 'cpu', 'scenario': 'Offline', + 'test_query_count': '10', 'clean': 'true', 'quiet': 'yes'}) +checks.check_return(r) diff --git a/cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_tvm.py b/cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_tvm.py new file mode 100644 index 000000000..6901a3169 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_tvm.py @@ -0,0 +1,28 @@ +# This test covers version, variation, compilation from src, add_deps, +# add_deps_recursive, deps, post_deps + +import check as checks +import cmind as cm + +from pathlib import Path +import sys +import os + +sys.path.insert( + 1, + os.path.join( + Path(__file__).parent.parent.resolve(), + "script")) + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}}, 'submitter': 'Community', + 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', + 'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'}) +checks.check_return(r) + + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}}, 'submitter': 'Community', + 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', + 'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'}) +checks.check_return(r) diff --git a/cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_tvm_pip_ge.py b/cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_tvm_pip_ge.py new file mode 100644 index 000000000..f95521f44 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_tvm_pip_ge.py @@ -0,0 +1,26 @@ +import check as checks +import cmind as cm + +from pathlib import Path +import sys +import os + +sys.path.insert( + 
1, + os.path.join( + Path(__file__).parent.parent.resolve(), + "script")) + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': { + 'tags': '_pip-install'}, 'tvm-model': {'tags': '_graph_executor'}}, + 'submitter': 'Community', 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', + 'device': 'cpu', 'scenario': 'Offline', 'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'}) +checks.check_return(r) + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': { + 'tags': '_pip-install'}, 'tvm-model': {'tags': '_graph_executor'}}, + 'submitter': 'Community', 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', + 'device': 'cpu', 'scenario': 'Offline', 'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'}) +checks.check_return(r) diff --git a/cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_tvm_pip_vm.py b/cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_tvm_pip_vm.py new file mode 100644 index 000000000..ab9244bf0 --- /dev/null +++ b/cmx4mlops/cmx4mlops/repo/tests/tutorials/test_tutorial_tvm_pip_vm.py @@ -0,0 +1,27 @@ +# This test covers version, variation, compilation from src, add_deps, +# add_deps_recursive, deps, post_deps + +import check as checks +import cmind as cm + +from pathlib import Path +import sys +import os + +sys.path.insert( + 1, + os.path.join( + Path(__file__).parent.parent.resolve(), + "script")) + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}}, 'submitter': 'Community', + 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', + 'mode': 'accuracy', 'test_query_count': '5', 'clean': 'true', 'quiet': 'yes'}) +checks.check_return(r) + +r = cm.access({'action': 'run', 'automation': 'script', 'tags': 'run,mlperf,inference,generate-run-cmds,_submission,_short', 'adr': + {'python': {'name': 'mlperf', 'version_min': '3.8'}, 'tvm': {'tags': '_pip-install'}}, 'submitter': 'Community', + 'implementation': 'python', 'hw_name': 'default', 'model': 'resnet50', 'backend': 'tvm-onnx', 'device': 'cpu', 'scenario': 'Offline', + 'test_query_count': '500', 'clean': 'true', 'quiet': 'yes'}) +checks.check_return(r) From 8c68dfef15932657f2bbf25279b2150c7e2f21e8 Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 09:23:33 +0100 Subject: [PATCH 03/19] clean up --- cmx4mlops/README.md | 1 + docs/cmx/README.md | 1 + 2 files changed, 2 insertions(+) create mode 100644 cmx4mlops/README.md create mode 100644 docs/cmx/README.md diff --git a/cmx4mlops/README.md b/cmx4mlops/README.md new file mode 100644 index 000000000..a0990367e --- /dev/null +++ b/cmx4mlops/README.md @@ -0,0 +1 @@ +TBD diff --git a/docs/cmx/README.md b/docs/cmx/README.md new file mode 100644 index 000000000..a0990367e --- /dev/null +++ b/docs/cmx/README.md @@ -0,0 +1 @@ +TBD From 59a377124e15aee4581d2095a197d570e26c935d Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 09:28:38 +0100 Subject: [PATCH 04/19] demo --- cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py | 8 ++++++++ 1 file 
changed, 8 insertions(+) diff --git a/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py b/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py index 3d17c08f7..1ba359f7f 100644 --- a/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py +++ b/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py @@ -81,3 +81,11 @@ def test(self, i): return {'return':0} + + ############################################################ + def run(self, i): + + import json + print (json.dumps(i, indent=2)) + + return {'return':0, 'new_key':'new_value'} From 17c32b1a66e8d21306252c8f80b6a4c9842c33fa Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 09:30:47 +0100 Subject: [PATCH 05/19] clean up --- cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py b/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py index 1ba359f7f..859cbf67a 100644 --- a/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py +++ b/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py @@ -88,4 +88,6 @@ def run(self, i): import json print (json.dumps(i, indent=2)) - return {'return':0, 'new_key':'new_value'} + v = i.get('test','') + + return {'return':0, 'new_key':v} From 60e63df95e2af25a182cc051074e5fd535b7f89e Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 09:43:17 +0100 Subject: [PATCH 06/19] workflows fixes --- .github/workflows/cla.yml | 2 +- .github/workflows/test-cmx-mlperf-inference-resnet50.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index c0e1544d4..093bbbafd 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: "MLCommons CLA bot check" - if: (github.event.comment.body == 'recheck') || github.event_name == 'pull_request_target' + if: github.repository_owner == 'mlcommons' && ((github.event.comment.body == 'recheck') || github.event_name == 'pull_request_target') # Alpha Release uses: mlcommons/cla-bot@master env: diff --git a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml index 3980fa945..2b34bf75f 100644 --- a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml +++ b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml @@ -63,4 +63,4 @@ jobs: git config --global credential.https://github.com.helper "!gh auth git-credential" git config --global credential.https://gist.github.com.helper "" git config --global credential.https://gist.github.com.helper "!gh auth git-credential" - cm run script --tags=push,github,mlperf,inference,submission --repo_url= https://github.com/ctuning/test_mlperf_inference_submissions --repo_branch=main --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet + cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/ctuning/test_mlperf_inference_submissions --repo_branch=main --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet From a9959def8384a2faf95cdf27d37d16905350dbd4 Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 09:54:18 +0100 Subject: [PATCH 07/19] clean up --- .github/workflows/cla.yml | 2 +- .github/workflows/test-cmx-mlperf-inference-resnet50.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index 
093bbbafd..f418f9b0c 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -17,7 +17,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # the below token should have repo scope and must be manually added by you in the repository's secret - PERSONAL_ACCESS_TOKEN : ${{ secrets.MLCOMMONS_BOT_CLA_TOKEN }} + PERSONAL_ACCESS_TOKEN: ${{ secrets.MLCOMMONS_BOT_CLA_TOKEN }} with: path-to-signatures: 'cla-bot/v1/cla.json' # branch should not be protected diff --git a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml index 2b34bf75f..653f104d9 100644 --- a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml +++ b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml @@ -63,4 +63,4 @@ jobs: git config --global credential.https://github.com.helper "!gh auth git-credential" git config --global credential.https://gist.github.com.helper "" git config --global credential.https://gist.github.com.helper "!gh auth git-credential" - cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/ctuning/test_mlperf_inference_submissions --repo_branch=main --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet + cmx run script --tags=push,github,mlperf,inference,submission --repo_url="https://github.com/ctuning/test_mlperf_inference_submissions" --repo_branch=main --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet From 3982b990ef2bdd054ae8bf2c20c07c73bcdba4b7 Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 09:57:19 +0100 Subject: [PATCH 08/19] fixing cla check --- .github/workflows/cla.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index f418f9b0c..0de92695f 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -4,25 +4,26 @@ on: issue_comment: types: [created] pull_request_target: - types: [opened,closed,synchronize] + types: [opened,closed,synchronize] jobs: cla-check: + if: github.repository_owner == 'mlcommons' runs-on: ubuntu-latest steps: - name: "MLCommons CLA bot check" - if: github.repository_owner == 'mlcommons' && ((github.event.comment.body == 'recheck') || github.event_name == 'pull_request_target') + if: (github.event.comment.body == 'recheck') || github.event_name == 'pull_request_target' # Alpha Release uses: mlcommons/cla-bot@master env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # the below token should have repo scope and must be manually added by you in the repository's secret - PERSONAL_ACCESS_TOKEN: ${{ secrets.MLCOMMONS_BOT_CLA_TOKEN }} + PERSONAL_ACCESS_TOKEN : ${{ secrets.MLCOMMONS_BOT_CLA_TOKEN }} with: path-to-signatures: 'cla-bot/v1/cla.json' # branch should not be protected branch: 'main' - allowlist: user1,bot* + allowlist: user1,mlcommons-bot,bot* remote-organization-name: mlcommons remote-repository-name: systems From 4cf64e666782733f0963d3fc52e821a35c52e4fa Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 10:05:09 +0100 Subject: [PATCH 09/19] test --- .github/workflows/test-cmx-mlperf-inference-resnet50.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml index 653f104d9..4dc930409 100644 --- a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml +++ b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml @@ -63,4 +63,4 @@ jobs: git 
config --global credential.https://github.com.helper "!gh auth git-credential" git config --global credential.https://gist.github.com.helper "" git config --global credential.https://gist.github.com.helper "!gh auth git-credential" - cmx run script --tags=push,github,mlperf,inference,submission --repo_url="https://github.com/ctuning/test_mlperf_inference_submissions" --repo_branch=main --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet + cmx run script --tags=push,github,mlperf,inference,submission --repo_url="https://github.com/ctuning/test_mlperf_inference_submissions" --repo_branch=main --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet From fad92f6c2253a68e084d60d8221d0508ab0fa9fd Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 14:30:40 +0100 Subject: [PATCH 10/19] clean up --- .../test-cmx-image-classification-onnx.yml | 2 +- ...bert-deepsparse-tf-onnxruntime-pytorch.yml | 66 +++++++++++++++++++ .../test-cmx-mlperf-inference-resnet50.yml | 4 +- .../test-cmx-mlperf-inference-rgat.yml | 51 ++++++++++++++ .github/workflows/test-cmx.yml | 52 +++++++++++++++ 5 files changed, 172 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml create mode 100644 .github/workflows/test-cmx-mlperf-inference-rgat.yml create mode 100644 .github/workflows/test-cmx.yml diff --git a/.github/workflows/test-cmx-image-classification-onnx.yml b/.github/workflows/test-cmx-image-classification-onnx.yml index de8250525..360628c06 100644 --- a/.github/workflows/test-cmx-image-classification-onnx.yml +++ b/.github/workflows/test-cmx-image-classification-onnx.yml @@ -1,7 +1,7 @@ # This workflow will install Python dependencies, run tests and lint with a variety of Python versions # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions -name: 'CMX: image classification with ONNX' +name: 'Image classification with ONNX via CMX' on: pull_request: diff --git a/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml b/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml new file mode 100644 index 000000000..361a8b102 --- /dev/null +++ b/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml @@ -0,0 +1,66 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: MLPerf inference bert (deepsparse, tf, onnxruntime, pytorch) + +on: + pull_request_target: + branches: [ "main", "master", "dev"] + paths: + - '.github/workflows/test-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml' + - '**' + - '!**.md' + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + # 3.12 didn't work on 20240305 - need to check + python-version: [ "3.11" ] + backend: [ "deepsparse", "tf", "onnxruntime", "pytorch" ] + precision: [ "int8", "fp32" ] + os: [ubuntu-latest, windows-latest, macos-latest] + exclude: + - backend: tf + - backend: pytorch + - backend: onnxruntime + - precision: fp32 + - os: windows-latest + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Configure git longpaths 
(Windows) + if: matrix.os == 'windows-latest' + run: | + git config --system core.longpaths true + - name: Install dependencies + run: | + pip install cmx4mlops + - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }} + if: matrix.os == 'windows-latest' + run: | + cmx run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --precision=${{ matrix.precision }} --target_qps=1 -v --quiet + - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }} + if: matrix.os != 'windows-latest' + run: | + cmx run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=1 -v --quiet + - name: Push Results + if: github.repository_owner == 'ctuning' + env: + USER: "GitHub Action" + EMAIL: "admin@cTuning.org" + GITHUB_TOKEN: ${{ secrets.TEST_RESULTS_GITHUB_TOKEN }} + run: | + git config --global user.name "${{ env.USER }}" + git config --global user.email "${{ env.EMAIL }}" + git config --global credential.https://github.com.helper "" + git config --global credential.https://github.com.helper "!gh auth git-credential" + git config --global credential.https://gist.github.com.helper "" + git config --global credential.https://gist.github.com.helper "!gh auth git-credential" + cmx run script --tags=push,github,mlperf,inference,submission --repo_url="https://github.com/ctuning/test_mlperf_inference_submissions" --repo_branch=main --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet diff --git a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml index 4dc930409..70e71a1d9 100644 --- a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml +++ b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml @@ -1,11 +1,11 @@ # This workflow will install Python dependencies, run tests and lint with a variety of Python versions # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions -name: MLPerf inference ResNet50 via CMX +name: 'MLPerf inference ResNet50 via CMX' on: pull_request_target: - branches: [ "main", "master", "dev", "mlperf-inference" ] + branches: [ "main", "master", "dev"] paths: - '.github/workflows/test-cmx-mlperf-inference-resnet50.yml' - '**' diff --git a/.github/workflows/test-cmx-mlperf-inference-rgat.yml b/.github/workflows/test-cmx-mlperf-inference-rgat.yml new file mode 100644 index 000000000..e8d20780b --- /dev/null +++ b/.github/workflows/test-cmx-mlperf-inference-rgat.yml @@ -0,0 +1,51 @@ +name: MLPerf inference R-GAT via CMX + +on: + pull_request: + branches: [ "main", "master", "dev" ] + paths: + - '.github/workflows/test-mlperf-inference-rgat.yml' + - '**' + - '!**.md' + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest] + python-version: [ "3.12" ] + backend: [ "pytorch" ] + implementation: [ "python" ] + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Configure git 
longpaths (Windows) + if: matrix.os == 'windows-latest' + run: | + git config --system core.longpaths true + - name: Install dependencies + run: | + pip install cmx4mlops + - name: Test MLPerf Inference R-GAT using ${{ matrix.backend }} on ${{ matrix.os }} + run: | + cmx run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --adr.inference-src.tags=_branch.dev --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --category=datacenter --quiet -v --target_qps=1 + - name: Push Results + if: github.repository_owner == 'ctuning' + env: + USER: "GitHub Action" + EMAIL: "admin@cTuning.org" + GITHUB_TOKEN: ${{ secrets.TEST_RESULTS_GITHUB_TOKEN }} + run: | + git config --global user.name "${{ env.USER }}" + git config --global user.email "${{ env.EMAIL }}" + git config --global credential.https://github.com.helper "" + git config --global credential.https://github.com.helper "!gh auth git-credential" + git config --global credential.https://gist.github.com.helper "" + git config --global credential.https://gist.github.com.helper "!gh auth git-credential" + cmx run script --tags=push,github,mlperf,inference,submission --repo_url="https://github.com/ctuning/test_mlperf_inference_submissions" --repo_branch=main --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet diff --git a/.github/workflows/test-cmx.yml b/.github/workflows/test-cmx.yml new file mode 100644 index 000000000..c46d7cd06 --- /dev/null +++ b/.github/workflows/test-cmx.yml @@ -0,0 +1,52 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: CM test + +on: + pull_request: + branches: [ "master", "main", "dev" ] + paths: + - '.github/workflows/test-cm.yml' + - 'cm/**' + - '!cm/**.md' + +jobs: + build: + strategy: + fail-fast: false + matrix: + python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] + on: [ubuntu-latest, windows-latest, macos-latest] + exclude: + - python-version: "3.7" + on: "macos-latest" + runs-on: "${{ matrix.on }}" + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies and test cm pull repo + run: | + python -m pip install --upgrade pip + python -m pip install flake8 pytest + pip install -r requirements.txt + python -m pip install --ignore-installed --verbose pip setuptools + python -m pip install cmx4mlops + python -m cmind + cm init + - name: Test + run: | + python tests/test_cm.py + cd && mkdir tmp_cm_repo + cd tmp_cm_repo && cm init repo + cmx add script tmp_cm_repo:my-test-script --tags=test,script + cmx add script .:my-test-script2 --tags=test2,script + cd && cmx add repo my-test-repo + cmx add script my-test-repo:my-test-script --tags=test,script + cd $HOME/CM/repos/my-test-repo && cmx add script .:my-test-script2 --tags=test2,script + From 4d39c6c2fb275ee6094b8dcfbc1f15915daca445 Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 14:32:14 +0100 Subject: [PATCH 11/19] demo --- 
cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py b/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py index 859cbf67a..3ccde7c44 100644 --- a/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py +++ b/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py @@ -88,6 +88,6 @@ def run(self, i): import json print (json.dumps(i, indent=2)) - v = i.get('test','') + v = i.get('test', 'default') return {'return':0, 'new_key':v} From 737b2593756ed30e02a8845f656b3d340e976f1e Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 14:46:16 +0100 Subject: [PATCH 12/19] clean up --- ...erf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml | 6 ++++-- .github/workflows/test-cmx-mlperf-inference-resnet50.yml | 6 ++++-- .github/workflows/test-cmx-mlperf-inference-rgat.yml | 3 ++- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml b/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml index 361a8b102..58eee6526 100644 --- a/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml +++ b/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml @@ -45,11 +45,13 @@ jobs: - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }} if: matrix.os == 'windows-latest' run: | - cmx run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --precision=${{ matrix.precision }} --target_qps=1 -v --quiet +# cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --precision=${{ matrix.precision }} --target_qps=1 -v --quiet + cmx run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --precision=${{ matrix.precision }} --target_qps=1 --v --quiet - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }} if: matrix.os != 'windows-latest' run: | - cmx run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=1 -v --quiet +# cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=1 -v --quiet + cmx run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ 
matrix.precision }} --target_qps=1 --v --quiet - name: Push Results if: github.repository_owner == 'ctuning' env: diff --git a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml index 70e71a1d9..16bd2f630 100644 --- a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml +++ b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml @@ -45,11 +45,13 @@ jobs: - name: Test MLPerf Inference ResNet50 (Windows) if: matrix.os == 'windows-latest' run: | - cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet +# cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet + cmx run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 --quiet - name: Test MLPerf Inference ResNet50 (Linux/macOS) if: matrix.os != 'windows-latest' run: | - cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet +# cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet + cmx run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 --v --quiet - name: Push Results if: github.repository_owner == 'ctuning' env: diff --git a/.github/workflows/test-cmx-mlperf-inference-rgat.yml b/.github/workflows/test-cmx-mlperf-inference-rgat.yml index e8d20780b..1eceafef4 100644 --- a/.github/workflows/test-cmx-mlperf-inference-rgat.yml +++ b/.github/workflows/test-cmx-mlperf-inference-rgat.yml @@ -34,7 +34,8 @@ jobs: pip install cmx4mlops - name: Test MLPerf Inference R-GAT using ${{ matrix.backend }} on ${{ matrix.os }} run: | - cmx run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --adr.inference-src.tags=_branch.dev --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --category=datacenter --quiet -v --target_qps=1 +# cm run script 
--tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --adr.inference-src.tags=_branch.dev --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --category=datacenter --quiet -v --target_qps=1 + cmx run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --adr.inference-src.tags=_branch.dev --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --category=datacenter --quiet --v --target_qps=1 - name: Push Results if: github.repository_owner == 'ctuning' env: From 847c14d9dea6177eb7977800e54b073f01b9fc69 Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 15:20:53 +0100 Subject: [PATCH 13/19] clean up --- .github/workflows/test-cmx.yml | 2 +- README.md | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test-cmx.yml b/.github/workflows/test-cmx.yml index c46d7cd06..e66f37b4a 100644 --- a/.github/workflows/test-cmx.yml +++ b/.github/workflows/test-cmx.yml @@ -1,7 +1,7 @@ # This workflow will install Python dependencies, run tests and lint with a variety of Python versions # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions -name: CM test +name: CMX test on: pull_request: diff --git a/README.md b/README.md index 69fa3296e..158dcf656 100755 --- a/README.md +++ b/README.md @@ -140,6 +140,11 @@ Copyright (c) 2014-2021 cTuning foundation * [Grigori Fursin](https://cKnowledge.org/gfursin) (FlexAI, cTuning) +## Maintainers + +* CM, CM4MLOps and MLPerf automations: MLCommons +* CMX (the next generation of CM): Grigori Fursin + ## Long-term vision To learn more about the motivation behind CK and CM technology, please explore the following presentations: From 6003f91c3cde3e228ffdbc8941dd5b0c2b68afb3 Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 15:36:00 +0100 Subject: [PATCH 14/19] fixing workflows --- .github/workflows/test-cmx-image-classification-onnx.yml | 3 +-- ...lperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml | 2 +- .github/workflows/test-cmx-mlperf-inference-resnet50.yml | 2 +- .github/workflows/test-cmx-mlperf-inference-rgat.yml | 4 ++-- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test-cmx-image-classification-onnx.yml b/.github/workflows/test-cmx-image-classification-onnx.yml index 360628c06..7ff5d8e6a 100644 --- a/.github/workflows/test-cmx-image-classification-onnx.yml +++ b/.github/workflows/test-cmx-image-classification-onnx.yml @@ -28,8 +28,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | - python3 -m pip install cmind - cmx pull repo --url=https://github.com/mlcommons/cm4mlops + python3 -m pip install cmx4mlops cmx test core - name: Test image classification with ONNX run: | diff --git a/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml b/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml index 58eee6526..c7123cb8f 100644 --- 
a/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml +++ b/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml @@ -41,7 +41,7 @@ jobs: git config --system core.longpaths true - name: Install dependencies run: | - pip install cmx4mlops + python -m pip install cmx4mlops - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }} if: matrix.os == 'windows-latest' run: | diff --git a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml index 16bd2f630..6c43fec04 100644 --- a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml +++ b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml @@ -41,7 +41,7 @@ jobs: git config --system core.longpaths true - name: Install dependencies run: | - pip install cmx4mlops + python -m pip install cmx4mlops - name: Test MLPerf Inference ResNet50 (Windows) if: matrix.os == 'windows-latest' run: | diff --git a/.github/workflows/test-cmx-mlperf-inference-rgat.yml b/.github/workflows/test-cmx-mlperf-inference-rgat.yml index 1eceafef4..d99c06c54 100644 --- a/.github/workflows/test-cmx-mlperf-inference-rgat.yml +++ b/.github/workflows/test-cmx-mlperf-inference-rgat.yml @@ -20,7 +20,7 @@ jobs: implementation: [ "python" ] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v3 with: @@ -31,7 +31,7 @@ jobs: git config --system core.longpaths true - name: Install dependencies run: | - pip install cmx4mlops + python -m pip install cmx4mlops - name: Test MLPerf Inference R-GAT using ${{ matrix.backend }} on ${{ matrix.os }} run: | # cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --adr.inference-src.tags=_branch.dev --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --category=datacenter --quiet -v --target_qps=1 From 94bfbc0bb38192652016b0da2f5ace3547eddd0a Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 15:37:49 +0100 Subject: [PATCH 15/19] fixing workflows --- ...lperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml | 4 ++-- .github/workflows/test-cmx-mlperf-inference-resnet50.yml | 2 +- .github/workflows/test-cmx-mlperf-inference-rgat.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml b/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml index c7123cb8f..d9821211b 100644 --- a/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml +++ b/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml @@ -30,7 +30,7 @@ jobs: - os: windows-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v3 with: @@ -41,7 +41,7 @@ jobs: git config --system core.longpaths true - name: Install dependencies run: | - python -m pip install cmx4mlops + python3 -m pip install cmx4mlops - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }} if: matrix.os == 'windows-latest' run: | diff --git 
a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml index 6c43fec04..d09d589cd 100644 --- a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml +++ b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml @@ -41,7 +41,7 @@ jobs: git config --system core.longpaths true - name: Install dependencies run: | - python -m pip install cmx4mlops + python3 -m pip install cmx4mlops - name: Test MLPerf Inference ResNet50 (Windows) if: matrix.os == 'windows-latest' run: | diff --git a/.github/workflows/test-cmx-mlperf-inference-rgat.yml b/.github/workflows/test-cmx-mlperf-inference-rgat.yml index d99c06c54..f6a755442 100644 --- a/.github/workflows/test-cmx-mlperf-inference-rgat.yml +++ b/.github/workflows/test-cmx-mlperf-inference-rgat.yml @@ -31,7 +31,7 @@ jobs: git config --system core.longpaths true - name: Install dependencies run: | - python -m pip install cmx4mlops + python3 -m pip install cmx4mlops - name: Test MLPerf Inference R-GAT using ${{ matrix.backend }} on ${{ matrix.os }} run: | # cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --adr.inference-src.tags=_branch.dev --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --category=datacenter --quiet -v --target_qps=1 From a187f49f43e975e19d119a5246f450c6deb6e6a6 Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 15:45:16 +0100 Subject: [PATCH 16/19] clean up --- ...erf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml | 6 +++--- .github/workflows/test-cmx-mlperf-inference-resnet50.yml | 6 +++--- .github/workflows/test-cmx-mlperf-inference-rgat.yml | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml b/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml index d9821211b..c7d549be4 100644 --- a/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml +++ b/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml @@ -35,13 +35,13 @@ jobs: uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python3 -m pip install cmx4mlops - name: Configure git longpaths (Windows) if: matrix.os == 'windows-latest' run: | git config --system core.longpaths true - - name: Install dependencies - run: | - python3 -m pip install cmx4mlops - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }} if: matrix.os == 'windows-latest' run: | diff --git a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml index d09d589cd..5c681943a 100644 --- a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml +++ b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml @@ -35,13 +35,13 @@ jobs: uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python3 -m pip install cmx4mlops - name: Configure git longpaths (Windows) if: matrix.os == 'windows-latest' run: | git config --system core.longpaths true - - name: Install dependencies - run: | - python3 -m pip install cmx4mlops - name: Test MLPerf 
Inference ResNet50 (Windows) if: matrix.os == 'windows-latest' run: | diff --git a/.github/workflows/test-cmx-mlperf-inference-rgat.yml b/.github/workflows/test-cmx-mlperf-inference-rgat.yml index f6a755442..fd242ff8e 100644 --- a/.github/workflows/test-cmx-mlperf-inference-rgat.yml +++ b/.github/workflows/test-cmx-mlperf-inference-rgat.yml @@ -25,13 +25,13 @@ jobs: uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python3 -m pip install cmx4mlops - name: Configure git longpaths (Windows) if: matrix.os == 'windows-latest' run: | git config --system core.longpaths true - - name: Install dependencies - run: | - python3 -m pip install cmx4mlops - name: Test MLPerf Inference R-GAT using ${{ matrix.backend }} on ${{ matrix.os }} run: | # cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --adr.inference-src.tags=_branch.dev --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --category=datacenter --quiet -v --target_qps=1 From 53f3990ac85f6954a5fac27910d54a17af63fc6d Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 15:47:37 +0100 Subject: [PATCH 17/19] clean up --- ...f-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml | 8 +++----- .github/workflows/test-cmx-mlperf-inference-resnet50.yml | 8 +++----- .github/workflows/test-cmx-mlperf-inference-rgat.yml | 7 +++---- 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml b/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml index c7d549be4..30ce1ad17 100644 --- a/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml +++ b/.github/workflows/test-cmx-mlperf-inference-bert-deepsparse-tf-onnxruntime-pytorch.yml @@ -35,22 +35,20 @@ jobs: uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python3 -m pip install cmx4mlops - name: Configure git longpaths (Windows) if: matrix.os == 'windows-latest' run: | git config --system core.longpaths true + - name: Install dependencies + run: | + python3 -m pip install cmx4mlops - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }} if: matrix.os == 'windows-latest' run: | -# cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --precision=${{ matrix.precision }} --target_qps=1 -v --quiet cmx run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }} --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --precision=${{ matrix.precision }} --target_qps=1 --v --quiet - name: Test MLPerf Inference Bert ${{ matrix.backend }} on ${{ matrix.os }} if: matrix.os != 'windows-latest' run: | -# cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os 
}}_x86 --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=1 -v --quiet cmx run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=bert-99 --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=5 --precision=${{ matrix.precision }} --target_qps=1 --v --quiet - name: Push Results if: github.repository_owner == 'ctuning' diff --git a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml index 5c681943a..7f0a9de9e 100644 --- a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml +++ b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml @@ -35,22 +35,20 @@ jobs: uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python3 -m pip install cmx4mlops - name: Configure git longpaths (Windows) if: matrix.os == 'windows-latest' run: | git config --system core.longpaths true + - name: Install dependencies + run: | + python3 -m pip install cmx4mlops - name: Test MLPerf Inference ResNet50 (Windows) if: matrix.os == 'windows-latest' run: | -# cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet cmx run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 --quiet - name: Test MLPerf Inference ResNet50 (Linux/macOS) if: matrix.os != 'windows-latest' run: | -# cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet cmx run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 --v --quiet - name: Push Results if: github.repository_owner == 'ctuning' diff --git a/.github/workflows/test-cmx-mlperf-inference-rgat.yml b/.github/workflows/test-cmx-mlperf-inference-rgat.yml index fd242ff8e..150fb0c8b 100644 --- a/.github/workflows/test-cmx-mlperf-inference-rgat.yml +++ b/.github/workflows/test-cmx-mlperf-inference-rgat.yml @@ -25,16 +25,15 @@ jobs: uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python3 -m pip install cmx4mlops - name: Configure git longpaths (Windows) if: matrix.os == 'windows-latest' run: | git config --system core.longpaths true + - name: Install dependencies + run: | + python3 -m pip install cmx4mlops - name: Test MLPerf Inference R-GAT using ${{ matrix.backend }} on ${{ matrix.os }} run: | -# cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short 
--submitter="MLCommons" --adr.inference-src.tags=_branch.dev --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --category=datacenter --quiet -v --target_qps=1 cmx run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="MLCommons" --adr.inference-src.tags=_branch.dev --pull_changes=yes --pull_inference_changes=yes --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=rgat --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --adr.compiler.tags=gcc --category=datacenter --quiet --v --target_qps=1 - name: Push Results if: github.repository_owner == 'ctuning' From 75b6b3579bfabb936f3934c211c17a6bd7b348fa Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Wed, 22 Jan 2025 15:51:15 +0100 Subject: [PATCH 18/19] test3 --- cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py b/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py index 3ccde7c44..f63b0d44b 100644 --- a/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py +++ b/cmx4mlops/cmx4mlops/repo/automation/cmx-demo/modulex.py @@ -89,5 +89,6 @@ def run(self, i): print (json.dumps(i, indent=2)) v = i.get('test', 'default') + v2 = i.get('test2', 'default') - return {'return':0, 'new_key':v} + return {'return':0, 'new_key':v, 'new_key2':v2} From 3fec8a57951111bacbf957de01c0850105b8e277 Mon Sep 17 00:00:00 2001 From: Grigori Fursin Date: Sat, 1 Feb 2025 15:44:54 +0100 Subject: [PATCH 19/19] clean up --- .github/workflows/test-cmx-mlperf-inference-resnet50.yml | 4 ++-- README.md | 4 ++-- docs/README.md | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml index 3980fa945..11ecc6cf9 100644 --- a/.github/workflows/test-cmx-mlperf-inference-resnet50.yml +++ b/.github/workflows/test-cmx-mlperf-inference-resnet50.yml @@ -50,7 +50,7 @@ jobs: if: matrix.os != 'windows-latest' run: | cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet - - name: Push Results + - name: Push Test MLPerf Results when on cTuning test branch if: github.repository_owner == 'ctuning' env: USER: "GitHub Action" @@ -63,4 +63,4 @@ jobs: git config --global credential.https://github.com.helper "!gh auth git-credential" git config --global credential.https://gist.github.com.helper "" git config --global credential.https://gist.github.com.helper "!gh auth git-credential" - cm run script --tags=push,github,mlperf,inference,submission --repo_url= https://github.com/ctuning/test_mlperf_inference_submissions --repo_branch=main --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet + cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/ctuning/test_mlperf_inference_submissions --repo_branch=main --commit_message="Results from R50 GH action on ${{ matrix.os }}" --quiet diff --git a/README.md 
b/README.md index 69fa3296e..987e0ee15 100755 --- a/README.md +++ b/README.md @@ -11,14 +11,14 @@ ## Collective Knowledge -[Collective Knowledge (CK, CM, CM4MLOps, CM4MLPerf and CMX)](https://cKnowledge.org) +[Collective Knowledge (CK)](https://cKnowledge.org) is an educational community project to learn how to run AI, ML and other emerging workloads in the most efficient and cost-effective way across diverse models, data sets, software and hardware: [ [white paper](https://arxiv.org/abs/2406.16791) ]. It includes the following sub-projects. -### Collective Minds (CM) +### Collective Mind (CM) The Collective Mind (CM) project, or Collective Minds, facilitates the decomposition of complex software systems into portable, reusable, and diff --git a/docs/README.md b/docs/README.md index a5fbcc2ac..c3fc2d31f 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,6 +1,6 @@ # CM documentation -**We plan to rewrite and simplify the CM documentation and tutorials based on user feedback in Q3 2024 - please stay tuned for more details**. +**We plan to rewrite and simplify the CM documentation and tutorials based on user feedback - please stay tuned for more details**. Collective Mind (CM) is a lightweight, non-intrusive and technology-agnostic workflow automation framework being developed by the [MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
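
PATCH 11 and PATCH 18 above touch the cmx-demo automation (modulex.py). For readers unfamiliar with the CM/CMX module interface those diffs exercise, below is a minimal, self-contained sketch of the same run() action. The class name and the __main__ harness are illustrative assumptions, not taken from the patches; the body mirrors the patched lines.

    import json

    class DemoAutomation:
        """Illustrative stand-in for the cmx-demo automation class."""

        def run(self, i):
            # Echo the full input dict, as the demo module does
            print(json.dumps(i, indent=2))
            # PATCH 11 added a default value for 'test'; PATCH 18 added 'test2'
            v = i.get('test', 'default')
            v2 = i.get('test2', 'default')
            # CM/CMX convention: 'return' == 0 signals success;
            # other keys carry the action's outputs
            return {'return': 0, 'new_key': v, 'new_key2': v2}

    if __name__ == '__main__':
        r = DemoAutomation().run({'test': 'hello'})
        assert r['return'] == 0 and r['new_key2'] == 'default'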
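The workflows above drive everything through the CLI (e.g. `cmx run script --tags=...`). The same calls can be issued from Python through the cmind package that `pip install cmx4mlops` pulls in. A hedged sketch, assuming the access() entry point maps CLI flags to dictionary keys; the tag set is shortened here for illustration, but the MLPerf tag sets from the workflows would be passed the same way.

    import cmind

    # Rough Python equivalent of: cmx run script --tags=detect,os --quiet
    r = cmind.access({'action': 'run',
                      'automation': 'script',
                      'tags': 'detect,os',
                      'quiet': True})

    # CM/CMX convention: non-zero 'return' is an error,
    # with the message in 'error'
    if r['return'] > 0:
        raise RuntimeError(r.get('error', 'unknown error'))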
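Similarly, the test-cmx.yml steps above create artifacts with commands such as `cmx add script my-test-repo:my-test-script --tags=test,script`. A sketch of the same action via the Python API, assuming (per the usual CM convention) that an 'artifact' key carries the repo:name argument:

    import cmind

    # Rough equivalent of:
    #   cmx add script my-test-repo:my-test-script --tags=test,script
    r = cmind.access({'action': 'add',
                      'automation': 'script',
                      'artifact': 'my-test-repo:my-test-script',
                      'tags': 'test,script'})

    if r['return'] > 0:
        raise RuntimeError(r.get('error', 'unknown error'))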