diff --git a/tests/integration/benchmark/ir-llm/config/lmi-dist/config.yml b/tests/integration/benchmark/ir-llm/config/lmi-dist/config.yml
index c5289c24c..717119c70 100644
--- a/tests/integration/benchmark/ir-llm/config/lmi-dist/config.yml
+++ b/tests/integration/benchmark/ir-llm/config/lmi-dist/config.yml
@@ -81,7 +81,7 @@ benchmarks:
         image: "LMI-dist"
         config: "benchmark_config_passive_Llama-3-1-8b-instruct.json"
         dataset: "s3://djl-benchmark-datasets/openorca/openorca_instruct_sample_payload_en_500-1000.tar.gz"
-        action: no
+        action: yes
   - model: "Llama-3.1-8b-instruct-suzuka"
     endpoints:
       - endpoint: "sagemaker"
@@ -95,7 +95,7 @@ benchmarks:
         image: "LMI-dist"
         config: "benchmark_config_passive_Llama-3-1-70b.json"
         dataset: "s3://djl-benchmark-datasets/openorca/openorca_base_sample_payload_en_500-1000.tar.gz"
-        action: no
+        action: yes
   - model: "Llama-3.1-70b-suzuka"
     endpoints:
       - endpoint: "sagemaker"
@@ -109,7 +109,7 @@ benchmarks:
         image: "LMI-dist"
         config: "benchmark_config_passive_Llama-3-1-70b-instruct.json"
         dataset: "s3://djl-benchmark-datasets/openorca/openorca_instruct_sample_payload_en_500-1000.tar.gz"
-        action: no
+        action: yes
   - model: "Llama-3.1-70b-instruct-suzuka"
     endpoints:
       - endpoint: "sagemaker"
diff --git a/tests/integration/benchmark/ir-llm/config/trtllm/config.yml b/tests/integration/benchmark/ir-llm/config/trtllm/config.yml
index cdf7a4e69..aba72f767 100644
--- a/tests/integration/benchmark/ir-llm/config/trtllm/config.yml
+++ b/tests/integration/benchmark/ir-llm/config/trtllm/config.yml
@@ -75,21 +75,21 @@ benchmarks:
         image: "TRTLLM-v12-8192-sample"
         config: "benchmark_config_Llama-3-1-8b-instruct.json"
         dataset: "s3://djl-benchmark-datasets/openorca/openorca_trtllm_instruct_sample_payload_en_500-1000.tar.gz"
-        action: no
+        action: yes
   - model: "Llama-3.1-70b"
     endpoints:
       - endpoint: "sagemaker"
         image: "TRTLLM"
         config: "benchmark_config_Llama-3-1-70b.json"
         dataset: "s3://djl-benchmark-datasets/openorca/openorca_trtllm_base_sample_payload_en_500-1000.tar.gz"
-        action: no
+        action: yes
   - model: "Llama-3.1-70b-instruct"
     endpoints:
       - endpoint: "sagemaker"
         image: "TRTLLM"
         config: "benchmark_config_Llama-3-1-70b-instruct.json"
         dataset: "s3://djl-benchmark-datasets/openorca/openorca_trtllm_instruct_sample_payload_en_500-1000.tar.gz"
-        action: no
+        action: yes
   - model: "Llama-3.1-405b-fp8"
     endpoints:
       - endpoint: "sagemaker"
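
The change flips the `action` flag from `no` to `yes` for the listed LMI-dist and TRT-LLM benchmark entries, which re-enables them in the benchmark run. Below is a minimal sketch (not the repository's actual runner, whose code is not shown in this diff) of how a harness could load one of these `config.yml` files and keep only the enabled entries. It assumes PyYAML is installed and that `action` lives under each endpoint as reconstructed above; with PyYAML's YAML 1.1 resolver, the bare tokens `yes`/`no` parse as the booleans `True`/`False`.

```python
import yaml

# Hypothetical path; matches the lmi-dist config touched by this diff.
CONFIG_PATH = "tests/integration/benchmark/ir-llm/config/lmi-dist/config.yml"

with open(CONFIG_PATH) as f:
    config = yaml.safe_load(f)

# Collect (model, benchmark config, dataset) tuples for endpoints whose
# `action` flag is truthy, i.e. entries switched to `action: yes`.
enabled = [
    (bench["model"], ep["config"], ep["dataset"])
    for bench in config.get("benchmarks", [])
    for ep in bench.get("endpoints", [])
    if ep.get("action")
]

for model, bench_config, dataset in enabled:
    print(f"would benchmark {model} with {bench_config} on {dataset}")
```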