From 456df955e578fb32ad668c48bfd052722d21025d Mon Sep 17 00:00:00 2001
From: Michael Goin
Date: Thu, 10 Oct 2024 02:15:40 -0400
Subject: [PATCH] [Bugfix] Fix Weight Loading Multiple GPU Test - Large Models
 (#9213)

Signed-off-by: Amit Garg
---
 tests/weight_loading/models-large.txt | 1 -
 tests/weight_loading/models.txt       | 1 +
 2 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/weight_loading/models-large.txt b/tests/weight_loading/models-large.txt
index 5fda910fde084..8ab7f05d7d1b2 100644
--- a/tests/weight_loading/models-large.txt
+++ b/tests/weight_loading/models-large.txt
@@ -1,6 +1,5 @@
 compressed-tensors, nm-testing/Mixtral-8x7B-Instruct-v0.1-W4A16-quantized, main
 compressed-tensors, nm-testing/Mixtral-8x7B-Instruct-v0.1-W4A16-channel-quantized, main
 compressed-tensors, nm-testing/Mixtral-8x7B-Instruct-v0.1-W8A16-quantized, main
-compressed-tensors, mgoin/DeepSeek-Coder-V2-Lite-Instruct-FP8, main
 gptq_marlin, TheBloke/Mixtral-8x7B-v0.1-GPTQ, main
 awq_marlin, casperhansen/deepseek-coder-v2-instruct-awq, main
\ No newline at end of file
diff --git a/tests/weight_loading/models.txt b/tests/weight_loading/models.txt
index a90b352a39bca..a4ee9538d646b 100644
--- a/tests/weight_loading/models.txt
+++ b/tests/weight_loading/models.txt
@@ -20,6 +20,7 @@ compressed-tensors, nm-testing/Meta-Llama-3-8B-FP8-compressed-tensors-test, main
 compressed-tensors, nm-testing/Phi-3-mini-128k-instruct-FP8, main
 compressed-tensors, neuralmagic/Phi-3-medium-128k-instruct-quantized.w4a16, main
 compressed-tensors, nm-testing/TinyLlama-1.1B-Chat-v1.0-actorder-group, main
+compressed-tensors, mgoin/DeepSeek-Coder-V2-Lite-Instruct-FP8, main
 awq, casperhansen/mixtral-instruct-awq, main
 awq_marlin, casperhansen/mixtral-instruct-awq, main
 fp8, neuralmagic/Meta-Llama-3-8B-Instruct-FP8-KV, main