This repository has been archived by the owner on Oct 11, 2024. It is now read-only.

Commit

Skip models that don’t support py38
dbarbuzzi committed May 21, 2024
1 parent c57b35d commit 0260a94
Showing 1 changed file with 12 additions and 0 deletions.
tests/models/test_big_models.py

@@ -4,6 +4,9 @@
 Run `pytest tests/models/test_big_models.py`.
 """
+# UPSTREAM SYNC
+import sys
+
 import pytest
 
 MODELS = [
@@ -27,6 +30,11 @@
     "EleutherAI/gpt-j-6b",
 ]
 
+# UPSTREAM SYNC
+SKIPPED_MODELS_PY38 = [
+    "mosaicml/mpt-7b",
+]
+
 
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("dtype", ["half"])
@@ -45,6 +53,10 @@ def test_models(
     if model in SKIPPED_MODELS_OOM:
         pytest.skip(reason="These models cause OOM issue on the CPU"
                     "because it is a fp32 checkpoint.")
+    # UPSTREAM SYNC
+    if model in SKIPPED_MODELS_PY38 and sys.version_info < (3, 9):
+        pytest.skip(reason="This model has custom code that does not "
+                    "support Python 3.8")
 
     hf_model = hf_runner(model, dtype=dtype)
     hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens)
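
For readers unfamiliar with the pattern, the guard added above is the standard pytest idiom of skipping a parametrized case at runtime when the interpreter is too old. A minimal self-contained sketch of the same idiom follows; the model list and the trivial test body here are illustrative placeholders, not code from this repository:

# Sketch of the version-gated skip used in the commit above.
# SKIPPED_MODELS_PY38 and the test body are illustrative only.
import sys

import pytest

SKIPPED_MODELS_PY38 = ["mosaicml/mpt-7b"]


@pytest.mark.parametrize("model",
                         ["mosaicml/mpt-7b", "EleutherAI/gpt-j-6b"])
def test_model_is_supported(model):
    # Skip models whose custom modeling code requires Python >= 3.9.
    if model in SKIPPED_MODELS_PY38 and sys.version_info < (3, 9):
        pytest.skip(reason="Model requires Python 3.9 or newer")
    # Real tests would load the model and compare outputs here.
    assert isinstance(model, str)

Under Python 3.8 the first case is reported as skipped rather than failed, which keeps the py38 CI lane green while the model stays covered on newer interpreters.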

2 comments on commit 0260a94

@github-actions

Benchmark suite: bigger_is_better (Current: 0260a94, Previous: 93183d6)

request_throughput: VLLM Engine throughput - synthetic
  model: NousResearch/Llama-2-7b-chat-hf, max_model_len: 4096
  benchmark_throughput: use-all-available-gpus, input-len 256, output-len 128, num-prompts 1000
  gpu: NVIDIA A10G x 1, vllm_version: 0.3.0, torch_version: 2.3.0+cu121
  python_version: 3.10.12 (main, May 10 2024, 13:42:25) [GCC 9.4.0]
  Current: 3.833162215106034 prompts/s | Previous: 3.8343768684524253 prompts/s | Ratio: 1.00

token_throughput: VLLM Engine throughput - synthetic
  model: NousResearch/Llama-2-7b-chat-hf, max_model_len: 4096
  benchmark_throughput: use-all-available-gpus, input-len 256, output-len 128, num-prompts 1000
  gpu: NVIDIA A10G x 1, vllm_version: 0.3.0, torch_version: 2.3.0+cu121
  python_version: 3.10.12 (main, May 10 2024, 13:42:25) [GCC 9.4.0]
  Current: 1471.934290600717 tokens/s | Previous: 1472.4007174857313 tokens/s | Ratio: 1.00

This comment was automatically generated by workflow using github-action-benchmark.

@github-actions

Benchmark suite: bigger_is_better (Current: 0260a94, Previous: 93183d6)

request_throughput: VLLM Engine throughput - synthetic
  model: NousResearch/Llama-2-7b-chat-hf, max_model_len: 4096
  benchmark_throughput: use-all-available-gpus, input-len 256, output-len 128, num-prompts 1000
  gpu: NVIDIA A10G x 1, vllm_version: 0.3.0, torch_version: 2.3.0+cu121
  python_version: 3.8.17 (default, May 10 2024, 13:27:09) [GCC 9.4.0]
  Current: 3.788469494408559 prompts/s

token_throughput: VLLM Engine throughput - synthetic
  model: NousResearch/Llama-2-7b-chat-hf, max_model_len: 4096
  benchmark_throughput: use-all-available-gpus, input-len 256, output-len 128, num-prompts 1000
  gpu: NVIDIA A10G x 1, vllm_version: 0.3.0, torch_version: 2.3.0+cu121
  python_version: 3.8.17 (default, May 10 2024, 13:27:09) [GCC 9.4.0]
  Current: 1454.7722858528866 tokens/s

This comment was automatically generated by workflow using github-action-benchmark.
