bump vllm version to 0.4.1
lightDev0405 committed May 28, 2024
1 parent db8c4db commit ab3bf48
Showing 2 changed files with 6 additions and 2 deletions.
setup.py (2 changes: 1 addition & 1 deletion)

@@ -24,7 +24,7 @@ def get_requires():
"metrics": ["nltk", "jieba", "rouge-chinese"],
"deepspeed": ["deepspeed>=0.10.0,<=0.14.0"],
"bitsandbytes": ["bitsandbytes>=0.39.0"],
"vllm": ["vllm>=0.4.0"],
"vllm": ["vllm>=0.4.1"],
"galore": ["galore-torch"],
"badam": ["badam"],
"gptq": ["optimum>=1.16.0", "auto-gptq>=0.5.0"],
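
For context, a minimal sketch of how an extras_require entry like the one pinned above is consumed by setuptools; the package name and the surrounding call are illustrative assumptions, not taken from this repository's setup.py:

    from setuptools import setup

    setup(
        name="llamafactory",  # illustrative package name, assumed for this sketch
        extras_require={
            # Matches the pin introduced by this commit; installing the extra
            # with `pip install -e ".[vllm]"` would then require vllm>=0.4.1.
            "vllm": ["vllm>=0.4.1"],
        },
    )
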
src/llamafactory/hparams/parser.py (6 changes: 5 additions & 1 deletion)

@@ -6,6 +6,7 @@
 import torch
 import transformers
 from transformers import HfArgumentParser, Seq2SeqTrainingArguments
+from transformers.integrations import is_deepspeed_zero3_enabled
 from transformers.trainer_utils import get_last_checkpoint
 from transformers.utils import is_torch_bf16_gpu_available
 from transformers.utils.versions import require_version
@@ -63,6 +64,9 @@ def _verify_model_args(model_args: "ModelArguments", finetuning_args: "Finetunin
     if model_args.adapter_name_or_path is not None and finetuning_args.finetuning_type != "lora":
         raise ValueError("Adapter is only valid for the LoRA method.")

+    if model_args.use_unsloth and is_deepspeed_zero3_enabled():
+        raise ValueError("Unsloth is incompatible with DeepSpeed ZeRO-3.")
+
     if model_args.quantization_bit is not None:
         if finetuning_args.finetuning_type != "lora":
             raise ValueError("Quantization is only compatible with the LoRA method.")
@@ -89,7 +93,7 @@ def _check_extra_dependencies(
require_version("mixture-of-depth>=1.1.6", "To fix: pip install mixture-of-depth>=1.1.6")

if model_args.infer_backend == "vllm":
require_version("vllm>=0.4.0", "To fix: pip install vllm>=0.4.0")
require_version("vllm>=0.4.1", "To fix: pip install vllm>=0.4.1")

if finetuning_args.use_galore:
require_version("galore_torch", "To fix: pip install galore_torch")
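
For context, a minimal sketch of how the checks touched above behave at argument-parsing time; the standalone helper below is an illustration, not the repository's actual function, though the field names mirror the ModelArguments attributes referenced in the diff:

    from transformers.integrations import is_deepspeed_zero3_enabled
    from transformers.utils.versions import require_version

    def verify_backend(model_args) -> None:
        # New guard from this commit: Unsloth cannot be combined with DeepSpeed ZeRO-3.
        if model_args.use_unsloth and is_deepspeed_zero3_enabled():
            raise ValueError("Unsloth is incompatible with DeepSpeed ZeRO-3.")

        # Updated pin: raises if the installed vllm does not satisfy >=0.4.1.
        if model_args.infer_backend == "vllm":
            require_version("vllm>=0.4.1", "To fix: pip install vllm>=0.4.1")
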
