
Commit

formatting'
afeldman-nm committed Jun 3, 2024
1 parent 2a1d84a commit f6e0310
Showing 1 changed file with 4 additions and 3 deletions.
7 changes: 4 additions & 3 deletions tests/kernels/test_self_and_cross_attn.py
@@ -11,9 +11,9 @@
 from vllm.attention.backends.utils import (
     STR_NOT_IMPL_ENC_DEC_CHUNKED_PREFILL, STR_NOT_IMPL_ENC_DEC_ROCM_HIP)
 from vllm.attention.backends.xformers import XFormersBackend
+from vllm.logger import init_logger
 from vllm.utils import is_hip, make_tensor_with_pad
 
-from vllm.logger import init_logger
 logger = init_logger(__name__)
 
 # If not is_hip(): supported head sizes are [64, 80, 96, 112, 128, 256]
@@ -1282,8 +1282,9 @@ def test_encoder_attention(num_heads: int, head_size: int, backend_name: str,
     '''
 
     import vllm.envs as envs
-    print("envs.VLLM_ATTENTION_BACKEND: "+str(envs.VLLM_ATTENTION_BACKEND))
-    logger.info("envs.VLLM_ATTENTION_BACKEND: "+str(envs.VLLM_ATTENTION_BACKEND))
+    print("envs.VLLM_ATTENTION_BACKEND: " + str(envs.VLLM_ATTENTION_BACKEND))
+    logger.info("envs.VLLM_ATTENTION_BACKEND: ",
+                str(envs.VLLM_ATTENTION_BACKEND))
 
     # Attention scale factor, attention backend instance, attention wrapper
     # instance. Encoder attention does not require KV cache.
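A side note on the logging call added in the hunk above: Python's logging API treats extra positional arguments to logger.info as %-style formatting arguments, so with a message string that contains no %s placeholder the trailing argument would not be interpolated into the logged message. The sketch below shows the conventional lazy-formatting form; it reuses only names that appear in the diff (init_logger, vllm.envs) and is illustrative only, not part of this commit.

# Illustrative sketch, not part of commit f6e0310: the conventional
# %-style lazy logging form, using names that appear in the diff above.
import vllm.envs as envs
from vllm.logger import init_logger

logger = init_logger(__name__)

# logging substitutes %s with the argument only when the record is emitted,
# so no string formatting work is done if the INFO level is disabled.
logger.info("envs.VLLM_ATTENTION_BACKEND: %s", envs.VLLM_ATTENTION_BACKEND)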
