Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[GHA] Replaced visual_language_chat_sample-ubuntu-llava #1802

Open
wants to merge 14 commits into
base: master
Choose a base branch
from
Open
12 changes: 0 additions & 12 deletions .github/workflows/causal_lm_cpp.yml
Original file line number Diff line number Diff line change
Expand Up @@ -399,18 +399,6 @@ jobs:
f.write(content.encode("utf-8"))
- run: diff cpp2.txt py2.txt

visual_language_chat_sample-ubuntu-llava_1_5:
uses: ./.github/workflows/job_vlm_sample_llava.yml
with:
model_id: llava-hf/llava-1.5-7b-hf
model_dir: llava_1_5_7b_ov

visual_language_chat_sample-ubuntu-llava_next:
uses: ./.github/workflows/job_vlm_sample_llava.yml
with:
model_id: llava-hf/llava-v1.6-mistral-7b-hf
model_dir: llava_v1_6_mistral_7b_ov

visual_language_chat_sample-ubuntu-internvl2:
runs-on: ubuntu-22.04-16-cores
steps:
Expand Down
49 changes: 0 additions & 49 deletions .github/workflows/job_vlm_sample_llava.yml

This file was deleted.

9 changes: 8 additions & 1 deletion .github/workflows/linux.yml
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ env:
SCCACHE_SERVER_PORT: 35555
SCCACHE_CACHE_SIZE: 30G
SCCACHE_AZURE_KEY_PREFIX: genai/ubuntu/22_04/x64
HF_HOME: /mount/caches/huggingface
GENAI_ARCHIVE_NAME: genai.tar.gz
GENAI_SAMPLES_NAME: genai_samples.tar.gz

Expand Down Expand Up @@ -326,16 +327,22 @@ jobs:
- name: 'LLM'
marker: 'llm'
cmd: 'tests/python_tests/samples'
runner: 'aks-linux-4-cores-16gb'
- name: 'Whisper'
marker: 'whisper'
cmd: 'tests/python_tests/samples'
runner: 'aks-linux-4-cores-16gb'
- name: 'VLM'
marker: 'vlm'
cmd: 'tests/python_tests/samples'
runner: 'aks-linux-8-cores-64gb'

needs: [ openvino_download, genai_build_cmake, genai_build_wheel, genai_build_samples ]
timeout-minutes: 45
defaults:
run:
shell: bash
runs-on: aks-linux-4-cores-16gb
runs-on: ${{ matrix.test.runner }}
container:
image: openvinogithubactions.azurecr.io/ov_test/ubuntu_22_04_x64:${{ needs.openvino_download.outputs.docker_tag }}
volumes:
Expand Down
2 changes: 2 additions & 0 deletions tests/python_tests/pytest.ini
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,13 @@ markers =
; samples - Tests related to the sample models.
; llm - Tests related to large language models.
; whisper - Tests related to the Whisper model.
; vlm - Tests related to the VLM model.
precommit
nightly
real_models
samples
llm
whisper
vlm

addopts = -m precommit
11 changes: 10 additions & 1 deletion tests/python_tests/samples/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,12 +52,21 @@
"TinyStories-1M": {
"name": "roneneldan/TinyStories-1M",
"convert_args": ['--trust-remote-code', '--weight-format', 'fp16']
},
"llava-1.5-7b-hf": {
"name": "llava-hf/llava-1.5-7b-hf",
"convert_args": ['--trust-remote-code']
},
"llava-v1.6-mistral-7b-hf": {
"name": "llava-hf/llava-v1.6-mistral-7b-hf",
"convert_args": ['--trust-remote-code']
}
}

# Map of auxiliary test asset file names to their download URLs.
# NOTE(review): presumably consumed by the `download_test_content` fixture
# referenced in the sample tests — confirm against conftest fixtures.
TEST_FILES = {
    "how_are_you_doing_today.wav": "https://storage.openvinotoolkit.org/models_contrib/speech/2021.2/librispeech_s5/how_are_you_doing_today.wav",
    "adapter_model.safetensors": "https://huggingface.co/smangrul/tinyllama_lora_sql/resolve/main/adapter_model.safetensors",
    "monalisa.jpg": "https://llava-vl.github.io/static/images/monalisa.jpg",
}

SAMPLES_PY_DIR = os.environ.get("SAMPLES_PY_DIR", os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../samples/python")))
Expand Down
6 changes: 4 additions & 2 deletions tests/python_tests/samples/test_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,10 @@
from conftest import logger
import subprocess # nosec B404

def run_sample(command, input_data=None):
    """Execute a sample command and return the completed process.

    Args:
        command: Argument list for the sample (list form, so no shell is
            involved; see subprocess.run with shell=False).
        input_data: Optional text fed to the sample's stdin (e.g. chat
            prompts). Defaults to None, preserving the original no-stdin
            behavior for existing callers.

    Returns:
        subprocess.CompletedProcess with captured, utf-8 decoded stdout/stderr.

    Raises:
        subprocess.CalledProcessError: if the sample exits non-zero
            (check=True).
    """
    logger.info(f"Running sample command: {' '.join(command)}")
    if input_data:
        logger.info(f"Input data: {input_data}")
    result = subprocess.run(command, capture_output=True, text=True, check=True, encoding='utf-8', input=input_data)
    logger.info(f"Sample output: {result.stdout}")
    return result
35 changes: 35 additions & 0 deletions tests/python_tests/samples/test_visual_language_chat.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
# Copyright (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import os
import pytest
import sys

from conftest import TEST_FILES, SAMPLES_PY_DIR, SAMPLES_CPP_DIR
from test_utils import run_sample

class TestVisualLanguageChat:
    """Checks that the Python and C++ visual_language_chat samples agree.

    Each case converts a LLaVA model (via the indirect ``convert_model``
    fixture), downloads the test image (via ``download_test_content``),
    runs both samples with the same chat prompts on stdin, and requires
    byte-identical stdout.
    """

    @pytest.mark.vlm
    @pytest.mark.samples
    @pytest.mark.parametrize(
        "convert_model, sample_args",
        [
            pytest.param("llava-1.5-7b-hf", 'Who drew this painting?\nWhen did the painter live?'),
            pytest.param("llava-v1.6-mistral-7b-hf", 'Who drew this painting?\nWhen did the painter live?'),
        ],
        indirect=["convert_model"],
    )
    @pytest.mark.parametrize("download_test_content", [TEST_FILES["monalisa.jpg"]], indirect=True)
    def test_sample_visual_language_chat(self, convert_model, download_test_content, sample_args):
        """Run both sample implementations and compare their transcripts."""
        # Test Python sample
        py_script = os.path.join(SAMPLES_PY_DIR, "visual_language_chat/visual_language_chat.py")
        py_command = [sys.executable, py_script, convert_model, download_test_content]
        py_result = run_sample(py_command, sample_args)

        # Test CPP sample
        cpp_sample = os.path.join(SAMPLES_CPP_DIR, 'visual_language_chat')
        cpp_command = [cpp_sample, convert_model, download_test_content]
        cpp_result = run_sample(cpp_command, sample_args)

        # Compare results: fixed f-string with no placeholders (was f"Results should match")
        assert py_result.stdout == cpp_result.stdout, "Results should match"
Loading