-
Notifications
You must be signed in to change notification settings - Fork 288
[CI] [GHA] Use OV_CACHE
in the WWB tests
#2781
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from 12 commits
6495cfd
29a668c
229ef95
dd2140f
2bc0ada
72ab81a
a6028ee
ffbafd1
57916ca
01a91ed
916d33d
2dcc9ba
19e5943
31bc592
f8f7437
42d1eb0
e305656
0470bf1
404461e
464d2f9
9dfa38b
a8d0882
a6d802a
9c55669
af17000
cb3ad5f
6674ebf
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,7 @@ | ||
from pathlib import Path | ||
import os | ||
import tempfile | ||
|
||
|
||
WWB_CACHE_PATH = Path(os.path.join(os.environ.get('OV_CACHE', tempfile.TemporaryDirectory().name), 'wwb_cache')) | ||
akashchi marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
akashchi marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
SHOULD_CLEANUP = os.environ.get('CLEANUP_CACHE', '').lower() in ('1', 'true', 'yes') |
Original file line number | Diff line number | Diff line change | ||||
---|---|---|---|---|---|---|
|
@@ -5,14 +5,14 @@ | |||||
import shutil | ||||||
import pytest | ||||||
import logging | ||||||
import tempfile | ||||||
import re | ||||||
from constants import WWB_CACHE_PATH, SHOULD_CLEANUP | ||||||
|
||||||
|
||||||
logging.basicConfig(level=logging.INFO) | ||||||
logger = logging.getLogger(__name__) | ||||||
|
||||||
MODEL_CACHE = tempfile.mkdtemp() | ||||||
MODEL_CACHE = WWB_CACHE_PATH | ||||||
OV_IMAGE_MODELS = ["echarlaix/tiny-random-stable-diffusion-xl", | ||||||
"yujiepan/stable-diffusion-3-tiny-random", | ||||||
"katuni4ka/tiny-random-flux", | ||||||
|
@@ -38,13 +38,14 @@ def run_wwb(args): | |||||
|
||||||
def setup_module(): | ||||||
for model_id in OV_IMAGE_MODELS: | ||||||
MODEL_PATH = os.path.join(MODEL_CACHE, model_id.replace("/", "--")) | ||||||
MODEL_PATH = MODEL_CACHE.joinpath(model_id.replace("/", "--")) | ||||||
akashchi marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||||||
subprocess.run(["optimum-cli", "export", "openvino", "--model", model_id, MODEL_PATH], capture_output=True, text=True) | ||||||
|
||||||
|
||||||
def teardown_module(): | ||||||
logger.info("Remove models") | ||||||
shutil.rmtree(MODEL_CACHE) | ||||||
if SHOULD_CLEANUP: | ||||||
logger.info("Removing models") | ||||||
shutil.rmtree(MODEL_CACHE) | ||||||
|
||||||
|
||||||
def get_similarity(output: str) -> float: | ||||||
|
@@ -121,11 +122,12 @@ def test_image_model_genai(model_id, model_type, tmp_path): | |||||
pytest.xfail("Ticket 173169") | ||||||
|
||||||
GT_FILE = tmp_path / "gt.csv" | ||||||
MODEL_PATH = os.path.join(MODEL_CACHE, model_id.replace("/", "--")) | ||||||
MODEL_PATH = MODEL_CACHE.joinpath(model_id.replace("/", "--")) | ||||||
akashchi marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||||||
MODEL_PATH = MODEL_PATH if MODEL_PATH.exists() else model_id | ||||||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The fallback logic reassigns MODEL_PATH from a Path object to a string, creating inconsistent types. Consider using a consistent type throughout — e.g. keep MODEL_PATH as a Path and wrap the fallback as Path(model_id), or introduce a separately named variable for the fallback model identifier.
Suggested change
Copilot uses AI. Check for mistakes. Positive FeedbackNegative Feedback There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The underlying |
||||||
|
||||||
run_wwb([ | ||||||
"--base-model", | ||||||
model_id, | ||||||
MODEL_PATH, | ||||||
"--num-samples", | ||||||
"1", | ||||||
"--gt-data", | ||||||
|
Original file line number | Diff line number | Diff line change | ||||
---|---|---|---|---|---|---|
@@ -1,12 +1,13 @@ | ||||||
import os | ||||||
import shutil | ||||||
import tempfile | ||||||
import pandas as pd | ||||||
import pytest | ||||||
import logging | ||||||
import json | ||||||
import sys | ||||||
|
||||||
from constants import WWB_CACHE_PATH, SHOULD_CLEANUP | ||||||
|
||||||
from transformers import AutoTokenizer | ||||||
from optimum.intel.openvino import OVModelForCausalLM, OVWeightQuantizationConfig | ||||||
|
||||||
|
@@ -18,9 +19,9 @@ | |||||
|
||||||
|
||||||
model_id = "facebook/opt-125m" | ||||||
tmp_dir = tempfile.mkdtemp() | ||||||
base_model_path = os.path.join(tmp_dir, "opt125m") | ||||||
target_model_path = os.path.join(tmp_dir, "opt125m_int8") | ||||||
cache_dir = WWB_CACHE_PATH | ||||||
base_model_path = os.path.join(cache_dir, "opt125m") | ||||||
akashchi marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||||||
target_model_path = os.path.join(cache_dir, "opt125m_int8") | ||||||
akashchi marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||||||
|
||||||
gptq_model_id = "ybelkada/opt-125m-gptq-4bit" | ||||||
awq_model_id = "TitanML/tiny-mixtral-AWQ-4bit" | ||||||
|
@@ -29,24 +30,27 @@ | |||||
def setup_module(): | ||||||
from optimum.exporters.openvino.convert import export_tokenizer | ||||||
|
||||||
logger.info("Create models") | ||||||
tokenizer = AutoTokenizer.from_pretrained(model_id) | ||||||
base_model = OVModelForCausalLM.from_pretrained(model_id) | ||||||
base_model.save_pretrained(base_model_path) | ||||||
tokenizer.save_pretrained(base_model_path) | ||||||
export_tokenizer(tokenizer, base_model_path) | ||||||
if not os.path.exists(base_model_path): | ||||||
logger.info("Create models") | ||||||
tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir=WWB_CACHE_PATH) | ||||||
base_model = OVModelForCausalLM.from_pretrained(model_id, cache_dir=WWB_CACHE_PATH) | ||||||
base_model.save_pretrained(base_model_path) | ||||||
tokenizer.save_pretrained(base_model_path) | ||||||
export_tokenizer(tokenizer, base_model_path) | ||||||
|
||||||
target_model = OVModelForCausalLM.from_pretrained( | ||||||
model_id, quantization_config=OVWeightQuantizationConfig(bits=8) | ||||||
) | ||||||
target_model.save_pretrained(target_model_path) | ||||||
tokenizer.save_pretrained(target_model_path) | ||||||
export_tokenizer(tokenizer, target_model_path) | ||||||
if not os.path.exists(target_model_path): | ||||||
target_model = OVModelForCausalLM.from_pretrained( | ||||||
model_id, quantization_config=OVWeightQuantizationConfig(bits=8), cache_dir=WWB_CACHE_PATH | ||||||
) | ||||||
target_model.save_pretrained(target_model_path) | ||||||
tokenizer.save_pretrained(target_model_path) | ||||||
export_tokenizer(tokenizer, target_model_path) | ||||||
|
||||||
|
||||||
def teardown_module(): | ||||||
logger.info("Remove models") | ||||||
shutil.rmtree(tmp_dir) | ||||||
if SHOULD_CLEANUP: | ||||||
logger.info("Removing models") | ||||||
shutil.rmtree(cache_dir) | ||||||
|
||||||
|
||||||
@pytest.mark.skipif((sys.platform == "darwin"), reason='173169') | ||||||
|
@@ -145,7 +149,7 @@ def test_text_language(tmp_path): | |||||
temp_file_name = tmp_path / "gt.csv" | ||||||
run_wwb([ | ||||||
"--base-model", | ||||||
"Qwen/Qwen2-0.5B", | ||||||
'Qwen/Qwen2-0.5B', | ||||||
|
'Qwen/Qwen2-0.5B', | |
"Qwen/Qwen2-0.5B", |
Copilot uses AI. Check for mistakes.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Could be a
conftest.py
.
should_cleanup
and wwb_cache_path
could be session fixtures.