2 changes: 1 addition & 1 deletion tests/python_tests/data/models.py
@@ -8,7 +8,7 @@

def get_models_list():
model_ids = [
"katuni4ka/tiny-random-phi3",
"optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM",
]
if pytest.selected_model_ids:
model_ids = [model_id for model_id in model_ids if model_id in pytest.selected_model_ids.split(' ')]
4 changes: 2 additions & 2 deletions tests/python_tests/data/tokenizer_configs.py
@@ -670,7 +670,7 @@ def get_tokenizer_configs():
"unk_token": "<unk>",
"chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>'+ '\n' + message['content'] + '\n'}}{% elif message['role'] == 'user' %}{{'<|user|>' + '\n' + message['content'] + '\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>' + '\n' + message['content'] + '<|endoftext|>' + ('' if loop.last else '\n')}}{% endif %}{% endfor %}"
},
"katuni4ka/tiny-random-phi3": {
"optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM": {
"bos_token": "<s>",
"eos_token": "<|endoftext|>",
"pad_token": "<|endoftext|>",
@@ -712,7 +712,7 @@ def get_tokenizer_configs():
"unk_token": "<unk>",
"chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '### User:\n' + message['content'] }}\n{% elif message['role'] == 'system' %}\n{{ '### System:\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '### Assistant:\n' + message['content'] }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '### Assistant:' }}\n{% endif %}\n{% endfor %}"
},
"katuni4ka/tiny-random-minicpm": {
"optimum-intel-internal-testing/tiny-random-minicpm": {
"bos_token": "<s>",
"eos_token": "</s>",
"pad_token": None,
6 changes: 3 additions & 3 deletions tests/python_tests/samples/conftest.py
@@ -97,7 +97,7 @@
"convert_args": []
},
"tiny-random-minicpmv-2_6": {
"name": "katuni4ka/tiny-random-minicpmv-2_6",
"name": "optimum-intel-internal-testing/tiny-random-minicpmv-2_6",
"convert_args": ['--trust-remote-code', "--task", "image-text-to-text"]
},
"InternVL2-1B": {
@@ -121,15 +121,15 @@
"convert_args": ["--task", "text-generation-with-past", "--weight-format", "int8"]
},
"tiny-random-latent-consistency": {
"name": "echarlaix/tiny-random-latent-consistency",
"name": "optimum-intel-internal-testing/tiny-random-latent-consistency",
"convert_args": ['--trust-remote-code', '--weight-format', 'fp16']
},
"tiny-random-latent-consistency-lora": {
"name": "katuni4ka/tiny-random-latent-consistency-lora",
"convert_args": []
},
"tiny-random-llava": {
"name": "katuni4ka/tiny-random-llava",
"name": "optimum-intel-internal-testing/tiny-random-llava",
"convert_args": ["--trust-remote-code", "--task", "image-text-to-text"]
},
"bge-small-en-v1.5": {
8 changes: 4 additions & 4 deletions tests/python_tests/test_llm_pipeline.py
@@ -99,7 +99,7 @@ def test_batch_string_inputs(model_id, generation_config_dict, prompts, pipeline

@pytest.mark.precommit
def test_batch_size_switch():
-model_id = 'katuni4ka/tiny-random-phi3'
+model_id = 'optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM'
_, _, models_path = download_and_convert_model(model_id)
ov_pipe = create_ov_pipeline(models_path)

@@ -110,7 +110,7 @@ def test_batch_size_switch():

@pytest.mark.precommit
def test_empty_encoded_inputs_throw():
-model_id = 'katuni4ka/tiny-random-phi3'
+model_id = 'optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM'
_, _, models_path = download_and_convert_model(model_id)
ov_pipe = create_ov_pipeline(models_path)

@@ -696,7 +696,7 @@ def run_perf_metrics_collection(model_id, generation_config_dict: dict, prompt:
def test_perf_metrics(generation_config, prompt):
import time
start_time = time.perf_counter()
-model_id = 'katuni4ka/tiny-random-gemma2'
+model_id = 'optimum-intel-internal-testing/tiny-random-gemma2'
perf_metrics = run_perf_metrics_collection(model_id, generation_config, prompt)
total_time = (time.perf_counter() - start_time) * 1000

@@ -778,7 +778,7 @@ class Person(BaseModel):
city: Literal["Dublin", "Dubai", "Munich"]
generation_config.update(dict(structured_output_config=ov_genai.StructuredOutputConfig(json_schema=json.dumps(Person.model_json_schema()))))

-model_id = 'katuni4ka/tiny-random-gemma2'
+model_id = 'optimum-intel-internal-testing/tiny-random-gemma2'
_, _, models_path = download_and_convert_model(model_id)
ov_pipe = create_ov_pipeline(models_path)
perf_metrics = ov_pipe.generate([prompt], **generation_config).perf_metrics
4 changes: 2 additions & 2 deletions tests/python_tests/test_sampling.py
@@ -29,7 +29,7 @@
# "echo_with_generation",
])
def test_basic_stop_criteria(generation_config, prompt):
model_id : str = "katuni4ka/tiny-random-phi3"
model_id : str = "optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM"
generate_and_compare(model_id, [prompt], generation_config)


@@ -71,7 +71,7 @@ def test_stop_strings(generation_config, model_id, pipeline_type):
'I have an interview about product speccing with the company Weekend Health. Give me an example of a question they might ask with regards about a new feature'
])
def test_greedy(generation_config, prompt):
model_id : str = "katuni4ka/tiny-random-phi3"
model_id : str = "optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM"
prompt = prompt.decode('unicode_escape') if isinstance(prompt, bytes) else prompt

generate_and_compare(model=model_id,
2 changes: 1 addition & 1 deletion tests/python_tests/test_stateful_speculative_decoding.py
@@ -71,7 +71,7 @@ def test_string_inputs(main_model, main_device, draft_model, draft_device, promp
def test_perf_metrics():
import time
start_time = time.perf_counter()
-model_id = 'katuni4ka/tiny-random-gemma2'
+model_id = 'optimum-intel-internal-testing/tiny-random-gemma2'
_, _, model_path = download_and_convert_model(model_id)

# Create OpenVINO GenAI pipeline:
2 changes: 1 addition & 1 deletion tests/python_tests/test_structured_output.py
@@ -35,7 +35,7 @@ class RESTAPIResponse(BaseModel):

structured_id_models = [
"TinyLlama/TinyLlama-1.1B-Chat-v1.0",
"katuni4ka/tiny-random-phi3",
"optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM",
]


12 changes: 6 additions & 6 deletions tests/python_tests/test_tokenizer.py
@@ -308,7 +308,7 @@ def test_set_chat_template(ov_hf_tokenizers):
@pytest.mark.parametrize(
"ov_hf_tokenizers",
[
"katuni4ka/tiny-random-phi3",
"optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM",
"TinyLlama/TinyLlama-1.1B-Chat-v1.0",
# ("black-forest-labs/FLUX.1-dev", dict(subfolder="tokenizer")), # FLUX.1-dev has tokenizer in subfolder
],
@@ -420,10 +420,10 @@ def hf_ov_genai_models(request, tmp_path_factory):
@pytest.mark.parametrize(
"hf_ov_genai_models",
[
("katuni4ka/tiny-random-phi3", {"padding_side": None}),
("optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM", {"padding_side": None}),
("TinyLlama/TinyLlama-1.1B-Chat-v1.0", {"padding_side": None}),
("katuni4ka/tiny-random-llava-next", {"padding_side": "right"}),
("katuni4ka/tiny-random-llava-next", {"padding_side": "left"}),
("optimum-intel-internal-testing/tiny-random-llava-next", {"padding_side": "right"}),
("optimum-intel-internal-testing/tiny-random-llava-next", {"padding_side": "left"}),
(
"BAAI/bge-small-en-v1.5",
{"padding_side": None},
@@ -485,8 +485,8 @@ def test_padding(
base_models_for_paired_input_test = [
("answerdotai/ModernBERT-base", {"padding_side": None}),
("TinyLlama/TinyLlama-1.1B-Chat-v1.0", {"padding_side": None}),
("katuni4ka/tiny-random-llava-next", {"padding_side": "right"}),
("katuni4ka/tiny-random-llava-next", {"padding_side": "left"}),
("optimum-intel-internal-testing/tiny-random-llava-next", {"padding_side": "right"}),
("optimum-intel-internal-testing/tiny-random-llava-next", {"padding_side": "left"}),
]

def make_model_params():
98 changes: 49 additions & 49 deletions tests/python_tests/test_vlm_pipeline.py
@@ -80,20 +80,20 @@ class VlmModelInfo:


VIDEO_MODEL_IDS = [
"katuni4ka/tiny-random-llava-next-video",
"optimum-intel-internal-testing/tiny-random-llava-next-video",
]


MODEL_IDS: list[str] = [
"katuni4ka/tiny-random-minicpmv-2_6",
"katuni4ka/tiny-random-phi3-vision",
"katuni4ka/tiny-random-phi-4-multimodal",
"katuni4ka/tiny-random-llava",
"katuni4ka/tiny-random-llava-next",
"katuni4ka/tiny-random-internvl2",
"katuni4ka/tiny-random-qwen2vl",
"katuni4ka/tiny-random-qwen2.5-vl",
"katuni4ka/tiny-random-gemma3",
"optimum-intel-internal-testing/tiny-random-minicpmv-2_6",
"optimum-intel-internal-testing/tiny-random-phi3-vision",
"optimum-intel-internal-testing/tiny-random-phi-4-multimodal",
"optimum-intel-internal-testing/tiny-random-llava",
"optimum-intel-internal-testing/tiny-random-llava-next",
"optimum-intel-internal-testing/tiny-random-internvl2",
"optimum-intel-internal-testing/tiny-random-qwen2vl",
"optimum-intel-internal-testing/tiny-random-qwen2.5-vl",
"optimum-intel-internal-testing/tiny-random-gemma3",
"qnguyen3/nanoLLaVA",
*VIDEO_MODEL_IDS,
]
@@ -106,28 +106,28 @@ class VlmModelInfo:


TAG_GENERATOR_BY_MODEL: dict[str, Callable[[int], str]] = {
"katuni4ka/tiny-random-llava": lambda idx: "<image>",
"katuni4ka/tiny-random-llava-next": lambda idx: "<image>",
"katuni4ka/tiny-random-qwen2vl": lambda idx: "<|vision_start|><|image_pad|><|vision_end|>",
"katuni4ka/tiny-random-qwen2.5-vl": lambda idx: "<|vision_start|><|image_pad|><|vision_end|>",
"katuni4ka/tiny-random-gemma3": lambda idx: "<start_of_image>",
"katuni4ka/tiny-random-internvl2": lambda idx: "<image>\n",
"katuni4ka/tiny-random-minicpmv-2_6": lambda idx: "<image>./</image>\n",
"katuni4ka/tiny-random-phi3-vision": lambda idx: f"<|image_{idx + 1}|>\n",
"katuni4ka/tiny-random-llava-next-video": lambda idx: "<image>\n",
"optimum-intel-internal-testing/tiny-random-llava": lambda idx: "<image>",
"optimum-intel-internal-testing/tiny-random-llava-next": lambda idx: "<image>",
"optimum-intel-internal-testing/tiny-random-qwen2vl": lambda idx: "<|vision_start|><|image_pad|><|vision_end|>",
"optimum-intel-internal-testing/tiny-random-qwen2.5-vl": lambda idx: "<|vision_start|><|image_pad|><|vision_end|>",
"optimum-intel-internal-testing/tiny-random-gemma3": lambda idx: "<start_of_image>",
"optimum-intel-internal-testing/tiny-random-internvl2": lambda idx: "<image>\n",
"optimum-intel-internal-testing/tiny-random-minicpmv-2_6": lambda idx: "<image>./</image>\n",
"optimum-intel-internal-testing/tiny-random-phi3-vision": lambda idx: f"<|image_{idx + 1}|>\n",
"optimum-intel-internal-testing/tiny-random-llava-next-video": lambda idx: "<image>\n",
"qnguyen3/nanoLLaVA": lambda idx: "<image>\n",
}


RESOLUTION_BY_MODEL: dict[str, int | None] = {
"katuni4ka/tiny-random-gemma3": 32,
"optimum-intel-internal-testing/tiny-random-gemma3": 32,
"qnguyen3/nanoLLaVA": 384,
"katuni4ka/tiny-random-llava-next-video": 336,
"optimum-intel-internal-testing/tiny-random-llava-next-video": 336,
}


RESOLUTION_BY_VIDEO_MODEL: dict[str, int | None] = {
"katuni4ka/tiny-random-llava-next-video": 32,
"optimum-intel-internal-testing/tiny-random-llava-next-video": 32,
}


Expand All @@ -152,8 +152,8 @@ class VlmModelInfo:


NPU_UNSUPPORTED_MODELS = {
"katuni4ka/tiny-random-internvl2",
"katuni4ka/tiny-random-gemma3",
"optimum-intel-internal-testing/tiny-random-internvl2",
"optimum-intel-internal-testing/tiny-random-gemma3",
}


Expand All @@ -178,9 +178,9 @@ def _setup_generation_config(


def _get_ov_model(model_id: str) -> str:
if model_id in {"katuni4ka/tiny-random-phi-4-multimodal", "qnguyen3/nanoLLaVA"}:
if model_id in {"optimum-intel-internal-testing/tiny-random-phi-4-multimodal", "qnguyen3/nanoLLaVA"}:
pytest.skip("ValueError: The current version of Transformers does not allow for the export of the model. Maximum required is 4.53.3, got: 4.55.4")
if "katuni4ka/tiny-random-phi3-vision" == model_id:
if "optimum-intel-internal-testing/tiny-random-phi3-vision" == model_id:
pytest.xfail("AttributeError: 'DynamicCache' object has no attribute 'get_usable_length'. Ticket CVS-175110")
ov_cache_models_dir = get_ov_cache_models_dir()
dir_name = str(model_id).replace(os.sep, "_")
@@ -203,10 +203,10 @@ def _get_ov_model(model_id: str) -> str:
export=True,
load_in_8bit=False,
trust_remote_code=model_id in {
"katuni4ka/tiny-random-minicpmv-2_6",
"katuni4ka/tiny-random-internvl2",
"katuni4ka/tiny-random-phi3-vision",
"katuni4ka/tiny-random-phi-4-multimodal",
"optimum-intel-internal-testing/tiny-random-minicpmv-2_6",
"optimum-intel-internal-testing/tiny-random-internvl2",
"optimum-intel-internal-testing/tiny-random-phi3-vision",
"optimum-intel-internal-testing/tiny-random-phi-4-multimodal",
"qnguyen3/nanoLLaVA",
},
)
@@ -809,7 +809,7 @@ def test_perf_metrics(
max_new_tokens = DEFAULT_MAX_NEW_TOKENS

# Using non-cached model to get more accurate load time
model_path = _get_ov_model("katuni4ka/tiny-random-minicpmv-2_6")
model_path = _get_ov_model("optimum-intel-internal-testing/tiny-random-minicpmv-2_6")
start_time = perf_counter_ns()
pipe = VLMPipeline(model_path, "CPU", ATTENTION_BACKEND=backend)
start_generate = perf_counter_ns()
@@ -1147,24 +1147,24 @@ def conversation_requests(


TAG_INSERTED_BY_TEMPLATE = [
("katuni4ka/tiny-random-llava", "PA"),
("katuni4ka/tiny-random-llava-next", "PA"),
("katuni4ka/tiny-random-qwen2vl", "PA"),
("katuni4ka/tiny-random-qwen2.5-vl", "PA"),
("katuni4ka/tiny-random-gemma3", "SDPA"),
("optimum-intel-internal-testing/tiny-random-llava", "PA"),
("optimum-intel-internal-testing/tiny-random-llava-next", "PA"),
("optimum-intel-internal-testing/tiny-random-qwen2vl", "PA"),
("optimum-intel-internal-testing/tiny-random-qwen2.5-vl", "PA"),
("optimum-intel-internal-testing/tiny-random-gemma3", "SDPA"),
("qnguyen3/nanoLLaVA", "PA"),
("katuni4ka/tiny-random-llava-next-video", "PA"),
("optimum-intel-internal-testing/tiny-random-llava-next-video", "PA"),
]


IMAGE_ID_IGNORANT_MODELS_TO_TAG = TAG_INSERTED_BY_TEMPLATE + [
("katuni4ka/tiny-random-internvl2", "PA"),
("optimum-intel-internal-testing/tiny-random-internvl2", "PA"),
]


MODELS_TO_TAG = IMAGE_ID_IGNORANT_MODELS_TO_TAG + [
("katuni4ka/tiny-random-minicpmv-2_6", "PA"),
("katuni4ka/tiny-random-phi3-vision", "PA"),
("optimum-intel-internal-testing/tiny-random-minicpmv-2_6", "PA"),
("optimum-intel-internal-testing/tiny-random-phi3-vision", "PA"),
]


@@ -1395,20 +1395,20 @@ def test_model_tags_missing_native(ov_pipe_model: VlmModelInfo):
@pytest.mark.parametrize(
"ov_pipe_model",
[
pytest.param(("katuni4ka/tiny-random-qwen2vl","SDPA")),
pytest.param(("katuni4ka/tiny-random-qwen2vl", "PA")),
pytest.param(("katuni4ka/tiny-random-qwen2.5-vl", "SDPA")),
pytest.param(("katuni4ka/tiny-random-qwen2.5-vl", "PA"), marks=pytest.mark.xfail(reason="CVS-167316")),
pytest.param(("optimum-intel-internal-testing/tiny-random-qwen2vl","SDPA")),
pytest.param(("optimum-intel-internal-testing/tiny-random-qwen2vl", "PA")),
pytest.param(("optimum-intel-internal-testing/tiny-random-qwen2.5-vl", "SDPA")),
pytest.param(("optimum-intel-internal-testing/tiny-random-qwen2.5-vl", "PA"), marks=pytest.mark.xfail(reason="CVS-167316")),
(
pytest.param(("katuni4ka/tiny-random-gemma3", "SDPA"), marks=pytest.mark.xfail(reason=GEMMA3_MACOS_XFAIL_REASON))
pytest.param(("optimum-intel-internal-testing/tiny-random-gemma3", "SDPA"), marks=pytest.mark.xfail(reason=GEMMA3_MACOS_XFAIL_REASON))
if sys.platform == "darwin"
else pytest.param(("katuni4ka/tiny-random-gemma3", "SDPA"))
else pytest.param(("optimum-intel-internal-testing/tiny-random-gemma3", "SDPA"))
),
pytest.param(("katuni4ka/tiny-random-gemma3", "PA"), marks=pytest.mark.xfail(reason="CVS-171180")),
pytest.param(("optimum-intel-internal-testing/tiny-random-gemma3", "PA"), marks=pytest.mark.xfail(reason="CVS-171180")),
pytest.param(("qnguyen3/nanoLLaVA", "SDPA")),
pytest.param(("qnguyen3/nanoLLaVA", "PA")),
pytest.param(("katuni4ka/tiny-random-llava-next-video", "SDPA")),
pytest.param(("katuni4ka/tiny-random-llava-next-video", "PA")),
pytest.param(("optimum-intel-internal-testing/tiny-random-llava-next-video", "SDPA")),
pytest.param(("optimum-intel-internal-testing/tiny-random-llava-next-video", "PA")),
],
ids=lambda p: f"{p[0]}/{p[1]}",
indirect=["ov_pipe_model"],
10 changes: 5 additions & 5 deletions tools/who_what_benchmark/tests/test_cli_image.py
@@ -13,10 +13,10 @@
logger = logging.getLogger(__name__)

MODEL_CACHE = tempfile.mkdtemp()
OV_IMAGE_MODELS = ["echarlaix/tiny-random-stable-diffusion-xl",
"yujiepan/stable-diffusion-3-tiny-random",
"katuni4ka/tiny-random-flux",
"katuni4ka/tiny-random-flux-fill"]
OV_IMAGE_MODELS = ["optimum-intel-internal-testing/tiny-random-stable-diffusion-xl",
"optimum-intel-internal-testing/stable-diffusion-3-tiny-random",
"optimum-intel-internal-testing/tiny-random-flux",
"optimum-intel-internal-testing/tiny-random-flux-fill"]


def run_wwb(args):
@@ -111,7 +111,7 @@ def test_image_model_genai(model_id, model_type, tmp_path):
pytest.skip(reason="FLUX-Fill is supported as inpainting only")
if model_type == "image-inpainting":
pytest.xfail("Segfault. Ticket 170877")
if model_id == "katuni4ka/tiny-random-flux" and model_type == "image-to-image":
if model_id == "optimum-intel-internal-testing/tiny-random-flux" and model_type == "image-to-image":
pytest.xfail("Randomly wwb died with <Signals.SIGABRT: 6>. Ticket 170878")

mac_arm64_skip = any(substring in model_id for substring in ('stable-diffusion-xl',
2 changes: 1 addition & 1 deletion tools/who_what_benchmark/tests/test_cli_vlm.py
@@ -96,7 +96,7 @@ def run_test(model_id, model_type, optimum_threshold, genai_threshold, tmp_path)
@pytest.mark.parametrize(
("model_id", "model_type"),
[
("katuni4ka/tiny-random-llava", "visual-text"),
("optimum-intel-internal-testing/tiny-random-llava", "visual-text"),
],
)
def test_vlm_basic(model_id, model_type, tmp_path):