2 changes: 1 addition & 1 deletion tests/python_tests/data/models.py
@@ -8,7 +8,7 @@

def get_models_list():
model_ids = [
"katuni4ka/tiny-random-phi3",
"optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM",
]
if pytest.selected_model_ids:
model_ids = [model_id for model_id in model_ids if model_id in pytest.selected_model_ids.split(' ')]
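For context, the filter above keeps only models named in the space-separated pytest.selected_model_ids string. A minimal standalone sketch of the same logic (the helper name filter_models is hypothetical, not part of the test suite):

def filter_models(model_ids: list[str], selected: str | None) -> list[str]:
    # Mirrors get_models_list(): a space-separated allowlist narrows the set.
    if not selected:
        return model_ids
    allowed = set(selected.split(' '))
    return [m for m in model_ids if m in allowed]

assert filter_models(
    ["optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM"],
    "optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM",
) == ["optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM"]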
4 changes: 2 additions & 2 deletions tests/python_tests/data/tokenizer_configs.py
@@ -670,7 +670,7 @@ def get_tokenizer_configs():
"unk_token": "<unk>",
"chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>'+ '\n' + message['content'] + '\n'}}{% elif message['role'] == 'user' %}{{'<|user|>' + '\n' + message['content'] + '\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>' + '\n' + message['content'] + '<|endoftext|>' + ('' if loop.last else '\n')}}{% endif %}{% endfor %}"
},
"katuni4ka/tiny-random-phi3": {
"optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM": {
"bos_token": "<s>",
"eos_token": "<|endoftext|>",
"pad_token": "<|endoftext|>",
@@ -712,7 +712,7 @@ def get_tokenizer_configs():
"unk_token": "<unk>",
"chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '### User:\n' + message['content'] }}\n{% elif message['role'] == 'system' %}\n{{ '### System:\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '### Assistant:\n' + message['content'] }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '### Assistant:' }}\n{% endif %}\n{% endfor %}"
},
"katuni4ka/tiny-random-minicpm": {
"optimum-intel-internal-testing/tiny-random-minicpm": {
"bos_token": "<s>",
"eos_token": "</s>",
"pad_token": None,
6 changes: 3 additions & 3 deletions tests/python_tests/samples/conftest.py
@@ -97,7 +97,7 @@
"convert_args": []
},
"tiny-random-minicpmv-2_6": {
"name": "katuni4ka/tiny-random-minicpmv-2_6",
"name": "optimum-intel-internal-testing/tiny-random-minicpmv-2_6",
"convert_args": ['--trust-remote-code', "--task", "image-text-to-text"]
},
"InternVL2-1B": {
@@ -121,15 +121,15 @@
"convert_args": ["--task", "text-generation-with-past", "--weight-format", "int8"]
},
"tiny-random-latent-consistency": {
"name": "echarlaix/tiny-random-latent-consistency",
"name": "optimum-intel-internal-testing/tiny-random-latent-consistency",
"convert_args": ['--trust-remote-code', '--weight-format', 'fp16']
},
"tiny-random-latent-consistency-lora": {
"name": "katuni4ka/tiny-random-latent-consistency-lora",
"convert_args": []
},
"tiny-random-llava": {
"name": "katuni4ka/tiny-random-llava",
"name": "optimum-intel-internal-testing/tiny-random-llava",
"convert_args": ["--trust-remote-code", "--task", "image-text-to-text"]
},
"bge-small-en-v1.5": {
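Note: the convert_args in this registry are presumably forwarded to optimum-cli export openvino when a fixture converts each model; the exact call site is outside this diff. A hedged sketch of such an invocation (the convert_model helper and output path are assumptions):

import subprocess

def convert_model(name: str, convert_args: list[str], out_dir: str) -> None:
    # optimum-cli export openvino accepts --trust-remote-code, --task,
    # and --weight-format, matching the convert_args used above.
    cmd = ["optimum-cli", "export", "openvino", "--model", name, *convert_args, out_dir]
    subprocess.run(cmd, check=True)

convert_model(
    "optimum-intel-internal-testing/tiny-random-llava",
    ["--trust-remote-code", "--task", "image-text-to-text"],
    "./tiny-random-llava-ov",
)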
32 changes: 19 additions & 13 deletions tests/python_tests/test_llm_pipeline.py
@@ -99,7 +99,7 @@ def test_batch_string_inputs(model_id, generation_config_dict, prompts, pipeline

@pytest.mark.precommit
def test_batch_size_switch():
model_id = 'katuni4ka/tiny-random-phi3'
model_id = 'optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM'
_, _, models_path = download_and_convert_model(model_id)
ov_pipe = create_ov_pipeline(models_path)

Expand All @@ -110,7 +110,7 @@ def test_batch_size_switch():

@pytest.mark.precommit
def test_empty_encoded_inputs_throw():
model_id = 'katuni4ka/tiny-random-phi3'
model_id = 'optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM'
_, _, models_path = download_and_convert_model(model_id)
ov_pipe = create_ov_pipeline(models_path)

@@ -645,37 +645,43 @@ def test_pipeline_validates_generation_config(model_id):
# Work with Unicode in Python API
#

# Model, prompt, and max_new_tokens that generate an unfinished UTF-8 string.
UNICODE_PYBIND_DECODING_TEST_CASES: list[tuple[str, str, int]] = [
("optimum-intel-internal-testing/tiny-random-PhiForCausalLM", ",", 3)
]


@pytest.mark.precommit
@pytest.mark.parametrize("model_id", get_models_list())
def test_unicode_pybind_decoding_one_string(model_id):
@pytest.mark.parametrize("model_id,prompt,max_new_tokens", UNICODE_PYBIND_DECODING_TEST_CASES)
def test_unicode_pybind_decoding_one_string(model_id: str, prompt: str, max_new_tokens: int):
# On this model this prompt generates an unfinished UTF-8 string.
# Test that pybind will not fail.
_, _, models_path = download_and_convert_model(model_id)
ov_pipe = create_ov_pipeline(models_path)
res_str = ov_pipe.generate(',', max_new_tokens=4, apply_chat_template=False)
res_str = ov_pipe.generate(prompt, max_new_tokens=max_new_tokens, apply_chat_template=False)
assert '�' == res_str[-1]


@pytest.mark.precommit
@pytest.mark.parametrize("model_id", get_models_list())
def test_unicode_pybind_decoding_batched(model_id):
@pytest.mark.parametrize("model_id,prompt,max_new_tokens", UNICODE_PYBIND_DECODING_TEST_CASES)
def test_unicode_pybind_decoding_batched(model_id: str, prompt: str, max_new_tokens: int):
# On this model this prompt generates an unfinished UTF-8 string.
# Test that pybind will not fail.
_, _, models_path = download_and_convert_model(model_id)
ov_pipe = create_ov_pipeline(models_path)
res_str = ov_pipe.generate([","], max_new_tokens=4, apply_chat_template=False)
res_str = ov_pipe.generate([prompt], max_new_tokens=max_new_tokens, apply_chat_template=False)
assert '�' == res_str.texts[0][-1]


@pytest.mark.precommit
@pytest.mark.parametrize("model_id", get_models_list())
def test_unicode_pybind_decoding_one_string_streamer(model_id):
@pytest.mark.parametrize("model_id,prompt,max_new_tokens", UNICODE_PYBIND_DECODING_TEST_CASES)
def test_unicode_pybind_decoding_one_string_streamer(model_id: str, prompt: str, max_new_tokens: int):
# On this model this prompt generates an unfinished UTF-8 string
# and streams it. Test that pybind will not fail while we pass the string to Python.
_, _, models_path = download_and_convert_model(model_id)
ov_pipe = create_ov_pipeline(models_path)
res_str = []
ov_pipe.generate(",", max_new_tokens=4, apply_chat_template=False, streamer=lambda x: res_str.append(x))
ov_pipe.generate(prompt, max_new_tokens=max_new_tokens, apply_chat_template=False, streamer=lambda x: res_str.append(x))
assert '�' == ''.join(res_str)[-1]
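The trailing '�' these three tests assert is U+FFFD, the Unicode replacement character: when generation stops partway through a multi-byte UTF-8 character, lenient decoding of the truncated byte sequence ends in it. A pipeline-independent illustration in plain Python:

# "猫" encodes to three UTF-8 bytes; dropping the last one leaves an
# incomplete sequence, which errors="replace" decodes to U+FFFD.
truncated = "猫".encode("utf-8")[:-1]
assert truncated.decode("utf-8", errors="replace") == "\ufffd"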

#
Expand All @@ -696,7 +702,7 @@ def run_perf_metrics_collection(model_id, generation_config_dict: dict, prompt:
def test_perf_metrics(generation_config, prompt):
import time
start_time = time.perf_counter()
model_id = 'katuni4ka/tiny-random-gemma2'
model_id = 'optimum-intel-internal-testing/tiny-random-gemma2'
perf_metrics = run_perf_metrics_collection(model_id, generation_config, prompt)
total_time = (time.perf_counter() - start_time) * 1000

@@ -778,7 +784,7 @@ class Person(BaseModel):
city: Literal["Dublin", "Dubai", "Munich"]
generation_config.update(dict(structured_output_config=ov_genai.StructuredOutputConfig(json_schema=json.dumps(Person.model_json_schema()))))

model_id = 'katuni4ka/tiny-random-gemma2'
model_id = 'optimum-intel-internal-testing/tiny-random-gemma2'
_, _, models_path = download_and_convert_model(model_id)
ov_pipe = create_ov_pipeline(models_path)
perf_metrics = ov_pipe.generate([prompt], **generation_config).perf_metrics
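On the structured-output hunk above: Person.model_json_schema() is what turns the Pydantic model into the JSON schema that StructuredOutputConfig presumably enforces during sampling. A small standalone sketch (Pydantic only; no pipeline required):

import json
from typing import Literal
from pydantic import BaseModel

class Person(BaseModel):
    city: Literal["Dublin", "Dubai", "Munich"]

# The schema constrains "city" to the three literal values; passing
# json.dumps(schema) as json_schema is what the test above does.
schema = Person.model_json_schema()
print(json.dumps(schema))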
4 changes: 2 additions & 2 deletions tests/python_tests/test_sampling.py
@@ -29,7 +29,7 @@
# "echo_with_generation",
])
def test_basic_stop_criteria(generation_config, prompt):
model_id : str = "katuni4ka/tiny-random-phi3"
model_id : str = "optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM"
generate_and_compare(model_id, [prompt], generation_config)


@@ -71,7 +71,7 @@ def test_stop_strings(generation_config, model_id, pipeline_type):
'I have an interview about product speccing with the company Weekend Health. Give me an example of a question they might ask with regards about a new feature'
])
def test_greedy(generation_config, prompt):
model_id : str = "katuni4ka/tiny-random-phi3"
model_id : str = "optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM"
prompt = prompt.decode('unicode_escape') if isinstance(prompt, bytes) else prompt

generate_and_compare(model=model_id,
2 changes: 1 addition & 1 deletion tests/python_tests/test_stateful_speculative_decoding.py
@@ -71,7 +71,7 @@ def test_string_inputs(main_model, main_device, draft_model, draft_device, promp
def test_perf_metrics():
import time
start_time = time.perf_counter()
model_id = 'katuni4ka/tiny-random-gemma2'
model_id = 'optimum-intel-internal-testing/tiny-random-gemma2'
_, _, model_path = download_and_convert_model(model_id)

# Create OpenVINO GenAI pipeline:
2 changes: 1 addition & 1 deletion tests/python_tests/test_structured_output.py
@@ -35,7 +35,7 @@ class RESTAPIResponse(BaseModel):

structured_id_models = [
"TinyLlama/TinyLlama-1.1B-Chat-v1.0",
"katuni4ka/tiny-random-phi3",
"optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM",
]


12 changes: 6 additions & 6 deletions tests/python_tests/test_tokenizer.py
@@ -308,7 +308,7 @@ def test_set_chat_template(ov_hf_tokenizers):
@pytest.mark.parametrize(
"ov_hf_tokenizers",
[
"katuni4ka/tiny-random-phi3",
"optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM",
"TinyLlama/TinyLlama-1.1B-Chat-v1.0",
# ("black-forest-labs/FLUX.1-dev", dict(subfolder="tokenizer")), # FLUX.1-dev has tokenizer in subfolder
],
@@ -420,10 +420,10 @@ def hf_ov_genai_models(request, tmp_path_factory):
@pytest.mark.parametrize(
"hf_ov_genai_models",
[
("katuni4ka/tiny-random-phi3", {"padding_side": None}),
("optimum-intel-internal-testing/tiny-random-Phi3ForCausalLM", {"padding_side": None}),
("TinyLlama/TinyLlama-1.1B-Chat-v1.0", {"padding_side": None}),
("katuni4ka/tiny-random-llava-next", {"padding_side": "right"}),
("katuni4ka/tiny-random-llava-next", {"padding_side": "left"}),
("optimum-intel-internal-testing/tiny-random-llava-next", {"padding_side": "right"}),
("optimum-intel-internal-testing/tiny-random-llava-next", {"padding_side": "left"}),
(
"BAAI/bge-small-en-v1.5",
{"padding_side": None},
@@ -485,8 +485,8 @@ def test_padding(
base_models_for_paired_input_test = [
("answerdotai/ModernBERT-base", {"padding_side": None}),
("TinyLlama/TinyLlama-1.1B-Chat-v1.0", {"padding_side": None}),
("katuni4ka/tiny-random-llava-next", {"padding_side": "right"}),
("katuni4ka/tiny-random-llava-next", {"padding_side": "left"}),
("optimum-intel-internal-testing/tiny-random-llava-next", {"padding_side": "right"}),
("optimum-intel-internal-testing/tiny-random-llava-next", {"padding_side": "left"}),
]
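For reference, padding_side in these parameter tuples matches the Hugging Face tokenizer kwarg of the same name; assuming the fixture forwards it to AutoTokenizer, a minimal sketch:

from transformers import AutoTokenizer

# padding_side="left" pads batch entries on the left, which decoder-only
# models typically require; None leaves the tokenizer's default.
tok = AutoTokenizer.from_pretrained(
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0", padding_side="left"
)
batch = tok(["hi", "a longer prompt"], padding=True, return_tensors="pt")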

def make_model_params():