Commit b8a4572

[Misc] Use helper function to generate dummy messages in OpenAI MM tests (vllm-project#26875)
Signed-off-by: DarkLight1337 <[email protected]>
1 parent 302ef40 commit b8a4572

3 files changed: +91 −176 lines changed


tests/entrypoints/openai/test_audio.py

Lines changed: 26 additions & 43 deletions
@@ -53,22 +53,35 @@ def base64_encoded_audio() -> dict[str, str]:
     }
 
 
-@pytest.mark.asyncio
-@pytest.mark.parametrize("model_name", [MODEL_NAME])
-@pytest.mark.parametrize("audio_url", [TEST_AUDIO_URLS[0]])
-async def test_single_chat_session_audio(
-    client: openai.AsyncOpenAI, model_name: str, audio_url: str
+def dummy_messages_from_audio_url(
+    audio_urls: str | list[str],
+    content_text: str = "What's happening in this audio?",
 ):
-    messages = [
+    if isinstance(audio_urls, str):
+        audio_urls = [audio_urls]
+
+    return [
         {
             "role": "user",
             "content": [
-                {"type": "audio_url", "audio_url": {"url": audio_url}},
-                {"type": "text", "text": "What's happening in this audio?"},
+                *(
+                    {"type": "audio_url", "audio_url": {"url": audio_url}}
+                    for audio_url in audio_urls
+                ),
+                {"type": "text", "text": content_text},
             ],
         }
     ]
 
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("model_name", [MODEL_NAME])
+@pytest.mark.parametrize("audio_url", [TEST_AUDIO_URLS[0]])
+async def test_single_chat_session_audio(
+    client: openai.AsyncOpenAI, model_name: str, audio_url: str
+):
+    messages = dummy_messages_from_audio_url(audio_url)
+
     # test single completion
     chat_completion = await client.chat.completions.create(
         model=model_name,
@@ -138,20 +151,9 @@ async def test_single_chat_session_audio_base64encoded(
     audio_url: str,
     base64_encoded_audio: dict[str, str],
 ):
-    messages = [
-        {
-            "role": "user",
-            "content": [
-                {
-                    "type": "audio_url",
-                    "audio_url": {
-                        "url": f"data:audio/wav;base64,{base64_encoded_audio[audio_url]}"  # noqa: E501
-                    },
-                },
-                {"type": "text", "text": "What's happening in this audio?"},
-            ],
-        }
-    ]
+    messages = dummy_messages_from_audio_url(
+        f"data:audio/wav;base64,{base64_encoded_audio[audio_url]}"
+    )
 
     # test single completion
     chat_completion = await client.chat.completions.create(
@@ -252,15 +254,7 @@ async def test_single_chat_session_input_audio(
 async def test_chat_streaming_audio(
     client: openai.AsyncOpenAI, model_name: str, audio_url: str
 ):
-    messages = [
-        {
-            "role": "user",
-            "content": [
-                {"type": "audio_url", "audio_url": {"url": audio_url}},
-                {"type": "text", "text": "What's happening in this audio?"},
-            ],
-        }
-    ]
+    messages = dummy_messages_from_audio_url(audio_url)
 
     # test single completion
     chat_completion = await client.chat.completions.create(
@@ -365,18 +359,7 @@ async def test_chat_streaming_input_audio(
 async def test_multi_audio_input(
     client: openai.AsyncOpenAI, model_name: str, audio_urls: list[str]
 ):
-    messages = [
-        {
-            "role": "user",
-            "content": [
-                *(
-                    {"type": "audio_url", "audio_url": {"url": audio_url}}
-                    for audio_url in audio_urls
-                ),
-                {"type": "text", "text": "What's happening in this audio?"},
-            ],
-        }
-    ]
+    messages = dummy_messages_from_audio_url(audio_urls)
 
     if len(audio_urls) > MAXIMUM_AUDIOS:
         with pytest.raises(openai.BadRequestError):  # test multi-audio input
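
For reference, a minimal sketch (not part of the commit) of what the new dummy_messages_from_audio_url helper returns inside test_audio.py when called with a single URL string; the URL below is a placeholder:

    # Illustrative only; "https://example.com/sample.wav" is a placeholder URL.
    messages = dummy_messages_from_audio_url("https://example.com/sample.wav")
    # The single string is normalized to a one-element list, so `messages` equals:
    # [
    #     {
    #         "role": "user",
    #         "content": [
    #             {"type": "audio_url", "audio_url": {"url": "https://example.com/sample.wav"}},
    #             {"type": "text", "text": "What's happening in this audio?"},
    #         ],
    #     }
    # ]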

tests/entrypoints/openai/test_video.py

Lines changed: 31 additions & 66 deletions
@@ -55,22 +55,35 @@ def base64_encoded_video() -> dict[str, str]:
     }
 
 
-@pytest.mark.asyncio
-@pytest.mark.parametrize("model_name", [MODEL_NAME])
-@pytest.mark.parametrize("video_url", TEST_VIDEO_URLS)
-async def test_single_chat_session_video(
-    client: openai.AsyncOpenAI, model_name: str, video_url: str
+def dummy_messages_from_video_url(
+    video_urls: str | list[str],
+    content_text: str = "What's in this video?",
 ):
-    messages = [
+    if isinstance(video_urls, str):
+        video_urls = [video_urls]
+
+    return [
         {
             "role": "user",
             "content": [
-                {"type": "video_url", "video_url": {"url": video_url}},
-                {"type": "text", "text": "What's in this video?"},
+                *(
+                    {"type": "video_url", "video_url": {"url": video_url}}
+                    for video_url in video_urls
+                ),
+                {"type": "text", "text": content_text},
             ],
         }
     ]
 
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("model_name", [MODEL_NAME])
+@pytest.mark.parametrize("video_url", TEST_VIDEO_URLS)
+async def test_single_chat_session_video(
+    client: openai.AsyncOpenAI, model_name: str, video_url: str
+):
+    messages = dummy_messages_from_video_url(video_url)
+
     # test single completion
     chat_completion = await client.chat.completions.create(
         model=model_name,
@@ -137,15 +150,7 @@ async def test_error_on_invalid_video_url_type(
 async def test_single_chat_session_video_beamsearch(
     client: openai.AsyncOpenAI, model_name: str, video_url: str
 ):
-    messages = [
-        {
-            "role": "user",
-            "content": [
-                {"type": "video_url", "video_url": {"url": video_url}},
-                {"type": "text", "text": "What's in this video?"},
-            ],
-        }
-    ]
+    messages = dummy_messages_from_video_url(video_url)
 
     chat_completion = await client.chat.completions.create(
         model=model_name,
@@ -172,20 +177,9 @@ async def test_single_chat_session_video_base64encoded(
     video_url: str,
     base64_encoded_video: dict[str, str],
 ):
-    messages = [
-        {
-            "role": "user",
-            "content": [
-                {
-                    "type": "video_url",
-                    "video_url": {
-                        "url": f"data:video/jpeg;base64,{base64_encoded_video[video_url]}"  # noqa: E501
-                    },
-                },
-                {"type": "text", "text": "What's in this video?"},
-            ],
-        }
-    ]
+    messages = dummy_messages_from_video_url(
+        f"data:video/jpeg;base64,{base64_encoded_video[video_url]}"
+    )
 
     # test single completion
     chat_completion = await client.chat.completions.create(
@@ -231,20 +225,10 @@ async def test_single_chat_session_video_base64encoded_beamsearch(
     video_url: str,
     base64_encoded_video: dict[str, str],
 ):
-    messages = [
-        {
-            "role": "user",
-            "content": [
-                {
-                    "type": "video_url",
-                    "video_url": {
-                        "url": f"data:video/jpeg;base64,{base64_encoded_video[video_url]}"  # noqa: E501
-                    },
-                },
-                {"type": "text", "text": "What's in this video?"},
-            ],
-        }
-    ]
+    messages = dummy_messages_from_video_url(
+        f"data:video/jpeg;base64,{base64_encoded_video[video_url]}"
+    )
+
     chat_completion = await client.chat.completions.create(
         model=model_name,
         messages=messages,
@@ -265,15 +249,7 @@ async def test_single_chat_session_video_base64encoded_beamsearch(
 async def test_chat_streaming_video(
     client: openai.AsyncOpenAI, model_name: str, video_url: str
 ):
-    messages = [
-        {
-            "role": "user",
-            "content": [
-                {"type": "video_url", "video_url": {"url": video_url}},
-                {"type": "text", "text": "What's in this video?"},
-            ],
-        }
-    ]
+    messages = dummy_messages_from_video_url(video_url)
 
     # test single completion
     chat_completion = await client.chat.completions.create(
@@ -318,18 +294,7 @@ async def test_chat_streaming_video(
 async def test_multi_video_input(
     client: openai.AsyncOpenAI, model_name: str, video_urls: list[str]
 ):
-    messages = [
-        {
-            "role": "user",
-            "content": [
-                *(
-                    {"type": "video_url", "video_url": {"url": video_url}}
-                    for video_url in video_urls
-                ),
-                {"type": "text", "text": "What's in this video?"},
-            ],
-        }
-    ]
+    messages = dummy_messages_from_video_url(video_urls)
 
     if len(video_urls) > MAXIMUM_VIDEOS:
         with pytest.raises(openai.BadRequestError):  # test multi-video input
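
Similarly, a sketch (not part of the commit) of the video helper in test_video.py called with a list of URLs and a custom prompt; both URLs are placeholders:

    # Illustrative only; both URLs are placeholders.
    messages = dummy_messages_from_video_url(
        ["https://example.com/a.mp4", "https://example.com/b.mp4"],
        content_text="Describe both videos.",
    )
    # The generator unpacking yields one video_url part per URL, then the text part,
    # so messages[0]["content"] equals:
    # [
    #     {"type": "video_url", "video_url": {"url": "https://example.com/a.mp4"}},
    #     {"type": "video_url", "video_url": {"url": "https://example.com/b.mp4"}},
    #     {"type": "text", "text": "Describe both videos."},
    # ]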
