
Commit 435fa95

Authored by reidliu41
[Frontend] add run batch to CLI (vllm-project#18804)
Signed-off-by: reidliu41 <[email protected]>
Co-authored-by: reidliu41 <[email protected]>
1 parent: 4c2b38c · commit 435fa95

File tree (5 files changed, +110 -22 lines):

- examples/offline_inference/openai_batch/README.md
- tests/entrypoints/openai/test_run_batch.py
- vllm/entrypoints/cli/main.py
- vllm/entrypoints/cli/run_batch.py
- vllm/entrypoints/openai/run_batch.py

examples/offline_inference/openai_batch/README.md

Lines changed: 35 additions & 2 deletions
@@ -48,7 +48,19 @@ The batch running tool is designed to be used from the command line.
 You can run the batch with the following command, which will write its results to a file called `results.jsonl`
 
 ```console
-python -m vllm.entrypoints.openai.run_batch -i offline_inference/openai_batch/openai_example_batch.jsonl -o results.jsonl --model meta-llama/Meta-Llama-3-8B-Instruct
+python -m vllm.entrypoints.openai.run_batch \
+    -i offline_inference/openai_batch/openai_example_batch.jsonl \
+    -o results.jsonl \
+    --model meta-llama/Meta-Llama-3-8B-Instruct
+```
+
+or use command-line:
+
+```console
+vllm run-batch \
+    -i offline_inference/openai_batch/openai_example_batch.jsonl \
+    -o results.jsonl \
+    --model meta-llama/Meta-Llama-3-8B-Instruct
 ```
 
 ### Step 3: Check your results
@@ -68,7 +80,19 @@ The batch runner supports remote input and output urls that are accessible via http/https.
 For example, to run against our example input file located at `https://raw.githubusercontent.com/vllm-project/vllm/main/examples/offline_inference/openai_batch/openai_example_batch.jsonl`, you can run
 
 ```console
-python -m vllm.entrypoints.openai.run_batch -i https://raw.githubusercontent.com/vllm-project/vllm/main/examples/offline_inference/openai_batch/openai_example_batch.jsonl -o results.jsonl --model meta-llama/Meta-Llama-3-8B-Instruct
+python -m vllm.entrypoints.openai.run_batch \
+    -i https://raw.githubusercontent.com/vllm-project/vllm/main/examples/offline_inference/openai_batch/openai_example_batch.jsonl \
+    -o results.jsonl \
+    --model meta-llama/Meta-Llama-3-8B-Instruct
+```
+
+or use command-line:
+
+```console
+vllm run-batch \
+    -i https://raw.githubusercontent.com/vllm-project/vllm/main/examples/offline_inference/openai_batch/openai_example_batch.jsonl \
+    -o results.jsonl \
+    --model meta-llama/Meta-Llama-3-8B-Instruct
 ```
 
 ## Example 3: Integrating with AWS S3
@@ -164,6 +188,15 @@ python -m vllm.entrypoints.openai.run_batch \
     --model --model meta-llama/Meta-Llama-3-8B-Instruct
 ```
 
+or use command-line:
+
+```console
+vllm run-batch \
+    -i "https://s3.us-west-2.amazonaws.com/MY_BUCKET/MY_INPUT_FILE.jsonl?AWSAccessKeyId=ABCDEFGHIJKLMNOPQRST&Signature=abcdefghijklmnopqrstuvwxyz12345&Expires=1715800091" \
+    -o "https://s3.us-west-2.amazonaws.com/MY_BUCKET/MY_OUTPUT_FILE.jsonl?AWSAccessKeyId=ABCDEFGHIJKLMNOPQRST&Signature=abcdefghijklmnopqrstuvwxyz12345&Expires=1715800091" \
+    --model --model meta-llama/Meta-Llama-3-8B-Instruct
+```
+
 ### Step 4: View your results
 
 Your results are now on S3. You can view them in your terminal by running
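
For context on the batch files these commands read and write, below is a minimal sketch, not part of this commit, of producing one input line and consuming the output in Python. It assumes the OpenAI-style batch request shape used by vLLM's `openai_example_batch.jsonl` (`custom_id`, `method`, `url`, `body`); the file names, prompt, and the `response` field access are illustrative assumptions.

```python
import json

# One OpenAI-style batch request per line (JSONL). Field names follow the
# shape used by openai_example_batch.jsonl; the prompt and file names are
# illustrative only.
request = {
    "custom_id": "request-1",
    "method": "POST",
    "url": "/v1/chat/completions",
    "body": {
        "model": "meta-llama/Meta-Llama-3-8B-Instruct",
        "messages": [{"role": "user", "content": "Hello world!"}],
        "max_tokens": 64,
    },
}

with open("my_batch.jsonl", "w") as f:
    f.write(json.dumps(request) + "\n")

# After `vllm run-batch -i my_batch.jsonl -o results.jsonl --model ...`,
# each output line is expected to echo the custom_id alongside the response.
with open("results.jsonl") as f:
    for line in f:
        result = json.loads(line)
        print(result["custom_id"], result.get("response"))
```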

tests/entrypoints/openai/test_run_batch.py

Lines changed: 10 additions & 16 deletions
@@ -2,7 +2,6 @@
 
 import json
 import subprocess
-import sys
 import tempfile
 
 from vllm.entrypoints.openai.protocol import BatchRequestOutput
@@ -35,9 +34,8 @@ def test_empty_file():
         input_file.write("")
         input_file.flush()
         proc = subprocess.Popen([
-            sys.executable, "-m", "vllm.entrypoints.openai.run_batch", "-i",
-            input_file.name, "-o", output_file.name, "--model",
-            "intfloat/multilingual-e5-small"
+            "vllm", "run-batch", "-i", input_file.name, "-o", output_file.name,
+            "--model", "intfloat/multilingual-e5-small"
         ], )
         proc.communicate()
         proc.wait()
@@ -54,9 +52,8 @@ def test_completions():
         input_file.write(INPUT_BATCH)
         input_file.flush()
         proc = subprocess.Popen([
-            sys.executable, "-m", "vllm.entrypoints.openai.run_batch", "-i",
-            input_file.name, "-o", output_file.name, "--model",
-            "NousResearch/Meta-Llama-3-8B-Instruct"
+            "vllm", "run-batch", "-i", input_file.name, "-o", output_file.name,
+            "--model", "NousResearch/Meta-Llama-3-8B-Instruct"
         ], )
         proc.communicate()
         proc.wait()
@@ -79,9 +76,8 @@ def test_completions_invalid_input():
         input_file.write(INVALID_INPUT_BATCH)
         input_file.flush()
         proc = subprocess.Popen([
-            sys.executable, "-m", "vllm.entrypoints.openai.run_batch", "-i",
-            input_file.name, "-o", output_file.name, "--model",
-            "NousResearch/Meta-Llama-3-8B-Instruct"
+            "vllm", "run-batch", "-i", input_file.name, "-o", output_file.name,
+            "--model", "NousResearch/Meta-Llama-3-8B-Instruct"
         ], )
         proc.communicate()
         proc.wait()
@@ -95,9 +91,8 @@ def test_embeddings():
         input_file.write(INPUT_EMBEDDING_BATCH)
         input_file.flush()
         proc = subprocess.Popen([
-            sys.executable, "-m", "vllm.entrypoints.openai.run_batch", "-i",
-            input_file.name, "-o", output_file.name, "--model",
-            "intfloat/multilingual-e5-small"
+            "vllm", "run-batch", "-i", input_file.name, "-o", output_file.name,
+            "--model", "intfloat/multilingual-e5-small"
        ], )
         proc.communicate()
         proc.wait()
@@ -117,9 +112,8 @@ def test_score():
         input_file.write(INPUT_SCORE_BATCH)
         input_file.flush()
         proc = subprocess.Popen([
-            sys.executable,
-            "-m",
-            "vllm.entrypoints.openai.run_batch",
+            "vllm",
+            "run-batch",
             "-i",
             input_file.name,
             "-o",

vllm/entrypoints/cli/main.py

Lines changed: 2 additions & 0 deletions
@@ -7,6 +7,7 @@
 import vllm.entrypoints.cli.benchmark.main
 import vllm.entrypoints.cli.collect_env
 import vllm.entrypoints.cli.openai
+import vllm.entrypoints.cli.run_batch
 import vllm.entrypoints.cli.serve
 import vllm.version
 from vllm.entrypoints.utils import VLLM_SERVE_PARSER_EPILOG, cli_env_setup
@@ -17,6 +18,7 @@
     vllm.entrypoints.cli.serve,
     vllm.entrypoints.cli.benchmark.main,
     vllm.entrypoints.cli.collect_env,
+    vllm.entrypoints.cli.run_batch,
 ]
 
 
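These two added lines are all the registration the new subcommand needs: the diff implies that every module in `CMD_MODULES` exposes a `cmd_init()` returning subcommand objects whose `subparser_init()` and `cmd()` the top-level CLI wires together. A simplified, self-contained sketch of that pattern follows; it is not vLLM's actual `main()`, and everything except the `cmd_init`/`subparser_init`/`cmd` names is invented for the demo.

```python
import argparse


class DemoRunBatch:
    """Stand-in for a CLISubcommand such as RunBatchSubcommand."""

    name = "run-batch"

    def subparser_init(self, subparsers):
        # Each subcommand attaches its own parser, as in the new file below.
        parser = subparsers.add_parser(self.name, help="demo run-batch")
        parser.add_argument("-i", "--input-file")
        return parser

    @staticmethod
    def cmd(args):
        print("would run batch on", args.input_file)


def cmd_init():
    return [DemoRunBatch()]


def demo_main(argv):
    parser = argparse.ArgumentParser(prog="vllm")
    subparsers = parser.add_subparsers(dest="subcommand", required=True)
    for cmd in cmd_init():  # vLLM iterates CMD_MODULES to collect these
        cmd.subparser_init(subparsers).set_defaults(dispatch_function=cmd.cmd)
    args = parser.parse_args(argv)
    args.dispatch_function(args)  # dispatch to the selected subcommand


demo_main(["run-batch", "-i", "in.jsonl"])
```
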
vllm/entrypoints/cli/run_batch.py

Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
1+
# SPDX-License-Identifier: Apache-2.0
2+
3+
import argparse
4+
import asyncio
5+
6+
from prometheus_client import start_http_server
7+
8+
from vllm.entrypoints.cli.types import CLISubcommand
9+
from vllm.entrypoints.logger import logger
10+
from vllm.entrypoints.openai.run_batch import main as run_batch_main
11+
from vllm.entrypoints.openai.run_batch import make_arg_parser
12+
from vllm.utils import FlexibleArgumentParser
13+
from vllm.version import __version__ as VLLM_VERSION
14+
15+
16+
class RunBatchSubcommand(CLISubcommand):
17+
"""The `run-batch` subcommand for vLLM CLI."""
18+
19+
def __init__(self):
20+
self.name = "run-batch"
21+
super().__init__()
22+
23+
@staticmethod
24+
def cmd(args: argparse.Namespace) -> None:
25+
logger.info("vLLM batch processing API version %s", VLLM_VERSION)
26+
logger.info("args: %s", args)
27+
28+
# Start the Prometheus metrics server.
29+
# LLMEngine uses the Prometheus client
30+
# to publish metrics at the /metrics endpoint.
31+
if args.enable_metrics:
32+
logger.info("Prometheus metrics enabled")
33+
start_http_server(port=args.port, addr=args.url)
34+
else:
35+
logger.info("Prometheus metrics disabled")
36+
37+
asyncio.run(run_batch_main(args))
38+
39+
def subparser_init(
40+
self,
41+
subparsers: argparse._SubParsersAction) -> FlexibleArgumentParser:
42+
run_batch_parser = subparsers.add_parser(
43+
"run-batch",
44+
help="Run batch prompts and write results to file.",
45+
description=(
46+
"Run batch prompts using vLLM's OpenAI-compatible API.\n"
47+
"Supports local or HTTP input/output files."),
48+
usage=
49+
"vllm run-batch -i INPUT.jsonl -o OUTPUT.jsonl --model <model>",
50+
)
51+
return make_arg_parser(run_batch_parser)
52+
53+
54+
def cmd_init() -> list[CLISubcommand]:
55+
return [RunBatchSubcommand()]
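
As a usage sketch for the metrics branch in `cmd()`: the invocation below assumes `--enable-metrics`, `--url`, and `--port` flags corresponding to the `args.enable_metrics`/`args.url`/`args.port` attributes used above. Those flags live in the existing run_batch argument parser rather than in this diff, so treat the flag names, values, and file names as assumptions.

```python
import subprocess

# Hypothetical invocation of the new subcommand with Prometheus metrics on.
# Flag names are inferred from args.enable_metrics / args.url / args.port.
subprocess.run(
    [
        "vllm", "run-batch",
        "-i", "my_batch.jsonl",
        "-o", "results.jsonl",
        "--model", "meta-llama/Meta-Llama-3-8B-Instruct",
        "--enable-metrics",
        "--url", "0.0.0.0",
        "--port", "8000",
    ],
    check=True,
)
```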

vllm/entrypoints/openai/run_batch.py

Lines changed: 8 additions & 4 deletions
@@ -33,9 +33,7 @@
 from vllm.version import __version__ as VLLM_VERSION
 
 
-def parse_args():
-    parser = FlexibleArgumentParser(
-        description="vLLM OpenAI-Compatible batch runner.")
+def make_arg_parser(parser: FlexibleArgumentParser):
     parser.add_argument(
         "-i",
         "--input-file",
@@ -98,7 +96,13 @@ def parse_args():
         default=False,
         help="If set to True, enable prompt_tokens_details in usage.")
 
-    return parser.parse_args()
+    return parser
+
+
+def parse_args():
+    parser = FlexibleArgumentParser(
+        description="vLLM OpenAI-Compatible batch runner.")
+    return make_arg_parser(parser).parse_args()
 
 
 # explicitly use pure text format, with a newline at the end
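
Splitting `make_arg_parser()` out of `parse_args()` is what lets the standalone module and the new `vllm run-batch` subcommand share one set of flags. A generic illustration of that argparse pattern follows; the names are invented for the demo and are not vLLM APIs.

```python
import argparse


def make_demo_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    # Shared flag definitions, analogous to make_arg_parser() above.
    parser.add_argument("-i", "--input-file")
    parser.add_argument("-o", "--output-file")
    parser.add_argument("--model")
    return parser


# Standalone use: configure a fresh top-level parser (like parse_args()).
standalone = make_demo_parser(argparse.ArgumentParser()).parse_args(
    ["-i", "in.jsonl", "-o", "out.jsonl", "--model", "demo-model"])

# CLI use: attach the same flags to a `run-batch` subparser
# (like RunBatchSubcommand.subparser_init() in the new CLI module).
cli = argparse.ArgumentParser(prog="vllm")
subparsers = cli.add_subparsers(dest="subcommand", required=True)
make_demo_parser(subparsers.add_parser("run-batch"))
from_cli = cli.parse_args(
    ["run-batch", "-i", "in.jsonl", "-o", "out.jsonl", "--model", "demo-model"])

assert standalone.input_file == from_cli.input_file == "in.jsonl"
```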
