
Commit a955b7d

TEST: rm qwen1.5_7b test

Parent: 759dca3

7 files changed: 4 additions, 10 deletions

.github/workflows/api_eva.yml (1 addition, 1 deletion; whitespace-only change)

@@ -134,4 +134,4 @@ jobs:
       if: always()
       run: |
         export workdir=$(pwd)
-        rm -rf $workdir/*
+        rm -rf $workdir/*

autotest/config.yaml (0 additions, 2 deletions)

@@ -139,7 +139,6 @@ pytorch_chat_model:
   - Qwen/Qwen2.5-32B-Instruct
   - Qwen/Qwen2.5-72B-Instruct
   - Qwen/Qwen2-7B-Instruct
-  - Qwen/Qwen1.5-7B-Chat
   - Qwen/Qwen1.5-MoE-A2.7B-Chat
   - Qwen/Qwen2.5-VL-7B-Instruct
   - Qwen/Qwen2.5-VL-32B-Instruct

@@ -379,7 +378,6 @@ benchmark_model:


 evaluate_model:
-  - Qwen/Qwen1.5-7B-Chat
   - google/gemma-2-9b-it
   - google/gemma-2-27b-it
   - internlm/internlm2_5-7b-chat

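For context on what the two deleted lines affect: the top-level keys in autotest/config.yaml hold the model lists that the test jobs iterate over. A minimal sketch of how such a list might be read, assuming a plain YAML load; the helper name and the relative path here are illustrative, not the repository's actual loader:

import yaml

def load_model_list(section, config_path='autotest/config.yaml'):
    """Return the list of model IDs stored under one top-level key."""
    with open(config_path) as f:
        config = yaml.safe_load(f)
    return config.get(section, [])

# After this commit, 'Qwen/Qwen1.5-7B-Chat' no longer appears in either list.
pytorch_models = load_model_list('pytorch_chat_model')
evaluate_models = load_model_list('evaluate_model')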
autotest/evaluate/eval_config_base.py (1 addition, 1 deletion; whitespace-only change)

@@ -25,7 +25,7 @@
     type=OpenAISDK,
     abbr=f'{MODEL_NAME}-lmdeploy-api',
     openai_api_base=API_BASE,
-    key='EMPTY',
+    key='EMPTY',
     path=MODEL_PATH,
     meta_template=api_meta_template,
     max_out_len=2048,

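For reference, the re-indented key='EMPTY' line sits inside an OpenCompass model entry built on OpenAISDK. Below is a hedged reconstruction of what the surrounding config likely looks like, based only on the fields visible in this diff; the import line, the placeholder values for MODEL_NAME, MODEL_PATH and API_BASE, the meta_template contents, and the models list wrapper are assumptions:

from opencompass.models import OpenAISDK  # import path assumed, not shown in the diff

# Placeholder values; the real eval_config_base.py defines its own
# (the chat variant below uses Qwen2-7B-Instruct, for example).
MODEL_NAME = 'some-base-model'
MODEL_PATH = '/nvme/qa_test_models/some-base-model'
API_BASE = 'http://127.0.0.1:65525/v1'

# Assumed minimal meta template; only its name appears in this diff.
api_meta_template = dict(round=[
    dict(role='HUMAN', api_role='HUMAN'),
    dict(role='BOT', api_role='BOT', generate=True),
])

models = [
    dict(
        type=OpenAISDK,
        abbr=f'{MODEL_NAME}-lmdeploy-api',
        openai_api_base=API_BASE,
        key='EMPTY',  # the lmdeploy api_server typically needs no real key
        path=MODEL_PATH,
        meta_template=api_meta_template,
        max_out_len=2048,
    )
]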
autotest/evaluate/eval_config_chat.py (1 addition, 3 deletions)

@@ -8,12 +8,10 @@

 datasets = sum([v for k, v in locals().items() if k.endswith('_datasets')], [])

-
 MODEL_NAME = 'Qwen2-7B-Instruct'
 MODEL_PATH = '/nvme/qa_test_models/Qwen/Qwen2-7B-Instruct'
 API_BASE = 'http://127.0.0.1:65525/v1'

-
 api_meta_template = dict(round=[
     dict(role='HUMAN', api_role='HUMAN'),
     dict(role='BOT', api_role='BOT', generate=True),

@@ -24,7 +22,7 @@
     type=OpenAISDK,
     abbr=f'{MODEL_NAME}-lmdeploy-api',
     openai_api_base=API_BASE,
-    key='EMPTY',
+    key='EMPTY',
     path=MODEL_PATH,
     meta_template=api_meta_template,
     max_out_len=2048,

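One line worth noting in this file is the datasets aggregation near the top, which collects every variable ending in _datasets into a single flat list. A tiny self-contained demonstration of that idiom, using two placeholder lists in place of the dataset configs the real file imports:

# Demonstration of the aggregation idiom from the config: every variable
# whose name ends in '_datasets' is concatenated into one 'datasets' list.
# The two lists below are placeholders, not real OpenCompass dataset configs.
gsm8k_datasets = [dict(abbr='gsm8k')]
mmlu_datasets = [dict(abbr='mmlu')]

datasets = sum([v for k, v in locals().items() if k.endswith('_datasets')], [])
print([d['abbr'] for d in datasets])  # ['gsm8k', 'mmlu']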
autotest/evaluate/test_api_evaluate_pytorch.py (0 additions, 1 deletion)

@@ -11,7 +11,6 @@ def prepare_environment(request, config, worker_id):
     param = request.param
     model = param['model']
     backend = param['backend']
-    print(param['model'], param['backend'], param['extra'])
     model_path = config.get('model_path') + '/' + model
     pid, startRes = start_restful_api(config, param, model, model_path, backend, worker_id)
     yield param

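The deleted print was debug output inside the prepare_environment fixture. For readers without the full file, here is a hedged sketch of the fixture as it stands after this change; the pytest decorator arguments, the import location of the helpers, and the stop_restful_api teardown call are assumptions about the surrounding code rather than verbatim repository contents:

import pytest

# Assumed location of the suite's restful helpers; only start_restful_api
# and its call signature are visible in this diff.
from utils.run_restful_chat import start_restful_api, stop_restful_api


@pytest.fixture(scope='function')
def prepare_environment(request, config, worker_id):
    param = request.param
    model = param['model']
    backend = param['backend']
    model_path = config.get('model_path') + '/' + model
    # Start an lmdeploy api_server for this model/backend, hand the param
    # dict to the test, then shut the server down afterwards.
    pid, startRes = start_restful_api(config, param, model, model_path, backend, worker_id)
    yield param
    stop_restful_api(pid, startRes, param)  # assumed cleanup counterpart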
autotest/evaluate/test_api_evaluate_turbomind.py (0 additions, 1 deletion)

@@ -11,7 +11,6 @@ def prepare_environment(request, config, worker_id):
     param = request.param
     model = param['model']
     backend = param['backend']
-    print(param['model'], param['backend'], param['extra'])
     model_path = config.get('model_path') + '/' + model
     pid, startRes = start_restful_api(config, param, model, model_path, backend, worker_id)
     yield param

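The turbomind module gets the identical cleanup. As a usage note, a test in either module would typically consume the fixture together with restful_test from autotest/utils/evaluate_utils.py, which is touched below; this is a hedged sketch in which the import path, the test name, and the fixtures it requests are assumptions:

from utils.evaluate_utils import restful_test  # module path per the file changed below


def test_restful_evaluate(config, run_id, prepare_environment, worker_id):
    # restful_test returns a (passed, message) pair; see its error path
    # in autotest/utils/evaluate_utils.py below.
    passed, msg = restful_test(config, run_id, prepare_environment, worker_id=worker_id)
    assert passed, msg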
autotest/utils/evaluate_utils.py (1 addition, 1 deletion; whitespace-only change)

@@ -160,4 +160,4 @@ def restful_test(config, run_id, prepare_environment, worker_id='gw0', port=DEFA
                              f'after 7200 seconds')
         return False, timeout_msg
     except Exception as e:
-        return False, f'Error during evaluation for {model_name}: {str(e)}'
+        return False, f'Error during evaluation for {model_name}: {str(e)}'

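For context on the re-indented return, the tail of restful_test reports a (False, message) pair both when the evaluation exceeds 7200 seconds and when any other exception is raised. A minimal sketch of that guard pattern, assuming the evaluation is launched as a subprocess; only the two return statements and the 7200-second figure come from the diff, and the function name, command handling, and timeout message prefix are assumptions:

import subprocess


def run_evaluation_guarded(cmd, model_name, timeout=7200):
    """Run an evaluation command and report (passed, message)."""
    try:
        subprocess.run(cmd, check=True, timeout=timeout)
        return True, ''
    except subprocess.TimeoutExpired:
        timeout_msg = (f'Evaluation of {model_name} did not finish '
                       f'after 7200 seconds')
        return False, timeout_msg
    except Exception as e:
        return False, f'Error during evaluation for {model_name}: {str(e)}'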