 from .. import result_summarizer
+from ..rcp_checker import rcp_checker
 from ..compliance_checker.mlp_compliance import usage_choices, rule_choices
 import argparse
 
@@ -8,40 +9,62 @@ def get_compute_args():
         prog="mlperf_logging.result_summarizer.compute_score",
         description="Compute the score of a single benchmark",
     )
-    parser.add_argument("benchmark", type=str, help="TODO:", required=True)
-    parser.add_argument("system", type=str, help="System name", default=None)
+    parser.add_argument("--benchmark", type=str, help="TODO:", required=True)
+    parser.add_argument("--system", type=str, help="System name", default=None)
     parser.add_argument(
-        "has_power", action="store_true", help="Compute power score as well"
+        "--has_power", action="store_true", help="Compute power score as well"
     )
     parser.add_argument(
-        "benchmark_folder", type=str, help="Folder containing all the result files", required=True
+        "--benchmark_folder", type=str, help="Folder containing all the result files", required=True
     )
     parser.add_argument(
-        "usage",
+        "--usage",
         type=str,
         default="training",
         choices=usage_choices(),
         help="the usage such as training, hpc, inference_edge, inference_server",
         required=True,
     )
     parser.add_argument(
-        "ruleset",
+        "--ruleset",
         type=str,
         choices=rule_choices(),
         help="the ruleset such as 0.6.0, 0.7.0, or 1.0.0",
         required=True,
     )
-
     parser.add_argument(
-        "weak_scaling", action="store_true", help="Compute weak scaling score"
+        "--is_weak_scaling", action="store_true", help="Compute weak scaling score"
+    )
+    parser.add_argument(
+        "--scale", action="store_true", help="Compute the scaling factor"
     )
 
     return parser.parse_args()
 
 
+def print_benchmark_info(args):
+    print(f"MLPerf {args.usage}")
+    print(f"Folder: {args.benchmark_folder}")
+    print(f"Version: {args.ruleset}")
+    print(f"System: {args.system}")
+    print(f"Benchmark: {args.benchmark}")
+
 args = get_compute_args()
 
-if args.weak_scaling:
+if args.scale:
+    rcp_checker.check_directory(
+        args.benchmark_folder,
+        args.usage,
+        args.ruleset,
+        False,
+        False,
+        rcp_file=None,
+        rcp_pass='pruned_rcps',
+        rcp_bypass=False,
+        set_scaling=True,
+    )
+
+if args.is_weak_scaling:
     scores, power_scores = result_summarizer._compute_weak_score_standalone(
         args.benchmark,
         args.system,
@@ -50,9 +73,10 @@ def get_compute_args():
         args.usage,
         args.ruleset,
     )
+    print_benchmark_info(args)
     print(f"Scores: {scores}")
     if power_scores:
-        print(f"Power Scores: {power_scores}")
+        print(f"Power Scores - Energy (kJ): {power_scores}")
 else:
     score, power_score = result_summarizer._compute_strong_score_standalone(
         args.benchmark,
@@ -62,6 +86,7 @@ def get_compute_args():
         args.usage,
         args.ruleset,
     )
-    print(f"Score: {score}")
+    print_benchmark_info(args)
+    print(f"Score - Time to Train (minutes): {score}")
     if power_score:
-        print(f"Power Score: {power_score}")
+        print(f"Power Score - Energy (kJ): {power_score}")
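
For reference, a minimal sketch of invoking the updated CLI after this change, assuming the module path given by the parser's prog value; the benchmark, system, folder, and ruleset values below are illustrative placeholders, not taken from this patch:

# Sketch only: the argument values are placeholders chosen for illustration.
import subprocess

subprocess.run(
    [
        "python", "-m", "mlperf_logging.result_summarizer.compute_score",
        "--benchmark", "bert",                 # placeholder benchmark name
        "--system", "example_system",          # placeholder system name
        "--benchmark_folder", "results/bert",  # placeholder folder holding the result files
        "--usage", "training",
        "--ruleset", "1.0.0",                  # must be one of rule_choices()
        "--scale",                             # new flag: runs rcp_checker.check_directory with set_scaling=True
    ],
    check=True,
)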