diff --git a/docs/DOCUMENTATION.md b/docs/DOCUMENTATION.md
index 45f231327..aa8a66a52 100644
--- a/docs/DOCUMENTATION.md
+++ b/docs/DOCUMENTATION.md
@@ -403,7 +403,18 @@ In each trial, the tuning trial with the fastest training time to achieve the *v
 
 #### Self-tuning ruleset
 
-Submissions to this ruleset are not allowed to have user-defined hyperparameters. This ruleset allows both submissions that use the same hyperparameters for all workloads, including the randomized ones (e.g. Adam with default parameters), as well as submissions that perform inner-loop tuning during their training run (e.g. SGD with line searches).
+This ruleset allows both submissions that use the same hyperparameters for all workloads, including the randomized ones (e.g. Adam with default parameters), and submissions that perform inner-loop tuning during their training run (e.g. SGD with line searches).
+
+Submissions in this track may only specify hyperparameter values used for model initialization, such as `dropout_rate` and `aux_dropout_rate`. Submitters may provide a dictionary containing the initial values for these hyperparameters in a JSON file, as shown below. If no values are provided, the default values will be used.
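+
+A minimal example of such a file might look as follows (the values shown are purely illustrative, not tuned recommendations):
+
+```json
+{
+  "dropout_rate": 0.1,
+  "aux_dropout_rate": 0.1
+}
+```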
 
 Submissions will run on one instance of the [benchmarking hardware](#benchmarking-hardware). As always, submissions are allowed to perform inner-loop tuning (e.g. for their learning rate) but the tuning efforts will be part of their score. A submission will run *S=5* times and its score will be the median time to reach the target evaluation metric value on the validation set. To account for the lack of external tuning, submissions have a longer time budget to reach the target performance. Compared to the [external tuning ruleset](#external-tuning-ruleset), the `max_runtime` is $1.5$ times longer. Runs that do not reach the target performance of the evaluation metric within this allotted time budget have an infinite time.
 
diff --git a/submission_runner.py b/submission_runner.py
index 468a04c7c..38fce8a6c 100644
--- a/submission_runner.py
+++ b/submission_runner.py
@@ -14,6 +14,7 @@
 --experiment_name=baseline
 """
 
+import collections
 import datetime
 import gc
 import importlib
@@ -61,6 +62,9 @@
 # Workload_path will be appended by '_pytorch' or '_jax' automatically.
 WORKLOADS = workloads.WORKLOADS
 
+# Self-tuning submissions may specify only these hyperparameters.
+SELF_TUNING_ALLOWED_HYPERPARAMETERS = {'dropout_rate', 'aux_dropout_rate'}
+
 flags.DEFINE_string(
     'submission_path',
     None,
@@ -79,7 +83,8 @@
 flags.DEFINE_string(
     'tuning_search_space',
     None,
-    'The path to the JSON file describing the external tuning search space.')
+    'The path to the JSON file describing the external tuning search space '
+    'or the self-tuning fixed hyperparameters.')
 flags.DEFINE_integer('num_tuning_trials',
                      1,
                      'The number of external hyperparameter trials to run.')
@@ -652,9 +657,24 @@ def score_submission_on_workload(workload: spec.Workload,
       logging.info('=' * 20)
     score = min(all_timings)
  else:
-    if tuning_search_space is not None:
-      raise ValueError(
-          'Cannot provide a tuning search space when using self tuning.')
+    # Self-tuning submissions may specify only allowed hyperparameters,
+    # each with exactly one fixed value. If no JSON file is provided,
+    # the submission's default values are used.
+    hyperparameters = None
+    if tuning_search_space is not None:
+      with open(tuning_search_space, 'r', encoding='UTF-8') as f:
+        fixed_hyperparameters = json.load(f)
+      if not isinstance(fixed_hyperparameters, dict):
+        raise ValueError('Self-tuning expects a dictionary of hyperparameters.')
+      for k, v in fixed_hyperparameters.items():
+        if k not in SELF_TUNING_ALLOWED_HYPERPARAMETERS:
+          raise ValueError(f"Hyperparameter '{k}' is not allowed.")
+        if isinstance(v, (dict, list, set)):
+          raise ValueError(f"Hyperparameter '{k}' must have a single value.")
+      hyperparameters = collections.namedtuple(
+          'Hyperparameters',
+          fixed_hyperparameters.keys())(**fixed_hyperparameters)
+
     if not rng_seed:
       rng_seed = struct.unpack('q', os.urandom(8))[0]
     rng = prng.PRNGKey(rng_seed)
@@ -669,7 +689,7 @@ def score_submission_on_workload(workload: spec.Workload,
         workload, workload_name, global_batch_size, global_eval_batch_size,
         data_dir, imagenet_v2_data_dir, init_optimizer_state, update_params,
         data_selection, prepare_for_eval,
-        None, rng_seed, rng, profiler, max_global_steps, log_dir,
+        hyperparameters, rng_seed, rng, profiler, max_global_steps, log_dir,
         save_checkpoints=save_checkpoints)
   return score
 
diff --git a/submissions/submission_checker.py b/submissions/submission_checker.py
index ab657c0f0..beca63457 100644
--- a/submissions/submission_checker.py
+++ b/submissions/submission_checker.py
@@ -24,6 +24,8 @@
 - get_batch_size
 - data_selection
 
+A self_tuning submission may optionally include a
+`fixed_hyperparameters.json` file in the same directory as `submission.py`.
 """
 
 import argparse