5 changes: 4 additions & 1 deletion mlos_bench/mlos_bench/environments/local/local_env.py
@@ -206,7 +206,10 @@ def run(self) -> tuple[Status, datetime, dict[str, TunableValue] | None]:
         )
 
         _LOG.debug("Read data:\n%s", data)
-        if list(data.columns) == ["metric", "value"]:
+        if len(data) == 0:
+            _LOG.warning("Empty metrics file - fail the run")
+            return (Status.FAILED, timestamp, None)
+        elif list(data.columns) == ["metric", "value"]:
             _LOG.info(
                 "Local results have (metric,value) header and %d rows: assume long format",
                 len(data),
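An aside, not part of the diff: pandas parses a header-only CSV into a DataFrame that has columns but zero rows, which is exactly the case the new `len(data) == 0` guard catches and turns into a FAILED trial instead of the crash described by the new test further down. A minimal sketch of that behavior, assuming the same `latency,throughput,score` header used by the test:

import io

import pandas

# A results file that contains only the header row and no data rows.
data = pandas.read_csv(io.StringIO("latency,throughput,score\n"), index_col=False)
assert len(data) == 0
assert list(data.columns) == ["latency", "throughput", "score"]
# With the change above, the run now returns (Status.FAILED, timestamp, None)
# for such a file instead of trying to interpret an empty frame.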
5 changes: 4 additions & 1 deletion mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py
@@ -280,7 +280,10 @@ def bulk_register(
         df_configs = self._to_df(configs)  # Impute missing values, if necessary
 
         df_scores = self._adjust_signs_df(
-            pd.DataFrame([{} if score is None else score for score in scores])
+            pd.DataFrame(
+                [{} if score is None else score for score in scores],
+                columns=list(self._opt_targets),
+            )
         )
 
         if status is not None:
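Why the new `columns=` argument matters (my reading of the change, not stated in the diff): when a registered score is `None` the list comprehension contributes an empty dict, and if every score in the batch is `None`, the resulting DataFrame has no columns at all, so the optimization-target columns the rest of `bulk_register` works on would be missing. Passing `columns=list(self._opt_targets)` keeps those columns present and simply leaves NaN for the failed trials. A small sketch with a hypothetical single target named `score`:

import pandas as pd

scores = [None, {"score": 0.9}, None]  # two failed trials around one success

df = pd.DataFrame(
    [{} if score is None else score for score in scores],
    columns=["score"],  # hypothetical stand-in for list(self._opt_targets)
)
print(df)
#    score
# 0    NaN
# 1    0.9
# 2    NaN
# Without columns=..., a batch where every score is None would produce a
# DataFrame with zero columns.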
30 changes: 26 additions & 4 deletions mlos_bench/mlos_bench/tests/environments/__init__.py
@@ -10,15 +10,19 @@
 import pytest
 
 from mlos_bench.environments.base_environment import Environment
+from mlos_bench.environments.status import Status
 from mlos_bench.tunables.tunable_groups import TunableGroups
 from mlos_bench.tunables.tunable_types import TunableValue
 
 
 def check_env_success(
     env: Environment,
     tunable_groups: TunableGroups,
-    expected_results: dict[str, TunableValue],
+    *,
+    expected_results: dict[str, TunableValue] | None,
     expected_telemetry: list[tuple[datetime, str, Any]],
+    expected_status_run: set[Status] | None = None,
+    expected_status_next: set[Status] | None = None,
     global_config: dict | None = None,
 ) -> None:
     """
@@ -34,19 +38,37 @@ def check_env_success(
         Expected results of the benchmark.
     expected_telemetry : list[tuple[datetime, str, Any]]
         Expected telemetry data of the benchmark.
+    expected_status_run : set[Status]
+        Expected status right after the trial.
+        Default is the `SUCCEEDED` value.
+    expected_status_next : set[Status]
+        Expected status values for the next trial.
+        Default is the same set as in `.is_good()`.
     global_config : dict
         Global params.
     """
+    # pylint: disable=too-many-arguments
+    if expected_status_run is None:
+        expected_status_run = {Status.SUCCEEDED}
+
+    if expected_status_next is None:
+        expected_status_next = {
+            Status.PENDING,
+            Status.READY,
+            Status.RUNNING,
+            Status.SUCCEEDED,
+        }
+
     with env as env_context:
 
         assert env_context.setup(tunable_groups, global_config)
 
         (status, _ts, data) = env_context.run()
-        assert status.is_succeeded()
-        assert data == pytest.approx(expected_results, nan_ok=True)
+        assert status in expected_status_run
+        assert data == expected_results or data == pytest.approx(expected_results, nan_ok=True)
 
         (status, _ts, telemetry) = env_context.status()
-        assert status.is_good()
+        assert status in expected_status_next
         assert telemetry == pytest.approx(expected_telemetry, nan_ok=True)
 
         env_context.teardown()
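A hypothetical usage sketch (the values are illustrative, not from the diff): everything after the `*` is now keyword-only, so existing success-path tests keep the defaults while failure-path tests override the expected statuses:

# Success path: defaults expect a SUCCEEDED run and an is_good()-like follow-up status.
check_env_success(env, tunable_groups, expected_results={"score": 0.95}, expected_telemetry=[])

# Failure path: the run is expected to FAIL and produce no data.
check_env_success(
    env,
    tunable_groups,
    expected_results=None,
    expected_telemetry=[],
    expected_status_run={Status.FAILED},
)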
@@ -5,6 +5,7 @@
"""Unit tests for LocalEnv benchmark environment."""
import pytest

from mlos_bench.environments.status import Status
from mlos_bench.tests.environments import check_env_success
from mlos_bench.tests.environments.local import create_local_env
from mlos_bench.tunables.tunable_groups import TunableGroups
@@ -101,3 +102,24 @@ def test_local_env_wide(tunable_groups: TunableGroups) -> None:
         },
         expected_telemetry=[],
     )
+
+
+def test_local_env_results_empty_file(tunable_groups: TunableGroups) -> None:
+    """When the results file is empty, do not crash but mark the trial FAILED."""
+    local_env = create_local_env(
+        tunable_groups,
+        {
+            "run": [
+                "echo 'latency,throughput,score' > output.csv",
+            ],
+            "read_results_file": "output.csv",
+        },
+    )
+
+    check_env_success(
+        local_env,
+        tunable_groups,
+        expected_status_run={Status.FAILED},
+        expected_results=None,
+        expected_telemetry=[],
+    )
@@ -34,7 +34,7 @@ def _run_local_env(tunable_groups: TunableGroups, shell_subcmd: str, expected: d
         },
     )
 
-    check_env_success(local_env, tunable_groups, expected, [])
+    check_env_success(local_env, tunable_groups, expected_results=expected, expected_telemetry=[])
 
 
 @pytest.mark.skipif(sys.platform == "win32", reason="sh-like shell only")