-
Notifications
You must be signed in to change notification settings - Fork 390
refactor(benchmark): ignore tests/benchmark path in fill and drop markers
#1821
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: forks/osaka
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,5 +1,7 @@ | ||
| """The module contains the pytest hooks for the gas benchmark values.""" | ||
|
|
||
| from pathlib import Path | ||
|
|
||
| import pytest | ||
|
|
||
| from execution_testing.test_types import Environment, EnvironmentDefaults | ||
|
|
@@ -30,31 +32,68 @@ def pytest_addoption(parser: pytest.Parser) -> None: | |
| ) | ||
|
|
||
|
|
||
| def pytest_ignore_collect( | ||
| collection_path: Path, config: pytest.Config | ||
| ) -> bool | None: | ||
| """ | ||
| Ignore tests/benchmark/ directory by default unless explicitly specified. | ||
|
|
||
| Returns True to ignore, False to collect, None for default behavior. | ||
| """ | ||
| # Only handle paths within tests/benchmark/ | ||
| try: | ||
| collection_path.relative_to(config.rootpath / "tests" / "benchmark") | ||
| except ValueError: | ||
| return None | ||
|
|
||
| # Check if benchmark tests explicitly specified in command line arguments | ||
| benchmark_path = config.rootpath / "tests" / "benchmark" | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. We could define this above the |
||
| for arg in config.args: | ||
| arg_path = Path(arg) | ||
| # Check absolute paths | ||
| if arg_path.is_absolute(): | ||
| try: | ||
| arg_path.relative_to(benchmark_path) | ||
| # Explicitly specified, set op_mode and don't ignore | ||
| config.op_mode = OpMode.BENCHMARKING # type: ignore[attr-defined] | ||
| return False | ||
| except ValueError: | ||
| continue | ||
| # Check relative paths containing 'benchmark' | ||
| elif "benchmark" in arg: | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This is a rather loose check; perhaps we could check for "benchmark/compute" or "benchmark/stateful". Of course, it'll break if we add another valid benchmark sub-folder here. |
||
| # Explicitly specified, set op_mode and don't ignore | ||
| config.op_mode = OpMode.BENCHMARKING # type: ignore[attr-defined] | ||
| return False | ||
|
Comment on lines
+44
to
+66
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This step checks whether the benchmark path is explicitly specified, but I think this is not the best approach |
||
|
|
||
| # Not explicitly specified, ignore by default | ||
| return True | ||
|
|
||
|
|
||
| @pytest.hookimpl(tryfirst=True) | ||
| def pytest_configure(config: pytest.Config) -> None: | ||
| """Configure the fill and execute mode to benchmarking.""" | ||
| config.addinivalue_line( | ||
| "markers", | ||
| "repricing: Mark test as reference test for gas repricing analysis", | ||
| ) | ||
| if config.getoption("gas_benchmark_value"): | ||
| config.op_mode = OpMode.BENCHMARKING # type: ignore[attr-defined] | ||
|
|
||
|
|
||
| def pytest_collection_modifyitems( | ||
| config: pytest.Config, items: list[pytest.Item] | ||
| ) -> None: | ||
| """Remove non-repricing tests when --fixed-opcode-count is specified.""" | ||
| fixed_opcode_count = config.getoption("fixed_opcode_count") | ||
| if not fixed_opcode_count: | ||
| # If --fixed-opcode-count is not specified, don't filter anything | ||
| """ | ||
| Filter tests based on repricing marker kwargs when -m repricing is specified. | ||
|
|
||
| When a test has @pytest.mark.repricing(param=value), only the parameterized | ||
| variant matching those kwargs should be selected. | ||
| """ | ||
| # Check if -m repricing marker filter was specified | ||
| markexpr = config.getoption("markexpr", "") | ||
| if "repricing" not in markexpr: | ||
| return | ||
|
|
||
| filtered = [] | ||
| for item in items: | ||
| if not item.get_closest_marker("benchmark"): | ||
| continue | ||
|
|
||
| repricing_marker = item.get_closest_marker("repricing") | ||
| if not repricing_marker: | ||
| continue | ||
|
|
@@ -160,7 +199,10 @@ def genesis_environment(request: pytest.FixtureRequest) -> Environment: # noqa: | |
| Return an Environment instance with appropriate gas limit based on test | ||
| type. | ||
| """ | ||
| if request.node.get_closest_marker("benchmark") is not None: | ||
| is_benchmark = ( | ||
| getattr(request.config, "op_mode", False) == OpMode.BENCHMARKING | ||
| ) | ||
| if is_benchmark: | ||
| return Environment(gas_limit=BENCHMARKING_MAX_GAS) | ||
| return Environment() | ||
|
|
||
|
|
@@ -171,6 +213,9 @@ def env(request: pytest.FixtureRequest) -> Environment: # noqa: D103 | |
| Return an Environment instance with appropriate gas limit based on test | ||
| type. | ||
| """ | ||
| if request.node.get_closest_marker("benchmark") is not None: | ||
| is_benchmark = ( | ||
| getattr(request.config, "op_mode", False) == OpMode.BENCHMARKING | ||
| ) | ||
| if is_benchmark: | ||
| return Environment(gas_limit=BENCHMARKING_MAX_GAS) | ||
| return Environment() | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -32,62 +32,6 @@ def pytest_generate_tests(metafunc: Any) -> None: | |
| metafunc.definition.add_marker(benchmark_marker) | ||
|
|
||
|
|
||
| def pytest_collection_modifyitems(config: Any, items: Any) -> None: | ||
| """Add the `benchmark` marker to all tests under `./tests/benchmark`.""" | ||
| benchmark_dir = Path(__file__).parent | ||
| benchmark_marker = pytest.mark.benchmark | ||
| gen_docs = config.getoption("--gen-docs", default=False) | ||
|
|
||
| if gen_docs: | ||
| for item in items: | ||
| if ( | ||
| benchmark_dir in Path(item.fspath).parents | ||
| and not item.get_closest_marker("benchmark") | ||
| and not item.get_closest_marker("stateful") | ||
| ): | ||
| item.add_marker(benchmark_marker) | ||
| return | ||
|
Comment on lines
-39
to
-49
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This code is pretty ugly, but this PR will have to add some " |
||
|
|
||
| marker_expr = config.getoption("-m", default="") | ||
| run_benchmarks = ( | ||
| marker_expr | ||
| and "benchmark" in marker_expr | ||
| and "not benchmark" not in marker_expr | ||
| ) | ||
| run_stateful_tests = ( | ||
| marker_expr | ||
| and "stateful" in marker_expr | ||
| and "not stateful" not in marker_expr | ||
| ) | ||
|
|
||
| items_for_removal = [] | ||
| for i, item in enumerate(items): | ||
| is_in_benchmark_dir = benchmark_dir in Path(item.fspath).parents | ||
| has_stateful_marker = item.get_closest_marker("stateful") | ||
| is_benchmark_test = ( | ||
| is_in_benchmark_dir and not has_stateful_marker | ||
| ) or item.get_closest_marker("benchmark") | ||
|
|
||
| if is_benchmark_test: | ||
| if is_in_benchmark_dir and not item.get_closest_marker( | ||
| "benchmark" | ||
| ): | ||
| item.add_marker(benchmark_marker) | ||
| if not run_benchmarks: | ||
| items_for_removal.append(i) | ||
| elif run_benchmarks: | ||
| items_for_removal.append(i) | ||
| elif ( | ||
| is_in_benchmark_dir | ||
| and has_stateful_marker | ||
| and not run_stateful_tests | ||
| ): | ||
| items_for_removal.append(i) | ||
|
|
||
| for i in reversed(items_for_removal): | ||
| items.pop(i) | ||
|
|
||
|
|
||
| @pytest.fixture | ||
| def tx_gas_limit(fork: Fork, gas_benchmark_value: int) -> int: | ||
| """Return the transaction gas limit cap.""" | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
To address the failing documentation generation workflow, if fill is running in documentation mode (i.e.,
`config.getoption("--gen-docs", default=False)` is `True`), then we can simply `return False` here to not ignore benchmark tests and have them included in the documentation.