|
12 | 12 | # See the License for the specific language governing permissions and |
13 | 13 | # limitations under the License. |
14 | 14 |
|
15 | | -import unittest |
16 | | -from collections.abc import MutableMapping |
| 15 | +from collections.abc import Iterable, MutableMapping |
17 | 16 | from itertools import chain |
18 | 17 | from typing import Any, Optional |
19 | 18 |
|
20 | 19 | import celpy |
| 20 | +import pytest |
21 | 21 | from celpy import celtypes |
22 | 22 | from google.protobuf import text_format |
23 | 23 |
|
@@ -82,65 +82,53 @@ def get_eval_error_message(test: simple_pb2.SimpleTest) -> Optional[str]: |
82 | 82 | return None |
83 | 83 |
|
84 | 84 |
|
85 | | -class TestFormat(unittest.TestCase): |
86 | | - @classmethod |
87 | | - def setUpClass(cls): |
88 | | - # The test data from the cel-spec conformance tests |
89 | | - cel_test_data = load_test_data(f"test/testdata/string_ext_{CEL_SPEC_VERSION}.textproto") |
90 | | - # Our supplemental tests of functionality not in the cel conformance file, but defined in the spec. |
91 | | - supplemental_test_data = load_test_data("test/testdata/string_ext_supplemental.textproto") |
92 | | - |
93 | | - # Combine the test data from both files into one |
94 | | - sections = cel_test_data.section |
95 | | - sections.extend(supplemental_test_data.section) |
96 | | - |
97 | | - # Find the format tests which test successful formatting |
98 | | - cls._format_tests = chain.from_iterable(x.test for x in sections if x.name == "format") |
99 | | - # Find the format error tests which test errors during formatting |
100 | | - cls._format_error_tests = chain.from_iterable(x.test for x in sections if x.name == "format_errors") |
101 | | - |
102 | | - cls._env = celpy.Environment(runner_class=InterpretedRunner) |
103 | | - |
104 | | - def test_format_successes(self): |
105 | | - """ |
106 | | - Tests success scenarios for string.format |
107 | | - """ |
108 | | - for test in self._format_tests: |
109 | | - if test.name in skipped_tests: |
110 | | - continue |
111 | | - ast = self._env.compile(test.expr) |
112 | | - prog = self._env.program(ast, functions=extra_func.make_extra_funcs()) |
113 | | - |
114 | | - bindings = build_variables(test.bindings) |
115 | | - with self.subTest(test.name): |
116 | | - try: |
117 | | - result = prog.evaluate(bindings) |
118 | | - expected = get_expected_result(test) |
119 | | - if expected is not None: |
120 | | - self.assertEqual(result, expected) |
121 | | - else: |
122 | | - self.fail(f"[{test.name}]: expected a success result to be defined") |
123 | | - except celpy.CELEvalError as e: |
124 | | - self.fail(e) |
125 | | - |
126 | | - def test_format_errors(self): |
127 | | - """ |
128 | | - Tests error scenarios for string.format |
129 | | - """ |
130 | | - for test in self._format_error_tests: |
131 | | - if test.name in skipped_error_tests: |
132 | | - continue |
133 | | - ast = self._env.compile(test.expr) |
134 | | - prog = self._env.program(ast, functions=extra_func.make_extra_funcs()) |
135 | | - |
136 | | - bindings = build_variables(test.bindings) |
137 | | - with self.subTest(test.name): |
138 | | - try: |
139 | | - prog.evaluate(bindings) |
140 | | - self.fail(f"[{test.name}]: expected an error to be raised during evaluation") |
141 | | - except celpy.CELEvalError as e: |
142 | | - msg = get_eval_error_message(test) |
143 | | - if msg is not None: |
144 | | - self.assertEqual(str(e), msg) |
145 | | - else: |
146 | | - self.fail(f"[{test.name}]: expected an eval error to be defined") |
| 85 | +# The test data from the cel-spec conformance tests |
| 86 | +cel_test_data = load_test_data(f"test/testdata/string_ext_{CEL_SPEC_VERSION}.textproto") |
| 87 | +# Supplemental tests covering functionality that the CEL conformance file omits but the spec defines.
| 88 | +supplemental_test_data = load_test_data("test/testdata/string_ext_supplemental.textproto") |
| 89 | + |
| 90 | +# Combine the sections from both files; extend() mutates the repeated field in place.
| 91 | +sections = cel_test_data.section |
| 92 | +sections.extend(supplemental_test_data.section) |
| 93 | + |
| 94 | +# Collect the "format" tests, which exercise successful formatting.
| 95 | +_format_tests: Iterable[simple_pb2.SimpleTest] = chain.from_iterable(x.test for x in sections if x.name == "format") |
| 96 | +# Collect the "format_errors" tests, which exercise failures raised during formatting.
| 97 | +_format_error_tests: Iterable[simple_pb2.SimpleTest] = chain.from_iterable( |
| 98 | + x.test for x in sections if x.name == "format_errors" |
| 99 | +) |
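| | +# Note: chain.from_iterable returns one-shot iterators; pytest.mark.parametrize
| | +# materializes its argvalues into a list at collection time, so a single pass suffices.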
| 100 | + |
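| | +# One CEL environment shared by every parametrized test; each test compiles and
| | +# runs its own program against it.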
| 101 | +env = celpy.Environment(runner_class=InterpretedRunner) |
| 102 | + |
| 103 | + |
| 104 | +@pytest.mark.parametrize("format_test", _format_tests) |
| 105 | +def test_format_successes(format_test): |
| 106 | + """Tests success scenarios for string.format""" |
| 107 | + if format_test.name in skipped_tests: |
| 108 | + pytest.skip(f"skipped test: {format_test.name}") |
| 109 | + ast = env.compile(format_test.expr) |
| 110 | + prog = env.program(ast, functions=extra_func.make_extra_funcs()) |
| 111 | + |
| 112 | + bindings = build_variables(format_test.bindings) |
| 113 | + result = prog.evaluate(bindings) |
| 114 | + expected = get_expected_result(format_test) |
| 115 | + assert expected is not None, f"[{format_test.name}]: expected a success result to be defined" |
| 116 | + assert result == expected |
| 117 | + |
| 118 | + |
| 119 | +@pytest.mark.parametrize("format_error_test", _format_error_tests) |
| 120 | +def test_format_errors(format_error_test): |
| 121 | + """Tests error scenarios for string.format""" |
| 122 | + if format_error_test.name in skipped_error_tests: |
| 123 | + pytest.skip(f"skipped test: {format_error_test.name}") |
| 124 | + ast = env.compile(format_error_test.expr) |
| 125 | + prog = env.program(ast, functions=extra_func.make_extra_funcs()) |
| 126 | + |
| 127 | + bindings = build_variables(format_error_test.bindings) |
| 128 | +    # pytest.raises fails the test automatically if no CELEvalError is raised.
| 129 | +    with pytest.raises(celpy.CELEvalError) as exc_info:
| 130 | +        prog.evaluate(bindings)
| 131 | +    msg = get_eval_error_message(format_error_test)
| 132 | +    assert msg is not None, f"[{format_error_test.name}]: expected an eval error to be defined"
| 133 | +    assert str(exc_info.value) == msg
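
Note: with plain parametrize over protobuf messages, pytest reports opaque ids such as format_test0, format_test1, ..., whereas the removed subTest(test.name) calls surfaced the conformance test names. A minimal sketch of one way to restore that, using parametrize's standard ids argument and assuming each SimpleTest exposes the name field already used above:

    # Sketch: derive readable pytest ids from the conformance test names.
    @pytest.mark.parametrize("format_test", _format_tests, ids=lambda t: t.name)
    def test_format_successes(format_test):
        ...

The same ids callable would apply to test_format_errors.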