Skip to content

Commit 1ddb242

Browse files
committed
clear db for outcomes MVs
1 parent 5bc4663 commit 1ddb242

File tree

1 file changed

+11
-15
lines changed

1 file changed

+11
-15
lines changed

tests/conftest.py

Lines changed: 11 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -27,9 +27,7 @@ def pytest_configure() -> None:
     Set up the Sentry SDK to avoid errors hidden by configuration.
     Ensure the snuba_test database exists
     """
-    assert (
-        settings.TESTING
-    ), "settings.TESTING is False, try `SNUBA_SETTINGS=test` or `make test`"
+    assert settings.TESTING, "settings.TESTING is False, try `SNUBA_SETTINGS=test` or `make test`"

     initialize_snuba()
     setup_sentry()
@@ -52,9 +50,11 @@ def create_databases() -> None:
             storage_sets=cluster["storage_sets"],
             single_node=cluster["single_node"],
             cluster_name=cluster["cluster_name"] if "cluster_name" in cluster else None,
-            distributed_cluster_name=cluster["distributed_cluster_name"]
-            if "distributed_cluster_name" in cluster
-            else None,
+            distributed_cluster_name=(
+                cluster["distributed_cluster_name"]
+                if "distributed_cluster_name" in cluster
+                else None
+            ),
         )

         database_name = cluster["database"]
@@ -161,9 +161,7 @@ def _build_migrations_cache() -> None:
         nodes = [*cluster.get_local_nodes(), *cluster.get_distributed_nodes()]
         for node in nodes:
             if (cluster, node) not in MIGRATIONS_CACHE:
-                connection = cluster.get_node_connection(
-                    ClickhouseClientSettings.MIGRATE, node
-                )
+                connection = cluster.get_node_connection(ClickhouseClientSettings.MIGRATE, node)
                 rows = connection.execute(
                     f"SELECT name, create_table_query FROM system.tables WHERE database='{database}'"
                 )
@@ -195,14 +193,14 @@ def _clear_db() -> None:
             or storage_key == StorageKey.EAP_ITEMS_DOWNSAMPLE_8
             or storage_key == StorageKey.EAP_ITEMS_DOWNSAMPLE_64
             or storage_key == StorageKey.EAP_ITEMS_DOWNSAMPLE_512
+            or storage_key == StorageKey.OUTCOMES_HOURLY
+            or storage_key == StorageKey.OUTCOMES_DAILY
         ):
             table_name = schema.get_local_table_name()  # type: ignore

             nodes = [*cluster.get_local_nodes(), *cluster.get_distributed_nodes()]
             for node in nodes:
-                connection = cluster.get_node_connection(
-                    ClickhouseClientSettings.MIGRATE, node
-                )
+                connection = cluster.get_node_connection(ClickhouseClientSettings.MIGRATE, node)
                 connection.execute(f"TRUNCATE TABLE IF EXISTS {database}.{table_name}")
@@ -240,9 +238,7 @@ def clickhouse_db(
     # apply migrations from cache
     applied_nodes = set()
     for (cluster, node), tables in MIGRATIONS_CACHE.items():
-        connection = cluster.get_node_connection(
-            ClickhouseClientSettings.MIGRATE, node
-        )
+        connection = cluster.get_node_connection(ClickhouseClientSettings.MIGRATE, node)
         for table_name, create_table_query in tables.items():
             if (node.host_name, node.port, table_name) in applied_nodes:
                 continue

0 commit comments

Comments (0)