Commit d80c5c3

fix(migration): only drop the first bucket to speed up the migration (#7228)
Since the migration was taking too long, we'll only drop the first bucket, both to speed up the migration and to ensure it is marked as complete.
Parent: 16682f2

1 file changed: 3 additions and 73 deletions
@@ -1,7 +1,5 @@
-from snuba.clickhouse.columns import Array, Column, UInt
 from snuba.clusters.storage_sets import StorageSetKey
 from snuba.migrations import migration, operations
-from snuba.migrations.columns import MigrationModifiers as Modifiers
 from snuba.migrations.operations import OperationTarget
 
 
@@ -19,79 +17,11 @@ class Migration(migration.ClickhouseNodeMigration):
     blocking = False
     storage_set_key = StorageSetKey.EVENTS_ANALYTICS_PLATFORM
     downsampled_factors = [8, 64, 512]
-    buckets = range(5)
+    buckets = range(1)
     table_name_prefix = "eap_items_1"
 
     def forwards_ops(self) -> list[operations.SqlOperation]:
-        ops: list[operations.SqlOperation] = []
-        for i in self.buckets:
-            for ty in {"string", "float"}:
-                for table_suffix, target in TABLES:
-                    column_to_remove = hash_map_column_name(ty, i)
-                    ops.extend(
-                        [
-                            operations.DropColumn(
-                                storage_set=self.storage_set_key,
-                                table_name=f"{self.table_name_prefix}_{table_suffix}",
-                                column_name=column_to_remove,
-                                target=target,
-                            ),
-                        ]
-                    )
-                    for factor in self.downsampled_factors:
-                        downsampled_table_prefix = f"eap_items_1_downsample_{factor}"
-                        ops.extend(
-                            [
-                                operations.DropColumn(
-                                    storage_set=self.storage_set_key,
-                                    table_name=f"{downsampled_table_prefix}_{table_suffix}",
-                                    column_name=column_to_remove,
-                                    target=target,
-                                ),
-                            ]
-                        )
-        return ops
+        return []
 
     def backwards_ops(self) -> list[operations.SqlOperation]:
-        ops: list[operations.SqlOperation] = []
-        for i in self.buckets:
-            for ty in {"string", "float"}:
-                for table_suffix, target in reversed(TABLES):
-                    column_to_remove = hash_map_column_name(ty, i)
-                    ops.append(
-                        operations.AddColumn(
-                            storage_set=self.storage_set_key,
-                            table_name=f"{self.table_name_prefix}_{table_suffix}",
-                            column=Column(
-                                column_to_remove,
-                                Array(
-                                    UInt(64),
-                                    Modifiers(
-                                        materialized=f"arrayMap(k -> cityHash64(k), mapKeys(attributes_{ty}_{i}))",
-                                    ),
-                                ),
-                            ),
-                            after=f"attributes_{ty}_{i}",
-                            target=target,
-                        )
-                    )
-                    for factor in self.downsampled_factors:
-                        downsampled_table_prefix = f"eap_items_1_downsample_{factor}"
-                        ops.append(
-                            operations.AddColumn(
-                                storage_set=self.storage_set_key,
-                                table_name=f"{downsampled_table_prefix}_{table_suffix}",
-                                column=Column(
-                                    column_to_remove,
-                                    Array(
-                                        UInt(64),
-                                        Modifiers(
-                                            materialized=f"arrayMap(k -> cityHash64(k), mapKeys(attributes_{ty}_{i}))",
-                                        ),
-                                    ),
-                                ),
-                                after=f"attributes_{ty}_{i}",
-                                target=target,
-                            )
-                        )
-        return ops
+        return []
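For reference, after this commit the migration reduces to a no-op. Below is a minimal sketch of the resulting file, reconstructed from the diff above; it assumes the TABLES constant and hash_map_column_name helper (no longer referenced by these two methods, and not shown in this diff) are defined elsewhere in the file.

from snuba.clusters.storage_sets import StorageSetKey
from snuba.migrations import migration, operations
from snuba.migrations.operations import OperationTarget  # retained by the diff, presumably used elsewhere in the file


class Migration(migration.ClickhouseNodeMigration):
    blocking = False
    storage_set_key = StorageSetKey.EVENTS_ANALYTICS_PLATFORM
    downsampled_factors = [8, 64, 512]
    # Previously range(5); narrowed to the first bucket only.
    buckets = range(1)
    table_name_prefix = "eap_items_1"

    def forwards_ops(self) -> list[operations.SqlOperation]:
        # Issuing no DDL lets the migration finish immediately and be
        # marked as complete.
        return []

    def backwards_ops(self) -> list[operations.SqlOperation]:
        return []

Note that both forwards_ops and backwards_ops now return empty lists, so despite the commit title, no DropColumn operations are issued even for the first bucket; the narrowed buckets = range(1) is kept but no longer drives any operations in these two methods.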
