Commit 2bed837 (1 parent: 5703959)

rptest: Fix timequery test

The test uses a constant timestamp to generate records and query them, but the timestamp is too old, so the retention policy starts removing data while the test is running. This commit makes the test use a proper base timestamp.

Signed-off-by: Evgeny Lazin <[email protected]>
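For context, a minimal sketch of why the old constant was a problem (the snippet is illustrative and not part of the test itself): 1664453149000 is an epoch-millisecond timestamp from late September 2022, so records stamped with it start out far older than any time-based retention window, while a base timestamp taken from the current wall clock keeps them fresh for the duration of the test.

import time

# Old approach: a fixed epoch-millisecond constant (roughly 2022-09-29 UTC).
# Records stamped with this are already "old" from the broker's point of view,
# so retention can start deleting them while the test is still running.
old_base_ts = 1664453149000

# New approach: derive the base timestamp from the current wall clock so the
# produced records stay well inside the retention window during the test.
new_base_ts = int(time.time()) * 1000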

1 file changed (+7 -7 lines changed)


tests/rptest/tests/timequery_test.py

Lines changed: 7 additions & 7 deletions
@@ -108,7 +108,7 @@ def _create_and_produce(
     def _test_timequery(self, cluster, cloud_storage: bool, batch_cache: bool):
         total_segments = 12
         record_size = 1024
-        base_ts = 1664453149000
+        base_ts = int(time.time()) * 1000
         msg_count = (self.log_segment_size * total_segments) // record_size
         local_retention = self.log_segment_size * 4
         kcat = KafkaCat(cluster)
@@ -270,7 +270,7 @@ def _test_timequery_below_start_offset(self, cluster):
         total_segments = 3
         local_retain_segments = 1
         record_size = 1024
-        base_ts = 1664453149000
+        base_ts = int(time.time()) * 1000
         msg_count = (self.log_segment_size * total_segments) // record_size

         topic, timestamps = self._create_and_produce(
@@ -416,7 +416,7 @@ def test_timequery_with_local_gc(self):
         self.set_up_cluster(cloud_storage=True, batch_cache=False, spillover=False)
         local_retention = self.log_segment_size * 4
         record_size = 1024
-        base_ts = 1664453149000
+        base_ts = int(time.time()) * 1000
         msg_count = (self.log_segment_size * total_segments) // record_size

         topic, timestamps = self._create_and_produce(
@@ -494,7 +494,7 @@ def test_timequery_with_trim_prefix(self, cloud_storage: bool, spillover: bool):
         )
         total_segments = 12
         record_size = 1024
-        base_ts = 1664453149000
+        base_ts = int(time.time()) * 1000
         msg_count = (self.log_segment_size * total_segments) // record_size
         local_retention = self.log_segment_size * 4
         topic, timestamps = self._create_and_produce(
@@ -554,7 +554,7 @@ def test_timequery_with_spillover_gc_delayed(self):
         self.set_up_cluster(cloud_storage=True, batch_cache=False, spillover=True)
         total_segments = 16
         record_size = 1024
-        base_ts = 1664453149000
+        base_ts = int(time.time()) * 1000
         msg_count = (self.log_segment_size * total_segments) // record_size
         local_retention = self.log_segment_size * 4
         topic_retention = self.log_segment_size * 8
@@ -655,7 +655,7 @@ def test_timequery_empty_local_log(self):

         total_segments = 3
         record_size = 1024
-        base_ts = 1664453149000
+        base_ts = int(time.time()) * 1000
         msg_count = (self.log_segment_size * total_segments) // record_size
         local_retention = 1  # Any value works for this test.
         topic, timestamps = self._create_and_produce(
@@ -868,7 +868,7 @@ def query_timestamp(self, ts, kcat_src, kcat_rr):

     @ducktape_cluster(num_nodes=7)
     def test_timequery(self):
-        base_ts = 1664453149000
+        base_ts = int(time.time()) * 1000
         num_messages = 1000
         self.setup_clusters(base_ts, num_messages, 3)
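As background on what the test exercises, a time query asks the broker for the earliest offset whose record timestamp is at or after a given timestamp. A rough standalone sketch using the kafka-python client follows; the broker address and topic name are placeholders, and the test itself uses the rptest KafkaCat helper rather than this client.

import time

from kafka import KafkaConsumer, TopicPartition

base_ts = int(time.time()) * 1000  # same scheme the test now uses

# Placeholder connection details, for illustration only.
consumer = KafkaConsumer(bootstrap_servers="localhost:9092")
tp = TopicPartition("test-topic", 0)

# ListOffsets by timestamp: returns the first offset whose record timestamp
# is >= base_ts, or None for that partition if no such record exists. If data
# stamped with an old base_ts has already been removed by retention, the
# answer no longer matches what the test produced, which is the failure mode
# the fix avoids.
result = consumer.offsets_for_times({tp: base_ts})
print(result[tp])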
