
 import asyncio
 import time
+from asyncio import sleep

-from aiorpcx import TaskGroup, run_in_thread, CancelledError
+from aiorpcx import TaskGroup, CancelledError

 import electrumx
 from electrumx.server.daemon import DaemonError
@@ -54,7 +55,7 @@ async def main_loop(self, bp_height):
                 # Sleep a while if there is nothing to prefetch
                 await self.refill_event.wait()
                 if not await self._prefetch_blocks():
-                    await asyncio.sleep(self.polling_delay)
+                    await sleep(self.polling_delay)
             except DaemonError as e:
                 self.logger.info(f'ignoring daemon error: {e}')
             except CancelledError as e:
@@ -190,16 +191,14 @@ def __init__(self, env, db, daemon, notifications):
         # Signalled after backing up during a reorg
         self.backed_up_event = asyncio.Event()

-    async def run_in_thread_with_lock(self, func, *args):
-        # Run in a thread to prevent blocking.  Shielded so that
-        # cancellations from shutdown don't lose work - when the task
-        # completes the data will be flushed and then we shut down.
-        # Take the state lock to be certain in-memory state is
-        # consistent and not being updated elsewhere.
-        async def run_in_thread_locked():
+    async def run_with_lock(self, coro):
+        # Shielded so that cancellations from shutdown don't lose work - when the task
+        # completes the data will be flushed and then we shut down.  Take the state lock
+        # to be certain in-memory state is consistent and not being updated elsewhere.
+        async def run_locked():
             async with self.state_lock:
-                return await run_in_thread(func, *args)
-        return await asyncio.shield(run_in_thread_locked())
+                return await coro
+        return await asyncio.shield(run_locked())

     async def check_and_advance_blocks(self, raw_blocks):
         '''Process the list of raw blocks passed.  Detects and handles
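A minimal runnable sketch (illustrative names only, not electrumx code) of the shield-plus-lock pattern that run_with_lock uses: cancelling the caller at shutdown does not abort the shielded work, and the lock keeps shared state consistent while it runs. Note the new signature takes a coroutine object, so call sites pass self.advance_blocks(blocks) rather than a function plus arguments.

# Illustrative sketch only; these names are not part of electrumx.
import asyncio

async def run_with_lock(lock, coro):
    # Shield the work so a cancelled caller does not lose it; hold the lock so
    # shared state is only modified by one writer at a time.
    async def run_locked():
        async with lock:
            return await coro
    return await asyncio.shield(run_locked())

async def flush_work():
    await asyncio.sleep(0.5)          # stand-in for a slow database flush
    print('flush completed')

async def main():
    state_lock = asyncio.Lock()
    task = asyncio.create_task(run_with_lock(state_lock, flush_work()))
    await asyncio.sleep(0.1)
    task.cancel()                     # shutdown arrives mid-flush ...
    await asyncio.sleep(1)            # ... but 'flush completed' still prints

asyncio.run(main())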
@@ -214,7 +213,7 @@ async def check_and_advance_blocks(self, raw_blocks):
         if hprevs == chain:
             start = time.monotonic()
-            await self.run_in_thread_with_lock(self.advance_blocks, blocks)
+            await self.run_with_lock(self.advance_blocks(blocks))
             await self._maybe_flush()
             if not self.db.first_sync:
                 s = '' if len(blocks) == 1 else 's'
@@ -256,9 +255,10 @@ async def get_raw_blocks(last_height, hex_hashes):
             except FileNotFoundError:
                 return await self.daemon.raw_blocks(hex_hashes)

-        def flush_backup():
+        async def backup_and_flush(raw_blocks):
             # self.touched can include other addresses which is
             # harmless, but remove None.
+            await self.backup_blocks(raw_blocks)
             self.touched.discard(None)
             self.db.flush_backup(self.flush_data(), self.touched)

@@ -267,8 +267,7 @@ def flush_backup():
         hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)]
         for hex_hashes in chunks(hashes, 50):
             raw_blocks = await get_raw_blocks(last, hex_hashes)
-            await self.run_in_thread_with_lock(self.backup_blocks, raw_blocks)
-            await self.run_in_thread_with_lock(flush_backup)
+            await self.run_with_lock(backup_and_flush(raw_blocks))
             last -= len(raw_blocks)
         await self.prefetcher.reset_height(self.height)
         self.backed_up_event.set()
@@ -340,10 +339,10 @@ def flush_data(self):
                          self.db_deletes, self.tip)

     async def flush(self, flush_utxos):
-        def flush():
+        async def flush():
             self.db.flush_dbs(self.flush_data(), flush_utxos,
                               self.estimate_txs_remaining)
-        await self.run_in_thread_with_lock(flush)
+        await self.run_with_lock(flush())

     async def _maybe_flush(self):
         # If caught up, flush everything as client queries are
@@ -382,8 +381,8 @@ def check_cache_size(self):
             return utxo_MB >= cache_MB * 4 // 5
         return None

-    def advance_blocks(self, blocks):
-        '''Synchronously advance the blocks.
+    async def advance_blocks(self, blocks):
+        '''Advance the blocks.

         It is already verified they correctly connect onto our tip.
         '''
@@ -400,6 +399,8 @@ def advance_blocks(self, blocks):
                 self.undo_infos.append((undo_info, height))
                 self.db.write_raw_block(block.raw, height)

+        await sleep(0)
+
         headers = [block.header for block in blocks]
         self.height = height
         self.headers.extend(headers)
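A rough standalone illustration (assumed asyncio behaviour, not part of this change) of why the new await sleep(0) checkpoints matter: block processing now runs directly on the event loop instead of a worker thread, so yielding once per batch lets other coroutines, such as the prefetcher, keep running.

# Illustrative only: an `await sleep(0)` checkpoint hands control back to the
# event loop, so other coroutines keep running during a long processing loop.
import asyncio
from asyncio import sleep

async def heartbeat():
    for _ in range(3):
        print('heartbeat')            # interleaves only because the loop below yields
        await sleep(0)

async def process_batches(batches):
    for batch in batches:
        sum(batch)                    # stand-in for advancing one batch of blocks
        await sleep(0)                # yield between batches

async def main():
    await asyncio.gather(heartbeat(), process_batches([range(100_000)] * 3))

asyncio.run(main())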
@@ -457,7 +458,7 @@ def advance_txs(self, txs, is_unspendable):

         return undo_info

-    def backup_blocks(self, raw_blocks):
+    async def backup_blocks(self, raw_blocks):
         '''Backup the raw blocks and flush.

         The blocks should be in order of decreasing height, starting at.
@@ -484,6 +485,8 @@ def backup_blocks(self, raw_blocks):
             self.height -= 1
             self.db.tx_counts.pop()

+        await sleep(0)
+
         self.logger.info('backed up to height {:,d}'.format(self.height))

     def backup_txs(self, txs, is_unspendable):