forked from LBRYCommunity/lbry-sdk

consolidate flush_backup

This commit is contained in:
parent 4258cef9bd
commit ccac4ffa24

1 changed file with 6 additions and 9 deletions
@@ -271,23 +271,17 @@ class BlockProcessor:
             except FileNotFoundError:
                 return await self.daemon.raw_blocks(hex_hashes)
 
-        def flush_backup():
-            # self.touched can include other addresses which is
-            # harmless, but remove None.
-            self.touched.discard(None)
-            self.db.flush_backup(self.flush_data(), self.touched)
-
         try:
             await self.flush(True)
             start, last, hashes = await self.reorg_hashes(count)
             # Reverse and convert to hex strings.
             hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)]
             self.logger.info("reorg %i block hashes", len(hashes))
 
             for hex_hashes in chunks(hashes, 50):
                 raw_blocks = await get_raw_blocks(last, hex_hashes)
                 self.logger.info("got %i raw blocks", len(raw_blocks))
                 await self.run_in_thread_with_lock(self.backup_blocks, raw_blocks)
-                await self.run_in_thread_with_lock(flush_backup)
                 last -= len(raw_blocks)
 
             await self.prefetcher.reset_height(self.height)
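One practical effect of this hunk (an observation, not stated in the commit message): the rewind and the flush previously ran as two separate run_in_thread_with_lock calls, so the lock was released between them; folding the flush into backup_blocks makes each chunk's rewind-plus-flush a single locked operation. A sketch of the call pattern before and after, using only names that appear in the hunks:

    # Before: two locked hops per chunk; the lock is dropped in between.
    await self.run_in_thread_with_lock(self.backup_blocks, raw_blocks)
    await self.run_in_thread_with_lock(flush_backup)

    # After: one locked hop; backup_blocks discards None from
    # self.touched and calls self.db.flush_backup itself (last hunk).
    await self.run_in_thread_with_lock(self.backup_blocks, raw_blocks)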
@@ -438,8 +432,8 @@ class BlockProcessor:
         self.block_txs.append((b''.join(tx_hash for tx, tx_hash in txs), [tx.raw for tx, _ in txs]))
 
         undo_info = []
-        tx_num = self.tx_count
         hashXs_by_tx = []
+        tx_num = self.tx_count
 
         # Use local vars for speed in the loops
         put_utxo = self.utxo_cache.__setitem__
@@ -484,7 +478,6 @@ class BlockProcessor:
                 _unflushed[_hashX].append(_tx_num)
             _count += len(_hashXs)
         self.db.hist_unflushed_count += _count
 
         self.tx_count = tx_num
         self.db.tx_counts.append(tx_num)
-
@@ -515,6 +508,10 @@ class BlockProcessor:
             self.height -= 1
             self.db.tx_counts.pop()
 
+        # self.touched can include other addresses which is
+        # harmless, but remove None.
+        self.touched.discard(None)
+        self.db.flush_backup(self.flush_data(), self.touched)
         self.logger.info(f'backed up to height {self.height:,d}')
 
     def backup_txs(self, txs):
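For orientation, a minimal sketch of how the tail of backup_blocks reads once this hunk lands. The per-block rewind body is elided, and only names visible in the hunks above are used; the loop structure is an assumption based on the indentation of the context lines:

    def backup_blocks(self, raw_blocks):
        """Rewind the chain state for raw_blocks, then flush the backup."""
        for raw_block in raw_blocks:
            # ... undo the block's transactions (see backup_txs) ...
            self.height -= 1
            self.db.tx_counts.pop()
        # self.touched can include other addresses which is
        # harmless, but remove None.
        self.touched.discard(None)
        self.db.flush_backup(self.flush_data(), self.touched)
        self.logger.info(f'backed up to height {self.height:,d}')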