forked from LBRYCommunity/lbry-sdk
fix getting block hash during reorg
parent f52faa8d14
commit 22c75605ee
2 changed files with 6 additions and 10 deletions
@@ -201,7 +201,6 @@ class BlockProcessor:
         self.touched = set()

         # Caches of unflushed items.
-        self.block_hashes = []
         self.block_txs = []
         self.undo_infos = []
@@ -336,7 +335,7 @@ class BlockProcessor:
         for height, block_hash in zip(
                 reversed(range(min_start_height, min_start_height + self.coin.REORG_LIMIT)),
                 reversed(block_hashes_from_lbrycrd)):
-            if self.block_hashes[height][::-1].hex() == block_hash:
+            if self.db.get_block_hash(height)[::-1].hex() == block_hash:
                 break
             count += 1
         self.logger.warning(f"blockchain reorg detected at {self.height}, unwinding last {count} blocks")
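The only functional change in this hunk is reading the stored hash through self.db.get_block_hash(height) instead of the in-memory list; the [::-1].hex() reversal stays because the hash is kept as raw bytes while the daemon presumably reports hashes as hex in the reversed display byte order (the entries of block_hashes_from_lbrycrd). A standalone sketch of that convention; the double-SHA256 and the placeholder header bytes are assumptions for the example, not taken from this diff:

from hashlib import sha256

header = bytes(112)  # placeholder header bytes; the exact size does not matter here

# Raw hash bytes: the form a header_hash()-style helper returns and the database stores.
raw_hash = sha256(sha256(header).digest()).digest()

# Hex string in the reversed "display" byte order, the form block hashes are
# typically reported in over RPC.
rpc_hex = raw_hash[::-1].hex()

# The reorg check applies the same reversal before comparing:
assert raw_hash[::-1].hex() == rpc_hex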
@@ -373,8 +372,7 @@ class BlockProcessor:
     def flush_data(self):
         """The data for a flush. The lock must be taken."""
         assert self.state_lock.locked()
-        return FlushData(self.height, self.tx_count, self.block_hashes,
-                         self.block_txs, self.db_op_stack, self.tip)
+        return FlushData(self.height, self.tx_count, self.block_txs, self.db_op_stack, self.tip)

     async def flush(self):
         def flush():
@@ -1137,7 +1135,6 @@ class BlockProcessor:
         txs: List[Tuple[Tx, bytes]] = block.transactions
         block_hash = self.coin.header_hash(block.header)

-        self.block_hashes.append(block_hash)
         self.db_op_stack.append(RevertablePut(*Prefixes.block_hash.pack_item(height, block_hash)))

         tx_count = self.tx_count
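With the processor-local list gone, the block hash only survives as a queued database write on the op stack. RevertablePut itself is not part of this diff; the sketch below is a hypothetical stand-in showing the general idea of an operation that can produce its own inverse for reorg rollback, not lbry-sdk's actual class:

from typing import NamedTuple


class RevertableDeleteSketch(NamedTuple):
    """Hypothetical inverse op: deleting the same key/value pair undoes a put."""
    key: bytes
    value: bytes


class RevertablePutSketch(NamedTuple):
    """Hypothetical stand-in for a keyed write that can be rolled back on reorg."""
    key: bytes
    value: bytes

    def invert(self) -> RevertableDeleteSketch:
        return RevertableDeleteSketch(self.key, self.value)


# An op stack queues puts while advancing blocks and can replay their inverses
# (deletes) when unwinding past a reorg point.
op = RevertablePutSketch(key=b"block_hash:1000", value=bytes(32))
undo = op.invert()
assert undo.key == op.key and undo.value == op.value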
@@ -1298,7 +1295,6 @@ class BlockProcessor:
         self.removed_claims_to_send_es.update(touched_and_deleted.deleted_claims)

         self.db.headers.pop()
-        self.block_hashes.pop()
         self.db.tx_counts.pop()
         self.tip = self.coin.header_hash(self.db.headers[-1])
         while len(self.db.total_transactions) > self.db.tx_counts[-1]:
@@ -65,7 +65,6 @@ TXO_STRUCT_pack = TXO_STRUCT.pack
 class FlushData:
     height = attr.ib()
     tx_count = attr.ib()
-    block_hashes = attr.ib()
     block_txs = attr.ib()
     put_and_delete_ops = attr.ib()
     tip = attr.ib()
@@ -382,8 +381,11 @@ class LevelDB:

     def get_claim_txo_amount(self, claim_hash: bytes) -> Optional[int]:
         v = self.db.get(Prefixes.claim_to_txo.pack_key(claim_hash))
+
+    def get_block_hash(self, height: int) -> Optional[bytes]:
+        v = self.db.get(Prefixes.block_hash.pack_key(height))
         if v:
-            return Prefixes.claim_to_txo.unpack_value(v).amount
+            return Prefixes.block_hash.unpack_value(v).block_hash

     def get_support_txo_amount(self, claim_hash: bytes, tx_num: int, position: int) -> Optional[int]:
         v = self.db.get(Prefixes.claim_to_support.pack_key(claim_hash, tx_num, position))
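Combined with the RevertablePut(*Prefixes.block_hash.pack_item(height, block_hash)) write queued in the block-advance hunk, the reorg check now round-trips block hashes through the database rather than a processor-local list. A minimal, self-contained sketch of that round trip; the one-byte prefix, the big-endian height key, and the dict standing in for the LevelDB handle are all illustrative assumptions, not lbry-sdk's actual key layout:

import struct
from typing import Dict, Optional

BLOCK_HASH_PREFIX = b'C'  # placeholder prefix byte, not lbry-sdk's actual value


def pack_key(height: int) -> bytes:
    # Hypothetical key layout: one prefix byte + big-endian uint32 height.
    return BLOCK_HASH_PREFIX + struct.pack('>L', height)


fake_db: Dict[bytes, bytes] = {}  # a plain dict standing in for the LevelDB handle


def put_block_hash(height: int, block_hash: bytes) -> None:
    # Roughly what advance_block queues via RevertablePut(*Prefixes.block_hash.pack_item(...)).
    fake_db[pack_key(height)] = block_hash


def get_block_hash(height: int) -> Optional[bytes]:
    # Mirrors the shape of the LevelDB.get_block_hash helper added in this commit.
    v = fake_db.get(pack_key(height))
    if v:
        return v


# Written while advancing a block, read back during reorg detection:
put_block_hash(1000, bytes(32))
assert get_block_hash(1000) == bytes(32)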
@@ -790,7 +792,6 @@ class LevelDB:
         assert flush_data.tx_count == self.fs_tx_count == self.db_tx_count
         assert flush_data.height == self.fs_height == self.db_height
         assert flush_data.tip == self.db_tip
-        assert not flush_data.block_txs
         assert not len(flush_data.put_and_delete_ops)

     def flush_dbs(self, flush_data: FlushData):
@@ -840,7 +841,6 @@
         self.write_db_state(batch)

     def flush_backup(self, flush_data):
-        assert not flush_data.block_txs
         assert flush_data.height < self.db_height
         assert not self.hist_unflushed