forked from LBRYCommunity/lbry-sdk
refactor from review
This commit is contained in:
parent 6fcb4d93c9
commit ea1e24d8f9
2 changed files with 14 additions and 16 deletions
@@ -121,16 +121,13 @@ class BaseHeaders:
                 h.update(buf.read(verifiable_bytes))
                 if h.hexdigest().encode() == self.checkpoint[1]:
                     buf.seek(0)
-                    self.io.seek(self.bytes_size, os.SEEK_SET)
-                    self.io.write(buf.read(verifiable_bytes))
-                    self.io.flush()
-                    self._size = None
+                    self._write(len(self), buf.read(verifiable_bytes))
                     remaining = buf.read()
                     buf.seek(0)
                     buf.write(remaining)
                     buf.truncate()
                 else:
-                    log.warning("Checkpoing mismatch, connecting headers through slow method.")
+                    log.warning("Checkpoint mismatch, connecting headers through slow method.")
             if buf.tell() > 0:
                 await self.connect(len(self), buf.getvalue())

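Note on the first hunk: the checkpoint fast path hashes everything up to the checkpoint height and compares the digest against the stored checkpoint before bulk-writing the buffered headers; after this refactor the bulk write goes through the new _write() helper instead of inline seek/write/flush calls. Below is a minimal, self-contained sketch of that check, not lbry-sdk code; HEADER_SIZE, CHECKPOINT_HEIGHT, CHECKPOINT_SHA256, and bulk_connect are illustrative names.

# Sketch of the checkpoint fast path; all names here are made up for illustration.
import hashlib
import io

HEADER_SIZE = 112                      # bytes per raw header (assumed)
CHECKPOINT_HEIGHT = 3                  # hypothetical checkpoint height
# digest of headers 0..CHECKPOINT_HEIGHT-1, normally hard-coded in the client
known_headers = bytes(HEADER_SIZE) * CHECKPOINT_HEIGHT
CHECKPOINT_SHA256 = hashlib.sha256(known_headers).hexdigest().encode()

def bulk_connect(existing: bytes, buf: io.BytesIO) -> bytes:
    """Append buffered headers without per-header validation when the
    sha256 of everything up to the checkpoint matches the known digest."""
    verifiable_bytes = CHECKPOINT_HEIGHT * HEADER_SIZE - len(existing)
    buf.seek(0)
    h = hashlib.sha256()
    h.update(existing)
    h.update(buf.read(verifiable_bytes))
    if h.hexdigest().encode() == CHECKPOINT_SHA256:
        buf.seek(0)
        return existing + buf.read(verifiable_bytes)   # fast bulk write
    return existing                                    # fall back to the slow path

print(len(bulk_connect(b"", io.BytesIO(known_headers))) // HEADER_SIZE)  # 3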
@@ -144,19 +141,20 @@ class BaseHeaders:
                 except InvalidHeader as e:
                     bail = True
                     chunk = chunk[:(height-e.height)*self.header_size]
-                written = 0
-                if chunk:
-                    self.io.seek(height * self.header_size, os.SEEK_SET)
-                    written = self.io.write(chunk) // self.header_size
-                    self.io.truncate()
-                    # .seek()/.write()/.truncate() might also .flush() when needed
-                    # the goal here is mainly to ensure we're definitely flush()'ing
-                    self.io.flush()
-                    self._size = self.io.tell() // self.header_size
-                    added += written
+                added += self._write(height, chunk) if chunk else 0
                 if bail:
                     break
         return added

+    def _write(self, height, verified_chunk):
+        self.io.seek(height * self.header_size, os.SEEK_SET)
+        written = self.io.write(verified_chunk) // self.header_size
+        self.io.truncate()
+        # .seek()/.write()/.truncate() might also .flush() when needed
+        # the goal here is mainly to ensure we're definitely flush()'ing
+        self.io.flush()
+        self._size = self.io.tell() // self.header_size
+        return written
+
     def validate_chunk(self, height, chunk):
         previous_hash, previous_header, previous_previous_header = None, None, None
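The second hunk pulls the seek/write/truncate/flush sequence out of connect() into a _write() helper, so the per-chunk loop and the checkpoint fast path share one write path and the cached _size is recomputed from the file position rather than invalidated. A standalone sketch of that pattern, using io.BytesIO as a stand-in for the headers file; HeaderStore and HEADER_SIZE are illustrative, not lbry-sdk names:

import io
import os

HEADER_SIZE = 112

class HeaderStore:
    def __init__(self):
        self.io = io.BytesIO()
        self._size = 0

    def _write(self, height: int, verified_chunk: bytes) -> int:
        # Seek to the slot for `height`, overwrite from there, drop anything
        # past the new end, and recompute the cached size from the file position.
        self.io.seek(height * HEADER_SIZE, os.SEEK_SET)
        written = self.io.write(verified_chunk) // HEADER_SIZE
        self.io.truncate()
        self.io.flush()
        self._size = self.io.tell() // HEADER_SIZE
        return written

store = HeaderStore()
added = store._write(0, bytes(HEADER_SIZE) * 2)   # two headers at height 0
added += store._write(2, bytes(HEADER_SIZE))      # one more at height 2
print(added, store._size)                         # 3 3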
@@ -316,7 +316,7 @@ class BaseLedger(metaclass=LedgerRegistry):
         target = self.network.remote_height
         current = len(self.headers)
         get_chunk = partial(self.network.retriable_call, self.network.get_headers, count=4096, b64=True)
-        chunks = [asyncio.ensure_future(get_chunk(height)) for height in range(current, target, 4096)]
+        chunks = [asyncio.create_task(get_chunk(height)) for height in range(current, target, 4096)]
         total = 0
         async with self.headers.checkpointed_connector() as connector:
             for chunk in chunks:
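The ledger hunk only swaps asyncio.ensure_future for asyncio.create_task when scheduling the chunk downloads; both start the coroutine immediately, but create_task (Python 3.7+) is the preferred spelling and accepts only coroutines. A minimal illustration with a stand-in get_chunk coroutine, not the lbry-sdk network call:

import asyncio

async def get_chunk(height: int) -> int:
    await asyncio.sleep(0)          # pretend network latency
    return height

async def main():
    # create_task schedules each coroutine right away, same as ensure_future
    # did here, but is explicit about requiring a coroutine.
    chunks = [asyncio.create_task(get_chunk(h)) for h in range(0, 12, 4)]
    print([await chunk for chunk in chunks])   # [0, 4, 8]

asyncio.run(main())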