small fixes

parent 07e182aa16
commit 7a56eff1ac

5 changed files with 12 additions and 8 deletions
@@ -4150,7 +4150,7 @@ class Daemon(metaclass=JSONRPCServerType):
         wallet = self.wallet_manager.get_wallet_or_default(wallet_id)

         if claim_id:
-            txo = await self.ledger.get_claim_by_claim_id(wallet.accounts, claim_id)
+            txo = await self.ledger.get_claim_by_claim_id(claim_id, wallet.accounts)
             if not isinstance(txo, Output) or not txo.is_claim:
                 # TODO: use error from lbry.error
                 raise Exception(f"Could not find collection with claim_id '{claim_id}'.")
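The daemon fix swaps the first two arguments: the ledger expects the claim id before the account list. A minimal sketch of the failure mode, with a hypothetical stand-in for the ledger method, showing how keyword arguments sidestep this class of positional mix-up:

import asyncio

async def get_claim_by_claim_id(claim_id, accounts=None):
    # Hypothetical stand-in for Ledger.get_claim_by_claim_id; the real
    # method resolves a claim txo, but the ordering pitfall is the same.
    assert isinstance(claim_id, str), f"expected a claim_id string, got {claim_id!r}"
    return {"claim_id": claim_id, "accounts": accounts}

# Passing (accounts, claim_id) positionally sends each value to the wrong
# parameter; calling by keyword makes argument order irrelevant.
print(asyncio.run(get_claim_by_claim_id(claim_id="abc123", accounts=["acct"])))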
@@ -556,7 +556,7 @@ class Ledger(metaclass=LedgerRegistry):
         log.info("Sync finished for address %s: %d/%d", address, len(pending_synced_history), len(to_request))

         assert len(pending_synced_history) == len(remote_history), \
-            f"{len(pending_synced_history)} vs {len(remote_history)}"
+            f"{len(pending_synced_history)} vs {len(remote_history)} for {address}"
         synced_history = ""
         for remote_i, i in zip(range(len(remote_history)), sorted(pending_synced_history.keys())):
             assert i == remote_i, f"{i} vs {remote_i}"
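The only change in this hunk is diagnostic: the assertion message now names the address whose history count diverged, which matters because many addresses sync concurrently and a bare "2 vs 3" cannot be tied back to one of them. A self-contained illustration with made-up values:

address = "bExampleAddress000000000000000000"  # hypothetical address
pending_synced_history = {0: "aa:1:", 1: "bb:2:"}
remote_history = [("aa", 1), ("bb", 2), ("cc", 3)]

try:
    assert len(pending_synced_history) == len(remote_history), \
        f"{len(pending_synced_history)} vs {len(remote_history)} for {address}"
except AssertionError as e:
    print(e)  # -> 2 vs 3 for bExampleAddress000000000000000000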
@@ -238,7 +238,7 @@ class Network:
                 log.exception("error looking up dns for spv server %s:%i", server, port)

         # accumulate the dns results
-        if self.config['explicit_servers']:
+        if self.config.get('explicit_servers', []):
             hubs = self.config['explicit_servers']
         elif self.known_hubs:
             hubs = self.known_hubs
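The guard changes from indexing to dict.get with a list default, so a config without the 'explicit_servers' key falls through to the known-hubs branch instead of raising KeyError. Note the body can still index safely, because the truthy check already proved the key exists. The pattern in isolation:

config = {}                              # hypothetical: key never set
known_hubs = ["hub1.example.com:50001"]  # hypothetical fallback

# config['explicit_servers'] would raise KeyError here.
if config.get('explicit_servers', []):
    hubs = config['explicit_servers']  # safe: the check proved it exists
elif known_hubs:
    hubs = known_hubs
print(hubs)  # ['hub1.example.com:50001']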
@@ -254,7 +254,7 @@ class Network:
         sent_ping_timestamps = {}
         _, ip_to_hostnames = await self.resolve_spv_dns()
         n = len(ip_to_hostnames)
-        log.info("%i possible spv servers to try (%i urls in config)", n, len(self.config['explicit_servers']))
+        log.info("%i possible spv servers to try (%i urls in config)", n, len(self.config.get('explicit_servers', [])))
         pongs = {}
         known_hubs = self.known_hubs
         try:
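The same pattern applied to the log call: len() over a .get(..., []) default reports 0 configured urls instead of killing server discovery with a KeyError. Standalone:

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("network")

config = {}  # hypothetical: no explicit spv servers configured
n = 12       # hypothetical count of dns-resolved candidates
log.info("%i possible spv servers to try (%i urls in config)",
         n, len(config.get('explicit_servers', [])))
# INFO:network:12 possible spv servers to try (0 urls in config)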
@@ -299,8 +299,8 @@ class Network:
             if (pong is not None and self.jurisdiction is not None) and \
                     (pong.country_name != self.jurisdiction):
                 continue
-            client = ClientSession(network=self, server=(host, port), timeout=self.config['hub_timeout'],
-                                   concurrency=self.config['concurrent_hub_requests'])
+            client = ClientSession(network=self, server=(host, port), timeout=self.config.get('hub_timeout', 30),
+                                   concurrency=self.config.get('concurrent_hub_requests', 30))
             try:
                 await client.create_connection()
                 log.warning("Connected to spv server %s:%i", host, port)
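Here .get also supplies fallback values (30 for both keys, per this commit), so hub connection setup still works with default tuning when the timeout and concurrency keys are absent. The pattern on its own:

config = {'hub_timeout': 10}  # hypothetical: concurrency key left unset

timeout = config.get('hub_timeout', 30)                  # 10, from config
concurrency = config.get('concurrent_hub_requests', 30)  # 30, the default
print(timeout, concurrency)  # 10 30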
@@ -660,7 +660,7 @@ class LevelDB:
         def get_counts():
             return tuple(
                 Prefixes.tx_count.unpack_value(packed_tx_count).tx_count
-                for packed_tx_count in self.db.iterator(prefix=Prefixes.tx_count.value, include_key=False)
+                for packed_tx_count in self.db.iterator(prefix=Prefixes.tx_count.prefix, include_key=False)
             )

         tx_counts = await asyncio.get_event_loop().run_in_executor(self.executor, get_counts)
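The LevelDB fix swaps the attribute used as the iterator prefix from .value to .prefix. A toy model of why that matters, with hypothetical names: a prefix scan only returns the intended rows when the scan key is the actual byte that starts every key in that column, and a wrong prefix fails silently rather than raising.

from dataclasses import dataclass

@dataclass(frozen=True)
class PrefixRow:
    prefix: bytes  # the byte that starts every key in this "column"

tx_count = PrefixRow(prefix=b'T')
db = {b'T\x00\x01': b'\x2a', b'X\x00\x01': b'\x07'}  # toy in-memory store

# Emulates db.iterator(prefix=..., include_key=False): a wrong prefix
# would silently yield nothing (or foreign rows), not raise.
values = tuple(v for k, v in sorted(db.items()) if k.startswith(tx_count.prefix))
print(values)  # (b'*',)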
@@ -1083,8 +1083,12 @@ class LevelDB:
         utxos = []
         utxo_append = utxos.append
         for (tx_hash, nout) in prevouts:
+            if tx_hash not in self.transaction_num_mapping:
+                continue
             tx_num = self.transaction_num_mapping[tx_hash]
             hashX = self.db.get(Prefixes.hashX_utxo.pack_key(tx_hash[:4], tx_num, nout))
+            if not hashX:
+                continue
             utxo_value = self.db.get(Prefixes.utxo.pack_key(hashX, tx_num, nout))
             if utxo_value:
                 utxo_append((hashX, Prefixes.utxo.unpack_value(utxo_value).amount))
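The four added lines turn two hard failures into skips: an unknown tx_hash no longer raises KeyError, and a missing hashX record no longer flows None into the next key pack. The shape of the guard, with toy stand-ins for the mapping and the db index:

transaction_num_mapping = {b'\xaa' * 32: 1}           # toy tx_hash -> tx_num
hashx_index = {(b'\xaa' * 32, 1, 0): b'hashX-bytes'}  # toy hashX_utxo table

utxos = []
for tx_hash, nout in [(b'\xaa' * 32, 0), (b'\xbb' * 32, 0)]:
    if tx_hash not in transaction_num_mapping:
        continue  # transaction never indexed: nothing to look up
    tx_num = transaction_num_mapping[tx_hash]
    hashX = hashx_index.get((tx_hash, tx_num, nout))
    if not hashX:
        continue  # no script-hash record for this outpoint
    utxos.append((hashX, tx_num, nout))
print(utxos)  # [(b'hashX-bytes', 1, 0)]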
@@ -178,7 +178,7 @@ class UDPServerFailDiscoveryTest(AsyncioTestCase):
 class ServerPickingTestCase(AsyncioTestCase):
     async def _make_udp_server(self, port, latency) -> StatusServer:
         s = StatusServer()
-        await s.start(0, b'\x00' * 32, '127.0.0.1', port)
+        await s.start(0, b'\x00' * 32, 'US', '127.0.0.1', port, True)
         s.set_available()
         sendto = s._protocol.transport.sendto

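The test is updated for a wider StatusServer.start signature; judging only by the values, the two new arguments look like a country code and a boolean flag, but the parameter names are not visible in this diff. One hedged way to catch this kind of drift early is to pin the arity in a quick check, sketched here with a stand-in whose parameter names are guesses:

import inspect

async def start(height, tip, country, interface, port, allow_lan):
    """Stand-in with guessed parameter names; only the arity is asserted."""

# Fails loudly at import time if the signature drifts again, instead of
# at call time deep inside a test run.
assert len(inspect.signature(start).parameters) == 6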